[PATCH 01/13][SRU][OEM-5.13] UBUNTU: SAUCE: intel ipu drivers first release

You-Sheng Yang vicamo.yang at canonical.com
Thu Jul 29 06:48:20 UTC 2021


From: Wang Yating <yating.wang at intel.com>

BugLink: https://bugs.launchpad.net/bugs/1921345

Signed-off-by: Wang Yating <yating.wang at intel.com>
(backported from
https://github.com/intel/ipu6-drivers/commit/ade34d8d514046f0d24879c95bfb5aa622b16073)
Signed-off-by: You-Sheng Yang <vicamo.yang at canonical.com>
---
 drivers/media/i2c/ov01a1s.c                   |  935 +++++++++
 drivers/media/pci/Kconfig                     |    2 +-
 drivers/media/pci/intel/Kconfig               |   33 +
 drivers/media/pci/intel/Makefile              |   13 +-
 drivers/media/pci/intel/ipu-bus.c             |  254 +++
 drivers/media/pci/intel/ipu-bus.h             |   67 +
 drivers/media/pci/intel/ipu-buttress.c        | 1518 +++++++++++++++
 drivers/media/pci/intel/ipu-buttress.h        |  126 ++
 drivers/media/pci/intel/ipu-cpd.c             |  465 +++++
 drivers/media/pci/intel/ipu-cpd.h             |  110 ++
 drivers/media/pci/intel/ipu-dma.c             |  407 ++++
 drivers/media/pci/intel/ipu-dma.h             |   19 +
 drivers/media/pci/intel/ipu-fw-com.c          |  496 +++++
 drivers/media/pci/intel/ipu-fw-com.h          |   48 +
 drivers/media/pci/intel/ipu-fw-isys.c         |  698 +++++++
 drivers/media/pci/intel/ipu-fw-isys.h         |  824 ++++++++
 drivers/media/pci/intel/ipu-fw-psys.c         |  433 +++++
 drivers/media/pci/intel/ipu-fw-psys.h         |  394 ++++
 .../media/pci/intel/ipu-isys-csi2-be-soc.c    |  261 +++
 drivers/media/pci/intel/ipu-isys-csi2-be.c    |  294 +++
 drivers/media/pci/intel/ipu-isys-csi2-be.h    |   66 +
 drivers/media/pci/intel/ipu-isys-csi2.c       |  659 +++++++
 drivers/media/pci/intel/ipu-isys-csi2.h       |  164 ++
 drivers/media/pci/intel/ipu-isys-media.h      |   77 +
 drivers/media/pci/intel/ipu-isys-queue.c      | 1063 +++++++++++
 drivers/media/pci/intel/ipu-isys-queue.h      |  142 ++
 drivers/media/pci/intel/ipu-isys-subdev.c     |  657 +++++++
 drivers/media/pci/intel/ipu-isys-subdev.h     |  153 ++
 drivers/media/pci/intel/ipu-isys-tpg.c        |  311 +++
 drivers/media/pci/intel/ipu-isys-tpg.h        |   99 +
 drivers/media/pci/intel/ipu-isys-video.c      | 1698 +++++++++++++++++
 drivers/media/pci/intel/ipu-isys-video.h      |  174 ++
 drivers/media/pci/intel/ipu-isys.c            | 1435 ++++++++++++++
 drivers/media/pci/intel/ipu-isys.h            |  231 +++
 drivers/media/pci/intel/ipu-mmu.c             |  787 ++++++++
 drivers/media/pci/intel/ipu-mmu.h             |   67 +
 drivers/media/pci/intel/ipu-pdata.h           |  251 +++
 drivers/media/pci/intel/ipu-psys-compat32.c   |  227 +++
 drivers/media/pci/intel/ipu-psys.c            | 1632 ++++++++++++++++
 drivers/media/pci/intel/ipu-psys.h            |  218 +++
 drivers/media/pci/intel/ipu-trace.c           |  880 +++++++++
 drivers/media/pci/intel/ipu-trace.h           |  312 +++
 drivers/media/pci/intel/ipu.c                 |  798 ++++++++
 drivers/media/pci/intel/ipu.h                 |  106 +
 drivers/media/pci/intel/ipu6/Makefile         |   59 +
 .../media/pci/intel/ipu6/ipu-fw-resources.c   |  103 +
 .../intel/ipu6/ipu-platform-buttress-regs.h   |  315 +++
 .../intel/ipu6/ipu-platform-isys-csi2-reg.h   |  277 +++
 .../media/pci/intel/ipu6/ipu-platform-isys.h  |   18 +
 .../media/pci/intel/ipu6/ipu-platform-psys.h  |   77 +
 .../media/pci/intel/ipu6/ipu-platform-regs.h  |  333 ++++
 .../pci/intel/ipu6/ipu-platform-resources.h   |   98 +
 drivers/media/pci/intel/ipu6/ipu-platform.h   |   33 +
 drivers/media/pci/intel/ipu6/ipu-resources.c  |  839 ++++++++
 .../media/pci/intel/ipu6/ipu6-fw-resources.c  |  626 ++++++
 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c |  526 +++++
 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h |   14 +
 drivers/media/pci/intel/ipu6/ipu6-isys-gpc.c  |  211 ++
 drivers/media/pci/intel/ipu6/ipu6-isys-phy.c  |  650 +++++++
 drivers/media/pci/intel/ipu6/ipu6-isys-phy.h  |  161 ++
 drivers/media/pci/intel/ipu6/ipu6-isys.c      |  318 +++
 .../media/pci/intel/ipu6/ipu6-l-scheduler.c   |  618 ++++++
 .../pci/intel/ipu6/ipu6-platform-resources.h  |  197 ++
 drivers/media/pci/intel/ipu6/ipu6-ppg.c       |  553 ++++++
 drivers/media/pci/intel/ipu6/ipu6-ppg.h       |   38 +
 drivers/media/pci/intel/ipu6/ipu6-psys-gpc.c  |  218 +++
 drivers/media/pci/intel/ipu6/ipu6-psys.c      | 1049 ++++++++++
 drivers/media/pci/intel/ipu6/ipu6.c           |  385 ++++
 .../pci/intel/ipu6/ipu6se-fw-resources.c      |  354 ++++
 .../intel/ipu6/ipu6se-platform-resources.h    |  127 ++
 include/media/ipu-isys.h                      |   44 +
 include/uapi/linux/ipu-isys.h                 |   14 +
 include/uapi/linux/ipu-psys.h                 |  121 ++
 73 files changed, 27978 insertions(+), 2 deletions(-)
 create mode 100644 drivers/media/i2c/ov01a1s.c
 create mode 100644 drivers/media/pci/intel/Kconfig
 create mode 100644 drivers/media/pci/intel/ipu-bus.c
 create mode 100644 drivers/media/pci/intel/ipu-bus.h
 create mode 100644 drivers/media/pci/intel/ipu-buttress.c
 create mode 100644 drivers/media/pci/intel/ipu-buttress.h
 create mode 100644 drivers/media/pci/intel/ipu-cpd.c
 create mode 100644 drivers/media/pci/intel/ipu-cpd.h
 create mode 100644 drivers/media/pci/intel/ipu-dma.c
 create mode 100644 drivers/media/pci/intel/ipu-dma.h
 create mode 100644 drivers/media/pci/intel/ipu-fw-com.c
 create mode 100644 drivers/media/pci/intel/ipu-fw-com.h
 create mode 100644 drivers/media/pci/intel/ipu-fw-isys.c
 create mode 100644 drivers/media/pci/intel/ipu-fw-isys.h
 create mode 100644 drivers/media/pci/intel/ipu-fw-psys.c
 create mode 100644 drivers/media/pci/intel/ipu-fw-psys.h
 create mode 100644 drivers/media/pci/intel/ipu-isys-csi2-be-soc.c
 create mode 100644 drivers/media/pci/intel/ipu-isys-csi2-be.c
 create mode 100644 drivers/media/pci/intel/ipu-isys-csi2-be.h
 create mode 100644 drivers/media/pci/intel/ipu-isys-csi2.c
 create mode 100644 drivers/media/pci/intel/ipu-isys-csi2.h
 create mode 100644 drivers/media/pci/intel/ipu-isys-media.h
 create mode 100644 drivers/media/pci/intel/ipu-isys-queue.c
 create mode 100644 drivers/media/pci/intel/ipu-isys-queue.h
 create mode 100644 drivers/media/pci/intel/ipu-isys-subdev.c
 create mode 100644 drivers/media/pci/intel/ipu-isys-subdev.h
 create mode 100644 drivers/media/pci/intel/ipu-isys-tpg.c
 create mode 100644 drivers/media/pci/intel/ipu-isys-tpg.h
 create mode 100644 drivers/media/pci/intel/ipu-isys-video.c
 create mode 100644 drivers/media/pci/intel/ipu-isys-video.h
 create mode 100644 drivers/media/pci/intel/ipu-isys.c
 create mode 100644 drivers/media/pci/intel/ipu-isys.h
 create mode 100644 drivers/media/pci/intel/ipu-mmu.c
 create mode 100644 drivers/media/pci/intel/ipu-mmu.h
 create mode 100644 drivers/media/pci/intel/ipu-pdata.h
 create mode 100644 drivers/media/pci/intel/ipu-psys-compat32.c
 create mode 100644 drivers/media/pci/intel/ipu-psys.c
 create mode 100644 drivers/media/pci/intel/ipu-psys.h
 create mode 100644 drivers/media/pci/intel/ipu-trace.c
 create mode 100644 drivers/media/pci/intel/ipu-trace.h
 create mode 100644 drivers/media/pci/intel/ipu.c
 create mode 100644 drivers/media/pci/intel/ipu.h
 create mode 100644 drivers/media/pci/intel/ipu6/Makefile
 create mode 100644 drivers/media/pci/intel/ipu6/ipu-fw-resources.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu-platform-buttress-regs.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu-platform-isys-csi2-reg.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu-platform-isys.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu-platform-psys.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu-platform-regs.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu-platform-resources.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu-platform.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu-resources.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-resources.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-gpc.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-phy.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-phy.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-l-scheduler.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-platform-resources.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-ppg.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-ppg.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-psys-gpc.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-psys.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6se-fw-resources.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6se-platform-resources.h
 create mode 100644 include/media/ipu-isys.h
 create mode 100644 include/uapi/linux/ipu-isys.h
 create mode 100644 include/uapi/linux/ipu-psys.h

diff --git a/drivers/media/i2c/ov01a1s.c b/drivers/media/i2c/ov01a1s.c
new file mode 100644
index 000000000000..44c991053b12
--- /dev/null
+++ b/drivers/media/i2c/ov01a1s.c
@@ -0,0 +1,935 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV01A1S_LINK_FREQ_400MHZ	400000000ULL
+#define OV01A1S_SCLK			72000000LL
+#define OV01A1S_MCLK			19200000
+#define OV01A1S_DATA_LANES		1
+#define OV01A1S_RGB_DEPTH		10
+
+#define OV01A1S_REG_CHIP_ID		0x300a
+#define OV01A1S_CHIP_ID			0x560141
+
+#define OV01A1S_REG_MODE_SELECT		0x0100
+#define OV01A1S_MODE_STANDBY		0x00
+#define OV01A1S_MODE_STREAMING		0x01
+
+/* vertical-timings from sensor */
+#define OV01A1S_REG_VTS			0x380e
+#define OV01A1S_VTS_DEF			0x0380
+#define OV01A1S_VTS_MIN			0x0380
+#define OV01A1S_VTS_MAX			0xffff
+
+/* horizontal-timings from sensor */
+#define OV01A1S_REG_HTS			0x380c
+
+/* Exposure controls from sensor */
+#define OV01A1S_REG_EXPOSURE		0x3501
+#define OV01A1S_EXPOSURE_MIN		4
+#define OV01A1S_EXPOSURE_MAX_MARGIN	8
+#define OV01A1S_EXPOSURE_STEP		1
+
+/* Analog gain controls from sensor */
+#define OV01A1S_REG_ANALOG_GAIN		0x3508
+#define OV01A1S_ANAL_GAIN_MIN		0x100
+#define OV01A1S_ANAL_GAIN_MAX		0xfff
+#define OV01A1S_ANAL_GAIN_STEP		1
+
+/* Digital gain controls from sensor */
+#define OV01A1S_REG_DGTL_GAIN		0x350a	/* 24 bits */
+#define OV01A1S_DGTL_GAIN_MIN		0
+#define OV01A1S_DGTL_GAIN_MAX		0xfff
+#define OV01A1S_DGTL_GAIN_STEP		1
+#define OV01A1S_DGTL_GAIN_DEFAULT	1024
+
+/* Test Pattern Control */
+#define OV01A1S_REG_TEST_PATTERN		0x4503
+#define OV01A1S_TEST_PATTERN_ENABLE	BIT(7)
+#define OV01A1S_TEST_PATTERN_BAR_SHIFT	0
+
+enum {
+	OV01A1S_LINK_FREQ_400MHZ_INDEX,
+};
+
+struct ov01a1s_reg {
+	u16 address;
+	u8 val;
+};
+
+struct ov01a1s_reg_list {
+	u32 num_of_regs;
+	const struct ov01a1s_reg *regs;
+};
+
+struct ov01a1s_link_freq_config {
+	const struct ov01a1s_reg_list reg_list;
+};
+
+struct ov01a1s_mode {
+	/* Frame width in pixels */
+	u32 width;
+
+	/* Frame height in pixels */
+	u32 height;
+
+	/* Horizontal timing size */
+	u32 hts;
+
+	/* Default vertical timing size */
+	u32 vts_def;
+
+	/* Min vertical timing size */
+	u32 vts_min;
+
+	/* Link frequency needed for this resolution */
+	u32 link_freq_index;
+
+	/* Sensor register settings for this resolution */
+	const struct ov01a1s_reg_list reg_list;
+};
+
+static const struct ov01a1s_reg mipi_data_rate_720mbps[] = {
+};
+
+/* RAW 10-bit 1296x800, 30 fps, MIPI 480 Mbps/lane */
+static const struct ov01a1s_reg sensor_1296x800_setting[] = {
+	{0x0103, 0x01},
+	{0x0302, 0x00},
+	{0x0303, 0x06},
+	{0x0304, 0x01},
+	{0x0305, 0x90},
+	{0x0306, 0x00},
+	{0x0308, 0x01},
+	{0x0309, 0x00},
+	{0x030c, 0x01},
+	{0x0322, 0x01},
+	{0x0323, 0x06},
+	{0x0324, 0x01},
+	{0x0325, 0x68},
+	{0x3002, 0xa1},
+	{0x301e, 0xf0},
+	{0x3022, 0x01},
+	{0x3501, 0x03},
+	{0x3502, 0x78},
+	{0x3504, 0x0c},
+	{0x3508, 0x01},
+	{0x3509, 0x00},
+	{0x3601, 0xc0},
+	{0x3603, 0x71},
+	{0x3610, 0x68},
+	{0x3611, 0x86},
+	{0x3640, 0x10},
+	{0x3641, 0x80},
+	{0x3642, 0xdc},
+	{0x3646, 0x55},
+	{0x3647, 0x57},
+	{0x364b, 0x00},
+	{0x3653, 0x10},
+	{0x3655, 0x00},
+	{0x3656, 0x00},
+	{0x365f, 0x0f},
+	{0x3661, 0x45},
+	{0x3662, 0x24},
+	{0x3663, 0x11},
+	{0x3664, 0x07},
+	{0x3709, 0x34},
+	{0x370b, 0x6f},
+	{0x3714, 0x22},
+	{0x371b, 0x27},
+	{0x371c, 0x67},
+	{0x371d, 0xa7},
+	{0x371e, 0xe7},
+	{0x3730, 0x81},
+	{0x3733, 0x10},
+	{0x3734, 0x40},
+	{0x3737, 0x04},
+	{0x3739, 0x1c},
+	{0x3767, 0x00},
+	{0x376c, 0x81},
+	{0x3772, 0x14},
+	{0x37c2, 0x04},
+	{0x37d8, 0x03},
+	{0x37d9, 0x0c},
+	{0x37e0, 0x00},
+	{0x37e1, 0x08},
+	{0x37e2, 0x10},
+	{0x37e3, 0x04},
+	{0x37e4, 0x04},
+	{0x37e5, 0x03},
+	{0x37e6, 0x04},
+	{0x3800, 0x00},
+	{0x3801, 0x00},
+	{0x3802, 0x00},
+	{0x3803, 0x00},
+	{0x3804, 0x05},
+	{0x3805, 0x0f},
+	{0x3806, 0x03},
+	{0x3807, 0x2f},
+	{0x3808, 0x05},
+	{0x3809, 0x00},
+	{0x380a, 0x03},
+	{0x380b, 0x20},
+	{0x380c, 0x02},
+	{0x380d, 0xe8},
+	{0x380e, 0x03},
+	{0x380f, 0x80},
+	{0x3810, 0x00},
+	{0x3811, 0x09},
+	{0x3812, 0x00},
+	{0x3813, 0x08},
+	{0x3814, 0x01},
+	{0x3815, 0x01},
+	{0x3816, 0x01},
+	{0x3817, 0x01},
+	{0x3820, 0xa8},
+	{0x3822, 0x03},
+	{0x3832, 0x28},
+	{0x3833, 0x10},
+	{0x3b00, 0x00},
+	{0x3c80, 0x00},
+	{0x3c88, 0x02},
+	{0x3c8c, 0x07},
+	{0x3c8d, 0x40},
+	{0x3cc7, 0x80},
+	{0x4000, 0xc3},
+	{0x4001, 0xe0},
+	{0x4003, 0x40},
+	{0x4008, 0x02},
+	{0x4009, 0x19},
+	{0x400a, 0x01},
+	{0x400b, 0x6c},
+	{0x4011, 0x00},
+	{0x4041, 0x00},
+	{0x4300, 0xff},
+	{0x4301, 0x00},
+	{0x4302, 0x0f},
+	{0x4503, 0x00},
+	{0x4601, 0x50},
+	{0x481f, 0x34},
+	{0x4825, 0x33},
+	{0x4837, 0x14},
+	{0x4881, 0x40},
+	{0x4883, 0x01},
+	{0x4890, 0x00},
+	{0x4901, 0x00},
+	{0x4902, 0x00},
+	{0x4b00, 0x2a},
+	{0x4b0d, 0x00},
+	{0x450a, 0x04},
+	{0x450b, 0x00},
+	{0x5000, 0x65},
+	{0x5004, 0x00},
+	{0x5080, 0x40},
+	{0x5200, 0x18},
+	{0x4837, 0x14},
+
+	/* key settings for 19.2 MHz MCLK, overriding entries above */
+	{0x0305, 0xf4},
+	{0x0325, 0xc2},
+	{0x3808, 0x05},
+	{0x3809, 0x10},
+	{0x380a, 0x03},
+	{0x380b, 0x20},
+	{0x380c, 0x05},
+	{0x380d, 0xd0},
+	{0x380e, 0x03},
+	{0x380f, 0x80},
+	{0x3810, 0x00},
+	{0x3811, 0x00},
+	{0x3812, 0x00},
+	{0x3813, 0x08},
+
+	{0x3820, 0x88},
+	{0x373d, 0x24},
+};
+
+static const char * const ov01a1s_test_pattern_menu[] = {
+	"Disabled",
+	"Color Bar",
+	"Top-Bottom Darker Color Bar",
+	"Right-Left Darker Color Bar",
+	"Bottom-Top Darker Color Bar",
+};
+
+static const s64 link_freq_menu_items[] = {
+	OV01A1S_LINK_FREQ_400MHZ,
+};
+
+static const struct ov01a1s_link_freq_config link_freq_configs[] = {
+	[OV01A1S_LINK_FREQ_400MHZ_INDEX] = {
+		.reg_list = {
+			.num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps),
+			.regs = mipi_data_rate_720mbps,
+		}
+	},
+};
+
+static const struct ov01a1s_mode supported_modes[] = {
+	{
+		.width = 1296,
+		.height = 800,
+		.hts = 1080,
+		.vts_def = OV01A1S_VTS_DEF,
+		.vts_min = OV01A1S_VTS_MIN,
+		.reg_list = {
+			.num_of_regs = ARRAY_SIZE(sensor_1296x800_setting),
+			.regs = sensor_1296x800_setting,
+		},
+		.link_freq_index = OV01A1S_LINK_FREQ_400MHZ_INDEX,
+	},
+};
+
+struct ov01a1s {
+	struct v4l2_subdev sd;
+	struct media_pad pad;
+	struct v4l2_ctrl_handler ctrl_handler;
+
+	/* V4L2 Controls */
+	struct v4l2_ctrl *link_freq;
+	struct v4l2_ctrl *pixel_rate;
+	struct v4l2_ctrl *vblank;
+	struct v4l2_ctrl *hblank;
+	struct v4l2_ctrl *exposure;
+
+	/* Current mode */
+	const struct ov01a1s_mode *cur_mode;
+
+	/* To serialize asynchronous callbacks */
+	struct mutex mutex;
+
+	/* Streaming on/off */
+	bool streaming;
+};
+
+static inline struct ov01a1s *to_ov01a1s(struct v4l2_subdev *subdev)
+{
+	return container_of(subdev, struct ov01a1s, sd);
+}
+
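+/*
+ * Derive the pixel rate from the MIPI link frequency: the bus is double
+ * data rate, so bits/s = link_freq * 2 * lanes; dividing by the 10-bit
+ * depth gives pixels/s, e.g. 400 MHz * 2 * 1 lane / 10 bpp = 80 Mpixel/s.
+ */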
+static u64 to_pixel_rate(u32 f_index)
+{
+	u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV01A1S_DATA_LANES;
+
+	do_div(pixel_rate, OV01A1S_RGB_DEPTH);
+
+	return pixel_rate;
+}
+
+static u64 to_pixels_per_line(u32 hts, u32 f_index)
+{
+	u64 ppl = hts * to_pixel_rate(f_index);
+
+	do_div(ppl, OV01A1S_SCLK);
+
+	return ppl;
+}
+
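+/*
+ * Sensor register read: write the 16-bit big-endian register address, then
+ * read back up to four data bytes. The bytes are right-aligned in data_buf
+ * so get_unaligned_be32() yields the value for any length from 1 to 4.
+ */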
+static int ov01a1s_read_reg(struct ov01a1s *ov01a1s, u16 reg, u16 len, u32 *val)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+	struct i2c_msg msgs[2];
+	u8 addr_buf[2];
+	u8 data_buf[4] = {0};
+	int ret = 0;
+
+	if (len > sizeof(data_buf))
+		return -EINVAL;
+
+	put_unaligned_be16(reg, addr_buf);
+	msgs[0].addr = client->addr;
+	msgs[0].flags = 0;
+	msgs[0].len = sizeof(addr_buf);
+	msgs[0].buf = addr_buf;
+	msgs[1].addr = client->addr;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].len = len;
+	msgs[1].buf = &data_buf[sizeof(data_buf) - len];
+
+	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+	if (ret != ARRAY_SIZE(msgs))
+		return ret < 0 ? ret : -EIO;
+
+	*val = get_unaligned_be32(data_buf);
+
+	return 0;
+}
+
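+/*
+ * Sensor register write: buf carries the 16-bit big-endian register
+ * address followed by the value, pre-shifted so that its "len" most
+ * significant bytes immediately follow the address.
+ */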
+static int ov01a1s_write_reg(struct ov01a1s *ov01a1s, u16 reg, u16 len, u32 val)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+	u8 buf[6];
+	int ret = 0;
+
+	if (len > 4)
+		return -EINVAL;
+
+	put_unaligned_be16(reg, buf);
+	put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+
+	ret = i2c_master_send(client, buf, len + 2);
+	if (ret != len + 2)
+		return ret < 0 ? ret : -EIO;
+
+	return 0;
+}
+
+static int ov01a1s_write_reg_list(struct ov01a1s *ov01a1s,
+				 const struct ov01a1s_reg_list *r_list)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < r_list->num_of_regs; i++) {
+		ret = ov01a1s_write_reg(ov01a1s, r_list->regs[i].address, 1,
+				       r_list->regs[i].val);
+		if (ret) {
+			dev_err_ratelimited(&client->dev,
+					    "write reg 0x%4.4x return err = %d",
+					    r_list->regs[i].address, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ov01a1s_update_digital_gain(struct ov01a1s *ov01a1s, u32 d_gain)
+{
+	/* (1024 << 6) = 1x digital gain */
+	u32 real = d_gain << 6;
+
+	return ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DGTL_GAIN, 3, real);
+}
+
+static int ov01a1s_test_pattern(struct ov01a1s *ov01a1s, u32 pattern)
+{
+	if (pattern)
+		pattern = pattern << OV01A1S_TEST_PATTERN_BAR_SHIFT |
+			  OV01A1S_TEST_PATTERN_ENABLE;
+
+	return ov01a1s_write_reg(ov01a1s, OV01A1S_REG_TEST_PATTERN, 1, pattern);
+}
+
+static int ov01a1s_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct ov01a1s *ov01a1s = container_of(ctrl->handler,
+					     struct ov01a1s, ctrl_handler);
+	struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+	s64 exposure_max;
+	int ret = 0;
+
+	/* Propagate change of current control to all related controls */
+	if (ctrl->id == V4L2_CID_VBLANK) {
+		/* Update max exposure while meeting expected vblanking */
+		exposure_max = ov01a1s->cur_mode->height + ctrl->val -
+			       OV01A1S_EXPOSURE_MAX_MARGIN;
+		__v4l2_ctrl_modify_range(ov01a1s->exposure,
+					 ov01a1s->exposure->minimum,
+					 exposure_max, ov01a1s->exposure->step,
+					 exposure_max);
+	}
+
+	/* V4L2 controls values will be applied only when power is already up */
+	if (!pm_runtime_get_if_in_use(&client->dev))
+		return 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_ANALOGUE_GAIN:
+		ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_ANALOG_GAIN, 2,
+				       ctrl->val << 4);
+		break;
+
+	case V4L2_CID_DIGITAL_GAIN:
+		ret = ov01a1s_update_digital_gain(ov01a1s, ctrl->val);
+		break;
+
+	case V4L2_CID_EXPOSURE:
+		ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_EXPOSURE, 2,
+				       ctrl->val);
+		break;
+
+	case V4L2_CID_VBLANK:
+		ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_VTS, 2,
+				       ov01a1s->cur_mode->height + ctrl->val);
+		break;
+
+	case V4L2_CID_TEST_PATTERN:
+		ret = ov01a1s_test_pattern(ov01a1s, ctrl->val);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	pm_runtime_put(&client->dev);
+
+	return ret;
+}
+
+static const struct v4l2_ctrl_ops ov01a1s_ctrl_ops = {
+	.s_ctrl = ov01a1s_set_ctrl,
+};
+
+static int ov01a1s_init_controls(struct ov01a1s *ov01a1s)
+{
+	struct v4l2_ctrl_handler *ctrl_hdlr;
+	const struct ov01a1s_mode *cur_mode;
+	s64 exposure_max, h_blank, pixel_rate;
+	u32 vblank_min, vblank_max, vblank_default;
+	int size;
+	int ret = 0;
+
+	ctrl_hdlr = &ov01a1s->ctrl_handler;
+	ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+	if (ret)
+		return ret;
+
+	ctrl_hdlr->lock = &ov01a1s->mutex;
+	cur_mode = ov01a1s->cur_mode;
+	size = ARRAY_SIZE(link_freq_menu_items);
+
+	ov01a1s->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov01a1s_ctrl_ops,
+						   V4L2_CID_LINK_FREQ,
+						   size - 1, 0,
+						   link_freq_menu_items);
+	if (ov01a1s->link_freq)
+		ov01a1s->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+	pixel_rate = to_pixel_rate(OV01A1S_LINK_FREQ_400MHZ_INDEX);
+	ov01a1s->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+					       V4L2_CID_PIXEL_RATE, 0,
+					       pixel_rate, 1, pixel_rate);
+
+	vblank_min = cur_mode->vts_min - cur_mode->height;
+	vblank_max = OV01A1S_VTS_MAX - cur_mode->height;
+	vblank_default = cur_mode->vts_def - cur_mode->height;
+	ov01a1s->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+					   V4L2_CID_VBLANK, vblank_min,
+					   vblank_max, 1, vblank_default);
+
+	h_blank = to_pixels_per_line(cur_mode->hts, cur_mode->link_freq_index);
+	h_blank -= cur_mode->width;
+	ov01a1s->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+					   V4L2_CID_HBLANK, h_blank, h_blank, 1,
+					   h_blank);
+	if (ov01a1s->hblank)
+		ov01a1s->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+	v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+			  OV01A1S_ANAL_GAIN_MIN, OV01A1S_ANAL_GAIN_MAX,
+			  OV01A1S_ANAL_GAIN_STEP, OV01A1S_ANAL_GAIN_MIN);
+	v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+			  OV01A1S_DGTL_GAIN_MIN, OV01A1S_DGTL_GAIN_MAX,
+			  OV01A1S_DGTL_GAIN_STEP, OV01A1S_DGTL_GAIN_DEFAULT);
+	exposure_max = cur_mode->vts_def - OV01A1S_EXPOSURE_MAX_MARGIN;
+	ov01a1s->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+					     V4L2_CID_EXPOSURE,
+					     OV01A1S_EXPOSURE_MIN, exposure_max,
+					     OV01A1S_EXPOSURE_STEP,
+					     exposure_max);
+	v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov01a1s_ctrl_ops,
+				     V4L2_CID_TEST_PATTERN,
+				     ARRAY_SIZE(ov01a1s_test_pattern_menu) - 1,
+				     0, 0, ov01a1s_test_pattern_menu);
+	if (ctrl_hdlr->error)
+		return ctrl_hdlr->error;
+
+	ov01a1s->sd.ctrl_handler = ctrl_hdlr;
+
+	return 0;
+}
+
+static void ov01a1s_update_pad_format(const struct ov01a1s_mode *mode,
+				     struct v4l2_mbus_framefmt *fmt)
+{
+	fmt->width = mode->width;
+	fmt->height = mode->height;
+	fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+static int ov01a1s_start_streaming(struct ov01a1s *ov01a1s)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+	const struct ov01a1s_reg_list *reg_list;
+	int link_freq_index;
+	int ret = 0;
+
+	link_freq_index = ov01a1s->cur_mode->link_freq_index;
+	reg_list = &link_freq_configs[link_freq_index].reg_list;
+	ret = ov01a1s_write_reg_list(ov01a1s, reg_list);
+	if (ret) {
+		dev_err(&client->dev, "failed to set plls");
+		return ret;
+	}
+
+	reg_list = &ov01a1s->cur_mode->reg_list;
+	ret = ov01a1s_write_reg_list(ov01a1s, reg_list);
+	if (ret) {
+		dev_err(&client->dev, "failed to set mode");
+		return ret;
+	}
+
+	ret = __v4l2_ctrl_handler_setup(ov01a1s->sd.ctrl_handler);
+	if (ret)
+		return ret;
+
+	ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_MODE_SELECT, 1,
+			       OV01A1S_MODE_STREAMING);
+	if (ret)
+		dev_err(&client->dev, "failed to start streaming");
+
+	return ret;
+}
+
+static void ov01a1s_stop_streaming(struct ov01a1s *ov01a1s)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+
+	if (ov01a1s_write_reg(ov01a1s, OV01A1S_REG_MODE_SELECT, 1,
+			     OV01A1S_MODE_STANDBY))
+		dev_err(&client->dev, "failed to stop streaming");
+}
+
+static int ov01a1s_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	int ret = 0;
+
+	if (ov01a1s->streaming == enable)
+		return 0;
+
+	mutex_lock(&ov01a1s->mutex);
+	if (enable) {
+		ret = pm_runtime_get_sync(&client->dev);
+		if (ret < 0) {
+			pm_runtime_put_noidle(&client->dev);
+			mutex_unlock(&ov01a1s->mutex);
+			return ret;
+		}
+
+		ret = ov01a1s_start_streaming(ov01a1s);
+		if (ret) {
+			enable = 0;
+			ov01a1s_stop_streaming(ov01a1s);
+			pm_runtime_put(&client->dev);
+		}
+	} else {
+		ov01a1s_stop_streaming(ov01a1s);
+		pm_runtime_put(&client->dev);
+	}
+
+	ov01a1s->streaming = enable;
+	mutex_unlock(&ov01a1s->mutex);
+
+	return ret;
+}
+
+static int __maybe_unused ov01a1s_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+	mutex_lock(&ov01a1s->mutex);
+	if (ov01a1s->streaming)
+		ov01a1s_stop_streaming(ov01a1s);
+
+	mutex_unlock(&ov01a1s->mutex);
+
+	return 0;
+}
+
+static int __maybe_unused ov01a1s_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+	int ret = 0;
+
+	mutex_lock(&ov01a1s->mutex);
+	if (!ov01a1s->streaming)
+		goto exit;
+
+	ret = ov01a1s_start_streaming(ov01a1s);
+	if (ret) {
+		ov01a1s->streaming = false;
+		ov01a1s_stop_streaming(ov01a1s);
+	}
+
+exit:
+	mutex_unlock(&ov01a1s->mutex);
+	return ret;
+}
+
+static int ov01a1s_set_format(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt)
+{
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+	const struct ov01a1s_mode *mode;
+	s32 vblank_def, h_blank;
+
+	mode = v4l2_find_nearest_size(supported_modes,
+				      ARRAY_SIZE(supported_modes), width,
+				      height, fmt->format.width,
+				      fmt->format.height);
+
+	mutex_lock(&ov01a1s->mutex);
+	ov01a1s_update_pad_format(mode, &fmt->format);
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		*v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+	} else {
+		ov01a1s->cur_mode = mode;
+		__v4l2_ctrl_s_ctrl(ov01a1s->link_freq, mode->link_freq_index);
+		__v4l2_ctrl_s_ctrl_int64(ov01a1s->pixel_rate,
+					 to_pixel_rate(mode->link_freq_index));
+
+		/* Update limits and set FPS to default */
+		vblank_def = mode->vts_def - mode->height;
+		__v4l2_ctrl_modify_range(ov01a1s->vblank,
+					 mode->vts_min - mode->height,
+					 OV01A1S_VTS_MAX - mode->height, 1,
+					 vblank_def);
+		__v4l2_ctrl_s_ctrl(ov01a1s->vblank, vblank_def);
+		h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
+			  mode->width;
+		__v4l2_ctrl_modify_range(ov01a1s->hblank, h_blank, h_blank, 1,
+					 h_blank);
+	}
+	mutex_unlock(&ov01a1s->mutex);
+
+	return 0;
+}
+
+static int ov01a1s_get_format(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt)
+{
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+	mutex_lock(&ov01a1s->mutex);
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+		fmt->format = *v4l2_subdev_get_try_format(&ov01a1s->sd, cfg,
+							  fmt->pad);
+	else
+		ov01a1s_update_pad_format(ov01a1s->cur_mode, &fmt->format);
+
+	mutex_unlock(&ov01a1s->mutex);
+
+	return 0;
+}
+
+static int ov01a1s_enum_mbus_code(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_mbus_code_enum *code)
+{
+	if (code->index > 0)
+		return -EINVAL;
+
+	code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+	return 0;
+}
+
+static int ov01a1s_enum_frame_size(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_pad_config *cfg,
+				  struct v4l2_subdev_frame_size_enum *fse)
+{
+	if (fse->index >= ARRAY_SIZE(supported_modes))
+		return -EINVAL;
+
+	if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+		return -EINVAL;
+
+	fse->min_width = supported_modes[fse->index].width;
+	fse->max_width = fse->min_width;
+	fse->min_height = supported_modes[fse->index].height;
+	fse->max_height = fse->min_height;
+
+	return 0;
+}
+
+static int ov01a1s_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+	mutex_lock(&ov01a1s->mutex);
+	ov01a1s_update_pad_format(&supported_modes[0],
+				 v4l2_subdev_get_try_format(sd, fh->pad, 0));
+	mutex_unlock(&ov01a1s->mutex);
+
+	return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov01a1s_video_ops = {
+	.s_stream = ov01a1s_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov01a1s_pad_ops = {
+	.set_fmt = ov01a1s_set_format,
+	.get_fmt = ov01a1s_get_format,
+	.enum_mbus_code = ov01a1s_enum_mbus_code,
+	.enum_frame_size = ov01a1s_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov01a1s_subdev_ops = {
+	.video = &ov01a1s_video_ops,
+	.pad = &ov01a1s_pad_ops,
+};
+
+static const struct media_entity_operations ov01a1s_subdev_entity_ops = {
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov01a1s_internal_ops = {
+	.open = ov01a1s_open,
+};
+
+static int ov01a1s_identify_module(struct ov01a1s *ov01a1s)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+	int ret;
+	u32 val;
+
+	ret = ov01a1s_read_reg(ov01a1s, OV01A1S_REG_CHIP_ID, 3, &val);
+	if (ret)
+		return ret;
+
+	if (val != OV01A1S_CHIP_ID) {
+		dev_err(&client->dev, "chip id mismatch: %x!=%x",
+			OV01A1S_CHIP_ID, val);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static int ov01a1s_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+	v4l2_async_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+	v4l2_ctrl_handler_free(sd->ctrl_handler);
+	pm_runtime_disable(&client->dev);
+	mutex_destroy(&ov01a1s->mutex);
+
+	return 0;
+}
+
+static int ov01a1s_probe(struct i2c_client *client)
+{
+	struct ov01a1s *ov01a1s;
+	int ret = 0;
+
+	ov01a1s = devm_kzalloc(&client->dev, sizeof(*ov01a1s), GFP_KERNEL);
+	if (!ov01a1s)
+		return -ENOMEM;
+
+	v4l2_i2c_subdev_init(&ov01a1s->sd, client, &ov01a1s_subdev_ops);
+	ret = ov01a1s_identify_module(ov01a1s);
+	if (ret) {
+		dev_err(&client->dev, "failed to find sensor: %d", ret);
+		return ret;
+	}
+
+	mutex_init(&ov01a1s->mutex);
+	ov01a1s->cur_mode = &supported_modes[0];
+	ret = ov01a1s_init_controls(ov01a1s);
+	if (ret) {
+		dev_err(&client->dev, "failed to init controls: %d", ret);
+		goto probe_error_v4l2_ctrl_handler_free;
+	}
+
+	ov01a1s->sd.internal_ops = &ov01a1s_internal_ops;
+	ov01a1s->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	ov01a1s->sd.entity.ops = &ov01a1s_subdev_entity_ops;
+	ov01a1s->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+	ov01a1s->pad.flags = MEDIA_PAD_FL_SOURCE;
+	ret = media_entity_pads_init(&ov01a1s->sd.entity, 1, &ov01a1s->pad);
+	if (ret) {
+		dev_err(&client->dev, "failed to init entity pads: %d", ret);
+		goto probe_error_v4l2_ctrl_handler_free;
+	}
+
+	ret = v4l2_async_register_subdev_sensor_common(&ov01a1s->sd);
+	if (ret < 0) {
+		dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+			ret);
+		goto probe_error_media_entity_cleanup;
+	}
+
+	/*
+	 * Device is already turned on by i2c-core with ACPI domain PM.
+	 * Enable runtime PM and turn off the device.
+	 */
+	pm_runtime_set_active(&client->dev);
+	pm_runtime_enable(&client->dev);
+	pm_runtime_idle(&client->dev);
+
+	return 0;
+
+probe_error_media_entity_cleanup:
+	media_entity_cleanup(&ov01a1s->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+	v4l2_ctrl_handler_free(ov01a1s->sd.ctrl_handler);
+	mutex_destroy(&ov01a1s->mutex);
+
+	return ret;
+}
+
+static const struct dev_pm_ops ov01a1s_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(ov01a1s_suspend, ov01a1s_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov01a1s_acpi_ids[] = {
+	{"OVTI01AF"}, /* OVTI01AF*/
+	{}
+};
+
+MODULE_DEVICE_TABLE(acpi, ov01a1s_acpi_ids);
+#endif
+
+static struct i2c_driver ov01a1s_i2c_driver = {
+	.driver = {
+		.name = "ov01a1s", /* ov01a1s*/
+		.pm = &ov01a1s_pm_ops,
+		.acpi_match_table = ACPI_PTR(ov01a1s_acpi_ids),
+	},
+	.probe_new = ov01a1s_probe,
+	.remove = ov01a1s_remove,
+};
+
+module_i2c_driver(ov01a1s_i2c_driver);
+
+MODULE_AUTHOR("Lai, Jim <jim.lai at intel.com>");
+MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu at intel.com>");
+MODULE_AUTHOR("Shawn Tu <shawnx.tu at intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao at intel.com>");
+MODULE_DESCRIPTION("OmniVision OV01A1S sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 2cd8e328dda9..d144663debda 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -55,7 +55,7 @@ source "drivers/media/pci/smipcie/Kconfig"
 source "drivers/media/pci/netup_unidvb/Kconfig"
 endif
 
-source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/Kconfig"
 
 config VIDEO_PCI_SKELETON
 	tristate "Skeleton PCI V4L2 driver"
diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
new file mode 100644
index 000000000000..eb19872b1df0
--- /dev/null
+++ b/drivers/media/pci/intel/Kconfig
@@ -0,0 +1,33 @@
+config VIDEO_INTEL_IPU
+	tristate "Intel IPU driver"
+	depends on ACPI
+	depends on MEDIA_SUPPORT
+	depends on MEDIA_PCI_SUPPORT
+	select IOMMU_API
+	select IOMMU_IOVA
+	select X86_DEV_DMA_OPS if X86
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_FWNODE
+	select PHYS_ADDR_T_64BIT
+	select COMMON_CLK
+	help
+	  This is the Intel Image Processing Unit (IPU), found in Intel SoCs
+	  and used for capturing images and video from camera sensors.
+
+	  To compile this driver, say Y here.
+
+choice
+	prompt "intel ipu generation type"
+	depends on VIDEO_INTEL_IPU
+	default VIDEO_INTEL_IPU6
+
+config VIDEO_INTEL_IPU6
+	bool "Compile for IPU6 driver"
+	help
+	  Sixth-generation Intel Image Processing Unit found in Intel
+	  SoCs.
+
+	  To compile this driver, say Y here.
+endchoice
+
+source "drivers/media/pci/intel/ipu3/Kconfig"
diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile
index 0b4236c4db49..a0ccb70023e5 100644
--- a/drivers/media/pci/intel/Makefile
+++ b/drivers/media/pci/intel/Makefile
@@ -3,4 +3,15 @@
 # Makefile for the IPU3 cio2 and ImGU drivers
 #
 
-obj-y	+= ipu3/
+# Enable extra compiler warnings so the build stays warning-free.
+# Note: a GCC upgrade may introduce new warnings and break the build.
+subdir-ccflags-y := -Wall -Wextra
+subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
+subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
+subdir-ccflags-$(CONFIG_VIDEO_INTEL_IPU_WERROR) += -Werror
+
+obj-$(CONFIG_VIDEO_IPU3_CIO2) += ipu3/
+#obj-$(CONFIG_VIDEO_INTEL_IPU4)	 += ipu4/
+#obj-$(CONFIG_VIDEO_INTEL_IPU4P) += ipu4/
+obj-$(CONFIG_VIDEO_INTEL_IPU6)	+= ipu6/
diff --git a/drivers/media/pci/intel/ipu-bus.c b/drivers/media/pci/intel/ipu-bus.c
new file mode 100644
index 000000000000..3bc7277ae16b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-bus.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+
+#include "ipu.h"
+#include "ipu-platform.h"
+#include "ipu-dma.h"
+
+#ifdef CONFIG_PM
+static struct bus_type ipu_bus;
+
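+/*
+ * Runtime PM for devices on the IPU bus: the generic callbacks handle the
+ * driver side while ipu_buttress_power() gates the subsystem power. If the
+ * power-down fails, the device is resumed again so it stays usable.
+ */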
+static int bus_pm_runtime_suspend(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	int rval;
+
+	rval = pm_generic_runtime_suspend(dev);
+	if (rval)
+		return rval;
+
+	rval = ipu_buttress_power(dev, adev->ctrl, false);
+	dev_dbg(dev, "%s: buttress power down %d\n", __func__, rval);
+	if (!rval)
+		return 0;
+
+	dev_err(dev, "power down failed!\n");
+
+	/* Powering down failed, attempt to resume device now */
+	rval = pm_generic_runtime_resume(dev);
+	if (!rval)
+		return -EBUSY;
+
+	return -EIO;
+}
+
+static int bus_pm_runtime_resume(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	int rval;
+
+	rval = ipu_buttress_power(dev, adev->ctrl, true);
+	dev_dbg(dev, "%s: buttress power up %d\n", __func__, rval);
+	if (rval)
+		return rval;
+
+	rval = pm_generic_runtime_resume(dev);
+	dev_dbg(dev, "%s: resume %d\n", __func__, rval);
+	if (rval)
+		goto out_err;
+
+	return 0;
+
+out_err:
+	ipu_buttress_power(dev, adev->ctrl, false);
+
+	return -EBUSY;
+}
+
+static const struct dev_pm_ops ipu_bus_pm_ops = {
+	.runtime_suspend = bus_pm_runtime_suspend,
+	.runtime_resume = bus_pm_runtime_resume,
+};
+
+#define IPU_BUS_PM_OPS	(&ipu_bus_pm_ops)
+#else
+#define IPU_BUS_PM_OPS	NULL
+#endif
+
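+/*
+ * Devices and drivers on the IPU bus are matched by name prefix: a driver
+ * binds to every device whose name starts with its "wanted" string.
+ */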
+static int ipu_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct ipu_bus_driver *adrv = to_ipu_bus_driver(drv);
+
+	dev_dbg(dev, "bus match: \"%s\" --- \"%s\"\n", dev_name(dev),
+		adrv->wanted);
+
+	return !strncmp(dev_name(dev), adrv->wanted, strlen(adrv->wanted));
+}
+
+static int ipu_bus_probe(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver);
+	int rval;
+
+	dev_dbg(dev, "bus probe dev %s\n", dev_name(dev));
+
+	adev->adrv = adrv;
+	if (!adrv->probe) {
+		rval = -ENODEV;
+		goto out_err;
+	}
+	rval = pm_runtime_get_sync(&adev->dev);
+	if (rval < 0) {
+		dev_err(&adev->dev, "Failed to get runtime PM\n");
+		goto out_err;
+	}
+
+	rval = adrv->probe(adev);
+	pm_runtime_put(&adev->dev);
+
+	if (rval)
+		goto out_err;
+
+	return 0;
+
+out_err:
+	ipu_bus_set_drvdata(adev, NULL);
+	adev->adrv = NULL;
+
+	return rval;
+}
+
+static int ipu_bus_remove(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver);
+
+	if (adrv->remove)
+		adrv->remove(adev);
+
+	return 0;
+}
+
+static struct bus_type ipu_bus = {
+	.name = IPU_BUS_NAME,
+	.match = ipu_bus_match,
+	.probe = ipu_bus_probe,
+	.remove = ipu_bus_remove,
+	.pm = IPU_BUS_PM_OPS,
+};
+
+static struct mutex ipu_bus_mutex;
+
+static void ipu_bus_release(struct device *dev)
+{
+}
+
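+/*
+ * Create a child device on the IPU bus. The child uses the IPU DMA ops and
+ * gets a DMA mask sized by the MMU address width, which differs between
+ * secure and non-secure mode.
+ */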
+struct ipu_bus_device *ipu_bus_add_device(struct pci_dev *pdev,
+					  struct device *parent, void *pdata,
+					  struct ipu_buttress_ctrl *ctrl,
+					  char *name, unsigned int nr)
+{
+	struct ipu_bus_device *adev;
+	struct ipu_device *isp = pci_get_drvdata(pdev);
+	int rval;
+
+	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+	if (!adev)
+		return ERR_PTR(-ENOMEM);
+
+	adev->dev.parent = parent;
+	adev->dev.bus = &ipu_bus;
+	adev->dev.release = ipu_bus_release;
+	adev->dev.dma_ops = &ipu_dma_ops;
+	adev->dma_mask = DMA_BIT_MASK(isp->secure_mode ?
+				      IPU_MMU_ADDRESS_BITS :
+				      IPU_MMU_ADDRESS_BITS_NON_SECURE);
+	adev->dev.dma_mask = &adev->dma_mask;
+	adev->dev.coherent_dma_mask = adev->dma_mask;
+	adev->ctrl = ctrl;
+	adev->pdata = pdata;
+	adev->isp = isp;
+	mutex_init(&adev->resume_lock);
+	dev_set_name(&adev->dev, "%s%d", name, nr);
+
+	rval = device_register(&adev->dev);
+	if (rval) {
+		put_device(&adev->dev);
+		return ERR_PTR(rval);
+	}
+
+	mutex_lock(&ipu_bus_mutex);
+	list_add(&adev->list, &isp->devices);
+	mutex_unlock(&ipu_bus_mutex);
+
+	pm_runtime_allow(&adev->dev);
+	pm_runtime_enable(&adev->dev);
+
+	return adev;
+}
+
+void ipu_bus_del_devices(struct pci_dev *pdev)
+{
+	struct ipu_device *isp = pci_get_drvdata(pdev);
+	struct ipu_bus_device *adev, *save;
+
+	mutex_lock(&ipu_bus_mutex);
+
+	list_for_each_entry_safe(adev, save, &isp->devices, list) {
+		list_del(&adev->list);
+		device_unregister(&adev->dev);
+	}
+
+	mutex_unlock(&ipu_bus_mutex);
+}
+
+int ipu_bus_register_driver(struct ipu_bus_driver *adrv)
+{
+	adrv->drv.bus = &ipu_bus;
+	return driver_register(&adrv->drv);
+}
+EXPORT_SYMBOL(ipu_bus_register_driver);
+
+int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv)
+{
+	driver_unregister(&adrv->drv);
+	return 0;
+}
+EXPORT_SYMBOL(ipu_bus_unregister_driver);
+
+int ipu_bus_register(void)
+{
+	mutex_init(&ipu_bus_mutex);
+	return bus_register(&ipu_bus);
+}
+
+void ipu_bus_unregister(void)
+{
+	mutex_destroy(&ipu_bus_mutex);
+	return bus_unregister(&ipu_bus);
+}
+
+static int flr_rpm_recovery(struct device *dev, void *p)
+{
+	dev_dbg(dev, "FLR recovery call\n");
+	/*
+	 * We do not necessarily traverse devices from child to parent:
+	 * runtime PM refuses to change the state of a parent while a child
+	 * is still active. At FLR (full reset of the whole IPU) that does
+	 * not matter: everything has been power gated by HW during the FLR
+	 * cycle and we are just cleaning up SW state. Thus, ignore children
+	 * during set_suspended.
+	 */
+	pm_suspend_ignore_children(dev, true);
+	pm_runtime_set_suspended(dev);
+	pm_suspend_ignore_children(dev, false);
+
+	return 0;
+}
+
+int ipu_bus_flr_recovery(void)
+{
+	bus_for_each_dev(&ipu_bus, NULL, NULL, flr_rpm_recovery);
+	return 0;
+}
diff --git a/drivers/media/pci/intel/ipu-bus.h b/drivers/media/pci/intel/ipu-bus.h
new file mode 100644
index 000000000000..1108cd377705
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-bus.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_BUS_H
+#define IPU_BUS_H
+
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+
+#define IPU_BUS_NAME	IPU_NAME "-bus"
+
+struct ipu_buttress_ctrl;
+struct ipu_subsystem_trace_config;
+
+struct ipu_bus_device {
+	struct device dev;
+	struct list_head list;
+	void *pdata;
+	struct ipu_bus_driver *adrv;
+	struct ipu_mmu *mmu;
+	struct ipu_device *isp;
+	struct ipu_subsystem_trace_config *trace_cfg;
+	struct ipu_buttress_ctrl *ctrl;
+	u64 dma_mask;
+	/* Protect runtime_resume calls on the dev */
+	struct mutex resume_lock;
+};
+
+#define to_ipu_bus_device(_dev) container_of(_dev, struct ipu_bus_device, dev)
+
+struct ipu_bus_driver {
+	struct device_driver drv;
+	const char *wanted;
+	int (*probe)(struct ipu_bus_device *adev);
+	void (*remove)(struct ipu_bus_device *adev);
+	irqreturn_t (*isr)(struct ipu_bus_device *adev);
+	irqreturn_t (*isr_threaded)(struct ipu_bus_device *adev);
+	bool wake_isr_thread;
+};
+
+#define to_ipu_bus_driver(_drv) container_of(_drv, struct ipu_bus_driver, drv)
+
+struct ipu_bus_device *ipu_bus_add_device(struct pci_dev *pdev,
+					  struct device *parent, void *pdata,
+					  struct ipu_buttress_ctrl *ctrl,
+					  char *name, unsigned int nr);
+void ipu_bus_del_devices(struct pci_dev *pdev);
+
+int ipu_bus_register_driver(struct ipu_bus_driver *adrv);
+int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv);
+
+int ipu_bus_register(void);
+void ipu_bus_unregister(void);
+
+#define module_ipu_bus_driver(drv)			\
+	module_driver(drv, ipu_bus_register_driver, \
+		ipu_bus_unregister_driver)
+
+#define ipu_bus_set_drvdata(adev, data) dev_set_drvdata(&(adev)->dev, data)
+#define ipu_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->dev)
+
+int ipu_bus_flr_recovery(void);
+
+#endif
diff --git a/drivers/media/pci/intel/ipu-buttress.c b/drivers/media/pci/intel/ipu-buttress.c
new file mode 100644
index 000000000000..77cf8556dc00
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-buttress.c
@@ -0,0 +1,1518 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include <media/ipu-isys.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-buttress.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-cpd.h"
+
+#define BOOTLOADER_STATUS_OFFSET       0x15c
+
+#define BOOTLOADER_MAGIC_KEY		0xb00710ad
+
+#define ENTRY	BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1
+#define EXIT	BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2
+#define QUERY	BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE
+
+#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX	10
+
+#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT		5000000
+#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT	10000000
+#define BUTTRESS_CSE_FWRESET_TIMEOUT		100000
+
+#define BUTTRESS_IPC_TX_TIMEOUT			1000
+#define BUTTRESS_IPC_RX_TIMEOUT			1000
+#define BUTTRESS_IPC_VALIDITY_TIMEOUT		1000000
+#define BUTTRESS_TSC_SYNC_TIMEOUT		5000
+
+#define IPU_BUTTRESS_TSC_LIMIT	500	/* 26 us @ 19.2 MHz */
+#define IPU_BUTTRESS_TSC_RETRY	10
+
+#define BUTTRESS_CSE_IPC_RESET_RETRY	4
+
+#define BUTTRESS_IPC_CMD_SEND_RETRY	1
+
+static const struct ipu_buttress_sensor_clk_freq sensor_clk_freqs[] = {
+	{6750000, BUTTRESS_SENSOR_CLK_FREQ_6P75MHZ},
+	{8000000, BUTTRESS_SENSOR_CLK_FREQ_8MHZ},
+	{9600000, BUTTRESS_SENSOR_CLK_FREQ_9P6MHZ},
+	{12000000, BUTTRESS_SENSOR_CLK_FREQ_12MHZ},
+	{13600000, BUTTRESS_SENSOR_CLK_FREQ_13P6MHZ},
+	{14400000, BUTTRESS_SENSOR_CLK_FREQ_14P4MHZ},
+	{15800000, BUTTRESS_SENSOR_CLK_FREQ_15P8MHZ},
+	{16200000, BUTTRESS_SENSOR_CLK_FREQ_16P2MHZ},
+	{17300000, BUTTRESS_SENSOR_CLK_FREQ_17P3MHZ},
+	{18600000, BUTTRESS_SENSOR_CLK_FREQ_18P6MHZ},
+	{19200000, BUTTRESS_SENSOR_CLK_FREQ_19P2MHZ},
+	{24000000, BUTTRESS_SENSOR_CLK_FREQ_24MHZ},
+	{26000000, BUTTRESS_SENSOR_CLK_FREQ_26MHZ},
+	{27000000, BUTTRESS_SENSOR_CLK_FREQ_27MHZ}
+};
+
+static const u32 ipu_adev_irq_mask[] = {
+	BUTTRESS_ISR_IS_IRQ, BUTTRESS_ISR_PS_IRQ
+};
+
+int ipu_buttress_ipc_reset(struct ipu_device *isp, struct ipu_buttress_ipc *ipc)
+{
+	struct ipu_buttress *b = &isp->buttress;
+	unsigned long tout_jfs;
+	unsigned int tout = 500;
+	u32 val = 0, csr_in_clr;
+
+	if (!isp->secure_mode) {
+		dev_info(&isp->pdev->dev, "Skip ipc reset for non-secure mode");
+		return 0;
+	}
+
+	mutex_lock(&b->ipc_mutex);
+
+	/* Clear-by-1 CSR (all bits), corresponding internal states. */
+	val = readl(isp->base + ipc->csr_in);
+	writel(val, isp->base + ipc->csr_in);
+
+	/* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */
+	writel(ENTRY, isp->base + ipc->csr_out);
+
+	/*
+	 * Clear-by-1 all CSR bits EXCEPT following
+	 * bits:
+	 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+	 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+	 * C. Possibly custom bits, depending on
+	 * their role.
+	 */
+	csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ |
+		BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID |
+		BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY;
+
+	/* How long should we wait here? */
+	tout_jfs = jiffies + msecs_to_jiffies(tout);
+	do {
+		val = readl(isp->base + ipc->csr_in);
+		dev_dbg(&isp->pdev->dev, "%s: csr_in = %x\n", __func__, val);
+		if (val & ENTRY) {
+			if (val & EXIT) {
+				dev_dbg(&isp->pdev->dev,
+					"%s:%s & %s\n", __func__,
+					"IPC_PEER_COMP_ACTIONS_RST_PHASE1",
+					"IPC_PEER_COMP_ACTIONS_RST_PHASE2");
+				/*
+				 * 1) Clear-by-1 CSR bits
+				 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+				 * IPC_PEER_COMP_ACTIONS_RST_PHASE2).
+				 * 2) Set peer CSR bit
+				 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+				 */
+				writel(ENTRY | EXIT,
+				       isp->base + ipc->csr_in);
+
+				writel(QUERY, isp->base + ipc->csr_out);
+
+				tout_jfs = jiffies + msecs_to_jiffies(tout);
+				continue;
+			} else {
+				dev_dbg(&isp->pdev->dev,
+					"%s:IPC_PEER_COMP_ACTIONS_RST_PHASE1\n",
+					__func__);
+				/*
+				 * 1) Clear-by-1 CSR bits
+				 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+				 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE).
+				 * 2) Set peer CSR bit
+				 * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+				 */
+				writel(ENTRY | QUERY,
+				       isp->base + ipc->csr_in);
+
+				writel(ENTRY, isp->base + ipc->csr_out);
+
+				tout_jfs = jiffies + msecs_to_jiffies(tout);
+				continue;
+			}
+		} else if (val & EXIT) {
+			dev_dbg(&isp->pdev->dev,
+				"%s: IPC_PEER_COMP_ACTIONS_RST_PHASE2\n",
+				__func__);
+			/*
+			 * Clear-by-1 CSR bit
+			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+			 * 1) Clear incoming doorbell.
+			 * 2) Clear-by-1 all CSR bits EXCEPT following
+			 * bits:
+			 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+			 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+			 * C. Possibly custom bits, depending on
+			 * their role.
+			 * 3) Set peer CSR bit
+			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+			 */
+			writel(EXIT, isp->base + ipc->csr_in);
+
+			writel(0, isp->base + ipc->db0_in);
+
+			writel(csr_in_clr, isp->base + ipc->csr_in);
+
+			writel(EXIT, isp->base + ipc->csr_out);
+
+			/*
+			 * Read csr_in again to make sure if RST_PHASE2 is done.
+			 * If csr_in is QUERY, it should be handled again.
+			 */
+			usleep_range(100, 500);
+			val = readl(isp->base + ipc->csr_in);
+			if (val & QUERY) {
+				dev_dbg(&isp->pdev->dev,
+					"%s: RST_PHASE2 retry csr_in = %x\n",
+					__func__, val);
+				continue;
+			}
+
+			mutex_unlock(&b->ipc_mutex);
+
+			return 0;
+		} else if (val & QUERY) {
+			dev_dbg(&isp->pdev->dev,
+				"%s: %s\n", __func__,
+				"IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE");
+			/*
+			 * 1) Clear-by-1 CSR bit
+			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+			 * 2) Set peer CSR bit
+			 * IPC_PEER_COMP_ACTIONS_RST_PHASE1
+			 */
+			writel(QUERY, isp->base + ipc->csr_in);
+
+			writel(ENTRY, isp->base + ipc->csr_out);
+
+			tout_jfs = jiffies + msecs_to_jiffies(tout);
+		}
+		usleep_range(100, 500);
+	} while (!time_after(jiffies, tout_jfs));
+
+	mutex_unlock(&b->ipc_mutex);
+
+	dev_err(&isp->pdev->dev, "Timed out while waiting for CSE!\n");
+
+	return -ETIMEDOUT;
+}
+
+static void
+ipu_buttress_ipc_validity_close(struct ipu_device *isp,
+				struct ipu_buttress_ipc *ipc)
+{
+	/* Set bit 5 in CSE CSR */
+	writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ,
+	       isp->base + ipc->csr_out);
+}
+
+static int
+ipu_buttress_ipc_validity_open(struct ipu_device *isp,
+			       struct ipu_buttress_ipc *ipc)
+{
+	unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID;
+	unsigned int tout = BUTTRESS_IPC_VALIDITY_TIMEOUT;
+	void __iomem *addr;
+	int ret;
+	u32 val;
+
+	/* Set bit 3 in CSE CSR */
+	writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ,
+	       isp->base + ipc->csr_out);
+
+	addr = isp->base + ipc->csr_in;
+	ret = readl_poll_timeout(addr, val, val & mask, 200, tout);
+	if (ret) {
+		val = readl(addr);
+		dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x!\n", val);
+		ipu_buttress_ipc_validity_close(isp, ipc);
+	}
+
+	return ret;
+}
+
+static void ipu_buttress_ipc_recv(struct ipu_device *isp,
+				  struct ipu_buttress_ipc *ipc, u32 *ipc_msg)
+{
+	if (ipc_msg)
+		*ipc_msg = readl(isp->base + ipc->data0_in);
+	writel(0, isp->base + ipc->db0_in);
+}
+
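+/*
+ * Send a sequence of IPC commands to the CSE or ISH: each command is
+ * written to data0_out and the doorbell (busy bit plus command size) is
+ * rung via db0_out. The ISR completes send_complete once the peer consumes
+ * the command and recv_complete when a response arrives; responses are
+ * checked against the NACK mask and the expected value.
+ */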
+static int ipu_buttress_ipc_send_bulk(struct ipu_device *isp,
+				      enum ipu_buttress_ipc_domain ipc_domain,
+				      struct ipu_ipc_buttress_bulk_msg *msgs,
+				      u32 size)
+{
+	struct ipu_buttress *b = &isp->buttress;
+	struct ipu_buttress_ipc *ipc;
+	unsigned long tx_timeout_jiffies, rx_timeout_jiffies;
+	u32 val;
+	int ret;
+	int tout;
+	unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY;
+
+	ipc = ipc_domain == IPU_BUTTRESS_IPC_CSE ? &b->cse : &b->ish;
+
+	mutex_lock(&b->ipc_mutex);
+
+	ret = ipu_buttress_ipc_validity_open(isp, ipc);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "IPC validity open failed\n");
+		goto out;
+	}
+
+	tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT);
+	rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT);
+
+	for (i = 0; i < size; i++) {
+		reinit_completion(&ipc->send_complete);
+		if (msgs[i].require_resp)
+			reinit_completion(&ipc->recv_complete);
+
+		dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n",
+			msgs[i].cmd);
+		writel(msgs[i].cmd, isp->base + ipc->data0_out);
+
+		val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size;
+
+		writel(val, isp->base + ipc->db0_out);
+
+		tout = wait_for_completion_timeout(&ipc->send_complete,
+						   tx_timeout_jiffies);
+		if (!tout) {
+			dev_err(&isp->pdev->dev, "send IPC response timeout\n");
+			if (!retry--) {
+				ret = -ETIMEDOUT;
+				goto out;
+			}
+
+			/*
+			 * WORKAROUND: Sometimes CSE is not
+			 * responding on first try, try again.
+			 */
+			writel(0, isp->base + ipc->db0_out);
+			i--;
+			continue;
+		}
+
+		retry = BUTTRESS_IPC_CMD_SEND_RETRY;
+
+		if (!msgs[i].require_resp)
+			continue;
+
+		tout = wait_for_completion_timeout(&ipc->recv_complete,
+						   rx_timeout_jiffies);
+		if (!tout) {
+			dev_err(&isp->pdev->dev, "recv IPC response timeout\n");
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		if (ipc->nack_mask &&
+		    (ipc->recv_data & ipc->nack_mask) == ipc->nack) {
+			dev_err(&isp->pdev->dev,
+				"IPC NACK for cmd 0x%x\n", msgs[i].cmd);
+			ret = -ENODEV;
+			goto out;
+		}
+
+		if (ipc->recv_data != msgs[i].expected_resp) {
+			dev_err(&isp->pdev->dev,
+				"expected resp: 0x%x, IPC response: 0x%x ",
+				msgs[i].expected_resp, ipc->recv_data);
+			ret = -EIO;
+			goto out;
+		}
+	}
+
+	dev_dbg(&isp->pdev->dev, "bulk IPC commands completed\n");
+
+out:
+	ipu_buttress_ipc_validity_close(isp, ipc);
+	mutex_unlock(&b->ipc_mutex);
+	return ret;
+}
+
+static int
+ipu_buttress_ipc_send(struct ipu_device *isp,
+		      enum ipu_buttress_ipc_domain ipc_domain,
+		      u32 ipc_msg, u32 size, bool require_resp,
+		      u32 expected_resp)
+{
+	struct ipu_ipc_buttress_bulk_msg msg = {
+		.cmd = ipc_msg,
+		.cmd_size = size,
+		.require_resp = require_resp,
+		.expected_resp = expected_resp,
+	};
+
+	return ipu_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1);
+}
+
+static irqreturn_t ipu_buttress_call_isr(struct ipu_bus_device *adev)
+{
+	irqreturn_t ret = IRQ_WAKE_THREAD;
+
+	if (!adev || !adev->adrv)
+		return IRQ_NONE;
+
+	if (adev->adrv->isr)
+		ret = adev->adrv->isr(adev);
+
+	if (ret == IRQ_WAKE_THREAD && !adev->adrv->isr_threaded)
+		ret = IRQ_NONE;
+
+	adev->adrv->wake_isr_thread = (ret == IRQ_WAKE_THREAD);
+
+	return ret;
+}
+
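+/*
+ * Top half of the buttress interrupt handler: acknowledge the IRQ status
+ * bits, dispatch them to the isys/psys sub-device ISRs, service the
+ * CSE/ISH IPC doorbells, and mask any IRQs whose handling is deferred to
+ * the threaded handler.
+ */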
+irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr)
+{
+	struct ipu_device *isp = isp_ptr;
+	struct ipu_bus_device *adev[] = { isp->isys, isp->psys };
+	struct ipu_buttress *b = &isp->buttress;
+	irqreturn_t ret = IRQ_NONE;
+	u32 disable_irqs = 0;
+	u32 irq_status;
+	u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS;
+	unsigned int i;
+
+	pm_runtime_get(&isp->pdev->dev);
+
+	if (!pm_runtime_active(&isp->pdev->dev)) {
+		irq_status = readl(isp->base + reg_irq_sts);
+		writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);
+		pm_runtime_put(&isp->pdev->dev);
+		return IRQ_HANDLED;
+	}
+
+	irq_status = readl(isp->base + reg_irq_sts);
+	if (!irq_status) {
+		pm_runtime_put(&isp->pdev->dev);
+		return IRQ_NONE;
+	}
+
+	do {
+		writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);
+
+		for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) {
+			if (irq_status & ipu_adev_irq_mask[i]) {
+				irqreturn_t r = ipu_buttress_call_isr(adev[i]);
+
+				if (r == IRQ_WAKE_THREAD) {
+					ret = IRQ_WAKE_THREAD;
+					disable_irqs |= ipu_adev_irq_mask[i];
+				} else if (ret == IRQ_NONE &&
+					   r == IRQ_HANDLED) {
+					ret = IRQ_HANDLED;
+				}
+			}
+		}
+
+		if (irq_status & (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING |
+				  BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING |
+				  BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE |
+				  BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH |
+				  BUTTRESS_ISR_SAI_VIOLATION) &&
+		    ret == IRQ_NONE)
+			ret = IRQ_HANDLED;
+
+		if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n");
+			ipu_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
+			complete(&b->cse.recv_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n");
+			ipu_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data);
+			complete(&b->ish.recv_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
+			complete(&b->cse.send_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
+			complete(&b->ish.send_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_SAI_VIOLATION &&
+		    ipu_buttress_get_secure_mode(isp)) {
+			dev_err(&isp->pdev->dev,
+				"BUTTRESS_ISR_SAI_VIOLATION\n");
+			WARN_ON(1);
+		}
+
+		irq_status = readl(isp->base + reg_irq_sts);
+	} while (irq_status && !isp->flr_done);
+
+	if (disable_irqs)
+		writel(BUTTRESS_IRQS & ~disable_irqs,
+		       isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	pm_runtime_put(&isp->pdev->dev);
+
+	return ret;
+}
+
+irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr)
+{
+	struct ipu_device *isp = isp_ptr;
+	struct ipu_bus_device *adev[] = { isp->isys, isp->psys };
+	irqreturn_t ret = IRQ_NONE;
+	unsigned int i;
+
+	dev_dbg(&isp->pdev->dev, "isr: Buttress threaded interrupt handler\n");
+
+	for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) {
+		if (adev[i] && adev[i]->adrv &&
+		    adev[i]->adrv->wake_isr_thread &&
+		    adev[i]->adrv->isr_threaded(adev[i]) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+	}
+
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	return ret;
+}
+
+int ipu_buttress_power(struct device *dev,
+		       struct ipu_buttress_ctrl *ctrl, bool on)
+{
+	struct ipu_device *isp = to_ipu_bus_device(dev)->isp;
+	u32 pwr_sts, val;
+	int ret = 0;
+
+	if (!ctrl)
+		return 0;
+
+	/* Until FLR completion nothing is expected to work */
+	if (isp->flr_done)
+		return 0;
+
+	mutex_lock(&isp->buttress.power_mutex);
+
+	if (!on) {
+		val = 0;
+		pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
+	} else {
+		val = BUTTRESS_FREQ_CTL_START |
+			ctrl->divisor << ctrl->divisor_shift |
+			ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT;
+
+		pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
+	}
+
+	writel(val, isp->base + ctrl->freq_ctl);
+
+	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE,
+				 val, ((val & ctrl->pwr_sts_mask) == pwr_sts),
+				 100, BUTTRESS_POWER_TIMEOUT);
+	if (ret)
+		dev_err(&isp->pdev->dev,
+			"Change power status timeout with 0x%x\n", val);
+
+	ctrl->started = !ret && on;
+
+	mutex_unlock(&isp->buttress.power_mutex);
+
+	return ret;
+}
+
+static bool secure_mode_enable = true;
+module_param(secure_mode_enable, bool, 0660);
+MODULE_PARM_DESC(secure_mode_enable, "IPU secure mode enable");
+
+void ipu_buttress_set_secure_mode(struct ipu_device *isp)
+{
+	u8 retry = 100;
+	u32 val, read;
+
+	/*
+	 * HACK to disable possible secure mode. This can be reverted
+	 * once CSE disables secure mode itself.
+	 */
+	read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	if (secure_mode_enable)
+		val = read | BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
+	else
+		val = read & ~BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
+
+	if (val == read)
+		return;
+
+	writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	/*
+	 * In B0, because of a HW bug, writes to some buttress registers
+	 * might not succeed on the first attempt. Write twice until the
+	 * write is successful.
+	 */
+	writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	while (retry--) {
+		read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+		if (read == val)
+			break;
+
+		writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+		if (retry == 0)
+			dev_err(&isp->pdev->dev,
+				"update security control register failed\n");
+	}
+}
+
+bool ipu_buttress_get_secure_mode(struct ipu_device *isp)
+{
+	u32 val;
+
+	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
+}
+
+bool ipu_buttress_auth_done(struct ipu_device *isp)
+{
+	u32 val;
+
+	if (!isp->secure_mode)
+		return true;
+
+	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	return (val & BUTTRESS_SECURITY_CTL_FW_SETUP_MASK) ==
+	    BUTTRESS_SECURITY_CTL_AUTH_DONE;
+}
+EXPORT_SYMBOL(ipu_buttress_auth_done);
+
+static void ipu_buttress_set_psys_ratio(struct ipu_device *isp,
+					unsigned int psys_divisor,
+					unsigned int psys_qos_floor)
+{
+	struct ipu_buttress_ctrl *ctrl = isp->psys->ctrl;
+
+	mutex_lock(&isp->buttress.power_mutex);
+
+	if (ctrl->divisor == psys_divisor && ctrl->qos_floor == psys_qos_floor)
+		goto out_mutex_unlock;
+
+	ctrl->divisor = psys_divisor;
+	ctrl->qos_floor = psys_qos_floor;
+
+	if (ctrl->started) {
+		/*
+		 * According to the documentation, the driver initiates a
+		 * DVFS transition by writing the wanted ratio, floor
+		 * ratio and start bit. There is no need to stop PS first.
+		 */
+		writel(BUTTRESS_FREQ_CTL_START |
+		       ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT |
+		       psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL);
+	}
+
+out_mutex_unlock:
+	mutex_unlock(&isp->buttress.power_mutex);
+}
+
+static void ipu_buttress_set_psys_freq(struct ipu_device *isp,
+				       unsigned int freq)
+{
+	unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP;
+
+	if (isp->buttress.psys_force_ratio)
+		return;
+
+	ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio);
+}
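+/*
+ * Illustrative example (not part of the original code, and assuming
+ * frequencies are expressed in MHz): with BUTTRESS_PS_FREQ_STEP at 25, a
+ * requested PS frequency of 400 yields psys_ratio = 400 / 25 = 16, which
+ * ipu_buttress_set_psys_ratio() then programs as both the wanted ratio
+ * and the QoS floor.
+ */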
+
+void
+ipu_buttress_add_psys_constraint(struct ipu_device *isp,
+				 struct ipu_buttress_constraint *constraint)
+{
+	struct ipu_buttress *b = &isp->buttress;
+
+	mutex_lock(&b->cons_mutex);
+	list_add(&constraint->list, &b->constraints);
+
+	if (constraint->min_freq > b->psys_min_freq) {
+		isp->buttress.psys_min_freq = min(constraint->min_freq,
+						  b->psys_fused_freqs.max_freq);
+		ipu_buttress_set_psys_freq(isp, b->psys_min_freq);
+	}
+	mutex_unlock(&b->cons_mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_add_psys_constraint);
+
+void
+ipu_buttress_remove_psys_constraint(struct ipu_device *isp,
+				    struct ipu_buttress_constraint *constraint)
+{
+	struct ipu_buttress *b = &isp->buttress;
+	struct ipu_buttress_constraint *c;
+	unsigned int min_freq = 0;
+
+	mutex_lock(&b->cons_mutex);
+	list_del(&constraint->list);
+
+	if (constraint->min_freq >= b->psys_min_freq) {
+		list_for_each_entry(c, &b->constraints, list)
+			if (c->min_freq > min_freq)
+				min_freq = c->min_freq;
+
+		b->psys_min_freq = clamp(min_freq,
+					 b->psys_fused_freqs.efficient_freq,
+					 b->psys_fused_freqs.max_freq);
+		ipu_buttress_set_psys_freq(isp, b->psys_min_freq);
+	}
+	mutex_unlock(&b->cons_mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_remove_psys_constraint);
+
+int ipu_buttress_reset_authentication(struct ipu_device *isp)
+{
+	int ret;
+	u32 val;
+
+	if (!isp->secure_mode) {
+		dev_dbg(&isp->pdev->dev,
+			"Non-secure mode -> skip authentication\n");
+		return 0;
+	}
+
+	writel(BUTTRESS_FW_RESET_CTL_START, isp->base +
+	       BUTTRESS_REG_FW_RESET_CTL);
+
+	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val,
+				 val & BUTTRESS_FW_RESET_CTL_DONE, 500,
+				 BUTTRESS_CSE_FWRESET_TIMEOUT);
+	if (ret) {
+		dev_err(&isp->pdev->dev,
+			"Timed out while resetting authentication state!\n");
+	} else {
+		dev_info(&isp->pdev->dev,
+			 "FW reset for authentication done!\n");
+		writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL);
+		/* leave some time for HW restore */
+		usleep_range(800, 1000);
+	}
+
+	return ret;
+}
+
+int ipu_buttress_map_fw_image(struct ipu_bus_device *sys,
+			      const struct firmware *fw, struct sg_table *sgt)
+{
+	struct page **pages;
+	const void *addr;
+	unsigned long n_pages, i;
+	int rval;
+
+	n_pages = PAGE_ALIGN(fw->size) >> PAGE_SHIFT;
+
+	pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	addr = fw->data;
+	for (i = 0; i < n_pages; i++) {
+		struct page *p = vmalloc_to_page(addr);
+
+		if (!p) {
+			rval = -ENODEV;
+			goto out;
+		}
+		pages[i] = p;
+		addr += PAGE_SIZE;
+	}
+
+	rval = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size,
+					 GFP_KERNEL);
+	if (rval) {
+		rval = -ENOMEM;
+		goto out;
+	}
+
+	n_pages = dma_map_sg(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+	if (n_pages != sgt->nents) {
+		rval = -ENOMEM;
+		sg_free_table(sgt);
+		goto out;
+	}
+
+	dma_sync_sg_for_device(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+
+out:
+	kfree(pages);
+
+	return rval;
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_map_fw_image);
+
+int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys,
+				struct sg_table *sgt)
+{
+	dma_unmap_sg(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+	sg_free_table(sgt);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_unmap_fw_image);
+
+int ipu_buttress_authenticate(struct ipu_device *isp)
+{
+	struct ipu_psys_pdata *psys_pdata;
+	struct ipu_buttress *b = &isp->buttress;
+	u32 data, mask, done, fail;
+	int rval;
+
+	if (!isp->secure_mode) {
+		dev_dbg(&isp->pdev->dev,
+			"Non-secure mode -> skip authentication\n");
+		return 0;
+	}
+
+	psys_pdata = isp->psys->pdata;
+
+	mutex_lock(&b->auth_mutex);
+
+	if (ipu_buttress_auth_done(isp)) {
+		rval = 0;
+		goto iunit_power_off;
+	}
+
+	/*
+	 * Write the address of the FIT table to the FW_SOURCE register.
+	 * Use the FW address for now, i.e. the FIT table is not used yet.
+	 */
+	data = lower_32_bits(isp->pkg_dir_dma_addr);
+	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO);
+
+	data = upper_32_bits(isp->pkg_dir_dma_addr);
+	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI);
+
+	/*
+	 * Write boot_load into IU2CSEDATA0, write
+	 * sizeof(boot_load) | 0x2 << CLIENT_ID to IU2CSEDB.IU2CSECMD
+	 * and set IU2CSEDB.IU2CSEBUSY.
+	 */
+	dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n");
+	rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE,
+				     BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD,
+				     1, true,
+				     BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
+		goto iunit_power_off;
+	}
+
+	mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
+	done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE;
+	fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED;
+	rval = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+				  ((data & mask) == done ||
+				   (data & mask) == fail), 500,
+				  BUTTRESS_CSE_BOOTLOAD_TIMEOUT);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "CSE boot_load timeout.\n");
+		goto iunit_power_off;
+	}
+
+	data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL) & mask;
+	if (data == fail) {
+		dev_err(&isp->pdev->dev, "CSE auth failed\n");
+		rval = -EINVAL;
+		goto iunit_power_off;
+	}
+
+	rval = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET,
+				  data, data == BOOTLOADER_MAGIC_KEY, 500,
+				  BUTTRESS_CSE_BOOTLOAD_TIMEOUT);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Expect magic number timeout 0x%x.\n",
+			data);
+		goto iunit_power_off;
+	}
+
+	/*
+	 * Write authenticate_run into IU2CSEDATA0, write
+	 * sizeof(authenticate_run) | 0x2 << CLIENT_ID to
+	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY.
+	 */
+	dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n");
+	rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE,
+				     BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN,
+				     1, true,
+				     BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n");
+		goto iunit_power_off;
+	}
+
+	done = BUTTRESS_SECURITY_CTL_AUTH_DONE;
+	rval = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+				  ((data & mask) == done ||
+				   (data & mask) == fail), 500,
+				  BUTTRESS_CSE_AUTHENTICATE_TIMEOUT);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "CSE authenticate timeout\n");
+		goto iunit_power_off;
+	}
+
+	data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL) & mask;
+	if (data == fail) {
+		dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
+		rval = -EINVAL;
+		goto iunit_power_off;
+	}
+
+	dev_info(&isp->pdev->dev, "CSE authenticate_run done\n");
+
+iunit_power_off:
+	mutex_unlock(&b->auth_mutex);
+
+	return rval;
+}
+EXPORT_SYMBOL(ipu_buttress_authenticate);
+
+static int ipu_buttress_send_tsc_request(struct ipu_device *isp)
+{
+	u32 val, mask, shift, done;
+	int ret;
+
+	mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK;
+	shift = BUTTRESS_PWR_STATE_HH_STATUS_SHIFT;
+
+	writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC,
+	       isp->base + BUTTRESS_REG_FABRIC_CMD);
+
+	val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
+	val = (val & mask) >> shift;
+	if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) {
+		dev_err(&isp->pdev->dev, "Start tsc sync failed!\n");
+		return -EINVAL;
+	}
+
+	done = BUTTRESS_PWR_STATE_HH_STATE_DONE;
+	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val,
+				 ((val & mask) >> shift == done), 500,
+				 BUTTRESS_TSC_SYNC_TIMEOUT);
+	if (ret)
+		dev_err(&isp->pdev->dev, "Start tsc sync timeout!\n");
+
+	return ret;
+}
+
+int ipu_buttress_start_tsc_sync(struct ipu_device *isp)
+{
+	unsigned int i;
+
+	for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
+		int ret;
+
+		ret = ipu_buttress_send_tsc_request(isp);
+		if (ret == -ETIMEDOUT) {
+			u32 val;
+
+			/* set tsw soft reset */
+			val = readl(isp->base + BUTTRESS_REG_TSW_CTL);
+			val |= BUTTRESS_TSW_CTL_SOFT_RESET;
+			writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+			/* clear tsw soft reset */
+			val &= ~BUTTRESS_TSW_CTL_SOFT_RESET;
+			writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+
+			continue;
+		}
+		return ret;
+	}
+
+	dev_err(&isp->pdev->dev, "TSC sync failed(timeout).\n");
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(ipu_buttress_start_tsc_sync);
+
+struct clk_ipu_sensor {
+	struct ipu_device *isp;
+	struct clk_hw hw;
+	unsigned int id;
+	unsigned long rate;
+};
+
+#define to_clk_ipu_sensor(_hw) container_of(_hw, struct clk_ipu_sensor, hw)
+
+static int ipu_buttress_clk_pll_prepare(struct clk_hw *hw)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+	int ret;
+
+	/* Workaround needed to get sensor clock running in some cases */
+	ret = pm_runtime_get_sync(&ck->isp->isys->dev);
+	return ret >= 0 ? 0 : ret;
+}
+
+static void ipu_buttress_clk_pll_unprepare(struct clk_hw *hw)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+
+	/* Workaround needed to get sensor clock stopped in some cases */
+	pm_runtime_put(&ck->isp->isys->dev);
+}
+
+static int ipu_buttress_clk_pll_enable(struct clk_hw *hw)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+	u32 val;
+	unsigned int i;
+
+	/*
+	 * The start bit behaves like a master clock request towards
+	 * ICLK. It is needed regardless of the 24 MHz or per-clock-out
+	 * PLL setting.
+	 */
+	val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL);
+	val |= BUTTRESS_FREQ_CTL_START;
+	val &= ~BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_MASK(ck->id);
+	for (i = 0; i < ARRAY_SIZE(sensor_clk_freqs); i++)
+		if (sensor_clk_freqs[i].rate == ck->rate)
+			break;
+
+	if (i < ARRAY_SIZE(sensor_clk_freqs))
+		val |= sensor_clk_freqs[i].val <<
+		    BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_SHIFT(ck->id);
+	else
+		val |= BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_DEFAULT(ck->id);
+
+	writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL);
+
+	return 0;
+}
+
+static void ipu_buttress_clk_pll_disable(struct clk_hw *hw)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+	u32 val;
+	int i;
+
+	val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL);
+	for (i = 0; i < IPU_BUTTRESS_NUM_OF_SENS_CKS; i++) {
+		if (val &
+		    (1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(i)))
+			return;
+	}
+
+	/* See enable control above */
+	val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL);
+	val &= ~BUTTRESS_FREQ_CTL_START;
+	writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL);
+}
+
+static int ipu_buttress_clk_enable(struct clk_hw *hw)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+	u32 val;
+
+	val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL);
+	val |= 1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(ck->id);
+
+	/* Enable dynamic sensor clock */
+	val |= 1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_SEL_SHIFT(ck->id);
+	writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL);
+
+	return 0;
+}
+
+static void ipu_buttress_clk_disable(struct clk_hw *hw)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+	u32 val;
+
+	val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL);
+	val &= ~(1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(ck->id));
+	writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL);
+}
+
+static long ipu_buttress_clk_round_rate(struct clk_hw *hw,
+					unsigned long rate,
+					unsigned long *parent_rate)
+{
+	unsigned long best = ULONG_MAX;
+	unsigned long round_rate = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sensor_clk_freqs); i++) {
+		long diff = sensor_clk_freqs[i].rate - rate;
+
+		if (diff == 0)
+			return rate;
+
+		diff = abs(diff);
+		if (diff < best) {
+			best = diff;
+			round_rate = sensor_clk_freqs[i].rate;
+		}
+	}
+
+	return round_rate;
+}
+
+static unsigned long
+ipu_buttress_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+
+	return ck->rate;
+}
+
+static int ipu_buttress_clk_set_rate(struct clk_hw *hw,
+				     unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+
+	/*
+	 * R    N       P       PVD     PLLout
+	 * 1    45      128     2       6.75
+	 * 1    40      96      2       8
+	 * 1    40      80      2       9.6
+	 * 1    15      20      4       14.4
+	 * 1    40      32      2       24
+	 * 1    65      48      1       26
+	 */
+	ck->rate = rate;
+
+	return 0;
+}
+
+static const struct clk_ops ipu_buttress_clk_sensor_ops = {
+	.enable = ipu_buttress_clk_enable,
+	.disable = ipu_buttress_clk_disable,
+};
+
+static const struct clk_ops ipu_buttress_clk_sensor_ops_parent = {
+	.enable = ipu_buttress_clk_pll_enable,
+	.disable = ipu_buttress_clk_pll_disable,
+	.prepare = ipu_buttress_clk_pll_prepare,
+	.unprepare = ipu_buttress_clk_pll_unprepare,
+	.round_rate = ipu_buttress_clk_round_rate,
+	.recalc_rate = ipu_buttress_clk_recalc_rate,
+	.set_rate = ipu_buttress_clk_set_rate,
+};
+
+int ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val)
+{
+	struct ipu_buttress *b = &isp->buttress;
+	u32 tsc_hi, tsc_lo_1, tsc_lo_2, tsc_lo_3, tsc_chk = 0;
+	unsigned long flags;
+	short retry = IPU_BUTTRESS_TSC_RETRY;
+
+	do {
+		spin_lock_irqsave(&b->tsc_lock, flags);
+		tsc_hi = readl(isp->base + BUTTRESS_REG_TSC_HI);
+
+		/*
+		 * We occasionally get broken values from HH. As a
+		 * workaround, read three times and sanity check the values.
+		 */
+		tsc_lo_1 = readl(isp->base + BUTTRESS_REG_TSC_LO);
+		tsc_lo_2 = readl(isp->base + BUTTRESS_REG_TSC_LO);
+		tsc_lo_3 = readl(isp->base + BUTTRESS_REG_TSC_LO);
+		tsc_chk = readl(isp->base + BUTTRESS_REG_TSC_HI);
+		spin_unlock_irqrestore(&b->tsc_lock, flags);
+		if (tsc_chk == tsc_hi && tsc_lo_2 &&
+		    tsc_lo_2 - tsc_lo_1 <= IPU_BUTTRESS_TSC_LIMIT &&
+		    tsc_lo_3 - tsc_lo_2 <= IPU_BUTTRESS_TSC_LIMIT) {
+			*val = (u64)tsc_hi << 32 | tsc_lo_2;
+			return 0;
+		}
+
+		/*
+		 * Trace an error only if the limit check fails on at
+		 * least two consecutive readings.
+		 */
+		if (retry < IPU_BUTTRESS_TSC_RETRY - 1 && tsc_lo_2)
+			dev_err(&isp->pdev->dev,
+				"%s = %u, %s = %u, %s = %u, %s = %u, %s = %u",
+				"failure: tsc_hi", tsc_hi,
+				"tsc_chk", tsc_chk,
+				"tsc_lo_1", tsc_lo_1,
+				"tsc_lo_2", tsc_lo_2, "tsc_lo_3", tsc_lo_3);
+	} while (retry--);
+
+	if (!tsc_chk && !tsc_lo_2)
+		return -EIO;
+
+	WARN_ON_ONCE(1);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_tsc_read);
+
+#ifdef CONFIG_DEBUG_FS
+
+static int ipu_buttress_reg_open(struct inode *inode, struct file *file)
+{
+	if (!inode->i_private)
+		return -EACCES;
+
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t ipu_buttress_reg_read(struct file *file, char __user *buf,
+				     size_t count, loff_t *ppos)
+{
+	struct debugfs_reg32 *reg = file->private_data;
+	u8 tmp[11];
+	u32 val = readl((void __iomem *)reg->offset);
+	int len = scnprintf(tmp, sizeof(tmp), "0x%08x", val);
+
+	return simple_read_from_buffer(buf, count, ppos, tmp, len);
+}
+
+static ssize_t ipu_buttress_reg_write(struct file *file,
+				      const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	struct debugfs_reg32 *reg = file->private_data;
+	u32 val;
+	int rval;
+
+	rval = kstrtou32_from_user(buf, count, 0, &val);
+	if (rval)
+		return rval;
+
+	writel(val, (void __iomem *)reg->offset);
+
+	return count;
+}
+
+static struct debugfs_reg32 buttress_regs[] = {
+	{"IU2CSEDB0", BUTTRESS_REG_IU2CSEDB0},
+	{"IU2CSEDATA0", BUTTRESS_REG_IU2CSEDATA0},
+	{"CSE2IUDB0", BUTTRESS_REG_CSE2IUDB0},
+	{"CSE2IUDATA0", BUTTRESS_REG_CSE2IUDATA0},
+	{"CSE2IUCSR", BUTTRESS_REG_CSE2IUCSR},
+	{"IU2CSECSR", BUTTRESS_REG_IU2CSECSR},
+};
+
+static const struct file_operations ipu_buttress_reg_fops = {
+	.owner = THIS_MODULE,
+	.open = ipu_buttress_reg_open,
+	.read = ipu_buttress_reg_read,
+	.write = ipu_buttress_reg_write,
+};
+
+static int ipu_buttress_start_tsc_sync_set(void *data, u64 val)
+{
+	struct ipu_device *isp = data;
+
+	return ipu_buttress_start_tsc_sync(isp);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_start_tsc_sync_fops, NULL,
+			ipu_buttress_start_tsc_sync_set, "%llu\n");
+
+static int ipu_buttress_tsc_get(void *data, u64 *val)
+{
+	return ipu_buttress_tsc_read(data, val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_tsc_fops, ipu_buttress_tsc_get,
+			NULL, "%llu\n");
+
+static int ipu_buttress_psys_force_freq_get(void *data, u64 *val)
+{
+	struct ipu_device *isp = data;
+
+	*val = isp->buttress.psys_force_ratio * BUTTRESS_PS_FREQ_STEP;
+
+	return 0;
+}
+
+static int ipu_buttress_psys_force_freq_set(void *data, u64 val)
+{
+	struct ipu_device *isp = data;
+
+	if (val && (val < BUTTRESS_MIN_FORCE_PS_FREQ ||
+		    val > BUTTRESS_MAX_FORCE_PS_FREQ))
+		return -EINVAL;
+
+	do_div(val, BUTTRESS_PS_FREQ_STEP);
+	isp->buttress.psys_force_ratio = val;
+
+	if (isp->buttress.psys_force_ratio)
+		ipu_buttress_set_psys_ratio(isp,
+					    isp->buttress.psys_force_ratio,
+					    isp->buttress.psys_force_ratio);
+	else
+		ipu_buttress_set_psys_freq(isp, isp->buttress.psys_min_freq);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_psys_force_freq_fops,
+			ipu_buttress_psys_force_freq_get,
+			ipu_buttress_psys_force_freq_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_psys_freq_fops,
+			ipu_buttress_psys_freq_get, NULL, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_isys_freq_fops,
+			ipu_buttress_isys_freq_get, NULL, "%llu\n");
+
+int ipu_buttress_debugfs_init(struct ipu_device *isp)
+{
+	struct debugfs_reg32 *reg =
+	    devm_kcalloc(&isp->pdev->dev, ARRAY_SIZE(buttress_regs),
+			 sizeof(*reg), GFP_KERNEL);
+	struct dentry *dir, *file;
+	int i;
+
+	if (!reg)
+		return -ENOMEM;
+
+	dir = debugfs_create_dir("buttress", isp->ipu_dir);
+	if (!dir)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(buttress_regs); i++, reg++) {
+		reg->offset = (unsigned long)isp->base +
+		    buttress_regs[i].offset;
+		reg->name = buttress_regs[i].name;
+		file = debugfs_create_file(reg->name, 0700,
+					   dir, reg, &ipu_buttress_reg_fops);
+		if (!file)
+			goto err;
+	}
+
+	file = debugfs_create_file("start_tsc_sync", 0200, dir, isp,
+				   &ipu_buttress_start_tsc_sync_fops);
+	if (!file)
+		goto err;
+	file = debugfs_create_file("tsc", 0400, dir, isp,
+				   &ipu_buttress_tsc_fops);
+	if (!file)
+		goto err;
+	file = debugfs_create_file("psys_force_freq", 0700, dir, isp,
+				   &ipu_buttress_psys_force_freq_fops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("psys_freq", 0400, dir, isp,
+				   &ipu_buttress_psys_freq_fops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("isys_freq", 0400, dir, isp,
+				   &ipu_buttress_isys_freq_fops);
+	if (!file)
+		goto err;
+
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENOMEM;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks)
+{
+	u64 ns = ticks * 10000;
+
+	/*
+	 * The TSC clock frequency is 19.2 MHz, so converting a TSC tick
+	 * count to ns is calculated by:
+	 * ns = ticks * 1000000000 / 19200000
+	 *    = ticks * 10000 / 192
+	 */
+	do_div(ns, 192);
+
+	return ns;
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_tsc_ticks_to_ns);
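+/*
+ * Worked example (illustrative, not part of the original code): at the
+ * 19.2 MHz TSC rate, 19200 ticks convert to 19200 * 10000 / 192 =
+ * 1000000 ns, i.e. exactly 1 ms.
+ */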
+
+static ssize_t psys_fused_min_freq_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			isp->buttress.psys_fused_freqs.min_freq);
+}
+
+static DEVICE_ATTR_RO(psys_fused_min_freq);
+
+static ssize_t psys_fused_max_freq_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			isp->buttress.psys_fused_freqs.max_freq);
+}
+
+static DEVICE_ATTR_RO(psys_fused_max_freq);
+
+static ssize_t psys_fused_efficient_freq_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			isp->buttress.psys_fused_freqs.efficient_freq);
+}
+
+static DEVICE_ATTR_RO(psys_fused_efficient_freq);
+
+int ipu_buttress_restore(struct ipu_device *isp)
+{
+	struct ipu_buttress *b = &isp->buttress;
+
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+	writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT);
+
+	return 0;
+}
+
+int ipu_buttress_init(struct ipu_device *isp)
+{
+	struct ipu_buttress *b = &isp->buttress;
+	int rval, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY;
+
+	mutex_init(&b->power_mutex);
+	mutex_init(&b->auth_mutex);
+	mutex_init(&b->cons_mutex);
+	mutex_init(&b->ipc_mutex);
+	spin_lock_init(&b->tsc_lock);
+	init_completion(&b->ish.send_complete);
+	init_completion(&b->cse.send_complete);
+	init_completion(&b->ish.recv_complete);
+	init_completion(&b->cse.recv_complete);
+
+	b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK;
+	b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK;
+	b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR;
+	b->cse.csr_out = BUTTRESS_REG_IU2CSECSR;
+	b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0;
+	b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0;
+	b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0;
+	b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0;
+
+	/* no ISH on IPU6 */
+	memset(&b->ish, 0, sizeof(b->ish));
+	INIT_LIST_HEAD(&b->constraints);
+
+	ipu_buttress_set_secure_mode(isp);
+	isp->secure_mode = ipu_buttress_get_secure_mode(isp);
+	if (isp->secure_mode != secure_mode_enable)
+		dev_warn(&isp->pdev->dev, "Unable to set secure mode!\n");
+
+	dev_info(&isp->pdev->dev, "IPU in %s mode\n",
+		 isp->secure_mode ? "secure" : "non-secure");
+
+	b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	rval = device_create_file(&isp->pdev->dev,
+				  &dev_attr_psys_fused_min_freq);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Create min freq file failed\n");
+		goto err_mutex_destroy;
+	}
+
+	rval = device_create_file(&isp->pdev->dev,
+				  &dev_attr_psys_fused_max_freq);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Create max freq file failed\n");
+		goto err_remove_min_freq_file;
+	}
+
+	rval = device_create_file(&isp->pdev->dev,
+				  &dev_attr_psys_fused_efficient_freq);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Create efficient freq file failed\n");
+		goto err_remove_max_freq_file;
+	}
+
+	/*
+	 * We want to retry a couple of times in case CSE initialization
+	 * is delayed for one reason or another.
+	 */
+	do {
+		rval = ipu_buttress_ipc_reset(isp, &b->cse);
+		if (rval) {
+			dev_err(&isp->pdev->dev,
+				"IPC reset protocol failed, retry!\n");
+		} else {
+			dev_dbg(&isp->pdev->dev, "IPC reset completed!\n");
+			return 0;
+		}
+	} while (ipc_reset_retry--);
+
+	dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
+
+err_remove_max_freq_file:
+	device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq);
+err_remove_min_freq_file:
+	device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq);
+err_mutex_destroy:
+	mutex_destroy(&b->power_mutex);
+	mutex_destroy(&b->auth_mutex);
+	mutex_destroy(&b->cons_mutex);
+	mutex_destroy(&b->ipc_mutex);
+
+	return rval;
+}
+
+void ipu_buttress_exit(struct ipu_device *isp)
+{
+	struct ipu_buttress *b = &isp->buttress;
+
+	writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	device_remove_file(&isp->pdev->dev,
+			   &dev_attr_psys_fused_efficient_freq);
+	device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq);
+	device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq);
+
+	mutex_destroy(&b->power_mutex);
+	mutex_destroy(&b->auth_mutex);
+	mutex_destroy(&b->cons_mutex);
+	mutex_destroy(&b->ipc_mutex);
+}
diff --git a/drivers/media/pci/intel/ipu-buttress.h b/drivers/media/pci/intel/ipu-buttress.h
new file mode 100644
index 000000000000..02c8b46947a1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-buttress.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_BUTTRESS_H
+#define IPU_BUTTRESS_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include "ipu.h"
+
+#define IPU_BUTTRESS_NUM_OF_SENS_CKS	3
+#define IPU_BUTTRESS_NUM_OF_PLL_CKS	3
+#define IPU_BUTTRESS_TSC_CLK		19200000
+
+#define BUTTRESS_POWER_TIMEOUT			200000
+
+#define BUTTRESS_PS_FREQ_STEP		25U
+#define BUTTRESS_MIN_FORCE_PS_FREQ	(BUTTRESS_PS_FREQ_STEP * 8)
+#define BUTTRESS_MAX_FORCE_PS_FREQ	(BUTTRESS_PS_FREQ_STEP * 32)
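+/*
+ * Illustrative arithmetic (not in the original source): the forced PS
+ * frequency limits above work out to 25 * 8 = 200 through 25 * 32 = 800.
+ */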
+
+struct ipu_buttress_ctrl {
+	u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off;
+	union {
+		unsigned int divisor;
+		unsigned int ratio;
+	};
+	union {
+		unsigned int divisor_shift;
+		unsigned int ratio_shift;
+	};
+	unsigned int qos_floor;
+	bool started;
+};
+
+struct ipu_buttress_fused_freqs {
+	unsigned int min_freq;
+	unsigned int max_freq;
+	unsigned int efficient_freq;
+};
+
+struct ipu_buttress_ipc {
+	struct completion send_complete;
+	struct completion recv_complete;
+	u32 nack;
+	u32 nack_mask;
+	u32 recv_data;
+	u32 csr_out;
+	u32 csr_in;
+	u32 db0_in;
+	u32 db0_out;
+	u32 data0_out;
+	u32 data0_in;
+};
+
+struct ipu_buttress {
+	struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex;
+	spinlock_t tsc_lock;	/* tsc lock */
+	struct ipu_buttress_ipc cse;
+	struct ipu_buttress_ipc ish;
+	struct list_head constraints;
+	struct ipu_buttress_fused_freqs psys_fused_freqs;
+	unsigned int psys_min_freq;
+	u32 wdt_cached_value;
+	u8 psys_force_ratio;
+	bool force_suspend;
+};
+
+struct ipu_buttress_sensor_clk_freq {
+	unsigned int rate;
+	unsigned int val;
+};
+
+struct firmware;
+
+enum ipu_buttress_ipc_domain {
+	IPU_BUTTRESS_IPC_CSE,
+	IPU_BUTTRESS_IPC_ISH,
+};
+
+struct ipu_buttress_constraint {
+	struct list_head list;
+	unsigned int min_freq;
+};
+
+struct ipu_ipc_buttress_bulk_msg {
+	u32 cmd;
+	u32 expected_resp;
+	bool require_resp;
+	u8 cmd_size;
+};
+
+int ipu_buttress_ipc_reset(struct ipu_device *isp,
+			   struct ipu_buttress_ipc *ipc);
+int ipu_buttress_map_fw_image(struct ipu_bus_device *sys,
+			      const struct firmware *fw, struct sg_table *sgt);
+int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys,
+				struct sg_table *sgt);
+int ipu_buttress_power(struct device *dev,
+		       struct ipu_buttress_ctrl *ctrl, bool on);
+void
+ipu_buttress_add_psys_constraint(struct ipu_device *isp,
+				 struct ipu_buttress_constraint *constraint);
+void
+ipu_buttress_remove_psys_constraint(struct ipu_device *isp,
+				    struct ipu_buttress_constraint *constraint);
+void ipu_buttress_set_secure_mode(struct ipu_device *isp);
+bool ipu_buttress_get_secure_mode(struct ipu_device *isp);
+int ipu_buttress_authenticate(struct ipu_device *isp);
+int ipu_buttress_reset_authentication(struct ipu_device *isp);
+bool ipu_buttress_auth_done(struct ipu_device *isp);
+int ipu_buttress_start_tsc_sync(struct ipu_device *isp);
+int ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val);
+u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks);
+
+irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr);
+irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr);
+int ipu_buttress_debugfs_init(struct ipu_device *isp);
+int ipu_buttress_init(struct ipu_device *isp);
+void ipu_buttress_exit(struct ipu_device *isp);
+void ipu_buttress_csi_port_config(struct ipu_device *isp,
+				  u32 legacy, u32 combo);
+int ipu_buttress_restore(struct ipu_device *isp);
+
+int ipu_buttress_psys_freq_get(void *data, u64 *val);
+int ipu_buttress_isys_freq_get(void *data, u64 *val);
+#endif /* IPU_BUTTRESS_H */
diff --git a/drivers/media/pci/intel/ipu-cpd.c b/drivers/media/pci/intel/ipu-cpd.c
new file mode 100644
index 000000000000..cfd5f4013608
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-cpd.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2020 Intel Corporation
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include "ipu.h"
+#include "ipu-cpd.h"
+
+/* 15 entries + header */
+#define MAX_PKG_DIR_ENT_CNT		16
+/* 2 qword per entry/header */
+#define PKG_DIR_ENT_LEN			2
+/* PKG_DIR size in bytes */
+#define PKG_DIR_SIZE			((MAX_PKG_DIR_ENT_CNT) *	\
+					 (PKG_DIR_ENT_LEN) * sizeof(u64))
+#define PKG_DIR_ID_SHIFT		48
+#define PKG_DIR_ID_MASK			0x7f
+#define PKG_DIR_VERSION_SHIFT		32
+#define PKG_DIR_SIZE_MASK		0xfffff
+/* _IUPKDR_ */
+#define PKG_DIR_HDR_MARK		0x5f4955504b44525f
+
+/* $CPD */
+#define CPD_HDR_MARK			0x44504324
+
+/* Maximum size is 2K DWORDs */
+#define MAX_MANIFEST_SIZE		(2 * 1024 * sizeof(u32))
+
+/* Maximum size is 64k */
+#define MAX_METADATA_SIZE		(64 * 1024)
+
+#define MAX_COMPONENT_ID		127
+#define MAX_COMPONENT_VERSION		0xffff
+
+#define CPD_MANIFEST_IDX	0
+#define CPD_METADATA_IDX	1
+#define CPD_MODULEDATA_IDX	2
+
+static inline struct ipu_cpd_ent *ipu_cpd_get_entries(const void *cpd)
+{
+	const struct ipu_cpd_hdr *cpd_hdr = cpd;
+
+	return (struct ipu_cpd_ent *)((u8 *)cpd + cpd_hdr->hdr_len);
+}
+
+#define ipu_cpd_get_entry(cpd, idx) (&ipu_cpd_get_entries(cpd)[idx])
+#define ipu_cpd_get_manifest(cpd) ipu_cpd_get_entry(cpd, CPD_MANIFEST_IDX)
+#define ipu_cpd_get_metadata(cpd) ipu_cpd_get_entry(cpd, CPD_METADATA_IDX)
+#define ipu_cpd_get_moduledata(cpd) ipu_cpd_get_entry(cpd, CPD_MODULEDATA_IDX)
+
+static const struct ipu_cpd_metadata_cmpnt *
+ipu_cpd_metadata_get_cmpnt(struct ipu_device *isp,
+			   const void *metadata,
+			   unsigned int metadata_size,
+			   u8 idx)
+{
+	const struct ipu_cpd_metadata_extn *extn;
+	const struct ipu_cpd_metadata_cmpnt *cmpnts;
+	int cmpnt_count;
+
+	extn = metadata;
+	cmpnts = metadata + sizeof(*extn);
+	cmpnt_count = (metadata_size - sizeof(*extn)) / sizeof(*cmpnts);
+
+	if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) {
+		dev_err(&isp->pdev->dev, "Component index out of range (%d)\n",
+			idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return &cmpnts[idx];
+}
+
+static u32 ipu_cpd_metadata_cmpnt_version(struct ipu_device *isp,
+					  const void *metadata,
+					  unsigned int metadata_size, u8 idx)
+{
+	const struct ipu_cpd_metadata_cmpnt *cmpnt =
+	    ipu_cpd_metadata_get_cmpnt(isp, metadata,
+				       metadata_size, idx);
+
+	if (IS_ERR(cmpnt))
+		return PTR_ERR(cmpnt);
+
+	return cmpnt->ver;
+}
+
+static int ipu_cpd_metadata_get_cmpnt_id(struct ipu_device *isp,
+					 const void *metadata,
+					 unsigned int metadata_size, u8 idx)
+{
+	const struct ipu_cpd_metadata_cmpnt *cmpnt =
+	    ipu_cpd_metadata_get_cmpnt(isp, metadata,
+				       metadata_size, idx);
+
+	if (IS_ERR(cmpnt))
+		return PTR_ERR(cmpnt);
+
+	return cmpnt->id;
+}
+
+static const struct ipu6_cpd_metadata_cmpnt *
+ipu6_cpd_metadata_get_cmpnt(struct ipu_device *isp,
+			    const void *metadata,
+			    unsigned int metadata_size,
+			    u8 idx)
+{
+	const struct ipu_cpd_metadata_extn *extn = metadata;
+	const struct ipu6_cpd_metadata_cmpnt *cmpnts = metadata + sizeof(*extn);
+	int cmpnt_count;
+
+	cmpnt_count = (metadata_size - sizeof(*extn)) / sizeof(*cmpnts);
+	if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) {
+		dev_err(&isp->pdev->dev, "Component index out of range (%d)\n",
+			idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return &cmpnts[idx];
+}
+
+static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu_device *isp,
+					   const void *metadata,
+					   unsigned int metadata_size, u8 idx)
+{
+	const struct ipu6_cpd_metadata_cmpnt *cmpnt =
+	    ipu6_cpd_metadata_get_cmpnt(isp, metadata,
+					metadata_size, idx);
+
+	if (IS_ERR(cmpnt))
+		return PTR_ERR(cmpnt);
+
+	return cmpnt->ver;
+}
+
+static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu_device *isp,
+					  const void *metadata,
+					  unsigned int metadata_size, u8 idx)
+{
+	const struct ipu6_cpd_metadata_cmpnt *cmpnt =
+	    ipu6_cpd_metadata_get_cmpnt(isp, metadata,
+					metadata_size, idx);
+
+	if (IS_ERR(cmpnt))
+		return PTR_ERR(cmpnt);
+
+	return cmpnt->id;
+}
+
+static int ipu_cpd_parse_module_data(struct ipu_device *isp,
+				     const void *module_data,
+				     unsigned int module_data_size,
+				     dma_addr_t dma_addr_module_data,
+				     u64 *pkg_dir,
+				     const void *metadata,
+				     unsigned int metadata_size)
+{
+	const struct ipu_cpd_module_data_hdr *module_data_hdr;
+	const struct ipu_cpd_hdr *dir_hdr;
+	const struct ipu_cpd_ent *dir_ent;
+	int i;
+	u8 len;
+
+	if (!module_data)
+		return -EINVAL;
+
+	module_data_hdr = module_data;
+	dir_hdr = module_data + module_data_hdr->hdr_len;
+	len = dir_hdr->hdr_len;
+	dir_ent = (struct ipu_cpd_ent *)(((u8 *)dir_hdr) + len);
+
+	pkg_dir[0] = PKG_DIR_HDR_MARK;
+	/* pkg_dir entry count = component count + pkg_dir header */
+	pkg_dir[1] = dir_hdr->ent_cnt + 1;
+
+	for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) {
+		u64 *p = &pkg_dir[PKG_DIR_ENT_LEN + i * PKG_DIR_ENT_LEN];
+		int ver, id;
+
+		*p++ = dma_addr_module_data + dir_ent->offset;
+
+		if (ipu_ver == IPU_VER_6)
+			id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata,
+							    metadata_size, i);
+		else
+			id = ipu_cpd_metadata_get_cmpnt_id(isp, metadata,
+							   metadata_size, i);
+
+		if (id < 0 || id > MAX_COMPONENT_ID) {
+			dev_err(&isp->pdev->dev,
+				"Failed to parse component id\n");
+			return -EINVAL;
+		}
+
+		if (ipu_ver == IPU_VER_6)
+			ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata,
+							      metadata_size, i);
+		else
+			ver = ipu_cpd_metadata_cmpnt_version(isp, metadata,
+							     metadata_size, i);
+
+		if (ver < 0 || ver > MAX_COMPONENT_VERSION) {
+			dev_err(&isp->pdev->dev,
+				"Failed to parse component version\n");
+			return -EINVAL;
+		}
+
+		/*
+		 * PKG_DIR Entry (type == id)
+		 * 63:56        55      54:48   47:32   31:24   23:0
+		 * Rsvd         Rsvd    Type    Version Rsvd    Size
+		 */
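+		/*
+		 * Illustrative example (hypothetical values, not from the
+		 * original source): id = 3, ver = 0x0100 and len = 0x1000
+		 * pack into 0x0003010000001000.
+		 */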
+		*p = dir_ent->len | (u64)id << PKG_DIR_ID_SHIFT |
+		    (u64)ver << PKG_DIR_VERSION_SHIFT;
+	}
+
+	return 0;
+}
+
+void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev,
+			     const void *src,
+			     dma_addr_t dma_addr_src,
+			     dma_addr_t *dma_addr, unsigned int *pkg_dir_size)
+{
+	struct ipu_device *isp = adev->isp;
+	const struct ipu_cpd_ent *ent, *man_ent, *met_ent;
+	u64 *pkg_dir;
+	unsigned int man_sz, met_sz;
+	void *pkg_dir_pos;
+	int ret;
+
+	man_ent = ipu_cpd_get_manifest(src);
+	man_sz = man_ent->len;
+
+	met_ent = ipu_cpd_get_metadata(src);
+	met_sz = met_ent->len;
+
+	*pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz;
+	pkg_dir = dma_alloc_attrs(&adev->dev, *pkg_dir_size, dma_addr,
+				  GFP_KERNEL,
+				  0);
+	if (!pkg_dir)
+		return NULL;
+
+	/*
+	 * pkg_dir entry/header:
+	 * qword | 63:56 | 55   | 54:48 | 47:32 | 31:24 | 23:0
+	 * N     |          Address/Offset/"_IUPKDR_"
+	 * N + 1 | rsvd  | rsvd | type  | ver   | rsvd  | size
+	 *
+	 * We can ignore the fields other than size in the N + 1 qword,
+	 * as they are 0 anyway. Just set the size for now.
+	 */
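+	/*
+	 * Resulting buffer layout (an illustrative summary of the copies
+	 * below): the pkg_dir table occupies the first
+	 * MAX_PKG_DIR_ENT_CNT * PKG_DIR_ENT_LEN * sizeof(u64) = 256
+	 * bytes, the manifest is copied right after it, and the metadata
+	 * follows the manifest.
+	 */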
+
+	ent = ipu_cpd_get_moduledata(src);
+
+	ret = ipu_cpd_parse_module_data(isp, src + ent->offset,
+					ent->len,
+					dma_addr_src + ent->offset,
+					pkg_dir,
+					src + met_ent->offset, met_ent->len);
+	if (ret) {
+		dev_err(&isp->pdev->dev,
+			"Unable to parse module data section!\n");
+		dma_free_attrs(&adev->dev, *pkg_dir_size, pkg_dir,
+			       *dma_addr, 0);
+		return NULL;
+	}
+
+	/* Copy manifest after pkg_dir */
+	pkg_dir_pos = pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT;
+	memcpy(pkg_dir_pos, src + man_ent->offset, man_sz);
+
+	/* Copy metadata after manifest */
+	pkg_dir_pos += man_sz;
+	memcpy(pkg_dir_pos, src + met_ent->offset, met_sz);
+
+	dma_sync_single_range_for_device(&adev->dev, *dma_addr,
+					 0, *pkg_dir_size, DMA_TO_DEVICE);
+
+	return pkg_dir;
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_create_pkg_dir);
+
+void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev,
+			  u64 *pkg_dir,
+			  dma_addr_t dma_addr, unsigned int pkg_dir_size)
+{
+	dma_free_attrs(&adev->dev, pkg_dir_size, pkg_dir, dma_addr, 0);
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_free_pkg_dir);
+
+static int ipu_cpd_validate_cpd(struct ipu_device *isp,
+				const void *cpd,
+				unsigned long cpd_size, unsigned long data_size)
+{
+	const struct ipu_cpd_hdr *cpd_hdr = cpd;
+	struct ipu_cpd_ent *ent;
+	unsigned int i;
+	u8 len;
+
+	len = cpd_hdr->hdr_len;
+
+	/* Ensure cpd hdr is within moduledata */
+	if (cpd_size < len) {
+		dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n");
+		return -EINVAL;
+	}
+
+	/* Sanity check for CPD header */
+	if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) {
+		dev_err(&isp->pdev->dev, "Invalid CPD header\n");
+		return -EINVAL;
+	}
+
+	/* Ensure that all entries are within moduledata */
+	ent = (struct ipu_cpd_ent *)(((u8 *)cpd_hdr) + len);
+	for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) {
+		if (data_size < ent->offset ||
+		    data_size - ent->offset < ent->len) {
+			dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int ipu_cpd_validate_moduledata(struct ipu_device *isp,
+				       const void *moduledata,
+				       u32 moduledata_size)
+{
+	const struct ipu_cpd_module_data_hdr *mod_hdr = moduledata;
+	int rval;
+
+	/* Ensure moduledata hdr is within moduledata */
+	if (moduledata_size < sizeof(*mod_hdr) ||
+	    moduledata_size < mod_hdr->hdr_len) {
+		dev_err(&isp->pdev->dev, "Invalid moduledata size\n");
+		return -EINVAL;
+	}
+
+	dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date);
+	rval = ipu_cpd_validate_cpd(isp, moduledata +
+				    mod_hdr->hdr_len,
+				    moduledata_size -
+				    mod_hdr->hdr_len, moduledata_size);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ipu_cpd_validate_metadata(struct ipu_device *isp,
+				     const void *metadata, u32 meta_size)
+{
+	const struct ipu_cpd_metadata_extn *extn = metadata;
+	unsigned int size;
+
+	/* Sanity check for metadata size */
+	if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) {
+		dev_err(&isp->pdev->dev, "%s: Invalid metadata\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Validate extension and image types */
+	if (extn->extn_type != IPU_CPD_METADATA_EXTN_TYPE_IUNIT ||
+	    extn->img_type != IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) {
+		dev_err(&isp->pdev->dev,
+			"Invalid metadata descriptor img_type (%d)\n",
+			extn->img_type);
+		return -EINVAL;
+	}
+
+	/* Validate metadata size multiple of metadata components */
+	if (ipu_ver == IPU_VER_6)
+		size = sizeof(struct ipu6_cpd_metadata_cmpnt);
+	else
+		size = sizeof(struct ipu_cpd_metadata_cmpnt);
+
+	if ((meta_size - sizeof(*extn)) % size) {
+		dev_err(&isp->pdev->dev, "%s: Invalid metadata size\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int ipu_cpd_validate_cpd_file(struct ipu_device *isp,
+			      const void *cpd_file, unsigned long cpd_file_size)
+{
+	const struct ipu_cpd_hdr *hdr = cpd_file;
+	struct ipu_cpd_ent *ent;
+	int rval;
+
+	rval = ipu_cpd_validate_cpd(isp, cpd_file,
+				    cpd_file_size, cpd_file_size);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Invalid CPD in file\n");
+		return -EINVAL;
+	}
+
+	/* Check for CPD file marker */
+	if (hdr->hdr_mark != CPD_HDR_MARK) {
+		dev_err(&isp->pdev->dev, "Invalid CPD header\n");
+		return -EINVAL;
+	}
+
+	/* Sanity check for manifest size */
+	ent = ipu_cpd_get_manifest(cpd_file);
+	if (ent->len > MAX_MANIFEST_SIZE) {
+		dev_err(&isp->pdev->dev, "Invalid manifest size\n");
+		return -EINVAL;
+	}
+
+	/* Validate metadata */
+	ent = ipu_cpd_get_metadata(cpd_file);
+	rval = ipu_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Invalid metadata\n");
+		return rval;
+	}
+
+	/* Validate moduledata */
+	ent = ipu_cpd_get_moduledata(cpd_file);
+	rval = ipu_cpd_validate_moduledata(isp, cpd_file + ent->offset,
+					   ent->len);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Invalid moduledata\n");
+		return rval;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_validate_cpd_file);
+
+unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx)
+{
+	return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN];
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_address);
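+/*
+ * Indexing note (illustrative, not in the original source): the
+ * pre-increment skips the two-qword pkg_dir header, so e.g.
+ * ipu_cpd_pkg_dir_get_address(pkg_dir, 0) reads pkg_dir[2], the address
+ * qword of the first entry.
+ */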
+
+unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir)
+{
+	return pkg_dir[1];
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_num_entries);
+
+unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx)
+{
+	return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN + 1] & PKG_DIR_SIZE_MASK;
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_size);
+
+unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx)
+{
+	return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN + 1] >>
+	    PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK;
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_type);
diff --git a/drivers/media/pci/intel/ipu-cpd.h b/drivers/media/pci/intel/ipu-cpd.h
new file mode 100644
index 000000000000..6e8fd5a9e51f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-cpd.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2015 - 2020 Intel Corporation */
+
+#ifndef IPU_CPD_H
+#define IPU_CPD_H
+
+#define IPU_CPD_SIZE_OF_FW_ARCH_VERSION		7
+#define IPU_CPD_SIZE_OF_SYSTEM_VERSION		11
+#define IPU_CPD_SIZE_OF_COMPONENT_NAME		12
+
+#define IPU_CPD_METADATA_EXTN_TYPE_IUNIT	0x10
+
+#define IPU_CPD_METADATA_IMAGE_TYPE_RESERVED		0
+#define IPU_CPD_METADATA_IMAGE_TYPE_BOOTLOADER		1
+#define IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE	2
+
+#define IPU_CPD_PKG_DIR_PSYS_SERVER_IDX	0
+#define IPU_CPD_PKG_DIR_ISYS_SERVER_IDX	1
+
+#define IPU_CPD_PKG_DIR_CLIENT_PG_TYPE	3
+
+#define IPU6_CPD_METADATA_HASH_KEY_SIZE          48
+#define IPU_CPD_METADATA_HASH_KEY_SIZE           32
+
+struct __packed ipu_cpd_module_data_hdr {
+	u32 hdr_len;
+	u32 endian;
+	u32 fw_pkg_date;
+	u32 hive_sdk_date;
+	u32 compiler_date;
+	u32 target_platform_type;
+	u8 sys_ver[IPU_CPD_SIZE_OF_SYSTEM_VERSION];
+	u8 fw_arch_ver[IPU_CPD_SIZE_OF_FW_ARCH_VERSION];
+	u8 rsvd[2];
+};
+
+/* The ipu_cpd_hdr structure is trimmed here, as chksum and
+ * sub_partition_name are unused on the host side.
+ * CSE layout version 1.6 for ipu6se (hdr_len = 0x10)
+ * CSE layout version 1.7 for ipu6 (hdr_len = 0x14)
+ */
+struct __packed ipu_cpd_hdr {
+	u32 hdr_mark;
+	u32 ent_cnt;
+	u8 hdr_ver;
+	u8 ent_ver;
+	u8 hdr_len;
+};
+
+struct __packed ipu_cpd_ent {
+	u8 name[IPU_CPD_SIZE_OF_COMPONENT_NAME];
+	u32 offset;
+	u32 len;
+	u8 rsvd[4];
+};
+
+struct __packed ipu_cpd_metadata_cmpnt {
+	u32 id;
+	u32 size;
+	u32 ver;
+	u8 sha2_hash[IPU_CPD_METADATA_HASH_KEY_SIZE];
+	u32 entry_point;
+	u32 icache_base_offs;
+	u8 attrs[16];
+};
+
+struct __packed ipu6_cpd_metadata_cmpnt {
+	u32 id;
+	u32 size;
+	u32 ver;
+	u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE];
+	u32 entry_point;
+	u32 icache_base_offs;
+	u8 attrs[16];
+};
+
+struct __packed ipu_cpd_metadata_extn {
+	u32 extn_type;
+	u32 len;
+	u32 img_type;
+	u8 rsvd[16];
+};
+
+struct __packed ipu_cpd_client_pkg_hdr {
+	u32 prog_list_offs;
+	u32 prog_list_size;
+	u32 prog_desc_offs;
+	u32 prog_desc_size;
+	u32 pg_manifest_offs;
+	u32 pg_manifest_size;
+	u32 prog_bin_offs;
+	u32 prog_bin_size;
+};
+
+void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev,
+			     const void *src,
+			     dma_addr_t dma_addr_src,
+			     dma_addr_t *dma_addr, unsigned int *pkg_dir_size);
+void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev,
+			  u64 *pkg_dir,
+			  dma_addr_t dma_addr, unsigned int pkg_dir_size);
+int ipu_cpd_validate_cpd_file(struct ipu_device *isp,
+			      const void *cpd_file,
+			      unsigned long cpd_file_size);
+unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx);
+unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir);
+unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx);
+unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx);
+
+#endif /* IPU_CPD_H */
diff --git a/drivers/media/pci/intel/ipu-dma.c b/drivers/media/pci/intel/ipu-dma.c
new file mode 100644
index 000000000000..34fa1fb6b060
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-dma.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <asm/cacheflush.h>
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/iova.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+
+#include "ipu-dma.h"
+#include "ipu-bus.h"
+#include "ipu-mmu.h"
+
+struct vm_info {
+	struct list_head list;
+	struct page **pages;
+	void *vaddr;
+	unsigned long size;
+};
+
+static struct vm_info *get_vm_info(struct ipu_mmu *mmu, void *vaddr)
+{
+	struct vm_info *info, *save;
+
+	list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
+		if (info->vaddr == vaddr)
+			return info;
+	}
+
+	return NULL;
+}
+
+/* Begin of things adapted from arch/arm/mm/dma-mapping.c */
+static void __dma_clear_buffer(struct page *page, size_t size,
+			       unsigned long attrs)
+{
+	/*
+	 * Ensure that the allocated pages are zeroed, and that any data
+	 * lurking in the kernel direct-mapped region is invalidated.
+	 */
+	void *ptr = page_address(page);
+
+	memset(ptr, 0, size);
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		clflush_cache_range(ptr, size);
+}
+
+static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
+					gfp_t gfp,
+					unsigned long attrs)
+{
+	struct page **pages;
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i = 0;
+
+	pages = kvzalloc(array_size, GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+	gfp |= __GFP_NOWARN;
+
+	while (count) {
+		int j, order = __fls(count);
+
+		pages[i] = alloc_pages(gfp, order);
+		while (!pages[i] && order)
+			pages[i] = alloc_pages(gfp, --order);
+		if (!pages[i])
+			goto error;
+
+		if (order) {
+			split_page(pages[i], order);
+			j = 1 << order;
+			while (--j)
+				pages[i + j] = pages[i] + j;
+		}
+
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+		i += 1 << order;
+		count -= 1 << order;
+	}
+
+	return pages;
+error:
+	while (i--)
+		if (pages[i])
+			__free_pages(pages[i], 0);
+	kvfree(pages);
+	return NULL;
+}
+
+static int __dma_free_buffer(struct device *dev, struct page **pages,
+			     size_t size,
+			     unsigned long attrs)
+{
+	int count = size >> PAGE_SHIFT;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		if (pages[i]) {
+			__dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
+			__free_pages(pages[i], 0);
+		}
+	}
+
+	kvfree(pages);
+	return 0;
+}
+
+/* End of things adapted from arch/arm/mm/dma-mapping.c */
+
+static void ipu_dma_sync_single_for_cpu(struct device *dev,
+					dma_addr_t dma_handle,
+					size_t size,
+					enum dma_data_direction dir)
+{
+	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+	unsigned long pa = ipu_mmu_iova_to_phys(mmu->dmap->mmu_info,
+						dma_handle);
+
+	clflush_cache_range(phys_to_virt(pa), size);
+}
+
+static void ipu_dma_sync_sg_for_cpu(struct device *dev,
+				    struct scatterlist *sglist,
+				    int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nents, i)
+		clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
+}
+
+static void *ipu_dma_alloc(struct device *dev, size_t size,
+			   dma_addr_t *dma_handle, gfp_t gfp,
+			   unsigned long attrs)
+{
+	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+	struct page **pages;
+	struct iova *iova;
+	struct vm_info *info;
+	int i;
+	int rval;
+	unsigned long count;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return NULL;
+
+	size = PAGE_ALIGN(size);
+	count = size >> PAGE_SHIFT;
+
+	iova = alloc_iova(&mmu->dmap->iovad, count,
+			  dma_get_mask(dev) >> PAGE_SHIFT, 0);
+	if (!iova) {
+		kfree(info);
+		return NULL;
+	}
+
+	pages = __dma_alloc_buffer(dev, size, gfp, attrs);
+	if (!pages)
+		goto out_free_iova;
+
+	for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
+		rval = ipu_mmu_map(mmu->dmap->mmu_info,
+				   (iova->pfn_lo + i) << PAGE_SHIFT,
+				   page_to_phys(pages[i]), PAGE_SIZE);
+		if (rval)
+			goto out_unmap;
+	}
+
+	info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
+	if (!info->vaddr)
+		goto out_unmap;
+
+	*dma_handle = iova->pfn_lo << PAGE_SHIFT;
+
+	mmu->tlb_invalidate(mmu);
+
+	info->pages = pages;
+	info->size = size;
+	list_add(&info->list, &mmu->vma_list);
+
+	return info->vaddr;
+
+out_unmap:
+	for (i--; i >= 0; i--) {
+		ipu_mmu_unmap(mmu->dmap->mmu_info,
+			      (iova->pfn_lo + i) << PAGE_SHIFT, PAGE_SIZE);
+	}
+	__dma_free_buffer(dev, pages, size, attrs);
+
+out_free_iova:
+	__free_iova(&mmu->dmap->iovad, iova);
+	kfree(info);
+
+	return NULL;
+}
+
+static void ipu_dma_free(struct device *dev, size_t size, void *vaddr,
+			 dma_addr_t dma_handle,
+			 unsigned long attrs)
+{
+	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+	struct page **pages;
+	struct vm_info *info;
+	struct iova *iova = find_iova(&mmu->dmap->iovad,
+				      dma_handle >> PAGE_SHIFT);
+
+	if (WARN_ON(!iova))
+		return;
+
+	info = get_vm_info(mmu, vaddr);
+	if (WARN_ON(!info))
+		return;
+
+	if (WARN_ON(!info->vaddr))
+		return;
+
+	if (WARN_ON(!info->pages))
+		return;
+
+	list_del(&info->list);
+
+	size = PAGE_ALIGN(size);
+
+	pages = info->pages;
+
+	vunmap(vaddr);
+
+	ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
+		      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+
+	__dma_free_buffer(dev, pages, size, attrs);
+
+	__free_iova(&mmu->dmap->iovad, iova);
+
+	mmu->tlb_invalidate(mmu);
+
+	kfree(info);
+}
+
+static int ipu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *addr, dma_addr_t iova, size_t size,
+			unsigned long attrs)
+{
+	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+	struct vm_info *info;
+	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t i;
+
+	info = get_vm_info(mmu, addr);
+	if (!info)
+		return -EFAULT;
+
+	if (!info->vaddr)
+		return -EFAULT;
+
+	if (vma->vm_start & ~PAGE_MASK)
+		return -EINVAL;
+
+	if (size > info->size)
+		return -EFAULT;
+
+	for (i = 0; i < count; i++)
+		vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
+			       info->pages[i]);
+
+	return 0;
+}
+
+static void ipu_dma_unmap_sg(struct device *dev,
+			     struct scatterlist *sglist,
+			     int nents, enum dma_data_direction dir,
+			     unsigned long attrs)
+{
+	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+	struct iova *iova = find_iova(&mmu->dmap->iovad,
+				      sg_dma_address(sglist) >> PAGE_SHIFT);
+
+	if (!nents)
+		return;
+
+	if (WARN_ON(!iova))
+		return;
+
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+	ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
+		      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+
+	mmu->tlb_invalidate(mmu);
+
+	__free_iova(&mmu->dmap->iovad, iova);
+}
+
+static int ipu_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+			  int nents, enum dma_data_direction dir,
+			  unsigned long attrs)
+{
+	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+	struct scatterlist *sg;
+	struct iova *iova;
+	size_t size = 0;
+	u32 iova_addr;
+	int i;
+
+	for_each_sg(sglist, sg, nents, i)
+		size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;
+
+	dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size);
+
+	iova = alloc_iova(&mmu->dmap->iovad, size,
+			  dma_get_mask(dev) >> PAGE_SHIFT, 0);
+	if (!iova)
+		return 0;
+
+	dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
+		iova->pfn_hi);
+
+	iova_addr = iova->pfn_lo;
+
+	for_each_sg(sglist, sg, nents, i) {
+		int rval;
+
+		dev_dbg(dev, "mapping entry %d: iova 0x%8.8x,phy 0x%16.16llx\n",
+			i, iova_addr << PAGE_SHIFT,
+			(unsigned long long)page_to_phys(sg_page(sg)));
+		rval = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
+				   page_to_phys(sg_page(sg)),
+				   PAGE_ALIGN(sg->length));
+		if (rval)
+			goto out_fail;
+		sg_dma_address(sg) = iova_addr << PAGE_SHIFT;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		sg_dma_len(sg) = sg->length;
+#endif /* CONFIG_NEED_SG_DMA_LENGTH */
+
+		iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;
+	}
+
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+	mmu->tlb_invalidate(mmu);
+
+	return nents;
+
+out_fail:
+	ipu_dma_unmap_sg(dev, sglist, i, dir, attrs);
+
+	return 0;
+}
+
+/*
+ * Create scatter-list for the already allocated DMA buffer
+ */
+static int ipu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			       void *cpu_addr, dma_addr_t handle, size_t size,
+			       unsigned long attrs)
+{
+	struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+	struct vm_info *info;
+	int n_pages;
+	int ret = 0;
+
+	info = get_vm_info(mmu, cpu_addr);
+	if (!info)
+		return -EFAULT;
+
+	if (!info->vaddr)
+		return -EFAULT;
+
+	if (WARN_ON(!info->pages))
+		return -ENOMEM;
+
+	n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
+					GFP_KERNEL);
+	if (ret)
+		dev_dbg(dev, "IPU failed to get sg table\n");
+
+	return ret;
+}
+
+const struct dma_map_ops ipu_dma_ops = {
+	.alloc = ipu_dma_alloc,
+	.free = ipu_dma_free,
+	.mmap = ipu_dma_mmap,
+	.map_sg = ipu_dma_map_sg,
+	.unmap_sg = ipu_dma_unmap_sg,
+	.sync_single_for_cpu = ipu_dma_sync_single_for_cpu,
+	.sync_single_for_device = ipu_dma_sync_single_for_cpu,
+	.sync_sg_for_cpu = ipu_dma_sync_sg_for_cpu,
+	.sync_sg_for_device = ipu_dma_sync_sg_for_cpu,
+	.get_sgtable = ipu_dma_get_sgtable,
+};
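+
+/*
+ * Note that the *_for_device callbacks above deliberately point at the
+ * *_for_cpu helpers: the same cache maintenance is performed for both
+ * transfer directions.
+ */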
diff --git a/drivers/media/pci/intel/ipu-dma.h b/drivers/media/pci/intel/ipu-dma.h
new file mode 100644
index 000000000000..e3a68aa5adec
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-dma.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_DMA_H
+#define IPU_DMA_H
+
+#include <linux/iova.h>
+
+struct ipu_mmu_info;
+
+struct ipu_dma_mapping {
+	struct ipu_mmu_info *mmu_info;
+	struct iova_domain iovad;
+	struct kref ref;
+};
+
+extern const struct dma_map_ops ipu_dma_ops;
+
+#endif /* IPU_DMA_H */
diff --git a/drivers/media/pci/intel/ipu-fw-com.c b/drivers/media/pci/intel/ipu-fw-com.c
new file mode 100644
index 000000000000..59d69ea6110c
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-fw-com.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <asm/cacheflush.h>
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include "ipu.h"
+#include "ipu-fw-com.h"
+#include "ipu-bus.h"
+
+/*
+ * The FWCOM layer is a shared resource between the FW and the driver. It
+ * consists of token queues in both the send and receive directions. A
+ * queue is simply an array of structures with read and write indexes
+ * into it. There are 1..n queues in each direction. The queues are
+ * located in system RAM and mapped through the ISP MMU so that both the
+ * CPU and the ISP see the same buffer. The indexes are located in ISP
+ * DMEM so that the FW code can poll them with very low latency and cost.
+ * CPU access to the indexes is more costly, but it happens only when
+ * sending a message and in interrupt-triggered message handling; the CPU
+ * never needs to poll the indexes. wr_reg / rd_reg are offsets to those
+ * DMEM locations; they are not the indexes themselves.
+ */
+
+/* Shared structure between driver and FW - do not modify */
+struct ipu_fw_sys_queue {
+	u64 host_address;
+	u32 vied_address;
+	u32 size;
+	u32 token_size;
+	u32 wr_reg;	/* reg no in subsystem's regmem */
+	u32 rd_reg;
+	u32 _align;
+};
+
+struct ipu_fw_sys_queue_res {
+	u64 host_address;
+	u32 vied_address;
+	u32 reg;
+};
+
+enum syscom_state {
+	/* Program load or explicit host setting should init to this */
+	SYSCOM_STATE_UNINIT = 0x57A7E000,
+	/* SP Syscom sets this when it is ready for use */
+	SYSCOM_STATE_READY = 0x57A7E001,
+	/* SP Syscom sets this when no more syscom accesses will happen */
+	SYSCOM_STATE_INACTIVE = 0x57A7E002
+};
+
+enum syscom_cmd {
+	/* Program load or explicit host setting should init to this */
+	SYSCOM_COMMAND_UNINIT = 0x57A7F000,
+	/* Host Syscom requests syscom to become inactive */
+	SYSCOM_COMMAND_INACTIVE = 0x57A7F001
+};
+
+/* firmware config: data sent from the host to the SP via DDR */
+/* The cell copies the data into a context */
+
+struct ipu_fw_syscom_config {
+	u32 firmware_address;
+
+	u32 num_input_queues;
+	u32 num_output_queues;
+
+	/* ISP pointers to an array of ipu_fw_sys_queue structures */
+	u32 input_queue;
+	u32 output_queue;
+
+	/* ISYS / PSYS private data */
+	u32 specific_addr;
+	u32 specific_size;
+};
+
+/* End of shared structures / data */
+
+struct ipu_fw_com_context {
+	struct ipu_bus_device *adev;
+	void __iomem *dmem_addr;
+	int (*cell_ready)(struct ipu_bus_device *adev);
+	void (*cell_start)(struct ipu_bus_device *adev);
+
+	void *dma_buffer;
+	dma_addr_t dma_addr;
+	unsigned int dma_size;
+	unsigned long attrs;
+
+	unsigned int num_input_queues;
+	unsigned int num_output_queues;
+
+	struct ipu_fw_sys_queue *input_queue;	/* array of host to SP queues */
+	struct ipu_fw_sys_queue *output_queue;	/* array of SP to host */
+
+	void *config_host_addr;
+	void *specific_host_addr;
+	u64 ibuf_host_addr;
+	u64 obuf_host_addr;
+
+	u32 config_vied_addr;
+	u32 input_queue_vied_addr;
+	u32 output_queue_vied_addr;
+	u32 specific_vied_addr;
+	u32 ibuf_vied_addr;
+	u32 obuf_vied_addr;
+
+	unsigned int buttress_boot_offset;
+	void __iomem *base_addr;
+};
+
+#define FW_COM_WR_REG 0
+#define FW_COM_RD_REG 4
+
+#define REGMEM_OFFSET 0
+#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a
+
+enum regmem_id {
+	/* pass pkg_dir address to SPC in non-secure mode */
+	PKG_DIR_ADDR_REG = 0,
+	/* Tunit CFG blob for secure mode - provided by the host. */
+	TUNIT_CFG_DWR_REG = 1,
+	/* syscom commands - modified by the host */
+	SYSCOM_COMMAND_REG = 2,
+	/* Store interrupt status - updated by SP */
+	SYSCOM_IRQ_REG = 3,
+	/* first syscom queue pointer register */
+	SYSCOM_QPR_BASE_REG = 4
+};
+
+enum message_direction {
+	DIR_RECV = 0,
+	DIR_SEND
+};
+
+#define BUTRESS_FW_BOOT_PARAMS_0 0x4000
+#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id) ((base) \
+	+ BUTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4)
+
+enum buttress_syscom_id {
+	/* pass syscom configuration to SPC */
+	SYSCOM_CONFIG_ID		= 0,
+	/* syscom state - modified by SP */
+	SYSCOM_STATE_ID			= 1,
+	/* syscom vtl0 addr mask */
+	SYSCOM_VTL0_ADDR_MASK_ID	= 2,
+	SYSCOM_ID_MAX
+};
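+
+/*
+ * For example, with the ISYS boot parameter offset of 0, the syscom
+ * state register resolves to base + 0x4000 + (0 + SYSCOM_STATE_ID) * 4,
+ * i.e. base + 0x4004.
+ */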
+
+static unsigned int num_messages(unsigned int wr, unsigned int rd,
+				 unsigned int size)
+{
+	if (wr < rd)
+		wr += size;
+	return wr - rd;
+}
+
+static unsigned int num_free(unsigned int wr, unsigned int rd,
+			     unsigned int size)
+{
+	return size - num_messages(wr, rd, size);
+}
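+
+/*
+ * Worked example of the wraparound arithmetic above: with a queue of
+ * size 8, wr == 2 and rd == 6 means the write index has wrapped, so
+ * num_messages() == (2 + 8) - 6 == 4 tokens are pending and
+ * num_free() == 8 - 4 == 4 slots remain.
+ */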
+
+static unsigned int curr_index(void __iomem *q_dmem,
+			       enum message_direction dir)
+{
+	return readl(q_dmem +
+			 (dir == DIR_RECV ? FW_COM_RD_REG : FW_COM_WR_REG));
+}
+
+static unsigned int inc_index(void __iomem *q_dmem, struct ipu_fw_sys_queue *q,
+			      enum message_direction dir)
+{
+	unsigned int index;
+
+	index = curr_index(q_dmem, dir) + 1;
+	return index >= q->size ? 0 : index;
+}
+
+static unsigned int ipu_sys_queue_buf_size(unsigned int size,
+					   unsigned int token_size)
+{
+	return (size + 1) * token_size;
+}
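+
+/*
+ * One extra token slot is reserved here (and in q->size = size + 1
+ * below) so that the read and write indexes can distinguish a full
+ * queue from an empty one: wr == rd is treated as empty.
+ */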
+
+static void ipu_sys_queue_init(struct ipu_fw_sys_queue *q, unsigned int size,
+			       unsigned int token_size,
+			       struct ipu_fw_sys_queue_res *res)
+{
+	unsigned int buf_size;
+
+	q->size = size + 1;
+	q->token_size = token_size;
+	buf_size = ipu_sys_queue_buf_size(size, token_size);
+
+	/* acquire the shared buffer space */
+	q->host_address = res->host_address;
+	res->host_address += buf_size;
+	q->vied_address = res->vied_address;
+	res->vied_address += buf_size;
+
+	/* acquire the shared read and write pointers */
+	q->wr_reg = res->reg;
+	res->reg++;
+	q->rd_reg = res->reg;
+	res->reg++;
+}
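+
+/*
+ * Each queue thus consumes two consecutive regmem registers (wr, then
+ * rd). ipu_fw_com_prepare() starts the counter at SYSCOM_QPR_BASE_REG
+ * and runs it across the input queues first, with the output queues
+ * continuing from where the input queues left off.
+ */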
+
+void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg,
+			 struct ipu_bus_device *adev, void __iomem *base)
+{
+	struct ipu_fw_com_context *ctx;
+	struct ipu_fw_syscom_config *fw_cfg;
+	unsigned int i;
+	unsigned int sizeall, offset;
+	unsigned int sizeinput = 0, sizeoutput = 0;
+	unsigned long attrs = 0;
+	struct ipu_fw_sys_queue_res res;
+
+	/* error handling */
+	if (!cfg || !cfg->cell_start || !cfg->cell_ready)
+		return NULL;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+	ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET;
+	ctx->adev = adev;
+	ctx->cell_start = cfg->cell_start;
+	ctx->cell_ready = cfg->cell_ready;
+	ctx->buttress_boot_offset = cfg->buttress_boot_offset;
+	ctx->base_addr = base;
+
+	ctx->num_input_queues = cfg->num_input_queues;
+	ctx->num_output_queues = cfg->num_output_queues;
+
+	/* Allocate all DMA-mapped memory as one big chunk. */
+	sizeall =
+	    /* Base cfg for FW */
+	    roundup(sizeof(struct ipu_fw_syscom_config), 8) +
+	    /* Descriptions of the queues */
+	    cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue) +
+	    cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue) +
+	    /* FW specific information structure */
+	    roundup(cfg->specific_size, 8);
+
+	for (i = 0; i < cfg->num_input_queues; i++)
+		sizeinput += ipu_sys_queue_buf_size(cfg->input[i].queue_size,
+						cfg->input[i].token_size);
+
+	for (i = 0; i < cfg->num_output_queues; i++)
+		sizeoutput += ipu_sys_queue_buf_size(cfg->output[i].queue_size,
+						 cfg->output[i].token_size);
+
+	sizeall += sizeinput + sizeoutput;
+
+	ctx->dma_buffer = dma_alloc_attrs(&ctx->adev->dev, sizeall,
+					  &ctx->dma_addr, GFP_KERNEL,
+					  attrs);
+	ctx->attrs = attrs;
+	if (!ctx->dma_buffer) {
+		dev_err(&ctx->adev->dev, "failed to allocate dma memory\n");
+		kfree(ctx);
+		return NULL;
+	}
+
+	ctx->dma_size = sizeall;
+
+	/* This is the address where FW starts to parse allocations */
+	ctx->config_host_addr = ctx->dma_buffer;
+	ctx->config_vied_addr = ctx->dma_addr;
+	fw_cfg = (struct ipu_fw_syscom_config *)ctx->config_host_addr;
+	offset = roundup(sizeof(struct ipu_fw_syscom_config), 8);
+
+	ctx->input_queue = ctx->dma_buffer + offset;
+	ctx->input_queue_vied_addr = ctx->dma_addr + offset;
+	offset += cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue);
+
+	ctx->output_queue = ctx->dma_buffer + offset;
+	ctx->output_queue_vied_addr = ctx->dma_addr + offset;
+	offset += cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue);
+
+	ctx->specific_host_addr = ctx->dma_buffer + offset;
+	ctx->specific_vied_addr = ctx->dma_addr + offset;
+	offset += roundup(cfg->specific_size, 8);
+
+	ctx->ibuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset);
+	ctx->ibuf_vied_addr = ctx->dma_addr + offset;
+	offset += sizeinput;
+
+	ctx->obuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset);
+	ctx->obuf_vied_addr = ctx->dma_addr + offset;
+	offset += sizeoutput;
+
+	/* initialize input queues */
+	res.reg = SYSCOM_QPR_BASE_REG;
+	res.host_address = ctx->ibuf_host_addr;
+	res.vied_address = ctx->ibuf_vied_addr;
+	for (i = 0; i < cfg->num_input_queues; i++) {
+		ipu_sys_queue_init(ctx->input_queue + i,
+				   cfg->input[i].queue_size,
+				   cfg->input[i].token_size, &res);
+	}
+
+	/* initialize output queues */
+	res.host_address = ctx->obuf_host_addr;
+	res.vied_address = ctx->obuf_vied_addr;
+	for (i = 0; i < cfg->num_output_queues; i++) {
+		ipu_sys_queue_init(ctx->output_queue + i,
+				   cfg->output[i].queue_size,
+				   cfg->output[i].token_size, &res);
+	}
+
+	/* copy firmware specific data */
+	if (cfg->specific_addr && cfg->specific_size) {
+		memcpy((void *)ctx->specific_host_addr,
+		       cfg->specific_addr, cfg->specific_size);
+	}
+
+	fw_cfg->num_input_queues = cfg->num_input_queues;
+	fw_cfg->num_output_queues = cfg->num_output_queues;
+	fw_cfg->input_queue = ctx->input_queue_vied_addr;
+	fw_cfg->output_queue = ctx->output_queue_vied_addr;
+	fw_cfg->specific_addr = ctx->specific_vied_addr;
+	fw_cfg->specific_size = cfg->specific_size;
+	return ctx;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_prepare);
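+
+/*
+ * Layout of the single DMA chunk carved up by ipu_fw_com_prepare(), as
+ * built by the offset arithmetic above (boundaries rounded up to 8
+ * bytes where noted):
+ *
+ *	+-----------------------------+ <- config_vied_addr
+ *	| struct ipu_fw_syscom_config |
+ *	+-----------------------------+ <- input_queue_vied_addr
+ *	| ipu_fw_sys_queue[n_input]   |
+ *	+-----------------------------+ <- output_queue_vied_addr
+ *	| ipu_fw_sys_queue[n_output]  |
+ *	+-----------------------------+ <- specific_vied_addr
+ *	| FW specific config blob     |
+ *	+-----------------------------+ <- ibuf_vied_addr
+ *	| input queue token buffers   |
+ *	+-----------------------------+ <- obuf_vied_addr
+ *	| output queue token buffers  |
+ *	+-----------------------------+
+ */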
+
+int ipu_fw_com_open(struct ipu_fw_com_context *ctx)
+{
+	/*
+	 * Disable tunit configuration by FW.
+	 * This feature is used to configure tunit in secure mode.
+	 */
+	writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4);
+	/* Check if SP is in valid state */
+	if (!ctx->cell_ready(ctx->adev))
+		return -EIO;
+
+	/* store syscom uninitialized command */
+	writel(SYSCOM_COMMAND_UNINIT,
+	       ctx->dmem_addr + SYSCOM_COMMAND_REG * 4);
+
+	/* store syscom uninitialized state */
+	writel(SYSCOM_STATE_UNINIT,
+	       BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+					  ctx->buttress_boot_offset,
+					  SYSCOM_STATE_ID));
+
+	/* store firmware configuration address */
+	writel(ctx->config_vied_addr,
+	       BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+					  ctx->buttress_boot_offset,
+					  SYSCOM_CONFIG_ID));
+	ctx->cell_start(ctx->adev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_open);
+
+int ipu_fw_com_close(struct ipu_fw_com_context *ctx)
+{
+	int state;
+
+	state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+						 ctx->buttress_boot_offset,
+						 SYSCOM_STATE_ID));
+	if (state != SYSCOM_STATE_READY)
+		return -EBUSY;
+
+	/* set close request flag */
+	writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr +
+		   SYSCOM_COMMAND_REG * 4);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_close);
+
+int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force)
+{
+	/* check if the release is forced, and verify the cell state if not */
+	if (!force && !ctx->cell_ready(ctx->adev))
+		return -EBUSY;
+
+	dma_free_attrs(&ctx->adev->dev, ctx->dma_size,
+		       ctx->dma_buffer, ctx->dma_addr,
+		       ctx->attrs);
+	kfree(ctx);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_release);
+
+int ipu_fw_com_ready(struct ipu_fw_com_context *ctx)
+{
+	int state;
+
+	state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+						 ctx->buttress_boot_offset,
+						 SYSCOM_STATE_ID));
+	if (state != SYSCOM_STATE_READY)
+		return -EBUSY;	/* SPC is not ready to handle messages yet */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_ready);
+
+static bool is_index_valid(struct ipu_fw_sys_queue *q, unsigned int index)
+{
+	return index < q->size;
+}
+
+void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr)
+{
+	struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr];
+	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+	unsigned int wr, rd;
+	unsigned int packets;
+	unsigned int index;
+
+	wr = readl(q_dmem + FW_COM_WR_REG);
+	rd = readl(q_dmem + FW_COM_RD_REG);
+
+	/* Sanity check the indexes read from DMEM */
+	if (!is_index_valid(q, wr) || !is_index_valid(q, rd))
+		return NULL;
+
+	packets = num_free(wr + 1, rd, q->size);
+	if (!packets)
+		return NULL;
+
+	index = curr_index(q_dmem, DIR_SEND);
+
+	return (void *)(unsigned long)q->host_address + (index * q->token_size);
+}
+EXPORT_SYMBOL_GPL(ipu_send_get_token);
+
+void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr)
+{
+	struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr];
+	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+	unsigned int index;
+
+	/* Advance the write index to publish the token */
+	index = inc_index(q_dmem, q, DIR_SEND);
+
+	writel(index, q_dmem + FW_COM_WR_REG);
+}
+EXPORT_SYMBOL_GPL(ipu_send_put_token);
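+
+/*
+ * A minimal send sequence (illustrative only; the token layout is
+ * whatever the FW ABI defines for the queue in question):
+ *
+ *	struct ipu_fw_send_queue_token *token;
+ *
+ *	token = ipu_send_get_token(ctx, q_nbr);
+ *	if (!token)
+ *		return -EBUSY;		(queue full, retry later)
+ *	token->send_type = ...;		(fill the token in place)
+ *	ipu_send_put_token(ctx, q_nbr);	(publish it to the FW)
+ *
+ * See ipu_fw_isys_send_proxy_token() in ipu-fw-isys.c for a real user.
+ */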
+
+void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr)
+{
+	struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr];
+	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+	unsigned int wr, rd;
+	unsigned int packets;
+	void *addr;
+
+	wr = readl(q_dmem + FW_COM_WR_REG);
+	rd = readl(q_dmem + FW_COM_RD_REG);
+
+	/* Sanity check the indexes read from DMEM */
+	if (!is_index_valid(q, wr) || !is_index_valid(q, rd))
+		return NULL;
+
+	packets = num_messages(wr, rd, q->size);
+	if (!packets)
+		return NULL;
+
+	addr = (void *)(unsigned long)q->host_address + (rd * q->token_size);
+
+	return addr;
+}
+EXPORT_SYMBOL_GPL(ipu_recv_get_token);
+
+void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr)
+{
+	struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr];
+	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+	unsigned int rd = inc_index(q_dmem, q, DIR_RECV);
+
+	/* Release index */
+	writel(rd, q_dmem + FW_COM_RD_REG);
+}
+EXPORT_SYMBOL_GPL(ipu_recv_put_token);
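+
+/*
+ * The receive side mirrors this: ipu_recv_get_token() peeks at the
+ * token at the current read index without consuming it, and
+ * ipu_recv_put_token() releases the slot back to the FW by advancing
+ * the read index. A token must therefore be fully processed (or
+ * copied) before the put call.
+ */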
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu fw comm library");
diff --git a/drivers/media/pci/intel/ipu-fw-com.h b/drivers/media/pci/intel/ipu-fw-com.h
new file mode 100644
index 000000000000..855dba667372
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-fw-com.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_FW_COM_H
+#define IPU_FW_COM_H
+
+struct ipu_fw_com_context;
+struct ipu_bus_device;
+
+struct ipu_fw_syscom_queue_config {
+	unsigned int queue_size;	/* tokens per queue */
+	unsigned int token_size;	/* bytes per token */
+};
+
+#define SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET	0
+#define SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET	7
+
+struct ipu_fw_com_cfg {
+	unsigned int num_input_queues;
+	unsigned int num_output_queues;
+	struct ipu_fw_syscom_queue_config *input;
+	struct ipu_fw_syscom_queue_config *output;
+
+	unsigned int dmem_addr;
+
+	/* firmware-specific configuration data */
+	void *specific_addr;
+	unsigned int specific_size;
+	int (*cell_ready)(struct ipu_bus_device *adev);
+	void (*cell_start)(struct ipu_bus_device *adev);
+
+	unsigned int buttress_boot_offset;
+};
+
+void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg,
+			 struct ipu_bus_device *adev, void __iomem *base);
+
+int ipu_fw_com_open(struct ipu_fw_com_context *ctx);
+int ipu_fw_com_ready(struct ipu_fw_com_context *ctx);
+int ipu_fw_com_close(struct ipu_fw_com_context *ctx);
+int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force);
+
+void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr);
+void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr);
+void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr);
+void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr);
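+
+/*
+ * Typical lifecycle, as used by the ISYS driver (a sketch; error
+ * handling omitted):
+ *
+ *	ctx = ipu_fw_com_prepare(&cfg, adev, base);
+ *	ipu_fw_com_open(ctx);
+ *	while (ipu_fw_com_ready(ctx))
+ *		... wait ...
+ *	... exchange tokens with ipu_send_*() / ipu_recv_*() ...
+ *	ipu_fw_com_close(ctx);
+ *	ipu_fw_com_release(ctx, 0);
+ */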
+
+#endif
diff --git a/drivers/media/pci/intel/ipu-fw-isys.c b/drivers/media/pci/intel/ipu-fw-isys.c
new file mode 100644
index 000000000000..974c6f59d34d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-fw-isys.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <asm/cacheflush.h>
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include "ipu.h"
+#include "ipu-trace.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform.h"
+#include "ipu-fw-isys.h"
+#include "ipu-fw-com.h"
+#include "ipu-isys.h"
+
+#define IPU_FW_UNSUPPORTED_DATA_TYPE	0
+static const uint32_t
+extracted_bits_per_pixel_per_mipi_data_type[N_IPU_FW_ISYS_MIPI_DATA_TYPE] = {
+	64,	/* [0x00]   IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE */
+	64,	/* [0x01]   IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE */
+	64,	/* [0x02]   IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE */
+	64,	/* [0x03]   IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x04] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x05] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x06] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x07] */
+	64,	/* [0x08]   IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 */
+	64,	/* [0x09]   IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 */
+	64,	/* [0x0A]   IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 */
+	64,	/* [0x0B]   IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 */
+	64,	/* [0x0C]   IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 */
+	64,	/* [0x0D]   IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 */
+	64,	/* [0x0E]   IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 */
+	64,	/* [0x0F]   IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x10] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x11] */
+	8,	/* [0x12]    IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x13] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x14] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x15] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x16] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x17] */
+	12,	/* [0x18]   IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 */
+	15,	/* [0x19]   IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 */
+	12,	/* [0x1A]   IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x1B] */
+	12,	/* [0x1C]   IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT */
+	15,	/* [0x1D]   IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT */
+	16,	/* [0x1E]   IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 */
+	20,	/* [0x1F]   IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 */
+	16,	/* [0x20]   IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 */
+	16,	/* [0x21]   IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 */
+	16,	/* [0x22]   IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 */
+	18,	/* [0x23]   IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 */
+	24,	/* [0x24]   IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x25] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x26] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x27] */
+	6,	/* [0x28]    IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 */
+	7,	/* [0x29]    IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 */
+	8,	/* [0x2A]    IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 */
+	10,	/* [0x2B]   IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 */
+	12,	/* [0x2C]   IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 */
+	14,	/* [0x2D]   IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 */
+	16,	/* [0x2E]   IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 */
+	8,	/* [0x2F]    IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 */
+	8,	/* [0x30]    IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 */
+	8,	/* [0x31]    IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 */
+	8,	/* [0x32]    IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 */
+	8,	/* [0x33]    IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 */
+	8,	/* [0x34]    IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 */
+	8,	/* [0x35]    IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 */
+	8,	/* [0x36]    IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 */
+	8,	/* [0x37]    IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x38] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x39] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x3A] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x3B] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x3C] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x3D] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE,	/* [0x3E] */
+	IPU_FW_UNSUPPORTED_DATA_TYPE	/* [0x3F] */
+};
+
+static const char send_msg_types[N_IPU_FW_ISYS_SEND_TYPE][32] = {
+	"STREAM_OPEN",
+	"STREAM_START",
+	"STREAM_START_AND_CAPTURE",
+	"STREAM_CAPTURE",
+	"STREAM_STOP",
+	"STREAM_FLUSH",
+	"STREAM_CLOSE"
+};
+
+static int handle_proxy_response(struct ipu_isys *isys, unsigned int req_id)
+{
+	struct ipu_fw_isys_proxy_resp_info_abi *resp;
+	int rval = -EIO;
+
+	resp = (struct ipu_fw_isys_proxy_resp_info_abi *)
+	    ipu_recv_get_token(isys->fwcom, IPU_BASE_PROXY_RECV_QUEUES);
+	if (!resp)
+		return 1;
+
+	dev_dbg(&isys->adev->dev,
+		"Proxy response: id 0x%x, error %d, details %d\n",
+		resp->request_id, resp->error_info.error,
+		resp->error_info.error_details);
+
+	if (req_id == resp->request_id)
+		rval = 0;
+
+	ipu_recv_put_token(isys->fwcom, IPU_BASE_PROXY_RECV_QUEUES);
+	return rval;
+}
+
+/* Simple blocking proxy send function */
+int ipu_fw_isys_send_proxy_token(struct ipu_isys *isys,
+				 unsigned int req_id,
+				 unsigned int index,
+				 unsigned int offset, u32 value)
+{
+	struct ipu_fw_com_context *ctx = isys->fwcom;
+	struct ipu_fw_proxy_send_queue_token *token;
+	unsigned int timeout = 1000;
+	int rval = -EBUSY;
+
+	dev_dbg(&isys->adev->dev,
+		"proxy send token: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n",
+		req_id, index, offset, value);
+
+	token = ipu_send_get_token(ctx, IPU_BASE_PROXY_SEND_QUEUES);
+	if (!token)
+		goto leave;
+
+	token->request_id = req_id;
+	token->region_index = index;
+	token->offset = offset;
+	token->value = value;
+	ipu_send_put_token(ctx, IPU_BASE_PROXY_SEND_QUEUES);
+
+	/* The proxy doesn't support IRQ-based service yet, so poll */
+	do {
+		usleep_range(100, 110);
+		rval = handle_proxy_response(isys, req_id);
+		if (!rval)
+			break;
+		if (rval == -EIO) {
+			dev_err(&isys->adev->dev,
+				"Proxy response received with unexpected id\n");
+			break;
+		}
+		timeout--;
+	} while (rval && timeout);
+
+	if (!timeout)
+		dev_err(&isys->adev->dev, "Proxy response timed out\n");
+leave:
+	return rval;
+}
+
+int
+ipu_fw_isys_complex_cmd(struct ipu_isys *isys,
+			const unsigned int stream_handle,
+			void *cpu_mapped_buf,
+			dma_addr_t dma_mapped_buf,
+			size_t size, enum ipu_fw_isys_send_type send_type)
+{
+	struct ipu_fw_com_context *ctx = isys->fwcom;
+	struct ipu_fw_send_queue_token *token;
+
+	if (send_type >= N_IPU_FW_ISYS_SEND_TYPE)
+		return -EINVAL;
+
+	dev_dbg(&isys->adev->dev, "send_token: %s\n",
+		send_msg_types[send_type]);
+
+	/*
+	 * Flush the cache in case the message carries a payload; not all
+	 * messages do.
+	 */
+	if (cpu_mapped_buf)
+		clflush_cache_range(cpu_mapped_buf, size);
+
+	token = ipu_send_get_token(ctx,
+				   stream_handle + IPU_BASE_MSG_SEND_QUEUES);
+	if (!token)
+		return -EBUSY;
+
+	token->payload = dma_mapped_buf;
+	token->buf_handle = (unsigned long)cpu_mapped_buf;
+	token->send_type = send_type;
+
+	ipu_send_put_token(ctx, stream_handle + IPU_BASE_MSG_SEND_QUEUES);
+
+	return 0;
+}
+
+int ipu_fw_isys_simple_cmd(struct ipu_isys *isys,
+			   const unsigned int stream_handle,
+			   enum ipu_fw_isys_send_type send_type)
+{
+	return ipu_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0,
+				       send_type);
+}
+
+int ipu_fw_isys_close(struct ipu_isys *isys)
+{
+	struct device *dev = &isys->adev->dev;
+	int timeout = IPU_ISYS_TURNOFF_TIMEOUT;
+	int rval;
+	unsigned long flags;
+	void *fwcom;
+
+	/*
+	 * Stop the isys FW. The actual close takes some time as the FW
+	 * must stop its actions, including code fetch to the SP icache.
+	 * Hold the spinlock to wait for the interrupt handler to finish.
+	 */
+	spin_lock_irqsave(&isys->power_lock, flags);
+	rval = ipu_fw_com_close(isys->fwcom);
+	fwcom = isys->fwcom;
+	isys->fwcom = NULL;
+	spin_unlock_irqrestore(&isys->power_lock, flags);
+	if (rval)
+		dev_err(dev, "Device close failure: %d\n", rval);
+
+	/* The release will probably fail if the close failed, but try anyway */
+	do {
+		usleep_range(IPU_ISYS_TURNOFF_DELAY_US,
+			     2 * IPU_ISYS_TURNOFF_DELAY_US);
+		rval = ipu_fw_com_release(fwcom, 0);
+		timeout--;
+	} while (rval != 0 && timeout);
+
+	if (rval) {
+		dev_err(dev, "Device release timed out: %d\n", rval);
+		spin_lock_irqsave(&isys->power_lock, flags);
+		isys->fwcom = fwcom;
+		spin_unlock_irqrestore(&isys->power_lock, flags);
+	}
+
+	return rval;
+}
+
+void ipu_fw_isys_cleanup(struct ipu_isys *isys)
+{
+	int ret;
+
+	ret = ipu_fw_com_release(isys->fwcom, 1);
+	if (ret < 0)
+		dev_err(&isys->adev->dev,
+			"Device busy, fw_com release failed\n");
+	isys->fwcom = NULL;
+}
+
+static void start_sp(struct ipu_bus_device *adev)
+{
+	struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+	void __iomem *spc_regs_base = isys->pdata->base +
+	    isys->pdata->ipdata->hw_variant.spc_offset;
+	u32 val = 0;
+
+	val |= IPU_ISYS_SPC_STATUS_START |
+	    IPU_ISYS_SPC_STATUS_RUN |
+	    IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+	val |= isys->icache_prefetch ? IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0;
+
+	writel(val, spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL);
+}
+
+static int query_sp(struct ipu_bus_device *adev)
+{
+	struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+	void __iomem *spc_regs_base = isys->pdata->base +
+	    isys->pdata->ipdata->hw_variant.spc_offset;
+	u32 val = readl(spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL);
+
+	/* return true when READY == 1, START == 0 */
+	val &= IPU_ISYS_SPC_STATUS_READY | IPU_ISYS_SPC_STATUS_START;
+
+	return val == IPU_ISYS_SPC_STATUS_READY;
+}
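+
+/*
+ * start_sp() and query_sp() are handed to the fw-com layer as the
+ * cell_start/cell_ready callbacks: ipu_fw_com_open() checks
+ * cell_ready() before writing the boot parameters and then calls
+ * cell_start() to kick off the SPC.
+ */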
+
+static int ipu6_isys_fwcom_cfg_init(struct ipu_isys *isys,
+				    struct ipu_fw_com_cfg *fwcom,
+				    unsigned int num_streams)
+{
+	int i;
+	unsigned int size;
+	struct ipu_fw_syscom_queue_config *input_queue_cfg;
+	struct ipu_fw_syscom_queue_config *output_queue_cfg;
+	struct ipu6_fw_isys_fw_config *isys_fw_cfg;
+	int num_in_message_queues = clamp_t(unsigned int, num_streams, 1,
+					    IPU6_ISYS_NUM_STREAMS);
+	int num_out_message_queues = 1;
+	int type_proxy = IPU_FW_ISYS_QUEUE_TYPE_PROXY;
+	int type_dev = IPU_FW_ISYS_QUEUE_TYPE_DEV;
+	int type_msg = IPU_FW_ISYS_QUEUE_TYPE_MSG;
+	int base_dev_send = IPU_BASE_DEV_SEND_QUEUES;
+	int base_msg_send = IPU_BASE_MSG_SEND_QUEUES;
+	int base_msg_recv = IPU_BASE_MSG_RECV_QUEUES;
+
+	isys_fw_cfg = devm_kzalloc(&isys->adev->dev, sizeof(*isys_fw_cfg),
+				   GFP_KERNEL);
+	if (!isys_fw_cfg)
+		return -ENOMEM;
+
+	isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] =
+		IPU_N_MAX_PROXY_SEND_QUEUES;
+	isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] =
+		IPU_N_MAX_DEV_SEND_QUEUES;
+	isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] =
+		num_in_message_queues;
+	isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] =
+		IPU_N_MAX_PROXY_RECV_QUEUES;
+	/* Common msg/dev return queue */
+	isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] = 0;
+	isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] =
+		num_out_message_queues;
+
+	size = sizeof(*input_queue_cfg) * IPU6_N_MAX_SEND_QUEUES;
+	input_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL);
+	if (!input_queue_cfg)
+		return -ENOMEM;
+
+	size = sizeof(*output_queue_cfg) * IPU_N_MAX_RECV_QUEUES;
+	output_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL);
+	if (!output_queue_cfg)
+		return -ENOMEM;
+
+	fwcom->input = input_queue_cfg;
+	fwcom->output = output_queue_cfg;
+
+	fwcom->num_input_queues =
+		isys_fw_cfg->num_send_queues[type_proxy] +
+		isys_fw_cfg->num_send_queues[type_dev] +
+		isys_fw_cfg->num_send_queues[type_msg];
+
+	fwcom->num_output_queues =
+		isys_fw_cfg->num_recv_queues[type_proxy] +
+		isys_fw_cfg->num_recv_queues[type_dev] +
+		isys_fw_cfg->num_recv_queues[type_msg];
+
+	/* SRAM partitioning: equal partitioning across the active queues */
+	for (i = 0; i < IPU6_NOF_SRAM_BLOCKS_MAX; i++) {
+		if (i < num_in_message_queues)
+			isys_fw_cfg->buffer_partition.num_gda_pages[i] =
+				(IPU_DEVICE_GDA_NR_PAGES *
+				 IPU_DEVICE_GDA_VIRT_FACTOR) /
+				num_in_message_queues;
+		else
+			isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0;
+	}
+
+	/* FW assumes proxy interface at fwcom queue 0 */
+	for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) {
+		input_queue_cfg[i].token_size =
+			sizeof(struct ipu_fw_proxy_send_queue_token);
+		input_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_SEND_QUEUE;
+	}
+
+	for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) {
+		input_queue_cfg[base_dev_send + i].token_size =
+			sizeof(struct ipu_fw_send_queue_token);
+		input_queue_cfg[base_dev_send + i].queue_size =
+			IPU6_DEV_SEND_QUEUE_SIZE;
+	}
+
+	for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) {
+		input_queue_cfg[base_msg_send + i].token_size =
+			sizeof(struct ipu_fw_send_queue_token);
+		input_queue_cfg[base_msg_send + i].queue_size =
+			IPU_ISYS_SIZE_SEND_QUEUE;
+	}
+
+	for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) {
+		output_queue_cfg[i].token_size =
+			sizeof(struct ipu_fw_proxy_resp_queue_token);
+		output_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_RECV_QUEUE;
+	}
+	/* There is no recv DEV queue */
+	for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) {
+		output_queue_cfg[base_msg_recv + i].token_size =
+			sizeof(struct ipu_fw_resp_queue_token);
+		output_queue_cfg[base_msg_recv + i].queue_size =
+			IPU_ISYS_SIZE_RECV_QUEUE;
+	}
+
+	fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset;
+	fwcom->specific_addr = isys_fw_cfg;
+	fwcom->specific_size = sizeof(*isys_fw_cfg);
+
+	return 0;
+}
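+
+/*
+ * The resulting input queue layout for, e.g., num_streams == 4 is:
+ * queue 0 is the proxy send queue, queue 1 the device send queue and
+ * queues 2..5 the per-stream message send queues (see the
+ * IPU_BASE_*_SEND_QUEUES layout in ipu-fw-isys.h).
+ */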
+
+static int ipu6se_isys_fwcom_cfg_init(struct ipu_isys *isys,
+				      struct ipu_fw_com_cfg *fwcom,
+				      unsigned int num_streams)
+{
+	int i;
+	unsigned int size;
+	struct ipu_fw_syscom_queue_config *input_queue_cfg;
+	struct ipu_fw_syscom_queue_config *output_queue_cfg;
+	struct ipu6se_fw_isys_fw_config *isys_fw_cfg;
+	int num_in_message_queues = clamp_t(unsigned int, num_streams, 1,
+					    IPU6SE_ISYS_NUM_STREAMS);
+	int num_out_message_queues = 1;
+	int type_proxy = IPU_FW_ISYS_QUEUE_TYPE_PROXY;
+	int type_dev = IPU_FW_ISYS_QUEUE_TYPE_DEV;
+	int type_msg = IPU_FW_ISYS_QUEUE_TYPE_MSG;
+	int base_dev_send = IPU_BASE_DEV_SEND_QUEUES;
+	int base_msg_send = IPU_BASE_MSG_SEND_QUEUES;
+	int base_msg_recv = IPU_BASE_MSG_RECV_QUEUES;
+
+	isys_fw_cfg = devm_kzalloc(&isys->adev->dev, sizeof(*isys_fw_cfg),
+				   GFP_KERNEL);
+	if (!isys_fw_cfg)
+		return -ENOMEM;
+
+	isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] =
+		IPU_N_MAX_PROXY_SEND_QUEUES;
+	isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] =
+		IPU_N_MAX_DEV_SEND_QUEUES;
+	isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] =
+		num_in_message_queues;
+	isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] =
+		IPU_N_MAX_PROXY_RECV_QUEUES;
+	/* Common msg/dev return queue */
+	isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] = 0;
+	isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] =
+		num_out_message_queues;
+
+	size = sizeof(*input_queue_cfg) * IPU6SE_N_MAX_SEND_QUEUES;
+	input_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL);
+	if (!input_queue_cfg)
+		return -ENOMEM;
+
+	size = sizeof(*output_queue_cfg) * IPU_N_MAX_RECV_QUEUES;
+	output_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL);
+	if (!output_queue_cfg)
+		return -ENOMEM;
+
+	fwcom->input = input_queue_cfg;
+	fwcom->output = output_queue_cfg;
+
+	fwcom->num_input_queues =
+		isys_fw_cfg->num_send_queues[type_proxy] +
+		isys_fw_cfg->num_send_queues[type_dev] +
+		isys_fw_cfg->num_send_queues[type_msg];
+
+	fwcom->num_output_queues =
+		isys_fw_cfg->num_recv_queues[type_proxy] +
+		isys_fw_cfg->num_recv_queues[type_dev] +
+		isys_fw_cfg->num_recv_queues[type_msg];
+
+	/* SRAM partitioning: equal partitioning across the active queues */
+	for (i = 0; i < IPU6SE_NOF_SRAM_BLOCKS_MAX; i++) {
+		if (i < num_in_message_queues)
+			isys_fw_cfg->buffer_partition.num_gda_pages[i] =
+				(IPU_DEVICE_GDA_NR_PAGES *
+				 IPU_DEVICE_GDA_VIRT_FACTOR) /
+				num_in_message_queues;
+		else
+			isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0;
+	}
+
+	/* FW assumes proxy interface at fwcom queue 0 */
+	for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) {
+		input_queue_cfg[i].token_size =
+			sizeof(struct ipu_fw_proxy_send_queue_token);
+		input_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_SEND_QUEUE;
+	}
+
+	for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) {
+		input_queue_cfg[base_dev_send + i].token_size =
+			sizeof(struct ipu_fw_send_queue_token);
+		input_queue_cfg[base_dev_send + i].queue_size =
+			IPU6SE_DEV_SEND_QUEUE_SIZE;
+	}
+
+	for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) {
+		input_queue_cfg[base_msg_send + i].token_size =
+			sizeof(struct ipu_fw_send_queue_token);
+		input_queue_cfg[base_msg_send + i].queue_size =
+			IPU_ISYS_SIZE_SEND_QUEUE;
+	}
+
+	for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) {
+		output_queue_cfg[i].token_size =
+			sizeof(struct ipu_fw_proxy_resp_queue_token);
+		output_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_RECV_QUEUE;
+	}
+	/* There is no recv DEV queue */
+	for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) {
+		output_queue_cfg[base_msg_recv + i].token_size =
+			sizeof(struct ipu_fw_resp_queue_token);
+		output_queue_cfg[base_msg_recv + i].queue_size =
+			IPU_ISYS_SIZE_RECV_QUEUE;
+	}
+
+	fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset;
+	fwcom->specific_addr = isys_fw_cfg;
+	fwcom->specific_size = sizeof(*isys_fw_cfg);
+
+	return 0;
+}
+
+int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams)
+{
+	int retry = IPU_ISYS_OPEN_RETRY;
+
+	struct ipu_fw_com_cfg fwcom = {
+		.cell_start = start_sp,
+		.cell_ready = query_sp,
+		.buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET,
+	};
+
+	struct device *dev = &isys->adev->dev;
+	int rval;
+
+	if (ipu_ver == IPU_VER_6SE) {
+		rval = ipu6se_isys_fwcom_cfg_init(isys, &fwcom, num_streams);
+	} else if (ipu_ver == IPU_VER_6) {
+		rval = ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams);
+	} else {
+		dev_err(dev, "unsupported ipu_ver %d\n", ipu_ver);
+		return -EINVAL;
+	}
+
+	if (rval)
+		return rval;
+
+	isys->fwcom = ipu_fw_com_prepare(&fwcom, isys->adev, isys->pdata->base);
+	if (!isys->fwcom) {
+		dev_err(dev, "isys fw com prepare failed\n");
+		return -EIO;
+	}
+
+	rval = ipu_fw_com_open(isys->fwcom);
+	if (rval) {
+		dev_err(dev, "isys fw com open failed %d\n", rval);
+		return rval;
+	}
+
+	do {
+		usleep_range(IPU_ISYS_OPEN_TIMEOUT_US,
+			     IPU_ISYS_OPEN_TIMEOUT_US + 10);
+		rval = ipu_fw_com_ready(isys->fwcom);
+		if (!rval)
+			break;
+		retry--;
+	} while (retry > 0);
+
+	if (!retry && rval) {
+		dev_err(dev, "isys port open ready failed %d\n", rval);
+		ipu_fw_isys_close(isys);
+	}
+
+	return rval;
+}
+
+struct ipu_fw_isys_resp_info_abi *
+ipu_fw_isys_get_resp(void *context, unsigned int queue,
+		     struct ipu_fw_isys_resp_info_abi *response)
+{
+	return (struct ipu_fw_isys_resp_info_abi *)
+	    ipu_recv_get_token(context, queue);
+}
+
+void ipu_fw_isys_put_resp(void *context, unsigned int queue)
+{
+	ipu_recv_put_token(context, queue);
+}
+
+void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg)
+{
+	unsigned int i;
+	unsigned int idx;
+
+	for (i = 0; i < stream_cfg->nof_input_pins; i++) {
+		idx = stream_cfg->input_pins[i].dt;
+		stream_cfg->input_pins[i].bits_per_pix =
+		    extracted_bits_per_pixel_per_mipi_data_type[idx];
+		stream_cfg->input_pins[i].mapped_dt =
+		    N_IPU_FW_ISYS_MIPI_DATA_TYPE;
+		stream_cfg->input_pins[i].mipi_decompression =
+		    IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION;
+		stream_cfg->input_pins[i].capture_mode =
+			IPU_FW_ISYS_CAPTURE_MODE_REGULAR;
+	}
+}
+
+void
+ipu_fw_isys_dump_stream_cfg(struct device *dev,
+			    struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg)
+{
+	unsigned int i;
+
+	dev_dbg(dev, "---------------------------\n");
+	dev_dbg(dev, "IPU_FW_ISYS_STREAM_CFG_DATA\n");
+	dev_dbg(dev, "---------------------------\n");
+
+	dev_dbg(dev, "Source %d\n", stream_cfg->src);
+	dev_dbg(dev, "VC %d\n", stream_cfg->vc);
+	dev_dbg(dev, "Nof input pins %d\n", stream_cfg->nof_input_pins);
+	dev_dbg(dev, "Nof output pins %d\n", stream_cfg->nof_output_pins);
+
+	for (i = 0; i < stream_cfg->nof_input_pins; i++) {
+		dev_dbg(dev, "Input pin %d\n", i);
+		dev_dbg(dev, "Mipi data type 0x%0x\n",
+			stream_cfg->input_pins[i].dt);
+		dev_dbg(dev, "Mipi store mode %d\n",
+			stream_cfg->input_pins[i].mipi_store_mode);
+		dev_dbg(dev, "Bits per pixel %d\n",
+			stream_cfg->input_pins[i].bits_per_pix);
+		dev_dbg(dev, "Mapped data type 0x%0x\n",
+			stream_cfg->input_pins[i].mapped_dt);
+		dev_dbg(dev, "Input res width %d\n",
+			stream_cfg->input_pins[i].input_res.width);
+		dev_dbg(dev, "Input res height %d\n",
+			stream_cfg->input_pins[i].input_res.height);
+		dev_dbg(dev, "mipi decompression %d\n",
+			stream_cfg->input_pins[i].mipi_decompression);
+		dev_dbg(dev, "capture_mode %d\n",
+			stream_cfg->input_pins[i].capture_mode);
+	}
+
+	dev_dbg(dev, "Crop info\n");
+	dev_dbg(dev, "Crop.top_offset %d\n", stream_cfg->crop.top_offset);
+	dev_dbg(dev, "Crop.left_offset %d\n", stream_cfg->crop.left_offset);
+	dev_dbg(dev, "Crop.bottom_offset %d\n",
+		stream_cfg->crop.bottom_offset);
+	dev_dbg(dev, "Crop.right_offset %d\n", stream_cfg->crop.right_offset);
+	dev_dbg(dev, "----------------\n");
+
+	for (i = 0; i < stream_cfg->nof_output_pins; i++) {
+		dev_dbg(dev, "Output pin %d\n", i);
+		dev_dbg(dev, "Output input pin id %d\n",
+			stream_cfg->output_pins[i].input_pin_id);
+		dev_dbg(dev, "Output res width %d\n",
+			stream_cfg->output_pins[i].output_res.width);
+		dev_dbg(dev, "Output res height %d\n",
+			stream_cfg->output_pins[i].output_res.height);
+		dev_dbg(dev, "Stride %d\n", stream_cfg->output_pins[i].stride);
+		dev_dbg(dev, "Pin type %d\n", stream_cfg->output_pins[i].pt);
+		dev_dbg(dev, "Ft %d\n", stream_cfg->output_pins[i].ft);
+		dev_dbg(dev, "Watermark in lines %d\n",
+			stream_cfg->output_pins[i].watermark_in_lines);
+		dev_dbg(dev, "Send irq %d\n",
+			stream_cfg->output_pins[i].send_irq);
+		dev_dbg(dev, "Reserve compression %d\n",
+			stream_cfg->output_pins[i].reserve_compression);
+		dev_dbg(dev, "snoopable %d\n",
+			stream_cfg->output_pins[i].snoopable);
+		dev_dbg(dev, "error_handling_enable %d\n",
+			stream_cfg->output_pins[i].error_handling_enable);
+		dev_dbg(dev, "sensor type %d\n",
+			stream_cfg->output_pins[i].sensor_type);
+		dev_dbg(dev, "----------------\n");
+	}
+
+	dev_dbg(dev, "Isl_use %d\n", stream_cfg->isl_use);
+	dev_dbg(dev, "stream sensor_type %d\n", stream_cfg->sensor_type);
+}
+
+void ipu_fw_isys_dump_frame_buff_set(struct device *dev,
+				     struct ipu_fw_isys_frame_buff_set_abi *buf,
+				     unsigned int outputs)
+{
+	unsigned int i;
+
+	dev_dbg(dev, "--------------------------\n");
+	dev_dbg(dev, "IPU_FW_ISYS_FRAME_BUFF_SET\n");
+	dev_dbg(dev, "--------------------------\n");
+
+	for (i = 0; i < outputs; i++) {
+		dev_dbg(dev, "Output pin %d\n", i);
+		dev_dbg(dev, "out_buf_id %llu\n",
+			buf->output_pins[i].out_buf_id);
+		dev_dbg(dev, "addr 0x%x\n", buf->output_pins[i].addr);
+		dev_dbg(dev, "compress %u\n", buf->output_pins[i].compress);
+
+		dev_dbg(dev, "----------------\n");
+	}
+
+	dev_dbg(dev, "send_irq_sof 0x%x\n", buf->send_irq_sof);
+	dev_dbg(dev, "send_irq_eof 0x%x\n", buf->send_irq_eof);
+	dev_dbg(dev, "send_resp_sof 0x%x\n", buf->send_resp_sof);
+	dev_dbg(dev, "send_resp_eof 0x%x\n", buf->send_resp_eof);
+	dev_dbg(dev, "send_irq_capture_ack 0x%x\n", buf->send_irq_capture_ack);
+	dev_dbg(dev, "send_irq_capture_done 0x%x\n",
+		buf->send_irq_capture_done);
+	dev_dbg(dev, "send_resp_capture_ack 0x%x\n",
+		buf->send_resp_capture_ack);
+	dev_dbg(dev, "send_resp_capture_done 0x%x\n",
+		buf->send_resp_capture_done);
+}
diff --git a/drivers/media/pci/intel/ipu-fw-isys.h b/drivers/media/pci/intel/ipu-fw-isys.h
new file mode 100644
index 000000000000..ad599c401b50
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-fw-isys.h
@@ -0,0 +1,824 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_FW_ISYS_H
+#define IPU_FW_ISYS_H
+
+#include "ipu-fw-com.h"
+
+/* Max number of Input/Output Pins */
+#define IPU_MAX_IPINS 4
+
+#define IPU_MAX_OPINS ((IPU_MAX_IPINS) + 1)
+
+#define IPU6_STREAM_ID_MAX 16
+#define IPU6_DEV_SEND_QUEUE_SIZE (IPU6_STREAM_ID_MAX)
+#define IPU6_NOF_SRAM_BLOCKS_MAX (IPU6_STREAM_ID_MAX)
+#define IPU6_N_MAX_MSG_SEND_QUEUES (IPU6_STREAM_ID_MAX)
+#define IPU6SE_STREAM_ID_MAX 8
+#define IPU6SE_DEV_SEND_QUEUE_SIZE (IPU6SE_STREAM_ID_MAX)
+#define IPU6SE_NOF_SRAM_BLOCKS_MAX (IPU6SE_STREAM_ID_MAX)
+#define IPU6SE_N_MAX_MSG_SEND_QUEUES (IPU6SE_STREAM_ID_MAX)
+
+/* Single return queue for all streams/commands type */
+#define IPU_N_MAX_MSG_RECV_QUEUES 1
+/* Single device queue for high priority commands (bypass in-order queue) */
+#define IPU_N_MAX_DEV_SEND_QUEUES 1
+/* Single dedicated send queue for proxy interface */
+#define IPU_N_MAX_PROXY_SEND_QUEUES 1
+/* Single dedicated recv queue for proxy interface */
+#define IPU_N_MAX_PROXY_RECV_QUEUES 1
+/* Send queues layout */
+#define IPU_BASE_PROXY_SEND_QUEUES 0
+#define IPU_BASE_DEV_SEND_QUEUES \
+	(IPU_BASE_PROXY_SEND_QUEUES + IPU_N_MAX_PROXY_SEND_QUEUES)
+#define IPU_BASE_MSG_SEND_QUEUES \
+	(IPU_BASE_DEV_SEND_QUEUES + IPU_N_MAX_DEV_SEND_QUEUES)
+/* Recv queues layout */
+#define IPU_BASE_PROXY_RECV_QUEUES 0
+#define IPU_BASE_MSG_RECV_QUEUES \
+	(IPU_BASE_PROXY_RECV_QUEUES + IPU_N_MAX_PROXY_RECV_QUEUES)
+#define IPU_N_MAX_RECV_QUEUES \
+	(IPU_BASE_MSG_RECV_QUEUES + IPU_N_MAX_MSG_RECV_QUEUES)
+
+#define IPU6_N_MAX_SEND_QUEUES \
+	(IPU_BASE_MSG_SEND_QUEUES + IPU6_N_MAX_MSG_SEND_QUEUES)
+#define IPU6SE_N_MAX_SEND_QUEUES \
+	(IPU_BASE_MSG_SEND_QUEUES + IPU6SE_N_MAX_MSG_SEND_QUEUES)
+
+/* Max number of supported input pins routed in ISL */
+#define IPU_MAX_IPINS_IN_ISL 2
+
+/* Max number of planes for frame formats supported by the FW */
+#define IPU_PIN_PLANES_MAX 4
+
+/**
+ * enum ipu_fw_isys_resp_type
+ */
+enum ipu_fw_isys_resp_type {
+	IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0,
+	IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK,
+	IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+	IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK,
+	IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK,
+	IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK,
+	IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK,
+	IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY,
+	IPU_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK,
+	IPU_FW_ISYS_RESP_TYPE_FRAME_SOF,
+	IPU_FW_ISYS_RESP_TYPE_FRAME_EOF,
+	IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+	IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE,
+	IPU_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED,
+	IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED,
+	IPU_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED,
+	IPU_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED,
+	IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY,
+	N_IPU_FW_ISYS_RESP_TYPE
+};
+
+/**
+ * enum ipu_fw_isys_send_type
+ */
+enum ipu_fw_isys_send_type {
+	IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0,
+	IPU_FW_ISYS_SEND_TYPE_STREAM_START,
+	IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE,
+	IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE,
+	IPU_FW_ISYS_SEND_TYPE_STREAM_STOP,
+	IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH,
+	IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE,
+	N_IPU_FW_ISYS_SEND_TYPE
+};
+
+/**
+ * enum ipu_fw_isys_queue_type
+ */
+enum ipu_fw_isys_queue_type {
+	IPU_FW_ISYS_QUEUE_TYPE_PROXY = 0,
+	IPU_FW_ISYS_QUEUE_TYPE_DEV,
+	IPU_FW_ISYS_QUEUE_TYPE_MSG,
+	N_IPU_FW_ISYS_QUEUE_TYPE
+};
+
+/**
+ * enum ipu_fw_isys_stream_source: Specifies a source for a stream
+ */
+enum ipu_fw_isys_stream_source {
+	IPU_FW_ISYS_STREAM_SRC_PORT_0 = 0,
+	IPU_FW_ISYS_STREAM_SRC_PORT_1,
+	IPU_FW_ISYS_STREAM_SRC_PORT_2,
+	IPU_FW_ISYS_STREAM_SRC_PORT_3,
+	IPU_FW_ISYS_STREAM_SRC_PORT_4,
+	IPU_FW_ISYS_STREAM_SRC_PORT_5,
+	IPU_FW_ISYS_STREAM_SRC_PORT_6,
+	IPU_FW_ISYS_STREAM_SRC_PORT_7,
+	IPU_FW_ISYS_STREAM_SRC_PORT_8,
+	IPU_FW_ISYS_STREAM_SRC_PORT_9,
+	IPU_FW_ISYS_STREAM_SRC_PORT_10,
+	IPU_FW_ISYS_STREAM_SRC_PORT_11,
+	IPU_FW_ISYS_STREAM_SRC_PORT_12,
+	IPU_FW_ISYS_STREAM_SRC_PORT_13,
+	IPU_FW_ISYS_STREAM_SRC_PORT_14,
+	IPU_FW_ISYS_STREAM_SRC_PORT_15,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_2,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_3,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_4,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_5,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_6,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_7,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_8,
+	IPU_FW_ISYS_STREAM_SRC_MIPIGEN_9,
+	N_IPU_FW_ISYS_STREAM_SRC
+};
+
+enum ipu_fw_isys_sensor_type {
+	/* non-snoopable to PSYS */
+	IPU_FW_ISYS_VC1_SENSOR_DATA	= 0,
+	/* non-snoopable for PDAF */
+	IPU_FW_ISYS_VC1_SENSOR_PDAF,
+	/* snoopable to CPU */
+	IPU_FW_ISYS_VC0_SENSOR_METADATA,
+	/* snoopable to CPU */
+	IPU_FW_ISYS_VC0_SENSOR_DATA,
+	N_IPU_FW_ISYS_SENSOR_TYPE
+};
+
+enum ipu6se_fw_isys_sensor_info {
+	/* VC1 */
+	IPU6SE_FW_ISYS_SENSOR_DATA_1 = 1,
+	IPU6SE_FW_ISYS_SENSOR_DATA_2 = 2,
+	IPU6SE_FW_ISYS_SENSOR_DATA_3 = 3,
+	IPU6SE_FW_ISYS_SENSOR_PDAF_1 = 4,
+	IPU6SE_FW_ISYS_SENSOR_PDAF_2 = 4,
+	/* VC0 */
+	IPU6SE_FW_ISYS_SENSOR_METADATA = 5,
+	IPU6SE_FW_ISYS_SENSOR_DATA_4 = 6,
+	IPU6SE_FW_ISYS_SENSOR_DATA_5 = 7,
+	IPU6SE_FW_ISYS_SENSOR_DATA_6 = 8,
+	IPU6SE_FW_ISYS_SENSOR_DATA_7 = 9,
+	IPU6SE_FW_ISYS_SENSOR_DATA_8 = 10,
+	IPU6SE_FW_ISYS_SENSOR_DATA_9 = 11,
+	N_IPU6SE_FW_ISYS_SENSOR_INFO,
+	IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START = IPU6SE_FW_ISYS_SENSOR_DATA_1,
+	IPU6SE_FW_ISYS_VC1_SENSOR_DATA_END = IPU6SE_FW_ISYS_SENSOR_DATA_3,
+	IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START = IPU6SE_FW_ISYS_SENSOR_DATA_4,
+	IPU6SE_FW_ISYS_VC0_SENSOR_DATA_END = IPU6SE_FW_ISYS_SENSOR_DATA_9,
+	IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START = IPU6SE_FW_ISYS_SENSOR_PDAF_1,
+	IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_END = IPU6SE_FW_ISYS_SENSOR_PDAF_2,
+};
+
+enum ipu6_fw_isys_sensor_info {
+	/* VC1 */
+	IPU6_FW_ISYS_SENSOR_DATA_1 = 1,
+	IPU6_FW_ISYS_SENSOR_DATA_2 = 2,
+	IPU6_FW_ISYS_SENSOR_DATA_3 = 3,
+	IPU6_FW_ISYS_SENSOR_DATA_4 = 4,
+	IPU6_FW_ISYS_SENSOR_DATA_5 = 5,
+	IPU6_FW_ISYS_SENSOR_DATA_6 = 6,
+	IPU6_FW_ISYS_SENSOR_DATA_7 = 7,
+	IPU6_FW_ISYS_SENSOR_DATA_8 = 8,
+	IPU6_FW_ISYS_SENSOR_DATA_9 = 9,
+	IPU6_FW_ISYS_SENSOR_DATA_10 = 10,
+	IPU6_FW_ISYS_SENSOR_PDAF_1 = 11,
+	IPU6_FW_ISYS_SENSOR_PDAF_2 = 12,
+	/* VC0 */
+	IPU6_FW_ISYS_SENSOR_METADATA = 13,
+	IPU6_FW_ISYS_SENSOR_DATA_11 = 14,
+	IPU6_FW_ISYS_SENSOR_DATA_12 = 15,
+	IPU6_FW_ISYS_SENSOR_DATA_13 = 16,
+	IPU6_FW_ISYS_SENSOR_DATA_14 = 17,
+	IPU6_FW_ISYS_SENSOR_DATA_15 = 18,
+	IPU6_FW_ISYS_SENSOR_DATA_16 = 19,
+	N_IPU6_FW_ISYS_SENSOR_INFO,
+	IPU6_FW_ISYS_VC1_SENSOR_DATA_START = IPU6_FW_ISYS_SENSOR_DATA_1,
+	IPU6_FW_ISYS_VC1_SENSOR_DATA_END = IPU6_FW_ISYS_SENSOR_DATA_10,
+	IPU6_FW_ISYS_VC0_SENSOR_DATA_START = IPU6_FW_ISYS_SENSOR_DATA_11,
+	IPU6_FW_ISYS_VC0_SENSOR_DATA_END = IPU6_FW_ISYS_SENSOR_DATA_16,
+	IPU6_FW_ISYS_VC1_SENSOR_PDAF_START = IPU6_FW_ISYS_SENSOR_PDAF_1,
+	IPU6_FW_ISYS_VC1_SENSOR_PDAF_END = IPU6_FW_ISYS_SENSOR_PDAF_2,
+};
+
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_0
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_1
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_2
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_3
+
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU_FW_ISYS_STREAM_SRC_PORT_4
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU_FW_ISYS_STREAM_SRC_PORT_5
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_6
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_7
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_8
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_9
+
+#define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0
+#define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1
+
+/**
+ * enum ipu_fw_isys_mipi_vc: the MIPI CSI-2 spec supports up to 4
+ * virtual channels per physical channel
+ */
+enum ipu_fw_isys_mipi_vc {
+	IPU_FW_ISYS_MIPI_VC_0 = 0,
+	IPU_FW_ISYS_MIPI_VC_1,
+	IPU_FW_ISYS_MIPI_VC_2,
+	IPU_FW_ISYS_MIPI_VC_3,
+	N_IPU_FW_ISYS_MIPI_VC
+};
+
+/**
+ *  Supported Pixel Frame formats. Expandable if needed
+ */
+enum ipu_fw_isys_frame_format_type {
+	IPU_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */
+	IPU_FW_ISYS_FRAME_FORMAT_NV12,	/* 12 bit YUV 420, Y, UV plane */
+	IPU_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */
+	IPU_FW_ISYS_FRAME_FORMAT_NV12_TILEY, /* 12 bit YUV 420,
+					      * Intel proprietary tiled format,
+					      * TileY
+					      */
+	IPU_FW_ISYS_FRAME_FORMAT_NV16,	/* 16 bit YUV 422, Y, UV plane */
+	IPU_FW_ISYS_FRAME_FORMAT_NV21,	/* 12 bit YUV 420, Y, VU plane */
+	IPU_FW_ISYS_FRAME_FORMAT_NV61,	/* 16 bit YUV 422, Y, VU plane */
+	IPU_FW_ISYS_FRAME_FORMAT_YV12,	/* 12 bit YUV 420, Y, V, U plane */
+	IPU_FW_ISYS_FRAME_FORMAT_YV16,	/* 16 bit YUV 422, Y, V, U plane */
+	IPU_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */
+	IPU_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */
+	IPU_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */
+	IPU_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */
+	IPU_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */
+	IPU_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */
+	IPU_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */
+	IPU_FW_ISYS_FRAME_FORMAT_UYVY,	/* 16 bit YUV 422, UYVY interleaved */
+	IPU_FW_ISYS_FRAME_FORMAT_YUYV,	/* 16 bit YUV 422, YUYV interleaved */
+	IPU_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */
+	IPU_FW_ISYS_FRAME_FORMAT_YUV_LINE, /* Internal format, 2 y lines
+					    * followed by a uvinterleaved line
+					    */
+	IPU_FW_ISYS_FRAME_FORMAT_RAW8,	/* RAW8, 1 plane */
+	IPU_FW_ISYS_FRAME_FORMAT_RAW10,	/* RAW10, 1 plane */
+	IPU_FW_ISYS_FRAME_FORMAT_RAW12,	/* RAW12, 1 plane */
+	IPU_FW_ISYS_FRAME_FORMAT_RAW14,	/* RAW14, 1 plane */
+	IPU_FW_ISYS_FRAME_FORMAT_RAW16,	/* RAW16, 1 plane */
+	IPU_FW_ISYS_FRAME_FORMAT_RGB565, /* 16 bit RGB, 1 plane. Each 3 sub
+					  * pixels are packed into one 16 bit
+					  * value, 5 bits for R, 6 bits
+					  *   for G and 5 bits for B.
+					  */
+
+	IPU_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888,	/* 24 bit RGB, 3 planes */
+	IPU_FW_ISYS_FRAME_FORMAT_RGBA888,	/* 32 bit RGBA, 1 plane,
+						 * A=Alpha (alpha is unused)
+						 */
+	IPU_FW_ISYS_FRAME_FORMAT_QPLANE6,	/* Internal, for advanced ISP */
+	IPU_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */
+	N_IPU_FW_ISYS_FRAME_FORMAT
+};
+
+/* Temporary for driver compatibility */
+#define IPU_FW_ISYS_FRAME_FORMAT_RAW		(IPU_FW_ISYS_FRAME_FORMAT_RAW16)
+
+enum ipu_fw_isys_mipi_compression_type {
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION = 0,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_8_10_TYPE1,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_8_10_TYPE2,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_7_10_TYPE1,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_7_10_TYPE2,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_6_10_TYPE1,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_6_10_TYPE2,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_8_12_TYPE1,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_8_12_TYPE2,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_7_12_TYPE1,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_7_12_TYPE2,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_6_12_TYPE1,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_6_12_TYPE2,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_10_12_TYPE1,
+	IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_10_12_TYPE2,
+	N_IPU_FW_ISYS_MIPI_COMPRESSION_TYPE,
+};
+
+/**
+ * Supported MIPI data types. Keep in sync with the array in
+ * ipu_fw_isys_private.c
+ */
+enum ipu_fw_isys_mipi_data_type {
+	/** SYNCHRONIZATION SHORT PACKET DATA TYPES */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02,	/* Optional */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03,	/* Optional */
+	/** Reserved 0x04-0x07 */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07,
+	/** GENERIC SHORT PACKET DATA TYPES */
+	/* Used to keep the timing information for the opening/closing of
+	 * shutters, the triggering of flashes, etc.
+	 */
+	/* Generic Short Packet Codes 1 - 8 */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F,
+	/** GENERIC LONG PACKET DATA TYPES */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_NULL = 0x10,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11,
+	/* Embedded 8-bit non Image Data */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED = 0x12,
+	/** Reserved 0x13-0x17 */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17,
+	/** YUV DATA TYPES */
+	/* 8 bits per subpixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 = 0x18,
+	/* 10 bits per subpixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 = 0x19,
+	/* 8 bits per subpixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A,
+	/** Reserved 0x1B */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B,
+	/* YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C,
+	/* YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D,
+	/* UYVY..UVYV, 8 bits per subpixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E,
+	/* UYVY..UVYV, 10 bits per subpixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F,
+	/** RGB DATA TYPES */
+	/* BGR..BGR, 4 bits per subpixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 = 0x20,
+	/* BGR..BGR, 5 bits per subpixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 = 0x21,
+	/* BGR..BGR, 5 bits B and R, 6 bits G */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 = 0x22,
+	/* BGR..BGR, 6 bits per subpixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 = 0x23,
+	/* BGR..BGR, 8 bits per subpixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 = 0x24,
+	/** Reserved 0x25-0x27 */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27,
+	/** RAW DATA TYPES */
+	/* RAW data, 6 - 14 bits per pixel */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 = 0x28,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 = 0x29,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 = 0x2A,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 = 0x2B,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 = 0x2C,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 = 0x2D,
+	/** Reserved 0x2E-0x2F are used here with assigned meanings */
+	/* RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 = 0x2E,
+	/* Binary byte stream, which is targeted at JPEG,
+	 * not specified in CSI-MIPI standard
+	 */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F,
+
+	/** USER DEFINED 8-BIT DATA TYPES */
+	/** For example, the data transmitter (e.g. the SoC sensor)
+	 * can keep the JPEG data as the User Defined Data Type 4
+	 * and the MPEG data as the User Defined Data Type 7.
+	 */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37,
+	/** Reserved 0x38-0x3F */
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E,
+	IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F,
+
+	/* Keep always last and max value */
+	N_IPU_FW_ISYS_MIPI_DATA_TYPE = 0x40
+};
+
+/** enum ipu_fw_isys_pin_type: output pin buffer types.
+ * Buffers can be queued and dequeued to hand them over between IA and ISYS
+ */
+enum ipu_fw_isys_pin_type {
+	/* Captured as MIPI packets */
+	IPU_FW_ISYS_PIN_TYPE_MIPI = 0,
+	/* Captured through the RAW path */
+	IPU_FW_ISYS_PIN_TYPE_RAW_NS = 1,
+	/* Captured through the SoC path */
+	IPU_FW_ISYS_PIN_TYPE_RAW_SOC = 3,
+	/* Reserved for future use, maybe short packets */
+	IPU_FW_ISYS_PIN_TYPE_METADATA_0 = 4,
+	/* Reserved for future use */
+	IPU_FW_ISYS_PIN_TYPE_METADATA_1 = 5,
+	/* Keep always last and max value */
+	N_IPU_FW_ISYS_PIN_TYPE
+};
+
+/**
+ * enum ipu_fw_isys_mipi_store_mode. Describes whether long MIPI packets
+ * reach MIPI SRAM with the long packet header or without it; if the
+ * header is discarded, the only option is to capture with pin type MIPI.
+ */
+enum ipu_fw_isys_mipi_store_mode {
+	IPU_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0,
+	IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER,
+	N_IPU_FW_ISYS_MIPI_STORE_MODE
+};
+
+/**
+ * ISYS capture mode and sensor enums
+ * Used for the Tobii sensor; if in doubt, use the default value 0
+ */
+
+enum ipu_fw_isys_capture_mode {
+	IPU_FW_ISYS_CAPTURE_MODE_REGULAR = 0,
+	IPU_FW_ISYS_CAPTURE_MODE_BURST,
+	N_IPU_FW_ISYS_CAPTURE_MODE,
+};
+
+enum ipu_fw_isys_sensor_mode {
+	IPU_FW_ISYS_SENSOR_MODE_NORMAL = 0,
+	IPU_FW_ISYS_SENSOR_MODE_TOBII,
+	N_IPU_FW_ISYS_SENSOR_MODE,
+};
+
+/**
+ * enum ipu_fw_isys_error. Describes the error type detected by the FW
+ */
+enum ipu_fw_isys_error {
+	IPU_FW_ISYS_ERROR_NONE = 0,	/* No details */
+	IPU_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY,	/* enum */
+	IPU_FW_ISYS_ERROR_HW_CONSISTENCY,	/* enum */
+	IPU_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE,	/* enum */
+	IPU_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION,	/* enum */
+	IPU_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION,	/* enum */
+	IPU_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION,	/* enum */
+	IPU_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES,	/* enum */
+	IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO,	/* HW code */
+	IPU_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO,	/* HW code */
+	IPU_FW_ISYS_ERROR_SENSOR_FW_SYNC,	/* enum */
+	IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION,	/* FW code */
+	IPU_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL,	/* FW code */
+	N_IPU_FW_ISYS_ERROR
+};
+
+/**
+ * enum ipu_fw_proxy_error. Describes the error type for
+ * the proxy detected by the FW
+ */
+enum ipu_fw_proxy_error {
+	IPU_FW_PROXY_ERROR_NONE = 0,
+	IPU_FW_PROXY_ERROR_INVALID_WRITE_REGION,
+	IPU_FW_PROXY_ERROR_INVALID_WRITE_OFFSET,
+	N_IPU_FW_PROXY_ERROR
+};
+
+struct ipu_isys;
+
+struct ipu6_fw_isys_buffer_partition_abi {
+	u32 num_gda_pages[IPU6_STREAM_ID_MAX];
+};
+
+struct ipu6_fw_isys_fw_config {
+	struct ipu6_fw_isys_buffer_partition_abi buffer_partition;
+	u32 num_send_queues[N_IPU_FW_ISYS_QUEUE_TYPE];
+	u32 num_recv_queues[N_IPU_FW_ISYS_QUEUE_TYPE];
+};
+
+struct ipu6se_fw_isys_buffer_partition_abi {
+	u32 num_gda_pages[IPU6SE_STREAM_ID_MAX];
+};
+
+struct ipu6se_fw_isys_fw_config {
+	struct ipu6se_fw_isys_buffer_partition_abi buffer_partition;
+	u32 num_send_queues[N_IPU_FW_ISYS_QUEUE_TYPE];
+	u32 num_recv_queues[N_IPU_FW_ISYS_QUEUE_TYPE];
+};
+
+/**
+ * struct ipu_fw_isys_resolution_abi - generic resolution structure
+ * @width: resolution width
+ * @height: resolution height
+ */
+struct ipu_fw_isys_resolution_abi {
+	u32 width;
+	u32 height;
+};
+
+/**
+ * struct ipu_fw_isys_output_pin_payload_abi
+ * @out_buf_id: Points to output pin buffer - buffer identifier
+ * @addr: Points to output pin buffer - CSS Virtual Address
+ * @compress: Request frame compression (1), or not (0)
+ */
+struct ipu_fw_isys_output_pin_payload_abi {
+	u64 out_buf_id;
+	u32 addr;
+	u32 compress;
+};
+
+/**
+ * struct ipu_fw_isys_output_pin_info_abi
+ * @output_res: output pin resolution
+ * @stride: output stride in Bytes (not valid for statistics)
+ * @watermark_in_lines: pin watermark level in lines
+ * @payload_buf_size: minimum size in Bytes of all buffers that will be
+ *			supplied for capture on this pin
+ * @send_irq: assert if pin event should trigger irq
+ * @pt: pin type (enum ipu_fw_isys_pin_type)
+ * @ft: frame format type (enum ipu_fw_isys_frame_format_type)
+ * @input_pin_id: related input pin id
+ * @reserve_compression: reserve compression resources for pin
+ */
+struct ipu_fw_isys_output_pin_info_abi {
+	struct ipu_fw_isys_resolution_abi output_res;
+	u32 stride;
+	u32 watermark_in_lines;
+	u32 payload_buf_size;
+	u32 ts_offsets[IPU_PIN_PLANES_MAX];
+	u32 s2m_pixel_soc_pixel_remapping;
+	u32 csi_be_soc_pixel_remapping;
+	u8 send_irq;
+	u8 input_pin_id;
+	u8 pt;
+	u8 ft;
+	u8 reserved;
+	u8 reserve_compression;
+	u8 snoopable;
+	u8 error_handling_enable;
+	u32 sensor_type;
+};
+
+/**
+ * struct ipu_fw_isys_param_pin_abi
+ * @param_buf_id: Points to param port buffer - buffer identifier
+ * @addr: Points to param pin buffer - CSS Virtual Address
+ */
+struct ipu_fw_isys_param_pin_abi {
+	u64 param_buf_id;
+	u32 addr;
+};
+
+/**
+ * struct ipu_fw_isys_input_pin_info_abi
+ * @input_res: input resolution
+ * @dt: mipi data type (enum ipu_fw_isys_mipi_data_type)
+ * @mipi_store_mode: defines whether the legacy long packet header will be
+ *		     stored or discarded; if discarded, the output pin type
+ *		     for this input pin can only be MIPI
+ *		     (enum ipu_fw_isys_mipi_store_mode)
+ * @bits_per_pix: native bits per pixel
+ * @mapped_dt: actual data type from the sensor
+ * @mipi_decompression: defines which decompression will be applied in the
+ *			mipi backend
+ * @crop_first_and_last_lines: control whether to crop the first and last
+ *			       line of the input image; crop is done by the
+ *			       HW device
+ * @capture_mode: mode of capture, regular or burst; default is regular
+ */
+struct ipu_fw_isys_input_pin_info_abi {
+	struct ipu_fw_isys_resolution_abi input_res;
+	u8 dt;
+	u8 mipi_store_mode;
+	u8 bits_per_pix;
+	u8 mapped_dt;
+	u8 mipi_decompression;
+	u8 crop_first_and_last_lines;
+	u8 capture_mode;
+};
+
+/**
+ * struct ipu_fw_isys_cropping_abi - cropping coordinates
+ */
+struct ipu_fw_isys_cropping_abi {
+	s32 top_offset;
+	s32 left_offset;
+	s32 bottom_offset;
+	s32 right_offset;
+};
+
+/**
+ * struct ipu_fw_isys_stream_cfg_data_abi
+ * ISYS stream configuration data structure
+ * @crop: defines the cropping resolution for the maximum number of input
+ * pins which can be cropped; it is directly mapped to the HW devices
+ * @input_pins: input pin descriptors
+ * @output_pins: output pin descriptors
+ * @compfmt: de-compression setting for User Defined Data
+ * @nof_input_pins: number of input pins
+ * @nof_output_pins: number of output pins
+ * @send_irq_sof_discarded: send irq on discarded frame sof response
+ *		- if '1' it will override the send_resp_sof_discarded
+ *		  and send the response
+ *		- if '0' the send_resp_sof_discarded will determine
+ *		  whether to send the response
+ * @send_irq_eof_discarded: send irq on discarded frame eof response
+ *		- if '1' it will override the send_resp_eof_discarded
+ *		  and send the response
+ *		- if '0' the send_resp_eof_discarded will determine
+ *		  whether to send the response
+ * @send_resp_sof_discarded: send response for discarded frame sof detected,
+ *			     used only when send_irq_sof_discarded is '0'
+ * @send_resp_eof_discarded: send response for discarded frame eof detected,
+ *			     used only when send_irq_eof_discarded is '0'
+ * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1
+ * @vc: MIPI Virtual Channel (up to 4 virtual channels per physical channel)
+ * @isl_use: indicates whether stream requires ISL and how
+ * @sensor_type: type of connected sensor, tobii or others, default is 0
+ */
+struct ipu_fw_isys_stream_cfg_data_abi {
+	struct ipu_fw_isys_cropping_abi crop;
+	struct ipu_fw_isys_input_pin_info_abi input_pins[IPU_MAX_IPINS];
+	struct ipu_fw_isys_output_pin_info_abi output_pins[IPU_MAX_OPINS];
+	u32 compfmt;
+	u8 nof_input_pins;
+	u8 nof_output_pins;
+	u8 send_irq_sof_discarded;
+	u8 send_irq_eof_discarded;
+	u8 send_resp_sof_discarded;
+	u8 send_resp_eof_discarded;
+	u8 src;
+	u8 vc;
+	u8 isl_use;
+	u8 sensor_type;
+};
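+
+/*
+ * Illustrative sketch only (field values are hypothetical, not part of
+ * the ISYSAPI contract): a minimal configuration for one RAW10 input
+ * captured to a single MIPI output pin could look like:
+ *
+ *	struct ipu_fw_isys_stream_cfg_data_abi cfg = { 0 };
+ *
+ *	cfg.nof_input_pins = 1;
+ *	cfg.input_pins[0].dt = IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10;
+ *	cfg.input_pins[0].mipi_store_mode =
+ *		IPU_FW_ISYS_MIPI_STORE_MODE_NORMAL;
+ *	cfg.nof_output_pins = 1;
+ *	cfg.output_pins[0].pt = IPU_FW_ISYS_PIN_TYPE_MIPI;
+ *	cfg.output_pins[0].input_pin_id = 0;
+ */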
+
+/**
+ * struct ipu_fw_isys_frame_buff_set_abi - frame buffer set
+ * @output_pins: output pin addresses
+ * @send_irq_sof: send irq on frame sof response
+ *		- if '1' it will override the send_resp_sof and
+ *		  send the response
+ *		- if '0' the send_resp_sof will determine whether to
+ *		  send the response
+ * @send_irq_eof: send irq on frame eof response
+ *		- if '1' it will override the send_resp_eof and
+ *		  send the response
+ *		- if '0' the send_resp_eof will determine whether to
+ *		  send the response
+ * @send_irq_capture_ack: send irq on capture ack response
+ * @send_irq_capture_done: send irq on capture done response
+ * @send_resp_sof: send response for frame sof detected,
+ *		   used only when send_irq_sof is '0'
+ * @send_resp_eof: send response for frame eof detected,
+ *		   used only when send_irq_eof is '0'
+ * @send_resp_capture_ack: send response for capture ack event
+ * @send_resp_capture_done: send response for capture done event
+ */
+struct ipu_fw_isys_frame_buff_set_abi {
+	struct ipu_fw_isys_output_pin_payload_abi output_pins[IPU_MAX_OPINS];
+	u8 send_irq_sof;
+	u8 send_irq_eof;
+	u8 send_irq_capture_ack;
+	u8 send_irq_capture_done;
+	u8 send_resp_sof;
+	u8 send_resp_eof;
+	u8 send_resp_capture_ack;
+	u8 send_resp_capture_done;
+	u8 reserved;
+};
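+
+/*
+ * Illustrative example: to get an interrupt only on the end-of-frame
+ * event, set send_irq_eof = 1 (send_resp_eof is then ignored) and keep
+ * send_irq_sof = 0 and send_resp_sof = 0 so that SOF responses are
+ * suppressed.
+ */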
+
+/**
+ * struct ipu_fw_isys_error_info_abi
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional error info
+ */
+struct ipu_fw_isys_error_info_abi {
+	enum ipu_fw_isys_error error;
+	u32 error_details;
+};
+
+/**
+ * struct ipu_fw_isys_resp_info_abi
+ * @pin: only valid for pin event related responses,
+ *	 contains the pin payload
+ * @error_info: error information from the FW
+ * @timestamp: Time information for event if available
+ * @stream_handle: stream id the response corresponds to
+ * @type: response type (enum ipu_fw_isys_resp_type)
+ * @pin_id: pin id that the pin payload corresponds to
+ */
+struct ipu_fw_isys_resp_info_abi {
+	u64 buf_id;
+	struct ipu_fw_isys_output_pin_payload_abi pin;
+	struct ipu_fw_isys_error_info_abi error_info;
+	u32 timestamp[2];
+	u8 stream_handle;
+	u8 type;
+	u8 pin_id;
+	u16 reserved;
+};
+
+/**
+ * struct ipu_fw_isys_proxy_error_info_abi
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional
+ *		   error info
+ */
+struct ipu_fw_isys_proxy_error_info_abi {
+	enum ipu_fw_proxy_error error;
+	u32 error_details;
+};
+
+struct ipu_fw_isys_proxy_resp_info_abi {
+	u32 request_id;
+	struct ipu_fw_isys_proxy_error_info_abi error_info;
+};
+
+/**
+ * struct ipu_fw_proxy_write_queue_token
+ * @request_id: id of the specific proxy write request
+ * @region_index: Region id for the proxy write request
+ * @offset: Offset of the write request according to the base address
+ *	    of the region
+ * @value: Value that is requested to be written with the proxy write request
+ */
+struct ipu_fw_proxy_write_queue_token {
+	u32 request_id;
+	u32 region_index;
+	u32 offset;
+	u32 value;
+};
+
+/* From here on, type definitions that do not come from the ISYSAPI interface */
+
+/**
+ * struct ipu_fw_resp_queue_token
+ */
+struct ipu_fw_resp_queue_token {
+	struct ipu_fw_isys_resp_info_abi resp_info;
+};
+
+/**
+ * struct ipu_fw_send_queue_token
+ */
+struct ipu_fw_send_queue_token {
+	u64 buf_handle;
+	u32 payload;
+	u16 send_type;
+	u16 stream_id;
+};
+
+/**
+ * struct ipu_fw_proxy_resp_queue_token
+ */
+struct ipu_fw_proxy_resp_queue_token {
+	struct ipu_fw_isys_proxy_resp_info_abi proxy_resp_info;
+};
+
+/**
+ * struct ipu_fw_proxy_send_queue_token
+ */
+struct ipu_fw_proxy_send_queue_token {
+	u32 request_id;
+	u32 region_index;
+	u32 offset;
+	u32 value;
+};
+
+void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg);
+
+void ipu_fw_isys_dump_stream_cfg(struct device *dev,
+				 struct ipu_fw_isys_stream_cfg_data_abi
+				 *stream_cfg);
+void ipu_fw_isys_dump_frame_buff_set(struct device *dev,
+				     struct ipu_fw_isys_frame_buff_set_abi *buf,
+				     unsigned int outputs);
+int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams);
+int ipu_fw_isys_close(struct ipu_isys *isys);
+int ipu_fw_isys_simple_cmd(struct ipu_isys *isys,
+			   const unsigned int stream_handle,
+			   enum ipu_fw_isys_send_type send_type);
+int ipu_fw_isys_complex_cmd(struct ipu_isys *isys,
+			    const unsigned int stream_handle,
+			    void *cpu_mapped_buf,
+			    dma_addr_t dma_mapped_buf,
+			    size_t size, enum ipu_fw_isys_send_type send_type);
+int ipu_fw_isys_send_proxy_token(struct ipu_isys *isys,
+				 unsigned int req_id,
+				 unsigned int index,
+				 unsigned int offset, u32 value);
+void ipu_fw_isys_cleanup(struct ipu_isys *isys);
+struct ipu_fw_isys_resp_info_abi *
+ipu_fw_isys_get_resp(void *context, unsigned int queue,
+		     struct ipu_fw_isys_resp_info_abi *response);
+void ipu_fw_isys_put_resp(void *context, unsigned int queue);
+#endif
diff --git a/drivers/media/pci/intel/ipu-fw-psys.c b/drivers/media/pci/intel/ipu-fw-psys.c
new file mode 100644
index 000000000000..f7639fd1e72b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-fw-psys.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2016 - 2020 Intel Corporation
+
+#include <linux/delay.h>
+
+#include <uapi/linux/ipu-psys.h>
+
+#include "ipu-fw-com.h"
+#include "ipu-fw-psys.h"
+#include "ipu-psys.h"
+
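+/*
+ * Command submission follows the ipu-fw-com token protocol:
+ * ipu_send_get_token() reserves a slot in the chosen command queue, the
+ * caller fills in the ipu_fw_psys_cmd and ipu_send_put_token() hands the
+ * slot over to the firmware.
+ */
+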
+int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd)
+{
+	kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_STARTED;
+	return 0;
+}
+
+int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd)
+{
+	struct ipu_fw_psys_cmd *psys_cmd;
+	int ret = 0;
+
+	psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0);
+	if (!psys_cmd) {
+		dev_err(&kcmd->fh->psys->adev->dev,
+			"%s failed to get token!\n", __func__);
+		kcmd->pg_user = NULL;
+		ret = -ENODATA;
+		goto out;
+	}
+	psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_START;
+	psys_cmd->msg = 0;
+	psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address;
+	ipu_send_put_token(kcmd->fh->psys->fwcom, 0);
+
+out:
+	return ret;
+}
+
+int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd)
+{
+	struct ipu_fw_psys_cmd *psys_cmd;
+	int ret = 0;
+
+	/* ppg suspend cmd uses QUEUE_DEVICE_ID instead of QUEUE_COMMAND_ID */
+	psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 1);
+	if (!psys_cmd) {
+		dev_err(&kcmd->fh->psys->adev->dev,
+			"%s failed to get token!\n", __func__);
+		kcmd->pg_user = NULL;
+		ret = -ENODATA;
+		goto out;
+	}
+	psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND;
+	psys_cmd->msg = 0;
+	psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address;
+	ipu_send_put_token(kcmd->fh->psys->fwcom, 1);
+
+out:
+	return ret;
+}
+
+int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd)
+{
+	struct ipu_fw_psys_cmd *psys_cmd;
+	int ret = 0;
+
+	psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0);
+	if (!psys_cmd) {
+		dev_err(&kcmd->fh->psys->adev->dev,
+			"%s failed to get token!\n", __func__);
+		kcmd->pg_user = NULL;
+		ret = -ENODATA;
+		goto out;
+	}
+	psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME;
+	psys_cmd->msg = 0;
+	psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address;
+	ipu_send_put_token(kcmd->fh->psys->fwcom, 0);
+
+out:
+	return ret;
+}
+
+int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd)
+{
+	struct ipu_fw_psys_cmd *psys_cmd;
+	int ret = 0;
+
+	psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0);
+	if (!psys_cmd) {
+		dev_err(&kcmd->fh->psys->adev->dev,
+			"%s failed to get token!\n", __func__);
+		kcmd->pg_user = NULL;
+		ret = -ENODATA;
+		goto out;
+	}
+	psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP;
+	psys_cmd->msg = 0;
+	psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address;
+	ipu_send_put_token(kcmd->fh->psys->fwcom, 0);
+
+out:
+	return ret;
+}
+
+int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd)
+{
+	kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_BLOCKED;
+	return 0;
+}
+
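+/*
+ * Poll the PSYS event queue: returns 1 when an event was copied into
+ * @event, 0 when the queue is empty.
+ */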
+int ipu_fw_psys_rcv_event(struct ipu_psys *psys,
+			  struct ipu_fw_psys_event *event)
+{
+	void *rcv;
+
+	rcv = ipu_recv_get_token(psys->fwcom, 0);
+	if (!rcv)
+		return 0;
+
+	memcpy(event, rcv, sizeof(*event));
+	ipu_recv_put_token(psys->fwcom, 0);
+	return 1;
+}
+
+int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal,
+			     int terminal_idx,
+			     struct ipu_psys_kcmd *kcmd,
+			     u32 buffer, unsigned int size)
+{
+	u32 type;
+	u32 buffer_state;
+
+	type = terminal->terminal_type;
+
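+	/*
+	 * Derive the initial buffer state from the terminal type:
+	 * parameter and program terminals have no defined buffer state,
+	 * input data arrives full and output buffers start out empty.
+	 */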
+	switch (type) {
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
+		buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED;
+		break;
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM:
+	case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN:
+	case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN:
+		buffer_state = IPU_FW_PSYS_BUFFER_FULL;
+		break;
+	case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT:
+	case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT:
+		buffer_state = IPU_FW_PSYS_BUFFER_EMPTY;
+		break;
+	default:
+		dev_err(&kcmd->fh->psys->adev->dev,
+			"unknown terminal type: 0x%x\n", type);
+		return -EAGAIN;
+	}
+
+	if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN ||
+	    type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) {
+		struct ipu_fw_psys_data_terminal *dterminal =
+		    (struct ipu_fw_psys_data_terminal *)terminal;
+		dterminal->connection_type = IPU_FW_PSYS_CONNECTION_MEMORY;
+		dterminal->frame.data_bytes = size;
+		if (!ipu_fw_psys_pg_get_protocol(kcmd))
+			dterminal->frame.data = buffer;
+		else
+			dterminal->frame.data_index = terminal_idx;
+		dterminal->frame.buffer_state = buffer_state;
+	} else {
+		struct ipu_fw_psys_param_terminal *pterminal =
+		    (struct ipu_fw_psys_param_terminal *)terminal;
+		if (!ipu_fw_psys_pg_get_protocol(kcmd))
+			pterminal->param_payload.buffer = buffer;
+		else
+			pterminal->param_payload.terminal_index = terminal_idx;
+	}
+	return 0;
+}
+
+void ipu_fw_psys_pg_dump(struct ipu_psys *psys,
+			 struct ipu_psys_kcmd *kcmd, const char *note)
+{
+	if (ipu_ver == IPU_VER_6SE)
+		ipu6se_fw_psys_pg_dump(psys, kcmd, note);
+	else
+		ipu6_fw_psys_pg_dump(psys, kcmd, note);
+}
+
+int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd)
+{
+	return kcmd->kpg->pg->ID;
+}
+
+int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd)
+{
+	return kcmd->kpg->pg->terminal_count;
+}
+
+int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd)
+{
+	return kcmd->kpg->pg->size;
+}
+
+int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd,
+				    dma_addr_t vaddress)
+{
+	kcmd->kpg->pg->ipu_virtual_address = vaddress;
+	return 0;
+}
+
+struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd
+							 *kcmd, int index)
+{
+	struct ipu_fw_psys_terminal *terminal;
+	u16 *terminal_offset_table;
+
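+	/*
+	 * terminals_offset points to a table of per-terminal u16 offsets,
+	 * each relative to the start of the process group.
+	 */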
+	terminal_offset_table =
+	    (u16 *)((char *)kcmd->kpg->pg +
+		    kcmd->kpg->pg->terminals_offset);
+	terminal = (struct ipu_fw_psys_terminal *)
+	    ((char *)kcmd->kpg->pg + terminal_offset_table[index]);
+	return terminal;
+}
+
+void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token)
+{
+	kcmd->kpg->pg->token = token;
+}
+
+u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd)
+{
+	return kcmd->kpg->pg->token;
+}
+
+int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd)
+{
+	return kcmd->kpg->pg->protocol_version;
+}
+
+int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd,
+				   struct ipu_fw_psys_terminal *terminal,
+				   int terminal_idx, u32 buffer)
+{
+	u32 type;
+	u32 buffer_state;
+	u32 *buffer_ptr;
+	struct ipu_fw_psys_buffer_set *buf_set = kcmd->kbuf_set->buf_set;
+
+	type = terminal->terminal_type;
+
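+	/*
+	 * Same terminal type to buffer state mapping as in
+	 * ipu_fw_psys_terminal_set() above.
+	 */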
+	switch (type) {
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM:
+	case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
+		buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED;
+		break;
+	case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM:
+	case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN:
+	case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN:
+		buffer_state = IPU_FW_PSYS_BUFFER_FULL;
+		break;
+	case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT:
+	case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT:
+		buffer_state = IPU_FW_PSYS_BUFFER_EMPTY;
+		break;
+	default:
+		dev_err(&kcmd->fh->psys->adev->dev,
+			"unknown terminal type: 0x%x\n", type);
+		return -EAGAIN;
+	}
+
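+	/*
+	 * Per-terminal buffer addresses are laid out as a u32 array
+	 * immediately after the fixed part of the buffer set.
+	 */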
+	buffer_ptr = (u32 *)((char *)buf_set + sizeof(*buf_set) +
+			      terminal_idx * sizeof(*buffer_ptr));
+
+	*buffer_ptr = buffer;
+
+	if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN ||
+	    type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) {
+		struct ipu_fw_psys_data_terminal *dterminal =
+		    (struct ipu_fw_psys_data_terminal *)terminal;
+		dterminal->frame.buffer_state = buffer_state;
+	}
+
+	return 0;
+}
+
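+/*
+ * A buffer set is the fixed header followed by one u32 buffer address
+ * per terminal; see ipu_fw_psys_ppg_set_buffer_set() above.
+ */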
+size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd)
+{
+	return (sizeof(struct ipu_fw_psys_buffer_set) +
+		kcmd->kpg->pg->terminal_count * sizeof(u32));
+}
+
+int
+ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set,
+				    u32 vaddress)
+{
+	buf_set->ipu_virtual_address = vaddress;
+	return 0;
+}
+
+int ipu_fw_psys_ppg_buffer_set_set_kernel_enable_bitmap(
+		struct ipu_fw_psys_buffer_set *buf_set,
+		u32 *kernel_enable_bitmap)
+{
+	memcpy(buf_set->kernel_enable_bitmap, (u8 *)kernel_enable_bitmap,
+	       sizeof(buf_set->kernel_enable_bitmap));
+	return 0;
+}
+
+struct ipu_fw_psys_buffer_set *
+ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd,
+				  void *kaddr, u32 frame_counter)
+{
+	struct ipu_fw_psys_buffer_set *buffer_set;
+	unsigned int i;
+
+	buffer_set = (struct ipu_fw_psys_buffer_set *)kaddr;
+
+	/*
+	 * Set base struct members
+	 */
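+	/*
+	 * ipu_virtual_address is expected to be filled in later via
+	 * ipu_fw_psys_ppg_buffer_set_vaddress()
+	 */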
+	buffer_set->ipu_virtual_address = 0;
+	buffer_set->process_group_handle = kcmd->kpg->pg->ipu_virtual_address;
+	buffer_set->frame_counter = frame_counter;
+	buffer_set->terminal_count = kcmd->kpg->pg->terminal_count;
+
+	/*
+	 * Initialize adjacent buffer addresses
+	 */
+	for (i = 0; i < buffer_set->terminal_count; i++) {
+		u32 *buffer =
+		    (u32 *)((char *)buffer_set +
+			     sizeof(*buffer_set) + sizeof(u32) * i);
+
+		*buffer = 0;
+	}
+
+	return buffer_set;
+}
+
+int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd)
+{
+	struct ipu_fw_psys_cmd *psys_cmd;
+	unsigned int queue_id;
+	int ret = 0;
+	unsigned int size;
+
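+	/* the number of available command queues differs between IPU6 and
+	 * IPU6SE
+	 */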
+	if (ipu_ver == IPU_VER_6SE)
+		size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+	else
+		size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+	queue_id = kcmd->kpg->pg->base_queue_id;
+
+	if (queue_id >= size)
+		return -EINVAL;
+
+	psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, queue_id);
+	if (!psys_cmd) {
+		dev_err(&kcmd->fh->psys->adev->dev,
+			"%s failed to get token!\n", __func__);
+		kcmd->pg_user = NULL;
+		return -ENODATA;
+	}
+
+	psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN;
+	psys_cmd->msg = 0;
+	psys_cmd->context_handle = kcmd->kbuf_set->buf_set->ipu_virtual_address;
+
+	ipu_send_put_token(kcmd->fh->psys->fwcom, queue_id);
+
+	return ret;
+}
+
+u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd)
+{
+	return kcmd->kpg->pg->base_queue_id;
+}
+
+void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id)
+{
+	kcmd->kpg->pg->base_queue_id = queue_id;
+}
+
+int ipu_fw_psys_open(struct ipu_psys *psys)
+{
+	int retry = IPU_PSYS_OPEN_RETRY, retval;
+
+	retval = ipu_fw_com_open(psys->fwcom);
+	if (retval) {
+		dev_err(&psys->adev->dev, "fw com open failed.\n");
+		return retval;
+	}
+
+	do {
+		usleep_range(IPU_PSYS_OPEN_TIMEOUT_US,
+			     IPU_PSYS_OPEN_TIMEOUT_US + 10);
+		retval = ipu_fw_com_ready(psys->fwcom);
+		if (!retval) {
+			dev_dbg(&psys->adev->dev, "psys port open ready!\n");
+			break;
+		}
+	} while (retry-- > 0);
+
+	/* retry is -1 here when all attempts have been exhausted */
+	if (retval) {
+		dev_err(&psys->adev->dev, "psys port open ready failed %d\n",
+			retval);
+		ipu_fw_com_close(psys->fwcom);
+		return retval;
+	}
+	return 0;
+}
+
+int ipu_fw_psys_close(struct ipu_psys *psys)
+{
+	int retval;
+
+	retval = ipu_fw_com_close(psys->fwcom);
+	if (retval) {
+		dev_err(&psys->adev->dev, "fw com close failed.\n");
+		return retval;
+	}
+	return retval;
+}
diff --git a/drivers/media/pci/intel/ipu-fw-psys.h b/drivers/media/pci/intel/ipu-fw-psys.h
new file mode 100644
index 000000000000..f8f356104c05
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-fw-psys.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2016 - 2020 Intel Corporation */
+
+#ifndef IPU_FW_PSYS_H
+#define IPU_FW_PSYS_H
+
+#include "ipu6-platform-resources.h"
+#include "ipu6se-platform-resources.h"
+
+#define IPU_FW_PSYS_CMD_QUEUE_SIZE 0x20
+#define IPU_FW_PSYS_EVENT_QUEUE_SIZE 0x40
+
+#define IPU_FW_PSYS_CMD_BITS 64
+#define IPU_FW_PSYS_EVENT_BITS 128
+
+enum {
+	IPU_FW_PSYS_EVENT_TYPE_SUCCESS = 0,
+	IPU_FW_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1,
+	IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2,
+	IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3,
+	IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4,
+	IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_INVALID_FRAME = 13
+};
+
+enum {
+	IPU_FW_PSYS_EVENT_QUEUE_MAIN_ID,
+	IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID
+};
+
+enum {
+	IPU_FW_PSYS_PROCESS_GROUP_ERROR = 0,
+	IPU_FW_PSYS_PROCESS_GROUP_CREATED,
+	IPU_FW_PSYS_PROCESS_GROUP_READY,
+	IPU_FW_PSYS_PROCESS_GROUP_BLOCKED,
+	IPU_FW_PSYS_PROCESS_GROUP_STARTED,
+	IPU_FW_PSYS_PROCESS_GROUP_RUNNING,
+	IPU_FW_PSYS_PROCESS_GROUP_STALLED,
+	IPU_FW_PSYS_PROCESS_GROUP_STOPPED,
+	IPU_FW_PSYS_N_PROCESS_GROUP_STATES
+};
+
+enum {
+	IPU_FW_PSYS_CONNECTION_MEMORY = 0,
+	IPU_FW_PSYS_CONNECTION_MEMORY_STREAM,
+	IPU_FW_PSYS_CONNECTION_STREAM,
+	IPU_FW_PSYS_N_CONNECTION_TYPES
+};
+
+enum {
+	IPU_FW_PSYS_BUFFER_NULL = 0,
+	IPU_FW_PSYS_BUFFER_UNDEFINED,
+	IPU_FW_PSYS_BUFFER_EMPTY,
+	IPU_FW_PSYS_BUFFER_NONEMPTY,
+	IPU_FW_PSYS_BUFFER_FULL,
+	IPU_FW_PSYS_N_BUFFER_STATES
+};
+
+enum {
+	IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN = 0,
+	IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN,
+	IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM,
+	IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT,
+	IPU_FW_PSYS_N_TERMINAL_TYPES
+};
+
+enum {
+	IPU_FW_PSYS_COL_DIMENSION = 0,
+	IPU_FW_PSYS_ROW_DIMENSION = 1,
+	IPU_FW_PSYS_N_DATA_DIMENSION = 2
+};
+
+enum {
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_NOP = 0,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_SUBMIT,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_ATTACH,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_DETACH,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_START,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_DISOWN,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_ABORT,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_RESET,
+	IPU_FW_PSYS_N_PROCESS_GROUP_CMDS
+};
+
+enum {
+	IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_LEGACY = 0,
+	IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG,
+	IPU_FW_PSYS_PROCESS_GROUP_N_PROTOCOLS
+};
+
+struct __packed ipu_fw_psys_process_group {
+	u64 token;
+	u64 private_token;
+	u32 routing_bitmap[IPU_FW_PSYS_RBM_NOF_ELEMS];
+	u32 kernel_bitmap[IPU_FW_PSYS_KBM_NOF_ELEMS];
+	u32 size;
+	u32 psys_server_init_cycles;
+	u32 pg_load_start_ts;
+	u32 pg_load_cycles;
+	u32 pg_init_cycles;
+	u32 pg_processing_cycles;
+	u32 pg_next_frame_init_cycles;
+	u32 pg_complete_cycles;
+	u32 ID;
+	u32 state;
+	u32 ipu_virtual_address;
+	u32 resource_bitmap;
+	u16 fragment_count;
+	u16 fragment_state;
+	u16 fragment_limit;
+	u16 processes_offset;
+	u16 terminals_offset;
+	u8 process_count;
+	u8 terminal_count;
+	u8 subgraph_count;
+	u8 protocol_version;
+	u8 base_queue_id;
+	u8 num_queues;
+	u8 mask_irq;
+	u8 error_handling_enable;
+	u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT];
+};
+
+struct ipu_fw_psys_srv_init {
+	void *host_ddr_pkg_dir;
+	u32 ddr_pkg_dir_address;
+	u32 pkg_dir_size;
+
+	u32 icache_prefetch_sp;
+	u32 icache_prefetch_isp;
+};
+
+struct __packed ipu_fw_psys_cmd {
+	u16 command;
+	u16 msg;
+	u32 context_handle;
+};
+
+struct __packed ipu_fw_psys_event {
+	u16 status;
+	u16 command;
+	u32 context_handle;
+	u64 token;
+};
+
+struct ipu_fw_psys_terminal {
+	u32 terminal_type;
+	s16 parent_offset;
+	u16 size;
+	u16 tm_index;
+	u8 ID;
+	u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT];
+};
+
+struct ipu_fw_psys_param_payload {
+	u64 host_buffer;
+	u32 buffer;
+	u32 terminal_index;
+};
+
+struct ipu_fw_psys_param_terminal {
+	struct ipu_fw_psys_terminal base;
+	struct ipu_fw_psys_param_payload param_payload;
+	u16 param_section_desc_offset;
+	u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT];
+};
+
+struct ipu_fw_psys_frame {
+	u32 buffer_state;
+	u32 access_type;
+	u32 pointer_state;
+	u32 access_scope;
+	u32 data;
+	u32 data_index;
+	u32 data_bytes;
+	u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT];
+};
+
+struct ipu_fw_psys_frame_descriptor {
+	u32 frame_format_type;
+	u32 plane_count;
+	u32 plane_offsets[IPU_FW_PSYS_N_FRAME_PLANES];
+	u32 stride[1];
+	u32 ts_offsets[IPU_FW_PSYS_N_FRAME_PLANES];
+	u16 dimension[2];
+	u16 size;
+	u8 bpp;
+	u8 bpe;
+	u8 is_compressed;
+	u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT];
+};
+
+struct ipu_fw_psys_stream {
+	u64 dummy;
+};
+
+struct ipu_fw_psys_data_terminal {
+	struct ipu_fw_psys_terminal base;
+	struct ipu_fw_psys_frame_descriptor frame_descriptor;
+	struct ipu_fw_psys_frame frame;
+	struct ipu_fw_psys_stream stream;
+	u32 reserved;
+	u32 connection_type;
+	u16 fragment_descriptor_offset;
+	u8 kernel_id;
+	u8 subgraph_id;
+	u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT];
+};
+
+struct ipu_fw_psys_buffer_set {
+	u64 token;
+	u32 kernel_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+	u32 terminal_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+	u32 routing_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+	u32 rbm[IPU_FW_PSYS_RBM_NOF_ELEMS];
+	u32 ipu_virtual_address;
+	u32 process_group_handle;
+	u16 terminal_count;
+	u8 frame_counter;
+	u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT];
+};
+
+struct ipu_fw_psys_program_group_manifest {
+	u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+	u32 ID;
+	u16 program_manifest_offset;
+	u16 terminal_manifest_offset;
+	u16 private_data_offset;
+	u16 rbm_manifest_offset;
+	u16 size;
+	u8 alignment;
+	u8 kernel_count;
+	u8 program_count;
+	u8 terminal_count;
+	u8 subgraph_count;
+	u8 reserved[5];
+};
+
+struct ipu_fw_generic_program_manifest {
+	u16 *dev_chn_size;
+	u16 *dev_chn_offset;
+	u16 *ext_mem_size;
+	u16 *ext_mem_offset;
+	u8 cell_id;
+	u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS];
+	u8 cell_type_id;
+	u8 *is_dfm_relocatable;
+	u32 *dfm_port_bitmap;
+	u32 *dfm_active_port_bitmap;
+};
+
+struct ipu_fw_resource_definitions {
+	u32 num_cells;
+	u32 num_cells_type;
+	const u8 *cells;
+	u32 num_dev_channels;
+	const u16 *dev_channels;
+
+	u32 num_ext_mem_types;
+	u32 num_ext_mem_ids;
+	const u16 *ext_mem_ids;
+
+	u32 num_dfm_ids;
+	const u16 *dfms;
+
+	u32 cell_mem_row;
+	const u8 *cell_mem;
+};
+
+struct ipu6_psys_hw_res_variant {
+	unsigned int queue_num;
+	unsigned int cell_num;
+	int (*set_proc_dev_chn)(struct ipu_fw_psys_process *ptr, u16 offset,
+				u16 value);
+	int (*set_proc_dfm_bitmap)(struct ipu_fw_psys_process *ptr,
+				   u16 id, u32 bitmap, u32 active_bitmap);
+	int (*set_proc_ext_mem)(struct ipu_fw_psys_process *ptr,
+				u16 type_id, u16 mem_id, u16 offset);
+	int (*get_pgm_by_proc)(struct ipu_fw_generic_program_manifest *gen_pm,
+			       const struct ipu_fw_psys_program_group_manifest
+			       *pg_manifest,
+			       struct ipu_fw_psys_process *process);
+};
+
+struct ipu_psys_kcmd;
+struct ipu_psys;
+int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_server_init_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_next_frame_init_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_complete_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_rcv_event(struct ipu_psys *psys,
+			  struct ipu_fw_psys_event *event);
+int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal,
+			     int terminal_idx,
+			     struct ipu_psys_kcmd *kcmd,
+			     u32 buffer, unsigned int size);
+void ipu_fw_psys_pg_dump(struct ipu_psys *psys,
+			 struct ipu_psys_kcmd *kcmd, const char *note);
+int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd,
+				    dma_addr_t vaddress);
+struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd
+							 *kcmd, int index);
+void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token);
+u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd,
+				   struct ipu_fw_psys_terminal *terminal,
+				   int terminal_idx, u32 buffer);
+size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd);
+int
+ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set,
+				    u32 vaddress);
+int ipu_fw_psys_ppg_buffer_set_set_kernel_enable_bitmap(
+	struct ipu_fw_psys_buffer_set *buf_set, u32 *kernel_enable_bitmap);
+struct ipu_fw_psys_buffer_set *
+ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd,
+				  void *kaddr, u32 frame_counter);
+int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd);
+u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd);
+void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id);
+int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_open(struct ipu_psys *psys);
+int ipu_fw_psys_close(struct ipu_psys *psys);
+
+/* common resource interface for both abi and api mode */
+int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index,
+				    u8 value);
+u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index);
+int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr);
+int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+				 u16 value);
+int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+				    u16 type_id, u16 mem_id, u16 offset);
+int ipu_fw_psys_get_program_manifest_by_process(
+	struct ipu_fw_generic_program_manifest *gen_pm,
+	const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+	struct ipu_fw_psys_process *process);
+int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+				  u16 value);
+int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+				     u16 id, u32 bitmap,
+				     u32 active_bitmap);
+int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+				     u16 type_id, u16 mem_id, u16 offset);
+int ipu6_fw_psys_get_program_manifest_by_process(
+	struct ipu_fw_generic_program_manifest *gen_pm,
+	const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+	struct ipu_fw_psys_process *process);
+int ipu6se_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+				    u16 value);
+int ipu6se_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+				       u16 id, u32 bitmap,
+				       u32 active_bitmap);
+int ipu6se_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+				       u16 type_id, u16 mem_id, u16 offset);
+int ipu6se_fw_psys_get_program_manifest_by_process(
+	struct ipu_fw_generic_program_manifest *gen_pm,
+	const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+	struct ipu_fw_psys_process *process);
+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys,
+			  struct ipu_psys_kcmd *kcmd, const char *note);
+void ipu6se_fw_psys_pg_dump(struct ipu_psys *psys,
+			    struct ipu_psys_kcmd *kcmd, const char *note);
+void ipu6_psys_hw_res_variant_init(void);
+#endif /* IPU_FW_PSYS_H */
diff --git a/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c b/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c
new file mode 100644
index 000000000000..db90680e736d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2014 - 2020 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <media/ipu-isys.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2-be.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+
+/*
+ * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes.
+ * Otherwise pixel order calculation below WILL BREAK!
+ */
+static const u32 csi2_be_soc_supported_codes_pad[] = {
+	MEDIA_BUS_FMT_Y10_1X10,
+	MEDIA_BUS_FMT_RGB565_1X16,
+	MEDIA_BUS_FMT_RGB888_1X24,
+	MEDIA_BUS_FMT_UYVY8_1X16,
+	MEDIA_BUS_FMT_YUYV8_1X16,
+	MEDIA_BUS_FMT_SBGGR12_1X12,
+	MEDIA_BUS_FMT_SGBRG12_1X12,
+	MEDIA_BUS_FMT_SGRBG12_1X12,
+	MEDIA_BUS_FMT_SRGGB12_1X12,
+	MEDIA_BUS_FMT_SBGGR10_1X10,
+	MEDIA_BUS_FMT_SGBRG10_1X10,
+	MEDIA_BUS_FMT_SGRBG10_1X10,
+	MEDIA_BUS_FMT_SRGGB10_1X10,
+	MEDIA_BUS_FMT_SBGGR8_1X8,
+	MEDIA_BUS_FMT_SGBRG8_1X8,
+	MEDIA_BUS_FMT_SGRBG8_1X8,
+	MEDIA_BUS_FMT_SRGGB8_1X8,
+	0,
+};
+
+static const u32 *csi2_be_soc_supported_codes[NR_OF_CSI2_BE_SOC_PADS];
+
+static struct v4l2_subdev_internal_ops csi2_be_soc_sd_internal_ops = {
+	.open = ipu_isys_subdev_open,
+	.close = ipu_isys_subdev_close,
+};
+
+static const struct v4l2_subdev_core_ops csi2_be_soc_sd_core_ops = {
+};
+
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+	return 0;
+}
+
+static const struct v4l2_subdev_video_ops csi2_be_soc_sd_video_ops = {
+	.s_stream = set_stream,
+};
+
+static int
+__subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link,
+		       struct v4l2_subdev_format *source_fmt,
+		       struct v4l2_subdev_format *sink_fmt)
+{
+	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+						    struct ipu_isys_pipeline,
+						    pipe);
+
+	ip->csi2_be_soc = to_ipu_isys_csi2_be_soc(sd);
+	return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt);
+}
+
+static int
+ipu_isys_csi2_be_soc_set_sel(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_selection *sel)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+	struct media_pad *pad = &asd->sd.entity.pads[sel->pad];
+
+	if (sel->target == V4L2_SEL_TGT_CROP &&
+	    pad->flags & MEDIA_PAD_FL_SOURCE &&
+	    asd->valid_tgts[sel->pad].crop) {
+		struct v4l2_rect *r;
+		enum isys_subdev_prop_tgt tgt =
+		    IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP;
+
+		r = __ipu_isys_get_selection(sd, cfg, sel->target,
+					     0, sel->which);
+
+		/*
+		 * Cropping is not supported by SoC BE.
+		 * Only horizontal padding is allowed.
+		 */
+		sel->r.top = r->top;
+		sel->r.left = r->left;
+		sel->r.width = clamp(sel->r.width, r->width,
+				     IPU_ISYS_MAX_WIDTH);
+		sel->r.height = r->height;
+
+		*__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad,
+					  sel->which) = sel->r;
+		ipu_isys_subdev_fmt_propagate(sd, cfg, NULL, &sel->r,
+					      tgt, sel->pad, sel->which);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static const struct v4l2_subdev_pad_ops csi2_be_soc_sd_pad_ops = {
+	.link_validate = __subdev_link_validate,
+	.get_fmt = ipu_isys_subdev_get_ffmt,
+	.set_fmt = ipu_isys_subdev_set_ffmt,
+	.get_selection = ipu_isys_subdev_get_sel,
+	.set_selection = ipu_isys_csi2_be_soc_set_sel,
+	.enum_mbus_code = ipu_isys_subdev_enum_mbus_code,
+};
+
+static struct v4l2_subdev_ops csi2_be_soc_sd_ops = {
+	.core = &csi2_be_soc_sd_core_ops,
+	.video = &csi2_be_soc_sd_video_ops,
+	.pad = &csi2_be_soc_sd_pad_ops,
+};
+
+static struct media_entity_operations csi2_be_soc_entity_ops = {
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+static void csi2_be_soc_set_ffmt(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_format *fmt)
+{
+	struct v4l2_mbus_framefmt *ffmt =
+		__ipu_isys_get_ffmt(sd, cfg, fmt->pad,
+				    fmt->which);
+
+	if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SINK) {
+		if (fmt->format.field != V4L2_FIELD_ALTERNATE)
+			fmt->format.field = V4L2_FIELD_NONE;
+		*ffmt = fmt->format;
+
+		ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format,
+					      NULL,
+					      IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT,
+					      fmt->pad, fmt->which);
+	} else if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) {
+		struct v4l2_mbus_framefmt *sink_ffmt;
+		struct v4l2_rect *r;
+
+		sink_ffmt = __ipu_isys_get_ffmt(sd, cfg, 0, fmt->which);
+		r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP,
+					     fmt->pad, fmt->which);
+
+		ffmt->width = r->width;
+		ffmt->height = r->height;
+		ffmt->code = sink_ffmt->code;
+		ffmt->field = sink_ffmt->field;
+	}
+}
+
+void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be_soc)
+{
+	v4l2_device_unregister_subdev(&csi2_be_soc->asd.sd);
+	ipu_isys_subdev_cleanup(&csi2_be_soc->asd);
+	ipu_isys_video_cleanup(&csi2_be_soc->av);
+}
+
+int ipu_isys_csi2_be_soc_init(struct ipu_isys_csi2_be_soc *csi2_be_soc,
+			      struct ipu_isys *isys, int index)
+{
+	struct v4l2_subdev_format fmt = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.pad = CSI2_BE_SOC_PAD_SINK,
+		.format = {
+			   .width = 4096,
+			   .height = 3072,
+			   },
+	};
+	int rval, i;
+
+	csi2_be_soc->asd.sd.entity.ops = &csi2_be_soc_entity_ops;
+	csi2_be_soc->asd.isys = isys;
+
+	rval = ipu_isys_subdev_init(&csi2_be_soc->asd,
+				    &csi2_be_soc_sd_ops, 0,
+				    NR_OF_CSI2_BE_SOC_PADS,
+				    NR_OF_CSI2_BE_SOC_SOURCE_PADS,
+				    NR_OF_CSI2_BE_SOC_SINK_PADS, 0);
+	if (rval)
+		goto fail;
+
+	csi2_be_soc->asd.pad[CSI2_BE_SOC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	csi2_be_soc->asd.pad[CSI2_BE_SOC_PAD_SOURCE].flags =
+		MEDIA_PAD_FL_SOURCE;
+	csi2_be_soc->asd.valid_tgts[CSI2_BE_SOC_PAD_SOURCE].crop = true;
+
+	for (i = 0; i < NR_OF_CSI2_BE_SOC_PADS; i++)
+		csi2_be_soc_supported_codes[i] =
+			csi2_be_soc_supported_codes_pad;
+	csi2_be_soc->asd.supported_codes = csi2_be_soc_supported_codes;
+	csi2_be_soc->asd.be_mode = IPU_BE_SOC;
+	csi2_be_soc->asd.isl_mode = IPU_ISL_OFF;
+	csi2_be_soc->asd.set_ffmt = csi2_be_soc_set_ffmt;
+
+	fmt.pad = CSI2_BE_SOC_PAD_SINK;
+	ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt);
+	fmt.pad = CSI2_BE_SOC_PAD_SOURCE;
+	ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt);
+	csi2_be_soc->asd.sd.internal_ops = &csi2_be_soc_sd_internal_ops;
+
+	snprintf(csi2_be_soc->asd.sd.name, sizeof(csi2_be_soc->asd.sd.name),
+		 IPU_ISYS_ENTITY_PREFIX " CSI2 BE SOC %d", index);
+	v4l2_set_subdevdata(&csi2_be_soc->asd.sd, &csi2_be_soc->asd);
+
+	rval = v4l2_device_register_subdev(&isys->v4l2_dev,
+					   &csi2_be_soc->asd.sd);
+	if (rval) {
+		dev_info(&isys->adev->dev, "can't register v4l2 subdev\n");
+		goto fail;
+	}
+
+	snprintf(csi2_be_soc->av.vdev.name, sizeof(csi2_be_soc->av.vdev.name),
+		 IPU_ISYS_ENTITY_PREFIX " BE SOC capture %d", index);
+
+	/*
+	 * Pin type may be overwritten for the YUV422-to-I420 case at the
+	 * set_format phase
+	 */
+	csi2_be_soc->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_SOC;
+	csi2_be_soc->av.isys = isys;
+	csi2_be_soc->av.pfmts = ipu_isys_pfmts_be_soc;
+	csi2_be_soc->av.try_fmt_vid_mplane =
+		ipu_isys_video_try_fmt_vid_mplane_default;
+	csi2_be_soc->av.prepare_fw_stream =
+		ipu_isys_prepare_fw_cfg_default;
+	csi2_be_soc->av.aq.buf_prepare = ipu_isys_buf_prepare;
+	csi2_be_soc->av.aq.fill_frame_buff_set_pin =
+		ipu_isys_buffer_to_fw_frame_buff_pin;
+	csi2_be_soc->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate;
+	csi2_be_soc->av.aq.vbq.buf_struct_size =
+		sizeof(struct ipu_isys_video_buffer);
+
+	rval = ipu_isys_video_init(&csi2_be_soc->av,
+				   &csi2_be_soc->asd.sd.entity,
+				   CSI2_BE_SOC_PAD_SOURCE,
+				   MEDIA_PAD_FL_SINK,
+				   MEDIA_LNK_FL_DYNAMIC);
+	if (rval) {
+		dev_info(&isys->adev->dev, "can't init video node\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	ipu_isys_csi2_be_soc_cleanup(csi2_be_soc);
+
+	return rval;
+}
diff --git a/drivers/media/pci/intel/ipu-isys-csi2-be.c b/drivers/media/pci/intel/ipu-isys-csi2-be.c
new file mode 100644
index 000000000000..9aae37af8011
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-csi2-be.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2014 - 2020 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <media/ipu-isys.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2-be.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+
+/*
+ * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes.
+ * Otherwise pixel order calculation below WILL BREAK!
+ */
+static const u32 csi2_be_supported_codes_pad[] = {
+	MEDIA_BUS_FMT_SBGGR12_1X12,
+	MEDIA_BUS_FMT_SGBRG12_1X12,
+	MEDIA_BUS_FMT_SGRBG12_1X12,
+	MEDIA_BUS_FMT_SRGGB12_1X12,
+	MEDIA_BUS_FMT_SBGGR10_1X10,
+	MEDIA_BUS_FMT_SGBRG10_1X10,
+	MEDIA_BUS_FMT_SGRBG10_1X10,
+	MEDIA_BUS_FMT_SRGGB10_1X10,
+	MEDIA_BUS_FMT_SBGGR8_1X8,
+	MEDIA_BUS_FMT_SGBRG8_1X8,
+	MEDIA_BUS_FMT_SGRBG8_1X8,
+	MEDIA_BUS_FMT_SRGGB8_1X8,
+	0,
+};
+
+static const u32 *csi2_be_supported_codes[] = {
+	csi2_be_supported_codes_pad,
+	csi2_be_supported_codes_pad,
+};
+
+static struct v4l2_subdev_internal_ops csi2_be_sd_internal_ops = {
+	.open = ipu_isys_subdev_open,
+	.close = ipu_isys_subdev_close,
+};
+
+static const struct v4l2_subdev_core_ops csi2_be_sd_core_ops = {
+};
+
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+	return 0;
+}
+
+static const struct v4l2_subdev_video_ops csi2_be_sd_video_ops = {
+	.s_stream = set_stream,
+};
+
+static int __subdev_link_validate(struct v4l2_subdev *sd,
+				  struct media_link *link,
+				  struct v4l2_subdev_format *source_fmt,
+				  struct v4l2_subdev_format *sink_fmt)
+{
+	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+						    struct ipu_isys_pipeline,
+						    pipe);
+
+	ip->csi2_be = to_ipu_isys_csi2_be(sd);
+	return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt);
+}
+
+static int get_supported_code_index(u32 code)
+{
+	int i;
+
+	for (i = 0; csi2_be_supported_codes_pad[i]; i++) {
+		if (csi2_be_supported_codes_pad[i] == code)
+			return i;
+	}
+	return -EINVAL;
+}
+
+static int ipu_isys_csi2_be_set_sel(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_subdev_selection *sel)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+	struct media_pad *pad = &asd->sd.entity.pads[sel->pad];
+
+	if (sel->target == V4L2_SEL_TGT_CROP &&
+	    pad->flags & MEDIA_PAD_FL_SOURCE &&
+	    asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop) {
+		struct v4l2_mbus_framefmt *ffmt =
+			__ipu_isys_get_ffmt(sd, cfg, sel->pad, sel->which);
+		struct v4l2_rect *r = __ipu_isys_get_selection
+		    (sd, cfg, sel->target, CSI2_BE_PAD_SINK, sel->which);
+
+		if (get_supported_code_index(ffmt->code) < 0) {
+			/* Non-bayer formats can't be single line cropped */
+			sel->r.left &= ~1;
+			sel->r.top &= ~1;
+
+			/* Non-bayer formats can't be padded at all */
+			sel->r.width = clamp(sel->r.width,
+					     IPU_ISYS_MIN_WIDTH, r->width);
+		} else {
+			sel->r.width = clamp(sel->r.width,
+					     IPU_ISYS_MIN_WIDTH,
+					     IPU_ISYS_MAX_WIDTH);
+		}
+
+		/*
+		 * Vertical padding is not supported, height is
+		 * restricted by sink pad resolution.
+		 */
+		sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT,
+				      r->height);
+		*__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad,
+					  sel->which) = sel->r;
+		ipu_isys_subdev_fmt_propagate
+		    (sd, cfg, NULL, &sel->r,
+		     IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP,
+		     sel->pad, sel->which);
+		return 0;
+	}
+	return ipu_isys_subdev_set_sel(sd, cfg, sel);
+}
+
+static const struct v4l2_subdev_pad_ops csi2_be_sd_pad_ops = {
+	.link_validate = __subdev_link_validate,
+	.get_fmt = ipu_isys_subdev_get_ffmt,
+	.set_fmt = ipu_isys_subdev_set_ffmt,
+	.get_selection = ipu_isys_subdev_get_sel,
+	.set_selection = ipu_isys_csi2_be_set_sel,
+	.enum_mbus_code = ipu_isys_subdev_enum_mbus_code,
+};
+
+static struct v4l2_subdev_ops csi2_be_sd_ops = {
+	.core = &csi2_be_sd_core_ops,
+	.video = &csi2_be_sd_video_ops,
+	.pad = &csi2_be_sd_pad_ops,
+};
+
+static struct media_entity_operations csi2_be_entity_ops = {
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+static void csi2_be_set_ffmt(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt)
+{
+	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+	struct v4l2_mbus_framefmt *ffmt =
+		__ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which);
+
+	switch (fmt->pad) {
+	case CSI2_BE_PAD_SINK:
+		if (fmt->format.field != V4L2_FIELD_ALTERNATE)
+			fmt->format.field = V4L2_FIELD_NONE;
+		*ffmt = fmt->format;
+
+		ipu_isys_subdev_fmt_propagate
+		    (sd, cfg, &fmt->format, NULL,
+		     IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which);
+		return;
+	case CSI2_BE_PAD_SOURCE: {
+		struct v4l2_mbus_framefmt *sink_ffmt =
+			__ipu_isys_get_ffmt(sd, cfg, CSI2_BE_PAD_SINK,
+					    fmt->which);
+		struct v4l2_rect *r =
+			__ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP,
+						 CSI2_BE_PAD_SOURCE,
+						 fmt->which);
+		struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+		u32 code = sink_ffmt->code;
+		int idx = get_supported_code_index(code);
+
+		if (asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop && idx >= 0) {
+			int crop_info = 0;
+
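+			/*
+			 * Odd top/left crop offsets shift the bayer pattern.
+			 * The supported-codes table keeps the four pixel
+			 * orders of each bit depth adjacent, so XORing the
+			 * crop flags into the two low bits of the index
+			 * selects the resulting pixel order.
+			 */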
+			if (r->top & 1)
+				crop_info |= CSI2_BE_CROP_VER;
+			if (r->left & 1)
+				crop_info |= CSI2_BE_CROP_HOR;
+			code = csi2_be_supported_codes_pad
+				[((idx & CSI2_BE_CROP_MASK) ^ crop_info)
+				+ (idx & ~CSI2_BE_CROP_MASK)];
+		}
+		ffmt->width = r->width;
+		ffmt->height = r->height;
+		ffmt->code = code;
+		ffmt->field = sink_ffmt->field;
+		return;
+	}
+	default:
+		dev_err(&csi2->isys->adev->dev, "Unknown pad type\n");
+		WARN_ON(1);
+	}
+}
+
+void ipu_isys_csi2_be_cleanup(struct ipu_isys_csi2_be *csi2_be)
+{
+	v4l2_device_unregister_subdev(&csi2_be->asd.sd);
+	ipu_isys_subdev_cleanup(&csi2_be->asd);
+	ipu_isys_video_cleanup(&csi2_be->av);
+}
+
+int ipu_isys_csi2_be_init(struct ipu_isys_csi2_be *csi2_be,
+			  struct ipu_isys *isys)
+{
+	struct v4l2_subdev_format fmt = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.pad = CSI2_BE_PAD_SINK,
+		.format = {
+			   .width = 4096,
+			   .height = 3072,
+			  },
+	};
+	struct v4l2_subdev_selection sel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.pad = CSI2_BE_PAD_SOURCE,
+		.target = V4L2_SEL_TGT_CROP,
+		.r = {
+		      .width = fmt.format.width,
+		      .height = fmt.format.height,
+		     },
+	};
+	int rval;
+
+	csi2_be->asd.sd.entity.ops = &csi2_be_entity_ops;
+	csi2_be->asd.isys = isys;
+
+	rval = ipu_isys_subdev_init(&csi2_be->asd, &csi2_be_sd_ops, 0,
+				    NR_OF_CSI2_BE_PADS,
+				    NR_OF_CSI2_BE_SOURCE_PADS,
+				    NR_OF_CSI2_BE_SINK_PADS, 0);
+	if (rval)
+		goto fail;
+
+	csi2_be->asd.pad[CSI2_BE_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+	    | MEDIA_PAD_FL_MUST_CONNECT;
+	csi2_be->asd.pad[CSI2_BE_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+	csi2_be->asd.valid_tgts[CSI2_BE_PAD_SOURCE].crop = true;
+	csi2_be->asd.set_ffmt = csi2_be_set_ffmt;
+
+	BUILD_BUG_ON(ARRAY_SIZE(csi2_be_supported_codes) != NR_OF_CSI2_BE_PADS);
+	csi2_be->asd.supported_codes = csi2_be_supported_codes;
+	csi2_be->asd.be_mode = IPU_BE_RAW;
+	csi2_be->asd.isl_mode = IPU_ISL_CSI2_BE;
+
+	ipu_isys_subdev_set_ffmt(&csi2_be->asd.sd, NULL, &fmt);
+	ipu_isys_csi2_be_set_sel(&csi2_be->asd.sd, NULL, &sel);
+
+	csi2_be->asd.sd.internal_ops = &csi2_be_sd_internal_ops;
+	snprintf(csi2_be->asd.sd.name, sizeof(csi2_be->asd.sd.name),
+		 IPU_ISYS_ENTITY_PREFIX " CSI2 BE");
+	snprintf(csi2_be->av.vdev.name, sizeof(csi2_be->av.vdev.name),
+		 IPU_ISYS_ENTITY_PREFIX " CSI2 BE capture");
+	csi2_be->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_NS;
+	v4l2_set_subdevdata(&csi2_be->asd.sd, &csi2_be->asd);
+	rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2_be->asd.sd);
+	if (rval) {
+		dev_info(&isys->adev->dev, "can't register v4l2 subdev\n");
+		goto fail;
+	}
+
+	csi2_be->av.isys = isys;
+	csi2_be->av.pfmts = ipu_isys_pfmts;
+	csi2_be->av.try_fmt_vid_mplane =
+	    ipu_isys_video_try_fmt_vid_mplane_default;
+	csi2_be->av.prepare_fw_stream =
+	    ipu_isys_prepare_fw_cfg_default;
+	csi2_be->av.aq.buf_prepare = ipu_isys_buf_prepare;
+	csi2_be->av.aq.fill_frame_buff_set_pin =
+	    ipu_isys_buffer_to_fw_frame_buff_pin;
+	csi2_be->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate;
+	csi2_be->av.aq.vbq.buf_struct_size =
+	    sizeof(struct ipu_isys_video_buffer);
+
+	rval = ipu_isys_video_init(&csi2_be->av, &csi2_be->asd.sd.entity,
+				   CSI2_BE_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0);
+	if (rval) {
+		dev_info(&isys->adev->dev, "can't init video node\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	ipu_isys_csi2_be_cleanup(csi2_be);
+
+	return rval;
+}
diff --git a/drivers/media/pci/intel/ipu-isys-csi2-be.h b/drivers/media/pci/intel/ipu-isys-csi2-be.h
new file mode 100644
index 000000000000..b90e55446948
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-csi2-be.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_CSI2_BE_H
+#define IPU_ISYS_CSI2_BE_H
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+
+#include "ipu-isys-queue.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-isys.h"
+
+struct ipu_isys_csi2_be_pdata;
+struct ipu_isys;
+
+#define CSI2_BE_PAD_SINK		0
+#define CSI2_BE_PAD_SOURCE		1
+
+#define NR_OF_CSI2_BE_PADS		2
+#define NR_OF_CSI2_BE_SOURCE_PADS	1
+#define NR_OF_CSI2_BE_SINK_PADS		1
+
+#define NR_OF_CSI2_BE_SOC_SOURCE_PADS	1
+#define NR_OF_CSI2_BE_SOC_SINK_PADS	1
+#define CSI2_BE_SOC_PAD_SINK 0
+#define CSI2_BE_SOC_PAD_SOURCE 1
+#define NR_OF_CSI2_BE_SOC_PADS \
+	(NR_OF_CSI2_BE_SOC_SOURCE_PADS + NR_OF_CSI2_BE_SOC_SINK_PADS)
+
+#define CSI2_BE_CROP_HOR	BIT(0)
+#define CSI2_BE_CROP_VER	BIT(1)
+#define CSI2_BE_CROP_MASK	(CSI2_BE_CROP_VER | CSI2_BE_CROP_HOR)
+
+/*
+ * struct ipu_isys_csi2_be
+ */
+struct ipu_isys_csi2_be {
+	struct ipu_isys_csi2_be_pdata *pdata;
+	struct ipu_isys_subdev asd;
+	struct ipu_isys_video av;
+};
+
+struct ipu_isys_csi2_be_soc {
+	struct ipu_isys_csi2_be_pdata *pdata;
+	struct ipu_isys_subdev asd;
+	struct ipu_isys_video av;
+};
+
+#define to_ipu_isys_csi2_be(sd)	\
+	container_of(to_ipu_isys_subdev(sd), \
+	struct ipu_isys_csi2_be, asd)
+
+#define to_ipu_isys_csi2_be_soc(sd)	\
+	container_of(to_ipu_isys_subdev(sd), \
+	struct ipu_isys_csi2_be_soc, asd)
+
+int ipu_isys_csi2_be_init(struct ipu_isys_csi2_be *csi2_be,
+			  struct ipu_isys *isys);
+int ipu_isys_csi2_be_soc_init(struct ipu_isys_csi2_be_soc *csi2_be_soc,
+			      struct ipu_isys *isys, int index);
+void ipu_isys_csi2_be_cleanup(struct ipu_isys_csi2_be *csi2_be);
+void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be);
+
+#endif /* IPU_ISYS_CSI2_BE_H */
diff --git a/drivers/media/pci/intel/ipu-isys-csi2.c b/drivers/media/pci/intel/ipu-isys-csi2.c
new file mode 100644
index 000000000000..4838537b0c40
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-csi2.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/version.h>
+
+#include <media/ipu-isys.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-buttress.h"
+#include "ipu-isys.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-regs.h"
+
+static const u32 csi2_supported_codes_pad_sink[] = {
+	MEDIA_BUS_FMT_Y10_1X10,
+	MEDIA_BUS_FMT_RGB565_1X16,
+	MEDIA_BUS_FMT_RGB888_1X24,
+	MEDIA_BUS_FMT_UYVY8_1X16,
+	MEDIA_BUS_FMT_YUYV8_1X16,
+	MEDIA_BUS_FMT_YUYV10_1X20,
+	MEDIA_BUS_FMT_SBGGR10_1X10,
+	MEDIA_BUS_FMT_SGBRG10_1X10,
+	MEDIA_BUS_FMT_SGRBG10_1X10,
+	MEDIA_BUS_FMT_SRGGB10_1X10,
+	MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+	MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+	MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+	MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+	MEDIA_BUS_FMT_SBGGR12_1X12,
+	MEDIA_BUS_FMT_SGBRG12_1X12,
+	MEDIA_BUS_FMT_SGRBG12_1X12,
+	MEDIA_BUS_FMT_SRGGB12_1X12,
+	MEDIA_BUS_FMT_SBGGR8_1X8,
+	MEDIA_BUS_FMT_SGBRG8_1X8,
+	MEDIA_BUS_FMT_SGRBG8_1X8,
+	MEDIA_BUS_FMT_SRGGB8_1X8,
+	0,
+};
+
+static const u32 csi2_supported_codes_pad_source[] = {
+	MEDIA_BUS_FMT_Y10_1X10,
+	MEDIA_BUS_FMT_RGB565_1X16,
+	MEDIA_BUS_FMT_RGB888_1X24,
+	MEDIA_BUS_FMT_UYVY8_1X16,
+	MEDIA_BUS_FMT_YUYV8_1X16,
+	MEDIA_BUS_FMT_YUYV10_1X20,
+	MEDIA_BUS_FMT_SBGGR10_1X10,
+	MEDIA_BUS_FMT_SGBRG10_1X10,
+	MEDIA_BUS_FMT_SGRBG10_1X10,
+	MEDIA_BUS_FMT_SRGGB10_1X10,
+	MEDIA_BUS_FMT_SBGGR12_1X12,
+	MEDIA_BUS_FMT_SGBRG12_1X12,
+	MEDIA_BUS_FMT_SGRBG12_1X12,
+	MEDIA_BUS_FMT_SRGGB12_1X12,
+	MEDIA_BUS_FMT_SBGGR8_1X8,
+	MEDIA_BUS_FMT_SGBRG8_1X8,
+	MEDIA_BUS_FMT_SGRBG8_1X8,
+	MEDIA_BUS_FMT_SRGGB8_1X8,
+	0,
+};
+
+static const u32 *csi2_supported_codes[NR_OF_CSI2_PADS];
+
+static struct v4l2_subdev_internal_ops csi2_sd_internal_ops = {
+	.open = ipu_isys_subdev_open,
+	.close = ipu_isys_subdev_close,
+};
+
+int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq)
+{
+	struct ipu_isys_pipeline *pipe = container_of(csi2->asd.sd.entity.pipe,
+						      struct ipu_isys_pipeline,
+						      pipe);
+	struct v4l2_subdev *ext_sd =
+	    media_entity_to_v4l2_subdev(pipe->external->entity);
+	struct v4l2_ext_control c = {.id = V4L2_CID_LINK_FREQ, };
+	struct v4l2_ext_controls cs = {.count = 1,
+		.controls = &c,
+	};
+	struct v4l2_querymenu qm = {.id = c.id, };
+	int rval;
+
+	if (!ext_sd) {
+		WARN_ON(1);
+		return -ENODEV;
+	}
+	rval = v4l2_g_ext_ctrls(ext_sd->ctrl_handler,
+				ext_sd->devnode,
+				ext_sd->v4l2_dev->mdev,
+				&cs);
+	if (rval) {
+		dev_info(&csi2->isys->adev->dev, "can't get link frequency\n");
+		return rval;
+	}
+
+	qm.index = c.value;
+
+	rval = v4l2_querymenu(ext_sd->ctrl_handler, &qm);
+	if (rval) {
+		dev_info(&csi2->isys->adev->dev, "can't get menu item\n");
+		return rval;
+	}
+
+	dev_dbg(&csi2->isys->adev->dev, "%s: link frequency %lld\n", __func__,
+		qm.value);
+
+	if (!qm.value)
+		return -EINVAL;
+	*link_freq = qm.value;
+	return 0;
+}
+
+static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+			   struct v4l2_event_subscription *sub)
+{
+	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+
+	dev_dbg(&csi2->isys->adev->dev, "subscribe event(type %u id %u)\n",
+		sub->type, sub->id);
+
+	switch (sub->type) {
+	case V4L2_EVENT_FRAME_SYNC:
+		return v4l2_event_subscribe(fh, sub, 10, NULL);
+	case V4L2_EVENT_CTRL:
+		return v4l2_ctrl_subscribe_event(fh, sub);
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct v4l2_subdev_core_ops csi2_sd_core_ops = {
+	.subscribe_event = subscribe_event,
+	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+/*
+ * The input system CSI2+ receiver has several
+ * parameters affecting the receiver timings. These depend
+ * on the MIPI bus frequency F in Hz (sensor transmitter rate)
+ * as follows:
+ *	register value = (A/1e9 + B * UI) / COUNT_ACC
+ * where
+ *	UI = 1 / (2 * F) in seconds
+ *	COUNT_ACC = counter accuracy in seconds
+ *	For IPU4,  COUNT_ACC = 0.125 ns
+ *
+ * A and B are coefficients from the table below,
+ * depending on whether the register minimum or maximum value is
+ * calculated.
+ *				       Minimum     Maximum
+ * Clock lane			       A     B     A     B
+ * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
+ * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
+ * Data lanes
+ * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
+ * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
+ * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
+ * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
+ * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
+ * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
+ * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
+ * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
+ *
+ * We use the minimum values of both A and B.
+ */
+
+#define DIV_SHIFT	8
+
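+/*
+ * Rationale for DIV_SHIFT (inferred, not from the original sources):
+ * shifting both the 500000000 constant and link_freq down by
+ * DIV_SHIFT in calc_timing() keeps the intermediate product
+ * accinv * b * (500000000 >> DIV_SHIFT) within s32 range and keeps
+ * the division in plain 32-bit arithmetic.
+ */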
+static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv)
+{
+	return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT)
+			     / (s32)(link_freq >> DIV_SHIFT));
+}
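+
+/*
+ * Worked example (a sketch, assuming a 1 GHz link frequency): with
+ * accinv = 8 (1 / 0.125 ns) and the data lane settle minimum
+ * coefficients A = 85, B = -2, calc_timing() yields
+ *	8 * 85 + (8 * -2 * (500000000 >> 8)) / (1000000000 >> 8)
+ *	= 680 - 8 = 672
+ * counter units. The 500000000 factor is B * UI expressed in
+ * nanoseconds: UI = 1 / (2 * F) s, so B * UI * 1e9 = B * 5e8 / F ns.
+ */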
+
+static int
+ipu_isys_csi2_calc_timing(struct ipu_isys_csi2 *csi2,
+			  struct ipu_isys_csi2_timing *timing, u32 accinv)
+{
+	__s64 link_freq;
+	int rval;
+
+	rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq);
+	if (rval)
+		return rval;
+
+	timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B,
+				      link_freq, accinv);
+	timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B,
+				      link_freq, accinv);
+	dev_dbg(&csi2->isys->adev->dev, "ctermen %u\n", timing->ctermen);
+	dev_dbg(&csi2->isys->adev->dev, "csettle %u\n", timing->csettle);
+
+	timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B,
+				      link_freq, accinv);
+	timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B,
+				      link_freq, accinv);
+	dev_dbg(&csi2->isys->adev->dev, "dtermen %u\n", timing->dtermen);
+	dev_dbg(&csi2->isys->adev->dev, "dsettle %u\n", timing->dsettle);
+
+	return 0;
+}
+
+#define CSI2_ACCINV	8
+
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+						    struct ipu_isys_pipeline,
+						    pipe);
+	struct ipu_isys_csi2_config *cfg;
+	struct v4l2_subdev *ext_sd;
+	struct ipu_isys_csi2_timing timing = {0};
+	unsigned int nlanes;
+	int rval;
+
+	dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable);
+
+	if (!ip->external->entity) {
+		WARN_ON(1);
+		return -ENODEV;
+	}
+	ext_sd = media_entity_to_v4l2_subdev(ip->external->entity);
+	cfg = v4l2_get_subdev_hostdata(ext_sd);
+
+	if (!enable) {
+		ipu_isys_csi2_set_stream(sd, timing, 0, enable);
+		return 0;
+	}
+
+	ip->has_sof = true;
+
+	nlanes = cfg->nlanes;
+
+	dev_dbg(&csi2->isys->adev->dev, "lane nr %u.\n", nlanes);
+
+	rval = ipu_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV);
+	if (rval)
+		return rval;
+
+	rval = ipu_isys_csi2_set_stream(sd, timing, nlanes, enable);
+
+	return rval;
+}
+
+static void csi2_capture_done(struct ipu_isys_pipeline *ip,
+			      struct ipu_fw_isys_resp_info_abi *info)
+{
+	if (ip->interlaced && ip->isys->short_packet_source ==
+	    IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) {
+		struct ipu_isys_buffer *ib;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+		if (!list_empty(&ip->short_packet_active)) {
+			ib = list_last_entry(&ip->short_packet_active,
+					     struct ipu_isys_buffer, head);
+			list_move(&ib->head, &ip->short_packet_incoming);
+		}
+		spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+	}
+}
+
+static int csi2_link_validate(struct media_link *link)
+{
+	struct ipu_isys_csi2 *csi2;
+	struct ipu_isys_pipeline *ip;
+	int rval;
+
+	if (!link->sink->entity ||
+	    !link->sink->entity->pipe || !link->source->entity)
+		return -EINVAL;
+	csi2 =
+	    to_ipu_isys_csi2(media_entity_to_v4l2_subdev(link->sink->entity));
+	ip = to_ipu_isys_pipeline(link->sink->entity->pipe);
+	csi2->receiver_errors = 0;
+	ip->csi2 = csi2;
+	ipu_isys_video_add_capture_done(to_ipu_isys_pipeline
+					(link->sink->entity->pipe),
+					csi2_capture_done);
+
+	rval = v4l2_subdev_link_validate(link);
+	if (rval)
+		return rval;
+
+	if (!v4l2_ctrl_g_ctrl(csi2->store_csi2_header)) {
+		struct media_pad *remote_pad =
+		    media_entity_remote_pad(&csi2->asd.pad[CSI2_PAD_SOURCE]);
+
+		if (remote_pad &&
+		    is_media_entity_v4l2_subdev(remote_pad->entity)) {
+			dev_err(&csi2->isys->adev->dev,
+				"CSI2 BE requires CSI2 headers.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static const struct v4l2_subdev_video_ops csi2_sd_video_ops = {
+	.s_stream = set_stream,
+};
+
+static int ipu_isys_csi2_get_fmt(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_format *fmt)
+{
+	return ipu_isys_subdev_get_ffmt(sd, cfg, fmt);
+}
+
+static int ipu_isys_csi2_set_fmt(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_format *fmt)
+{
+	return ipu_isys_subdev_set_ffmt(sd, cfg, fmt);
+}
+
+static int __subdev_link_validate(struct v4l2_subdev *sd,
+				  struct media_link *link,
+				  struct v4l2_subdev_format *source_fmt,
+				  struct v4l2_subdev_format *sink_fmt)
+{
+	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+						    struct ipu_isys_pipeline,
+						    pipe);
+
+	if (source_fmt->format.field == V4L2_FIELD_ALTERNATE)
+		ip->interlaced = true;
+
+	return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt);
+}
+
+static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = {
+	.link_validate = __subdev_link_validate,
+	.get_fmt = ipu_isys_csi2_get_fmt,
+	.set_fmt = ipu_isys_csi2_set_fmt,
+	.enum_mbus_code = ipu_isys_subdev_enum_mbus_code,
+};
+
+static struct v4l2_subdev_ops csi2_sd_ops = {
+	.core = &csi2_sd_core_ops,
+	.video = &csi2_sd_video_ops,
+	.pad = &csi2_sd_pad_ops,
+};
+
+static const struct media_entity_operations csi2_entity_ops = {
+	.link_validate = csi2_link_validate,
+};
+
+static void csi2_set_ffmt(struct v4l2_subdev *sd,
+			  struct v4l2_subdev_pad_config *cfg,
+			  struct v4l2_subdev_format *fmt)
+{
+	enum isys_subdev_prop_tgt tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT;
+	struct v4l2_mbus_framefmt *ffmt =
+		__ipu_isys_get_ffmt(sd, cfg, fmt->pad,
+				    fmt->which);
+
+	if (fmt->format.field != V4L2_FIELD_ALTERNATE)
+		fmt->format.field = V4L2_FIELD_NONE;
+
+	if (fmt->pad == CSI2_PAD_SINK) {
+		*ffmt = fmt->format;
+		ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, NULL,
+					      tgt, fmt->pad, fmt->which);
+		return;
+	}
+
+	if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) {
+		ffmt->width = fmt->format.width;
+		ffmt->height = fmt->format.height;
+		ffmt->field = fmt->format.field;
+		ffmt->code =
+		    ipu_isys_subdev_code_to_uncompressed(fmt->format.code);
+		return;
+	}
+
+	WARN_ON(1);
+}
+
+static const struct ipu_isys_pixelformat *
+csi2_try_fmt(struct ipu_isys_video *av,
+	     struct v4l2_pix_format_mplane *mpix)
+{
+	struct media_link *link = list_first_entry(&av->vdev.entity.links,
+						   struct media_link, list);
+	struct v4l2_subdev *sd =
+	    media_entity_to_v4l2_subdev(link->source->entity);
+	struct ipu_isys_csi2 *csi2;
+
+	if (!sd)
+		return NULL;
+
+	csi2 = to_ipu_isys_csi2(sd);
+
+	return ipu_isys_video_try_fmt_vid_mplane(av, mpix,
+				v4l2_ctrl_g_ctrl(csi2->store_csi2_header));
+}
+
+void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2)
+{
+	if (!csi2->isys)
+		return;
+
+	v4l2_device_unregister_subdev(&csi2->asd.sd);
+	ipu_isys_subdev_cleanup(&csi2->asd);
+	ipu_isys_video_cleanup(&csi2->av);
+	csi2->isys = NULL;
+}
+
+static void csi_ctrl_init(struct v4l2_subdev *sd)
+{
+	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+
+	static const struct v4l2_ctrl_config cfg = {
+		.id = V4L2_CID_IPU_STORE_CSI2_HEADER,
+		.name = "Store CSI-2 Headers",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.min = 0,
+		.max = 1,
+		.step = 1,
+		.def = 1,
+	};
+
+	csi2->store_csi2_header = v4l2_ctrl_new_custom(&csi2->asd.ctrl_handler,
+						       &cfg, NULL);
+}
+
+int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2,
+		       struct ipu_isys *isys,
+		       void __iomem *base, unsigned int index)
+{
+	struct v4l2_subdev_format fmt = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.pad = CSI2_PAD_SINK,
+		.format = {
+			   .width = 4096,
+			   .height = 3072,
+			  },
+	};
+	int i, rval, src;
+
+	dev_dbg(&isys->adev->dev, "csi-%d base = 0x%lx\n", index,
+		(unsigned long)base);
+	csi2->isys = isys;
+	csi2->base = base;
+	csi2->index = index;
+
+	csi2->asd.sd.entity.ops = &csi2_entity_ops;
+	csi2->asd.ctrl_init = csi_ctrl_init;
+	csi2->asd.isys = isys;
+	init_completion(&csi2->eof_completion);
+	rval = ipu_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0,
+				    NR_OF_CSI2_PADS,
+				    NR_OF_CSI2_SOURCE_PADS,
+				    NR_OF_CSI2_SINK_PADS,
+				    0);
+	if (rval)
+		goto fail;
+
+	csi2->asd.pad[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+		| MEDIA_PAD_FL_MUST_CONNECT;
+	csi2->asd.pad[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+	src = index;
+	csi2->asd.source = IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 + src;
+	csi2_supported_codes[CSI2_PAD_SINK] = csi2_supported_codes_pad_sink;
+
+	for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++)
+		csi2_supported_codes[i + 1] = csi2_supported_codes_pad_source;
+	csi2->asd.supported_codes = csi2_supported_codes;
+	csi2->asd.set_ffmt = csi2_set_ffmt;
+
+	csi2->asd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+	csi2->asd.sd.internal_ops = &csi2_sd_internal_ops;
+	snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name),
+		 IPU_ISYS_ENTITY_PREFIX " CSI-2 %u", index);
+	v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd);
+
+	rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd);
+	if (rval) {
+		dev_info(&isys->adev->dev, "can't register v4l2 subdev\n");
+		goto fail;
+	}
+
+	mutex_lock(&csi2->asd.mutex);
+	__ipu_isys_subdev_set_ffmt(&csi2->asd.sd, NULL, &fmt);
+	mutex_unlock(&csi2->asd.mutex);
+
+	snprintf(csi2->av.vdev.name, sizeof(csi2->av.vdev.name),
+		 IPU_ISYS_ENTITY_PREFIX " CSI-2 %u capture", index);
+	csi2->av.isys = isys;
+	csi2->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI;
+	csi2->av.pfmts = ipu_isys_pfmts_packed;
+	csi2->av.try_fmt_vid_mplane = csi2_try_fmt;
+	csi2->av.prepare_fw_stream =
+		ipu_isys_prepare_fw_cfg_default;
+	csi2->av.packed = true;
+	csi2->av.line_header_length =
+		IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE;
+	csi2->av.line_footer_length =
+		IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE;
+	csi2->av.aq.buf_prepare = ipu_isys_buf_prepare;
+	csi2->av.aq.fill_frame_buff_set_pin =
+		ipu_isys_buffer_to_fw_frame_buff_pin;
+	csi2->av.aq.link_fmt_validate =
+		ipu_isys_link_fmt_validate;
+	csi2->av.aq.vbq.buf_struct_size =
+		sizeof(struct ipu_isys_video_buffer);
+
+	rval = ipu_isys_video_init(&csi2->av,
+				   &csi2->asd.sd.entity,
+				   CSI2_PAD_SOURCE,
+				   MEDIA_PAD_FL_SINK, 0);
+	if (rval) {
+		dev_info(&isys->adev->dev, "can't init video node\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	ipu_isys_csi2_cleanup(csi2);
+
+	return rval;
+}
+
+void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2)
+{
+	struct ipu_isys_pipeline *ip = NULL;
+	struct v4l2_event ev = {
+		.type = V4L2_EVENT_FRAME_SYNC,
+	};
+	struct video_device *vdev = csi2->asd.sd.devnode;
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&csi2->isys->lock, flags);
+	csi2->in_frame = true;
+
+	for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
+		if (csi2->isys->pipes[i] &&
+		    csi2->isys->pipes[i]->csi2 == csi2) {
+			ip = csi2->isys->pipes[i];
+			break;
+		}
+	}
+
+	/* Pipe already vanished */
+	if (!ip) {
+		spin_unlock_irqrestore(&csi2->isys->lock, flags);
+		return;
+	}
+
+	ev.u.frame_sync.frame_sequence = atomic_inc_return(&ip->sequence) - 1;
+	spin_unlock_irqrestore(&csi2->isys->lock, flags);
+
+	v4l2_event_queue(vdev, &ev);
+	dev_dbg(&csi2->isys->adev->dev,
+		"sof_event::csi2-%i sequence: %i\n",
+		csi2->index, ev.u.frame_sync.frame_sequence);
+}
+
+void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2)
+{
+	struct ipu_isys_pipeline *ip = NULL;
+	unsigned long flags;
+	unsigned int i;
+	u32 frame_sequence;
+
+	spin_lock_irqsave(&csi2->isys->lock, flags);
+	csi2->in_frame = false;
+	if (csi2->wait_for_sync)
+		complete(&csi2->eof_completion);
+
+	for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
+		if (csi2->isys->pipes[i] &&
+		    csi2->isys->pipes[i]->csi2 == csi2) {
+			ip = csi2->isys->pipes[i];
+			break;
+		}
+	}
+
+	if (!ip) {
+		spin_unlock_irqrestore(&csi2->isys->lock, flags);
+		return;
+	}
+
+	frame_sequence = atomic_read(&ip->sequence);
+	spin_unlock_irqrestore(&csi2->isys->lock, flags);
+
+	dev_dbg(&csi2->isys->adev->dev, "eof_event::csi2-%i sequence: %i\n",
+		csi2->index, frame_sequence);
+}
+
+/* Call this function only _after_ the sensor has been stopped */
+void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2)
+{
+	unsigned long flags, tout;
+
+	spin_lock_irqsave(&csi2->isys->lock, flags);
+
+	if (!csi2->in_frame) {
+		spin_unlock_irqrestore(&csi2->isys->lock, flags);
+		return;
+	}
+
+	reinit_completion(&csi2->eof_completion);
+	csi2->wait_for_sync = true;
+	spin_unlock_irqrestore(&csi2->isys->lock, flags);
+	tout = wait_for_completion_timeout(&csi2->eof_completion,
+					   IPU_EOF_TIMEOUT_JIFFIES);
+	if (!tout)
+		dev_err(&csi2->isys->adev->dev,
+			"csi2-%d: timeout at sync to eof\n",
+			csi2->index);
+	csi2->wait_for_sync = false;
+}
+
+struct ipu_isys_buffer *
+ipu_isys_csi2_get_short_packet_buffer(struct ipu_isys_pipeline *ip,
+				      struct ipu_isys_buffer_list *bl)
+{
+	struct ipu_isys_buffer *ib;
+	struct ipu_isys_private_buffer *pb;
+	struct ipu_isys_mipi_packet_header *ph;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+	if (list_empty(&ip->short_packet_incoming)) {
+		spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+		return NULL;
+	}
+	ib = list_last_entry(&ip->short_packet_incoming,
+			     struct ipu_isys_buffer, head);
+	pb = ipu_isys_buffer_to_private_buffer(ib);
+	ph = (struct ipu_isys_mipi_packet_header *)pb->buffer;
+
+	/* Fill the packet header with a magic number. */
+	ph->word_count = 0xffff;
+	ph->dtype = 0xff;
+
+	dma_sync_single_for_cpu(&ip->isys->adev->dev, pb->dma_addr,
+				sizeof(*ph), DMA_BIDIRECTIONAL);
+	spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+	list_move(&ib->head, &bl->head);
+
+	return ib;
+}
diff --git a/drivers/media/pci/intel/ipu-isys-csi2.h b/drivers/media/pci/intel/ipu-isys-csi2.h
new file mode 100644
index 000000000000..04c45571e9e1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-csi2.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_CSI2_H
+#define IPU_ISYS_CSI2_H
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+
+#include "ipu-isys-queue.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-isys.h"
+
+struct ipu_isys_csi2_timing;
+struct ipu_isys_csi2_pdata;
+struct ipu_isys;
+
+#define NR_OF_CSI2_SINK_PADS		1
+#define CSI2_PAD_SINK			0
+#define NR_OF_CSI2_SOURCE_PADS		1
+#define CSI2_PAD_SOURCE			1
+#define NR_OF_CSI2_PADS	(NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SOURCE_PADS)
+
+#define IPU_ISYS_SHORT_PACKET_BUFFER_NUM	VIDEO_MAX_FRAME
+#define IPU_ISYS_SHORT_PACKET_WIDTH	32
+#define IPU_ISYS_SHORT_PACKET_FRAME_PACKETS	2
+#define IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS	64
+#define IPU_ISYS_SHORT_PACKET_UNITSIZE	8
+#define IPU_ISYS_SHORT_PACKET_GENERAL_DT	0
+#define IPU_ISYS_SHORT_PACKET_PT		0
+#define IPU_ISYS_SHORT_PACKET_FT		0
+
+#define IPU_ISYS_SHORT_PACKET_STRIDE \
+	(IPU_ISYS_SHORT_PACKET_WIDTH * \
+	IPU_ISYS_SHORT_PACKET_UNITSIZE)
+#define IPU_ISYS_SHORT_PACKET_NUM(num_lines) \
+	((num_lines) * 2 + IPU_ISYS_SHORT_PACKET_FRAME_PACKETS + \
+	IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS)
+#define IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) \
+	DIV_ROUND_UP(IPU_ISYS_SHORT_PACKET_NUM(num_lines) * \
+	IPU_ISYS_SHORT_PACKET_UNITSIZE, \
+	IPU_ISYS_SHORT_PACKET_STRIDE)
+#define IPU_ISYS_SHORT_PACKET_BUF_SIZE(num_lines) \
+	(IPU_ISYS_SHORT_PACKET_WIDTH * \
+	IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) * \
+	IPU_ISYS_SHORT_PACKET_UNITSIZE)
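+
+/*
+ * Sizing example (for illustration only, not from the original
+ * sources): for a 1080-line frame, IPU_ISYS_SHORT_PACKET_NUM(1080)
+ * = 2 * 1080 + 2 + 64 = 2226 packets of 8 bytes each; with the
+ * 256-byte stride this rounds up to
+ * IPU_ISYS_SHORT_PACKET_PKT_LINES(1080) = 70 lines, giving
+ * IPU_ISYS_SHORT_PACKET_BUF_SIZE(1080) = 32 * 70 * 8 = 17920 bytes.
+ */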
+
+#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER	256
+#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE	16
+#define IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE \
+	(IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER * \
+	IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE)
+
+#define IPU_ISYS_SHORT_PACKET_FROM_RECEIVER	0
+#define IPU_ISYS_SHORT_PACKET_FROM_TUNIT		1
+
+#define IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT 100
+#define IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK	0x2082
+#define IPU_SKEW_CAL_LIMIT_HZ (1500000000ul / 2)
+
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A		0
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B		0
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A		95
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B		-8
+
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A		0
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B		0
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A		85
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B		-2
+
+#define IPU_EOF_TIMEOUT 300
+#define IPU_EOF_TIMEOUT_JIFFIES msecs_to_jiffies(IPU_EOF_TIMEOUT)
+
+/*
+ * struct ipu_isys_csi2
+ *
+ * @nlanes: number of lanes in the receiver
+ */
+struct ipu_isys_csi2 {
+	struct ipu_isys_csi2_pdata *pdata;
+	struct ipu_isys *isys;
+	struct ipu_isys_subdev asd;
+	struct ipu_isys_video av;
+	struct completion eof_completion;
+
+	void __iomem *base;
+	u32 receiver_errors;
+	unsigned int nlanes;
+	unsigned int index;
+	atomic_t sof_sequence;
+	bool in_frame;
+	bool wait_for_sync;
+
+	struct v4l2_ctrl *store_csi2_header;
+};
+
+struct ipu_isys_csi2_timing {
+	u32 ctermen;
+	u32 csettle;
+	u32 dtermen;
+	u32 dsettle;
+};
+
+/*
+ * This structure defines the MIPI packet header output
+ * from IPU MIPI receiver. Due to hardware conversion,
+ * this structure is not the same as defined in CSI-2 spec.
+ */
+struct ipu_isys_mipi_packet_header {
+	u32 word_count:16, dtype:13, sync:2, stype:1;
+	u32 sid:4, port_id:4, reserved:23, odd_even:1;
+} __packed;
+
+/*
+ * This structure defines the trace message content
+ * for CSI2 receiver monitor messages.
+ */
+struct ipu_isys_csi2_monitor_message {
+	u64 fe:1,
+	    fs:1,
+	    pe:1,
+	    ps:1,
+	    le:1,
+	    ls:1,
+	    reserved1:2,
+	    sequence:2,
+	    reserved2:2,
+	    flash_shutter:4,
+	    error_cause:12,
+	    fifo_overrun:1,
+	    crc_error:2,
+	    reserved3:1,
+	    timestamp_l:16,
+	    port:4, vc:2, reserved4:2, frame_sync:4, reserved5:4;
+	u64 reserved6:3,
+	    cmd:2, reserved7:1, monitor_id:7, reserved8:1, timestamp_h:50;
+} __packed;
+
+#define to_ipu_isys_csi2(sd) container_of(to_ipu_isys_subdev(sd), \
+					struct ipu_isys_csi2, asd)
+
+int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq);
+int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2,
+		       struct ipu_isys *isys,
+		       void __iomem *base, unsigned int index);
+void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2);
+struct ipu_isys_buffer *
+ipu_isys_csi2_get_short_packet_buffer(struct ipu_isys_pipeline *ip,
+				      struct ipu_isys_buffer_list *bl);
+void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2);
+void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2);
+void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2);
+
+/* interface for platform specific */
+int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd,
+			     struct ipu_isys_csi2_timing timing,
+			     unsigned int nlanes, int enable);
+unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip,
+					     unsigned int *timestamp);
+void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2);
+void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2);
+
+#endif /* IPU_ISYS_CSI2_H */
diff --git a/drivers/media/pci/intel/ipu-isys-media.h b/drivers/media/pci/intel/ipu-isys-media.h
new file mode 100644
index 000000000000..72b48bcbf7f1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-media.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2016 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_MEDIA_H
+#define IPU_ISYS_MEDIA_H
+
+#include <linux/slab.h>
+#include <media/media-entity.h>
+
+struct __packed media_request_cmd {
+	__u32 cmd;
+	__u32 request;
+	__u32 flags;
+};
+
+struct __packed media_event_request_complete {
+	__u32 id;
+};
+
+#define MEDIA_EVENT_TYPE_REQUEST_COMPLETE	1
+
+struct __packed media_event {
+	__u32 type;
+	__u32 sequence;
+	__u32 reserved[4];
+
+	union {
+		struct media_event_request_complete req_complete;
+	};
+};
+
+enum media_device_request_state {
+	MEDIA_DEVICE_REQUEST_STATE_IDLE,
+	MEDIA_DEVICE_REQUEST_STATE_QUEUED,
+	MEDIA_DEVICE_REQUEST_STATE_DELETED,
+	MEDIA_DEVICE_REQUEST_STATE_COMPLETE,
+};
+
+struct media_kevent {
+	struct list_head list;
+	struct media_event ev;
+};
+
+struct media_device_request {
+	u32 id;
+	struct media_device *mdev;
+	struct file *filp;
+	struct media_kevent *kev;
+	struct kref kref;
+	struct list_head list;
+	struct list_head fh_list;
+	enum media_device_request_state state;
+	struct list_head data;
+	u32 flags;
+};
+
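+/*
+ * The helpers below are stubs: media_device_request_find() always
+ * returns NULL and the others are no-ops, presumably so the request
+ * handling paths in the driver compile on kernels without media
+ * request support.
+ */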
+static inline struct media_device_request *
+media_device_request_find(struct media_device *mdev, u16 reqid)
+{
+	return NULL;
+}
+
+static inline void media_device_request_get(struct media_device_request *req)
+{
+}
+
+static inline void media_device_request_put(struct media_device_request *req)
+{
+}
+
+static inline void
+media_device_request_complete(struct media_device *mdev,
+			      struct media_device_request *req)
+{
+}
+
+#endif /* IPU_ISYS_MEDIA_H */
diff --git a/drivers/media/pci/intel/ipu-isys-queue.c b/drivers/media/pci/intel/ipu-isys-queue.c
new file mode 100644
index 000000000000..46d29d311098
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-queue.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-ioctl.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-buttress.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2.h"
+#include "ipu-isys-video.h"
+
+static bool wall_clock_ts_on;
+module_param(wall_clock_ts_on, bool, 0660);
+MODULE_PARM_DESC(wall_clock_ts_on, "Timestamp based on REALTIME clock");
+
+static int queue_setup(struct vb2_queue *q,
+		       unsigned int *num_buffers, unsigned int *num_planes,
+		       unsigned int sizes[],
+		       struct device *alloc_devs[])
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	bool use_fmt = false;
+	unsigned int i;
+
+	/* num_planes == 0: we're being called through VIDIOC_REQBUFS */
+	if (!*num_planes) {
+		use_fmt = true;
+		*num_planes = av->mpix.num_planes;
+	}
+
+	for (i = 0; i < *num_planes; i++) {
+		if (use_fmt)
+			sizes[i] = av->mpix.plane_fmt[i].sizeimage;
+		alloc_devs[i] = aq->dev;
+		dev_dbg(&av->isys->adev->dev,
+			"%s: queue setup: plane %d size %u\n",
+			av->vdev.name, i, sizes[i]);
+	}
+
+	return 0;
+}
+
+static void ipu_isys_queue_lock(struct vb2_queue *q)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+	dev_dbg(&av->isys->adev->dev, "%s: queue lock\n", av->vdev.name);
+	mutex_lock(&av->mutex);
+}
+
+static void ipu_isys_queue_unlock(struct vb2_queue *q)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+	dev_dbg(&av->isys->adev->dev, "%s: queue unlock\n", av->vdev.name);
+	mutex_unlock(&av->mutex);
+}
+
+static int buf_init(struct vb2_buffer *vb)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+	dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name,
+		__func__);
+
+	if (aq->buf_init)
+		return aq->buf_init(vb);
+
+	return 0;
+}
+
+int ipu_isys_buf_prepare(struct vb2_buffer *vb)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+	dev_dbg(&av->isys->adev->dev,
+		"buffer: %s: configured size %u, buffer size %lu\n",
+		av->vdev.name,
+		av->mpix.plane_fmt[0].sizeimage, vb2_plane_size(vb, 0));
+
+	if (av->mpix.plane_fmt[0].sizeimage > vb2_plane_size(vb, 0))
+		return -EINVAL;
+
+	vb2_set_plane_payload(vb, 0, av->mpix.plane_fmt[0].bytesperline *
+			      av->mpix.height);
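+	/* line_header_length is in bits; data_offset expects bytes. */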
+	vb->planes[0].data_offset = av->line_header_length / BITS_PER_BYTE;
+
+	return 0;
+}
+
+static int buf_prepare(struct vb2_buffer *vb)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	int rval;
+
+	if (av->isys->adev->isp->flr_done)
+		return -EIO;
+
+	rval = aq->buf_prepare(vb);
+	return rval;
+}
+
+static void buf_finish(struct vb2_buffer *vb)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+	dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name,
+		__func__);
+}
+
+static void buf_cleanup(struct vb2_buffer *vb)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+	dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name,
+		__func__);
+
+	if (aq->buf_cleanup)
+		aq->buf_cleanup(vb);
+}
+
+/*
+ * Queue a buffer list back to incoming or active queues. The buffers
+ * are removed from the buffer list.
+ */
+void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl,
+				unsigned long op_flags,
+				enum vb2_buffer_state state)
+{
+	struct ipu_isys_buffer *ib, *ib_safe;
+	unsigned long flags;
+	bool first = true;
+
+	if (!bl)
+		return;
+
+	WARN_ON(!bl->nbufs);
+	WARN_ON(op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE &&
+		op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING);
+
+	list_for_each_entry_safe(ib, ib_safe, &bl->head, head) {
+		struct ipu_isys_video *av;
+
+		if (ib->type == IPU_ISYS_VIDEO_BUFFER) {
+			struct vb2_buffer *vb =
+			    ipu_isys_buffer_to_vb2_buffer(ib);
+			struct ipu_isys_queue *aq =
+			    vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+
+			av = ipu_isys_queue_to_video(aq);
+			spin_lock_irqsave(&aq->lock, flags);
+			list_del(&ib->head);
+			if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
+				list_add(&ib->head, &aq->active);
+			else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
+				list_add_tail(&ib->head, &aq->incoming);
+			spin_unlock_irqrestore(&aq->lock, flags);
+
+			if (op_flags & IPU_ISYS_BUFFER_LIST_FL_SET_STATE)
+				vb2_buffer_done(vb, state);
+		} else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) {
+			struct ipu_isys_private_buffer *pb =
+			    ipu_isys_buffer_to_private_buffer(ib);
+			struct ipu_isys_pipeline *ip = pb->ip;
+
+			av = container_of(ip, struct ipu_isys_video, ip);
+			spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+			list_del(&ib->head);
+			if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
+				list_add(&ib->head, &ip->short_packet_active);
+			else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
+				list_add(&ib->head, &ip->short_packet_incoming);
+			spin_unlock_irqrestore(&ip->short_packet_queue_lock,
+					       flags);
+		} else {
+			WARN_ON(1);
+			return;
+		}
+
+		if (first) {
+			dev_dbg(&av->isys->adev->dev,
+				"queue buf list %p flags %lx, s %d, %d bufs\n",
+				bl, op_flags, state, bl->nbufs);
+			first = false;
+		}
+
+		bl->nbufs--;
+	}
+
+	WARN_ON(bl->nbufs);
+}
+
+/*
+ * flush_firmware_streamon_fail() - Flush in cases where requests may
+ * have been queued to firmware and the firmware streamon fails for
+ * one reason or another.
+ */
+static void flush_firmware_streamon_fail(struct ipu_isys_pipeline *ip)
+{
+	struct ipu_isys_video *pipe_av =
+	    container_of(ip, struct ipu_isys_video, ip);
+	struct ipu_isys_queue *aq;
+	unsigned long flags;
+
+	lockdep_assert_held(&pipe_av->mutex);
+
+	list_for_each_entry(aq, &ip->queues, node) {
+		struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+		struct ipu_isys_buffer *ib, *ib_safe;
+
+		spin_lock_irqsave(&aq->lock, flags);
+		list_for_each_entry_safe(ib, ib_safe, &aq->active, head) {
+			struct vb2_buffer *vb =
+			    ipu_isys_buffer_to_vb2_buffer(ib);
+
+			list_del(&ib->head);
+			if (av->streaming) {
+				dev_dbg(&av->isys->adev->dev,
+					"%s: queue buffer %u back to incoming\n",
+					av->vdev.name,
+					vb->index);
+				/* Queue already streaming, return to driver. */
+				list_add(&ib->head, &aq->incoming);
+				continue;
+			}
+			/* Queue not yet streaming, return to user. */
+			dev_dbg(&av->isys->adev->dev,
+				"%s: return %u back to videobuf2\n",
+				av->vdev.name,
+				vb->index);
+			vb2_buffer_done(ipu_isys_buffer_to_vb2_buffer(ib),
+					VB2_BUF_STATE_QUEUED);
+		}
+		spin_unlock_irqrestore(&aq->lock, flags);
+	}
+}
+
+/*
+ * Attempt to obtain a buffer list from the incoming queues: a list of
+ * buffers that contains one entry from each video buffer queue. If
+ * any queue has no buffers available, the buffers that were already
+ * dequeued are returned to their queues.
+ */
+static int buffer_list_get(struct ipu_isys_pipeline *ip,
+			   struct ipu_isys_buffer_list *bl)
+{
+	struct ipu_isys_queue *aq;
+	struct ipu_isys_buffer *ib;
+	unsigned long flags;
+	int ret = 0;
+
+	bl->nbufs = 0;
+	INIT_LIST_HEAD(&bl->head);
+
+	list_for_each_entry(aq, &ip->queues, node) {
+		spin_lock_irqsave(&aq->lock, flags);
+		if (list_empty(&aq->incoming)) {
+			spin_unlock_irqrestore(&aq->lock, flags);
+			ret = -ENODATA;
+			goto error;
+		}
+
+		ib = list_last_entry(&aq->incoming,
+				     struct ipu_isys_buffer, head);
+		if (ib->req) {
+			spin_unlock_irqrestore(&aq->lock, flags);
+			ret = -ENODATA;
+			goto error;
+		}
+
+		dev_dbg(&ip->isys->adev->dev, "buffer: %s: buffer %u\n",
+			ipu_isys_queue_to_video(aq)->vdev.name,
+			ipu_isys_buffer_to_vb2_buffer(ib)->index);
+		list_del(&ib->head);
+		list_add(&ib->head, &bl->head);
+		spin_unlock_irqrestore(&aq->lock, flags);
+
+		bl->nbufs++;
+	}
+
+	list_for_each_entry(ib, &bl->head, head) {
+		struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+
+		aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+		if (aq->prepare_frame_buff_set)
+			aq->prepare_frame_buff_set(vb);
+	}
+
+	/* Get short packet buffer. */
+	if (ip->interlaced && ip->isys->short_packet_source ==
+	    IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) {
+		ib = ipu_isys_csi2_get_short_packet_buffer(ip, bl);
+		if (!ib) {
+			ret = -ENODATA;
+			dev_err(&ip->isys->adev->dev,
+				"No more short packet buffers. Driver bug?\n");
+			WARN_ON(1);
+			goto error;
+		}
+		bl->nbufs++;
+	}
+
+	dev_dbg(&ip->isys->adev->dev, "get buffer list %p, %u buffers\n", bl,
+		bl->nbufs);
+	return ret;
+
+error:
+	if (!list_empty(&bl->head))
+		ipu_isys_buffer_list_queue(bl,
+					   IPU_ISYS_BUFFER_LIST_FL_INCOMING, 0);
+	return ret;
+}
+
+void
+ipu_isys_buffer_to_fw_frame_buff_pin(struct vb2_buffer *vb,
+				     struct ipu_fw_isys_frame_buff_set_abi *set)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+
+	set->output_pins[aq->fw_output].addr =
+	    vb2_dma_contig_plane_dma_addr(vb, 0);
+	set->output_pins[aq->fw_output].out_buf_id =
+	    vb->index + 1;
+}
+
+/*
+ * Convert a buffer list to an isys fw ABI frame buffer set. The
+ * buffer list is not modified.
+ */
+#define IPU_ISYS_FRAME_NUM_THRESHOLD  (30)
+void
+ipu_isys_buffer_to_fw_frame_buff(struct ipu_fw_isys_frame_buff_set_abi *set,
+				 struct ipu_isys_pipeline *ip,
+				 struct ipu_isys_buffer_list *bl)
+{
+	struct ipu_isys_buffer *ib;
+
+	WARN_ON(!bl->nbufs);
+
+	set->send_irq_sof = 1;
+	set->send_resp_sof = 1;
+
+	set->send_irq_eof = 0;
+	set->send_resp_eof = 0;
+	/*
+	 * In the TPG case, stream start times out if the capture ack
+	 * IRQ is disabled, so keep it enabled while starting the
+	 * stream and disable it once the stream start has succeeded.
+	 */
+	if (ip->streaming)
+		set->send_irq_capture_ack = 0;
+	else
+		set->send_irq_capture_ack = 1;
+	set->send_irq_capture_done = 0;
+
+	set->send_resp_capture_ack = 1;
+	set->send_resp_capture_done = 1;
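+	/*
+	 * For long-running non-interlaced streams, stop requesting
+	 * capture ack/done responses after the first
+	 * IPU_ISYS_FRAME_NUM_THRESHOLD frames, presumably to reduce
+	 * per-frame response traffic from the firmware.
+	 */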
+	if (!ip->interlaced &&
+	    atomic_read(&ip->sequence) >= IPU_ISYS_FRAME_NUM_THRESHOLD) {
+		set->send_resp_capture_ack = 0;
+		set->send_resp_capture_done = 0;
+	}
+
+	list_for_each_entry(ib, &bl->head, head) {
+		if (ib->type == IPU_ISYS_VIDEO_BUFFER) {
+			struct vb2_buffer *vb =
+			    ipu_isys_buffer_to_vb2_buffer(ib);
+			struct ipu_isys_queue *aq =
+			    vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+
+			if (aq->fill_frame_buff_set_pin)
+				aq->fill_frame_buff_set_pin(vb, set);
+		} else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) {
+			struct ipu_isys_private_buffer *pb =
+			    ipu_isys_buffer_to_private_buffer(ib);
+			struct ipu_fw_isys_output_pin_payload_abi *output_pin =
+			    &set->output_pins[ip->short_packet_output_pin];
+
+			output_pin->addr = pb->dma_addr;
+			output_pin->out_buf_id = pb->index + 1;
+		} else {
+			WARN_ON(1);
+		}
+	}
+}
+
+/* Start streaming for real. The buffer list must be available. */
+static int ipu_isys_stream_start(struct ipu_isys_pipeline *ip,
+				 struct ipu_isys_buffer_list *bl, bool error)
+{
+	struct ipu_isys_video *pipe_av =
+	    container_of(ip, struct ipu_isys_video, ip);
+	struct ipu_isys_buffer_list __bl;
+	int rval;
+
+	mutex_lock(&pipe_av->isys->stream_mutex);
+
+	rval = ipu_isys_video_set_streaming(pipe_av, 1, bl);
+	if (rval) {
+		mutex_unlock(&pipe_av->isys->stream_mutex);
+		goto out_requeue;
+	}
+
+	ip->streaming = 1;
+
+	mutex_unlock(&pipe_av->isys->stream_mutex);
+
+	bl = &__bl;
+
+	do {
+		struct ipu_fw_isys_frame_buff_set_abi *buf = NULL;
+		struct isys_fw_msgs *msg;
+		enum ipu_fw_isys_send_type send_type =
+		    IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE;
+
+		rval = buffer_list_get(ip, bl);
+		if (rval == -EINVAL)
+			goto out_requeue;
+		else if (rval < 0)
+			break;
+
+		msg = ipu_get_fw_msg_buf(ip);
+		if (!msg)
+			return -ENOMEM;
+
+		buf = to_frame_msg_buf(msg);
+
+		ipu_isys_buffer_to_fw_frame_buff(buf, ip, bl);
+
+		ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf,
+						ip->nr_output_pins);
+
+		ipu_isys_buffer_list_queue(bl,
+					   IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+
+		rval = ipu_fw_isys_complex_cmd(pipe_av->isys,
+					       ip->stream_handle,
+					       buf, to_dma_addr(msg),
+					       sizeof(*buf),
+					       send_type);
+		ipu_put_fw_mgs_buf(pipe_av->isys, (uintptr_t)buf);
+	} while (!WARN_ON(rval));
+
+	return 0;
+
+out_requeue:
+	if (bl && bl->nbufs)
+		ipu_isys_buffer_list_queue(bl,
+					   IPU_ISYS_BUFFER_LIST_FL_INCOMING |
+					   (error ?
+					    IPU_ISYS_BUFFER_LIST_FL_SET_STATE :
+					    0),
+					   error ? VB2_BUF_STATE_ERROR :
+					   VB2_BUF_STATE_QUEUED);
+	flush_firmware_streamon_fail(ip);
+
+	return rval;
+}
+
+static void __buf_queue(struct vb2_buffer *vb, bool force)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb);
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	struct ipu_isys_buffer_list bl;
+
+	struct ipu_fw_isys_frame_buff_set_abi *buf = NULL;
+	struct isys_fw_msgs *msg;
+
+	struct ipu_isys_video *pipe_av =
+	    container_of(ip, struct ipu_isys_video, ip);
+	unsigned long flags;
+	unsigned int i;
+	int rval;
+
+	dev_dbg(&av->isys->adev->dev, "buffer: %s: buf_queue %u\n",
+		av->vdev.name, vb->index);
+
+	for (i = 0; i < vb->num_planes; i++)
+		dev_dbg(&av->isys->adev->dev, "iova: plane %u iova 0x%x\n", i,
+			(u32)vb2_dma_contig_plane_dma_addr(vb, i));
+
+	spin_lock_irqsave(&aq->lock, flags);
+	list_add(&ib->head, &aq->incoming);
+	spin_unlock_irqrestore(&aq->lock, flags);
+
+	if (ib->req)
+		return;
+
+	if (!pipe_av || !vb->vb2_queue->streaming) {
+		dev_dbg(&av->isys->adev->dev,
+			"pipe_av not set, adding to incoming\n");
+		return;
+	}
+
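+	/*
+	 * Pipeline-wide state appears to be serialized by the pipeline
+	 * video node's mutex, so drop this node's mutex before taking
+	 * it to keep a single lock order across the pipeline's nodes.
+	 */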
+	mutex_unlock(&av->mutex);
+	mutex_lock(&pipe_av->mutex);
+
+	if (!force && ip->nr_streaming != ip->nr_queues) {
+		dev_dbg(&av->isys->adev->dev,
+			"not streaming yet, adding to incoming\n");
+		goto out;
+	}
+
+	/*
+	 * We just put one buffer to the incoming list of this queue
+	 * (above). Let's see whether all queues in the pipeline would
+	 * have a buffer.
+	 */
+	rval = buffer_list_get(ip, &bl);
+	if (rval < 0) {
+		if (rval == -EINVAL) {
+			dev_err(&av->isys->adev->dev,
+				"error: should not happen\n");
+			WARN_ON(1);
+		} else {
+			dev_dbg(&av->isys->adev->dev,
+				"not enough buffers available\n");
+		}
+		goto out;
+	}
+
+	msg = ipu_get_fw_msg_buf(ip);
+	if (!msg) {
+		rval = -ENOMEM;
+		goto out;
+	}
+	buf = to_frame_msg_buf(msg);
+
+	ipu_isys_buffer_to_fw_frame_buff(buf, ip, &bl);
+
+	ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf,
+					ip->nr_output_pins);
+
+	if (!ip->streaming) {
+		dev_dbg(&av->isys->adev->dev,
+			"got a buffer to start streaming!\n");
+		rval = ipu_isys_stream_start(ip, &bl, true);
+		if (rval)
+			dev_err(&av->isys->adev->dev,
+				"stream start failed.\n");
+		goto out;
+	}
+
+	/*
+	 * We must queue the buffers in the buffer list to the
+	 * appropriate video buffer queues BEFORE passing them to the
+	 * firmware since we could get a buffer event back before we
+	 * have queued them ourselves to the active queue.
+	 */
+	ipu_isys_buffer_list_queue(&bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+
+	rval = ipu_fw_isys_complex_cmd(pipe_av->isys,
+				       ip->stream_handle,
+				       buf, to_dma_addr(msg),
+				       sizeof(*buf),
+				       IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE);
+	ipu_put_fw_mgs_buf(pipe_av->isys, (uintptr_t)buf);
+	if (!WARN_ON(rval < 0))
+		dev_dbg(&av->isys->adev->dev, "queued buffer\n");
+
+out:
+	mutex_unlock(&pipe_av->mutex);
+	mutex_lock(&av->mutex);
+}
+
+static void buf_queue(struct vb2_buffer *vb)
+{
+	__buf_queue(vb, false);
+}
+
+int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq)
+{
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	struct v4l2_subdev_format fmt = { 0 };
+	struct media_pad *pad = media_entity_remote_pad(av->vdev.entity.pads);
+	struct v4l2_subdev *sd;
+	int rval;
+
+	if (!pad) {
+		dev_dbg(&av->isys->adev->dev,
+			"video node %s pad not connected\n", av->vdev.name);
+		return -ENOTCONN;
+	}
+
+	sd = media_entity_to_v4l2_subdev(pad->entity);
+
+	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	fmt.pad = pad->index;
+	rval = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
+	if (rval)
+		return rval;
+
+	if (fmt.format.width != av->mpix.width ||
+	    fmt.format.height != av->mpix.height) {
+		dev_dbg(&av->isys->adev->dev,
+			"wrong width or height %ux%u (%ux%u expected)\n",
+			av->mpix.width, av->mpix.height,
+			fmt.format.width, fmt.format.height);
+		return -EINVAL;
+	}
+
+	if (fmt.format.field != av->mpix.field) {
+		dev_dbg(&av->isys->adev->dev,
+			"wrong field value 0x%8.8x (0x%8.8x expected)\n",
+			av->mpix.field, fmt.format.field);
+		return -EINVAL;
+	}
+
+	if (fmt.format.code != av->pfmt->code) {
+		dev_dbg(&av->isys->adev->dev,
+			"wrong media bus code 0x%8.8x (0x%8.8x expected)\n",
+			av->pfmt->code, fmt.format.code);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Return buffers back to videobuf2. */
+static void return_buffers(struct ipu_isys_queue *aq,
+			   enum vb2_buffer_state state)
+{
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	int reset_needed = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&aq->lock, flags);
+	while (!list_empty(&aq->incoming)) {
+		struct ipu_isys_buffer *ib =
+			list_first_entry(&aq->incoming,
+					 struct ipu_isys_buffer, head);
+		struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+
+		list_del(&ib->head);
+		spin_unlock_irqrestore(&aq->lock, flags);
+
+		vb2_buffer_done(vb, state);
+
+		dev_dbg(&av->isys->adev->dev,
+			"%s: stop_streaming incoming %u\n",
+			ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue
+						(vb->vb2_queue))->vdev.name,
+			vb->index);
+
+		spin_lock_irqsave(&aq->lock, flags);
+	}
+
+	/*
+	 * Something went wrong (FW crash / HW hang / not all buffers
+	 * returned from isys) if there are still buffers queued in the
+	 * active queue. We have to clean things up a bit.
+	 */
+	while (!list_empty(&aq->active)) {
+		struct ipu_isys_buffer *ib =
+			list_first_entry(&aq->active,
+					 struct ipu_isys_buffer, head);
+		struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+
+		list_del(&ib->head);
+		spin_unlock_irqrestore(&aq->lock, flags);
+
+		vb2_buffer_done(vb, state);
+
+		dev_warn(&av->isys->adev->dev, "%s: cleaning active queue %u\n",
+			 ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue
+						 (vb->vb2_queue))->vdev.name,
+			 vb->index);
+
+		spin_lock_irqsave(&aq->lock, flags);
+		reset_needed = 1;
+	}
+
+	spin_unlock_irqrestore(&aq->lock, flags);
+
+	if (reset_needed) {
+		mutex_lock(&av->isys->mutex);
+		av->isys->reset_needed = true;
+		mutex_unlock(&av->isys->mutex);
+	}
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	struct ipu_isys_video *pipe_av;
+	struct ipu_isys_pipeline *ip;
+	struct ipu_isys_buffer_list __bl, *bl = NULL;
+	bool first;
+	int rval;
+
+	dev_dbg(&av->isys->adev->dev,
+		"stream: %s: width %u, height %u, css pixelformat %u\n",
+		av->vdev.name, av->mpix.width, av->mpix.height,
+		av->pfmt->css_pixelformat);
+
+	mutex_lock(&av->isys->stream_mutex);
+
+	first = !av->vdev.entity.pipe;
+
+	if (first) {
+		rval = ipu_isys_video_prepare_streaming(av, 1);
+		if (rval)
+			goto out_return_buffers;
+	}
+
+	mutex_unlock(&av->isys->stream_mutex);
+
+	rval = aq->link_fmt_validate(aq);
+	if (rval) {
+		dev_dbg(&av->isys->adev->dev,
+			"%s: link format validation failed (%d)\n",
+			av->vdev.name, rval);
+		goto out_unprepare_streaming;
+	}
+
+	ip = to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	pipe_av = container_of(ip, struct ipu_isys_video, ip);
+	mutex_unlock(&av->mutex);
+
+	mutex_lock(&pipe_av->mutex);
+	ip->nr_streaming++;
+	dev_dbg(&av->isys->adev->dev, "queue %u of %u\n", ip->nr_streaming,
+		ip->nr_queues);
+	list_add(&aq->node, &ip->queues);
+	if (ip->nr_streaming != ip->nr_queues)
+		goto out;
+
+	if (list_empty(&av->isys->requests)) {
+		bl = &__bl;
+		rval = buffer_list_get(ip, bl);
+		if (rval == -EINVAL) {
+			goto out_stream_start;
+		} else if (rval < 0) {
+			dev_dbg(&av->isys->adev->dev,
+				"no request available, postponing streamon\n");
+			goto out;
+		}
+	}
+
+	rval = ipu_isys_stream_start(ip, bl, false);
+	if (rval)
+		goto out_stream_start;
+
+out:
+	mutex_unlock(&pipe_av->mutex);
+	mutex_lock(&av->mutex);
+
+	return 0;
+
+out_stream_start:
+	list_del(&aq->node);
+	ip->nr_streaming--;
+	mutex_unlock(&pipe_av->mutex);
+	mutex_lock(&av->mutex);
+
+out_unprepare_streaming:
+	mutex_lock(&av->isys->stream_mutex);
+	if (first)
+		ipu_isys_video_prepare_streaming(av, 0);
+
+out_return_buffers:
+	mutex_unlock(&av->isys->stream_mutex);
+	return_buffers(aq, VB2_BUF_STATE_QUEUED);
+
+	return rval;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	struct ipu_isys_video *pipe_av =
+	    container_of(ip, struct ipu_isys_video, ip);
+
+	if (pipe_av != av) {
+		mutex_unlock(&av->mutex);
+		mutex_lock(&pipe_av->mutex);
+	}
+
+	mutex_lock(&av->isys->stream_mutex);
+	if (ip->nr_streaming == ip->nr_queues && ip->streaming)
+		ipu_isys_video_set_streaming(av, 0, NULL);
+	if (ip->nr_streaming == 1)
+		ipu_isys_video_prepare_streaming(av, 0);
+	mutex_unlock(&av->isys->stream_mutex);
+
+	ip->nr_streaming--;
+	list_del(&aq->node);
+	ip->streaming = 0;
+
+	if (pipe_av != av) {
+		mutex_unlock(&pipe_av->mutex);
+		mutex_lock(&av->mutex);
+	}
+
+	return_buffers(aq, VB2_BUF_STATE_ERROR);
+}
+
+static unsigned int
+get_sof_sequence_by_timestamp(struct ipu_isys_pipeline *ip,
+			      struct ipu_fw_isys_resp_info_abi *info)
+{
+	struct ipu_isys *isys =
+	    container_of(ip, struct ipu_isys_video, ip)->isys;
+	u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0];
+	unsigned int i;
+
+	/*
+	 * The timestamp is invalid on FPGA platforms that have no TSC,
+	 * so in that case get the sequence directly from the pipeline.
+	 */
+	if (time == 0)
+		return atomic_read(&ip->sequence) - 1;
+	for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
+		if (time == ip->seq[i].timestamp) {
+			dev_dbg(&isys->adev->dev,
+				"sof: using seq nr %u for ts 0x%16.16llx\n",
+				ip->seq[i].sequence, time);
+			return ip->seq[i].sequence;
+		}
+
+	dev_dbg(&isys->adev->dev, "SOF: looking for 0x%16.16llx\n", time);
+	for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
+		dev_dbg(&isys->adev->dev,
+			"SOF: sequence %u, timestamp value 0x%16.16llx\n",
+			ip->seq[i].sequence, ip->seq[i].timestamp);
+	dev_dbg(&isys->adev->dev, "SOF sequence number not found\n");
+
+	return 0;
+}
+
+static u64 get_sof_ns_delta(struct ipu_isys_video *av,
+			    struct ipu_fw_isys_resp_info_abi *info)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(&av->isys->adev->dev);
+	struct ipu_device *isp = adev->isp;
+	u64 delta, tsc_now;
+
+	if (!ipu_buttress_tsc_read(isp, &tsc_now))
+		delta = tsc_now -
+		    ((u64)info->timestamp[1] << 32 | info->timestamp[0]);
+	else
+		delta = 0;
+
+	return ipu_buttress_tsc_ticks_to_ns(delta);
+}
+
+void
+ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib,
+				struct ipu_fw_isys_resp_info_abi *info)
+{
+	struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	struct device *dev = &av->isys->adev->dev;
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	u64 ns;
+	u32 sequence;
+
+	if (ip->has_sof) {
+		ns = (wall_clock_ts_on) ? ktime_get_real_ns() : ktime_get_ns();
+		ns -= get_sof_ns_delta(av, info);
+		sequence = get_sof_sequence_by_timestamp(ip, info);
+	} else {
+		ns = ((wall_clock_ts_on) ? ktime_get_real_ns() :
+		      ktime_get_ns());
+		sequence = (atomic_inc_return(&ip->sequence) - 1)
+		    / ip->nr_queues;
+	}
+
+	vbuf->vb2_buf.timestamp = ns;
+	vbuf->sequence = sequence;
+
+	dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n",
+		av->vdev.name, ktime_get_ns(), sequence);
+	dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n",
+		vb->index, vbuf->vb2_buf.timestamp);
+}
+
+void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib)
+{
+	struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+
+	if (atomic_read(&ib->str2mmio_flag)) {
+		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+		/*
+		 * The operation on the buffer ended with an error and
+		 * will be reported to userspace when it is dequeued.
+		 */
+		atomic_set(&ib->str2mmio_flag, 0);
+	} else {
+		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	}
+}
+
+void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip,
+			      struct ipu_fw_isys_resp_info_abi *info)
+{
+	struct ipu_isys *isys =
+	    container_of(ip, struct ipu_isys_video, ip)->isys;
+	struct ipu_isys_queue *aq = ip->output_pins[info->pin_id].aq;
+	struct ipu_isys_buffer *ib;
+	struct vb2_buffer *vb;
+	unsigned long flags;
+	bool first = true;
+	struct vb2_v4l2_buffer *buf;
+
+	dev_dbg(&isys->adev->dev, "buffer: %s: received buffer %8.8x\n",
+		ipu_isys_queue_to_video(aq)->vdev.name, info->pin.addr);
+
+	spin_lock_irqsave(&aq->lock, flags);
+	if (list_empty(&aq->active)) {
+		spin_unlock_irqrestore(&aq->lock, flags);
+		dev_err(&isys->adev->dev, "active queue empty\n");
+		return;
+	}
+
+	list_for_each_entry_reverse(ib, &aq->active, head) {
+		dma_addr_t addr;
+
+		vb = ipu_isys_buffer_to_vb2_buffer(ib);
+		addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+		if (info->pin.addr != addr) {
+			if (first)
+				dev_err(&isys->adev->dev,
+					"WARN: buffer address %pad expected!\n",
+					&addr);
+			first = false;
+			continue;
+		}
+
+		if (info->error_info.error ==
+		    IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) {
+			/*
+			 * Check for error message:
+			 * 'IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO'
+			 */
+			atomic_set(&ib->str2mmio_flag, 1);
+		}
+		dev_dbg(&isys->adev->dev, "buffer: found buffer %pad\n", &addr);
+
+		buf = to_vb2_v4l2_buffer(vb);
+		buf->field = V4L2_FIELD_NONE;
+
+		list_del(&ib->head);
+		spin_unlock_irqrestore(&aq->lock, flags);
+
+		ipu_isys_buf_calc_sequence_time(ib, info);
+
+		/*
+		 * For interlaced buffers, the notification to user space
+		 * is postponed to capture_done event since the field
+		 * information is available only at that time.
+		 */
+		if (ip->interlaced) {
+			spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+			list_add(&ib->head, &ip->pending_interlaced_bufs);
+			spin_unlock_irqrestore(&ip->short_packet_queue_lock,
+					       flags);
+		} else {
+			ipu_isys_queue_buf_done(ib);
+		}
+
+		return;
+	}
+
+	dev_err(&isys->adev->dev,
+		"WARNING: cannot find a matching video buffer!\n");
+
+	spin_unlock_irqrestore(&aq->lock, flags);
+}
+
+void
+ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip,
+				  struct ipu_fw_isys_resp_info_abi *info)
+{
+	struct ipu_isys *isys =
+	    container_of(ip, struct ipu_isys_video, ip)->isys;
+	unsigned long flags;
+
+	dev_dbg(&isys->adev->dev, "receive short packet buffer %8.8x\n",
+		info->pin.addr);
+	spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+	ip->cur_field = ipu_isys_csi2_get_current_field(ip, info->timestamp);
+	spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+}
+
+struct vb2_ops ipu_isys_queue_ops = {
+	.queue_setup = queue_setup,
+	.wait_prepare = ipu_isys_queue_unlock,
+	.wait_finish = ipu_isys_queue_lock,
+	.buf_init = buf_init,
+	.buf_prepare = buf_prepare,
+	.buf_finish = buf_finish,
+	.buf_cleanup = buf_cleanup,
+	.start_streaming = start_streaming,
+	.stop_streaming = stop_streaming,
+	.buf_queue = buf_queue,
+};
+
+int ipu_isys_queue_init(struct ipu_isys_queue *aq)
+{
+	struct ipu_isys *isys = ipu_isys_queue_to_video(aq)->isys;
+	int rval;
+
+	if (!aq->vbq.io_modes)
+		aq->vbq.io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
+	aq->vbq.drv_priv = aq;
+	aq->vbq.ops = &ipu_isys_queue_ops;
+	aq->vbq.mem_ops = &vb2_dma_contig_memops;
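+	/*
+	 * There is no V4L2 buffer flag for REALTIME timestamps, so
+	 * advertise TIMESTAMP_UNKNOWN when wall_clock_ts_on is set.
+	 */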
+	aq->vbq.timestamp_flags = (wall_clock_ts_on) ?
+	    V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN : V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+	rval = vb2_queue_init(&aq->vbq);
+	if (rval)
+		return rval;
+
+	aq->dev = &isys->adev->dev;
+	aq->vbq.dev = &isys->adev->dev;
+	spin_lock_init(&aq->lock);
+	INIT_LIST_HEAD(&aq->active);
+	INIT_LIST_HEAD(&aq->incoming);
+
+	return 0;
+}
+
+void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq)
+{
+	vb2_queue_release(&aq->vbq);
+}
diff --git a/drivers/media/pci/intel/ipu-isys-queue.h b/drivers/media/pci/intel/ipu-isys-queue.h
new file mode 100644
index 000000000000..99be2989a088
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-queue.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_QUEUE_H
+#define IPU_ISYS_QUEUE_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <media/videobuf2-v4l2.h>
+
+#include "ipu-isys-media.h"
+
+struct ipu_isys_video;
+struct ipu_isys_pipeline;
+struct ipu_fw_isys_resp_info_abi;
+struct ipu_fw_isys_frame_buff_set_abi;
+
+enum ipu_isys_buffer_type {
+	IPU_ISYS_VIDEO_BUFFER,
+	IPU_ISYS_SHORT_PACKET_BUFFER,
+};
+
+struct ipu_isys_queue {
+	struct list_head node;	/* struct ipu_isys_pipeline.queues */
+	struct vb2_queue vbq;
+	struct device *dev;
+	/* @lock: serialise access to the active and incoming lists */
+	spinlock_t lock;
+	struct list_head active;
+	struct list_head incoming;
+	u32 css_pin_type;
+	unsigned int fw_output;
+	int (*buf_init)(struct vb2_buffer *vb);
+	void (*buf_cleanup)(struct vb2_buffer *vb);
+	int (*buf_prepare)(struct vb2_buffer *vb);
+	void (*prepare_frame_buff_set)(struct vb2_buffer *vb);
+	void (*fill_frame_buff_set_pin)(struct vb2_buffer *vb,
+					struct ipu_fw_isys_frame_buff_set_abi *
+					set);
+	int (*link_fmt_validate)(struct ipu_isys_queue *aq);
+};
+
+struct ipu_isys_buffer {
+	struct list_head head;
+	enum ipu_isys_buffer_type type;
+	struct list_head req_head;
+	struct media_device_request *req;
+	atomic_t str2mmio_flag;
+};
+
+struct ipu_isys_video_buffer {
+	struct vb2_v4l2_buffer vb_v4l2;
+	struct ipu_isys_buffer ib;
+};
+
+struct ipu_isys_private_buffer {
+	struct ipu_isys_buffer ib;
+	struct ipu_isys_pipeline *ip;
+	unsigned int index;
+	unsigned int bytesused;
+	dma_addr_t dma_addr;
+	void *buffer;
+};
+
+#define IPU_ISYS_BUFFER_LIST_FL_INCOMING	BIT(0)
+#define IPU_ISYS_BUFFER_LIST_FL_ACTIVE	BIT(1)
+#define IPU_ISYS_BUFFER_LIST_FL_SET_STATE	BIT(2)
+
+struct ipu_isys_buffer_list {
+	struct list_head head;
+	unsigned int nbufs;
+};
+
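+/*
+ * Conversion helpers between the vb2, V4L2 and ISYS buffer wrappers;
+ * all are thin container_of() wrappers or compositions of them.
+ */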
+#define vb2_queue_to_ipu_isys_queue(__vb2) \
+	container_of(__vb2, struct ipu_isys_queue, vbq)
+
+#define ipu_isys_to_isys_video_buffer(__ib) \
+	container_of(__ib, struct ipu_isys_video_buffer, ib)
+
+#define vb2_buffer_to_ipu_isys_video_buffer(__vb) \
+	container_of(to_vb2_v4l2_buffer(__vb), \
+	struct ipu_isys_video_buffer, vb_v4l2)
+
+#define ipu_isys_buffer_to_vb2_buffer(__ib) \
+	(&ipu_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf)
+
+#define vb2_buffer_to_ipu_isys_buffer(__vb) \
+	(&vb2_buffer_to_ipu_isys_video_buffer(__vb)->ib)
+
+#define ipu_isys_buffer_to_private_buffer(__ib) \
+	container_of(__ib, struct ipu_isys_private_buffer, ib)
+
+struct ipu_isys_request {
+	struct media_device_request req;
+	/* serialise access to buffers */
+	spinlock_t lock;
+	struct list_head buffers;	/* struct ipu_isys_buffer.head */
+	bool dispatched;
+	/*
+	 * List node; lives on struct ipu_isys.requests or on one of
+	 * the struct ipu_isys_pipeline request lists.
+	 */
+	struct list_head head;
+};
+
+#define to_ipu_isys_request(__req) \
+	container_of(__req, struct ipu_isys_request, req)
+
+int ipu_isys_buf_prepare(struct vb2_buffer *vb);
+
+void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl,
+				unsigned long op_flags,
+				enum vb2_buffer_state state);
+struct ipu_isys_request *
+ipu_isys_next_queued_request(struct ipu_isys_pipeline *ip);
+void
+ipu_isys_buffer_to_fw_frame_buff_pin(struct vb2_buffer *vb,
+				     struct ipu_fw_isys_frame_buff_set_abi *
+				     set);
+void
+ipu_isys_buffer_to_fw_frame_buff(struct ipu_fw_isys_frame_buff_set_abi *set,
+				 struct ipu_isys_pipeline *ip,
+				 struct ipu_isys_buffer_list *bl);
+int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq);
+
+void
+ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib,
+				struct ipu_fw_isys_resp_info_abi *info);
+void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib);
+void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip,
+			      struct ipu_fw_isys_resp_info_abi *info);
+void
+ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip,
+				  struct ipu_fw_isys_resp_info_abi *info);
+
+int ipu_isys_queue_init(struct ipu_isys_queue *aq);
+void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq);
+
+#endif /* IPU_ISYS_QUEUE_H */
diff --git a/drivers/media/pci/intel/ipu-isys-subdev.c b/drivers/media/pci/intel/ipu-isys-subdev.c
new file mode 100644
index 000000000000..71a5fa592d44
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-subdev.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+
+#include <uapi/linux/media-bus-format.h>
+
+#include "ipu-isys.h"
+#include "ipu-isys-video.h"
+#include "ipu-isys-subdev.h"
+
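+/*
+ * Map a media bus code to its bits per pixel on the CSI-2 bus. Unknown
+ * codes trigger a WARN and return -EINVAL cast to unsigned int.
+ */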
+unsigned int ipu_isys_mbus_code_to_bpp(u32 code)
+{
+	switch (code) {
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		return 24;
+	case MEDIA_BUS_FMT_YUYV10_1X20:
+		return 20;
+	case MEDIA_BUS_FMT_Y10_1X10:
+	case MEDIA_BUS_FMT_RGB565_1X16:
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+	case MEDIA_BUS_FMT_YUYV8_1X16:
+		return 16;
+	case MEDIA_BUS_FMT_SBGGR12_1X12:
+	case MEDIA_BUS_FMT_SGBRG12_1X12:
+	case MEDIA_BUS_FMT_SGRBG12_1X12:
+	case MEDIA_BUS_FMT_SRGGB12_1X12:
+		return 12;
+	case MEDIA_BUS_FMT_SBGGR10_1X10:
+	case MEDIA_BUS_FMT_SGBRG10_1X10:
+	case MEDIA_BUS_FMT_SGRBG10_1X10:
+	case MEDIA_BUS_FMT_SRGGB10_1X10:
+		return 10;
+	case MEDIA_BUS_FMT_SBGGR8_1X8:
+	case MEDIA_BUS_FMT_SGBRG8_1X8:
+	case MEDIA_BUS_FMT_SGRBG8_1X8:
+	case MEDIA_BUS_FMT_SRGGB8_1X8:
+	case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+	case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+	case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+	case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+		return 8;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+}
+
+unsigned int ipu_isys_mbus_code_to_mipi(u32 code)
+{
+	switch (code) {
+	case MEDIA_BUS_FMT_RGB565_1X16:
+		return IPU_ISYS_MIPI_CSI2_TYPE_RGB565;
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		return IPU_ISYS_MIPI_CSI2_TYPE_RGB888;
+	case MEDIA_BUS_FMT_YUYV10_1X20:
+		return IPU_ISYS_MIPI_CSI2_TYPE_YUV422_10;
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+	case MEDIA_BUS_FMT_YUYV8_1X16:
+		return IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8;
+	case MEDIA_BUS_FMT_SBGGR12_1X12:
+	case MEDIA_BUS_FMT_SGBRG12_1X12:
+	case MEDIA_BUS_FMT_SGRBG12_1X12:
+	case MEDIA_BUS_FMT_SRGGB12_1X12:
+		return IPU_ISYS_MIPI_CSI2_TYPE_RAW12;
+	case MEDIA_BUS_FMT_Y10_1X10:
+	case MEDIA_BUS_FMT_SBGGR10_1X10:
+	case MEDIA_BUS_FMT_SGBRG10_1X10:
+	case MEDIA_BUS_FMT_SGRBG10_1X10:
+	case MEDIA_BUS_FMT_SRGGB10_1X10:
+		return IPU_ISYS_MIPI_CSI2_TYPE_RAW10;
+	case MEDIA_BUS_FMT_SBGGR8_1X8:
+	case MEDIA_BUS_FMT_SGBRG8_1X8:
+	case MEDIA_BUS_FMT_SGRBG8_1X8:
+	case MEDIA_BUS_FMT_SRGGB8_1X8:
+		return IPU_ISYS_MIPI_CSI2_TYPE_RAW8;
+	case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+	case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+	case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+	case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+		return IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1);
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+}
+
+enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code)
+{
+	switch (code) {
+	case MEDIA_BUS_FMT_SBGGR12_1X12:
+	case MEDIA_BUS_FMT_SBGGR10_1X10:
+	case MEDIA_BUS_FMT_SBGGR8_1X8:
+	case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+		return IPU_ISYS_SUBDEV_PIXELORDER_BGGR;
+	case MEDIA_BUS_FMT_SGBRG12_1X12:
+	case MEDIA_BUS_FMT_SGBRG10_1X10:
+	case MEDIA_BUS_FMT_SGBRG8_1X8:
+	case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+		return IPU_ISYS_SUBDEV_PIXELORDER_GBRG;
+	case MEDIA_BUS_FMT_SGRBG12_1X12:
+	case MEDIA_BUS_FMT_SGRBG10_1X10:
+	case MEDIA_BUS_FMT_SGRBG8_1X8:
+	case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+		return IPU_ISYS_SUBDEV_PIXELORDER_GRBG;
+	case MEDIA_BUS_FMT_SRGGB12_1X12:
+	case MEDIA_BUS_FMT_SRGGB10_1X10:
+	case MEDIA_BUS_FMT_SRGGB8_1X8:
+	case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+		return IPU_ISYS_SUBDEV_PIXELORDER_RGGB;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+}
+
+u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code)
+{
+	switch (sink_code) {
+	case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+		return MEDIA_BUS_FMT_SBGGR10_1X10;
+	case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+		return MEDIA_BUS_FMT_SGBRG10_1X10;
+	case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+		return MEDIA_BUS_FMT_SGRBG10_1X10;
+	case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+		return MEDIA_BUS_FMT_SRGGB10_1X10;
+	default:
+		return sink_code;
+	}
+}
+
+struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd,
+					       struct v4l2_subdev_pad_config
+					       *cfg,
+					       unsigned int pad,
+					       unsigned int which)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+		return &asd->ffmt[pad];
+	else
+		return v4l2_subdev_get_try_format(sd, cfg, pad);
+}
+
+struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd,
+					   struct v4l2_subdev_pad_config *cfg,
+					   unsigned int target,
+					   unsigned int pad, unsigned int which)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+		switch (target) {
+		case V4L2_SEL_TGT_CROP:
+			return &asd->crop[pad];
+		case V4L2_SEL_TGT_COMPOSE:
+			return &asd->compose[pad];
+		}
+	} else {
+		switch (target) {
+		case V4L2_SEL_TGT_CROP:
+			return v4l2_subdev_get_try_crop(sd, cfg, pad);
+		case V4L2_SEL_TGT_COMPOSE:
+			return v4l2_subdev_get_try_compose(sd, cfg, pad);
+		}
+	}
+	WARN_ON(1);
+	return NULL;
+}
+
+static int target_valid(struct v4l2_subdev *sd, unsigned int target,
+			unsigned int pad)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+	switch (target) {
+	case V4L2_SEL_TGT_CROP:
+		return asd->valid_tgts[pad].crop;
+	case V4L2_SEL_TGT_COMPOSE:
+		return asd->valid_tgts[pad].compose;
+	default:
+		return 0;
+	}
+}
+
+int ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_pad_config *cfg,
+				  struct v4l2_mbus_framefmt *ffmt,
+				  struct v4l2_rect *r,
+				  enum isys_subdev_prop_tgt tgt,
+				  unsigned int pad, unsigned int which)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+	struct v4l2_mbus_framefmt **ffmts = NULL;
+	struct v4l2_rect **crops = NULL;
+	struct v4l2_rect **compose = NULL;
+	unsigned int i;
+	int rval = 0;
+
+	if (tgt == IPU_ISYS_SUBDEV_PROP_TGT_NR_OF)
+		return 0;
+
+	if (WARN_ON(pad >= sd->entity.num_pads))
+		return -EINVAL;
+
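+	/*
+	 * num_pads is not bounded at compile time, so the per-pad
+	 * format and selection pointer arrays are allocated on the heap.
+	 */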
+	ffmts = kcalloc(sd->entity.num_pads,
+			sizeof(*ffmts), GFP_KERNEL);
+	if (!ffmts) {
+		rval = -ENOMEM;
+		goto out_subdev_fmt_propagate;
+	}
+	crops = kcalloc(sd->entity.num_pads,
+			sizeof(*crops), GFP_KERNEL);
+	if (!crops) {
+		rval = -ENOMEM;
+		goto out_subdev_fmt_propagate;
+	}
+	compose = kcalloc(sd->entity.num_pads,
+			  sizeof(*compose), GFP_KERNEL);
+	if (!compose) {
+		rval = -ENOMEM;
+		goto out_subdev_fmt_propagate;
+	}
+
+	for (i = 0; i < sd->entity.num_pads; i++) {
+		ffmts[i] = __ipu_isys_get_ffmt(sd, cfg, i, which);
+		crops[i] = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP,
+						    i, which);
+		compose[i] = __ipu_isys_get_selection(sd, cfg,
+						      V4L2_SEL_TGT_COMPOSE,
+						      i, which);
+	}
+
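+	/*
+	 * Propagate one step at a time: sink format -> sink crop ->
+	 * sink compose -> source compose -> source crop. Each case
+	 * recurses with tgt + 1 until the source format is set.
+	 */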
+	switch (tgt) {
+	case IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT:
+		crops[pad]->left = 0;
+		crops[pad]->top = 0;
+		crops[pad]->width = ffmt->width;
+		crops[pad]->height = ffmt->height;
+		rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, crops[pad],
+						     tgt + 1, pad, which);
+		goto out_subdev_fmt_propagate;
+	case IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP:
+		if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
+			goto out_subdev_fmt_propagate;
+
+		compose[pad]->left = 0;
+		compose[pad]->top = 0;
+		compose[pad]->width = r->width;
+		compose[pad]->height = r->height;
+		rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt,
+						     compose[pad], tgt + 1,
+						     pad, which);
+		goto out_subdev_fmt_propagate;
+	case IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE:
+		if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) {
+			rval = -EINVAL;
+			goto out_subdev_fmt_propagate;
+		}
+
+		for (i = 1; i < sd->entity.num_pads; i++) {
+			if (!(sd->entity.pads[i].flags &
+					MEDIA_PAD_FL_SOURCE))
+				continue;
+
+			compose[i]->left = 0;
+			compose[i]->top = 0;
+			compose[i]->width = r->width;
+			compose[i]->height = r->height;
+			rval = ipu_isys_subdev_fmt_propagate(sd, cfg,
+							     ffmt,
+							     compose[i],
+							     tgt + 1, i,
+							     which);
+			if (rval)
+				goto out_subdev_fmt_propagate;
+		}
+		goto out_subdev_fmt_propagate;
+	case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE:
+		if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SINK)) {
+			rval = -EINVAL;
+			goto out_subdev_fmt_propagate;
+		}
+
+		crops[pad]->left = 0;
+		crops[pad]->top = 0;
+		crops[pad]->width = r->width;
+		crops[pad]->height = r->height;
+		rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt,
+						     crops[pad], tgt + 1,
+						     pad, which);
+		goto out_subdev_fmt_propagate;
+	case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP:{
+			struct v4l2_subdev_format fmt = {
+				.which = which,
+				.pad = pad,
+				.format = {
+					.width = r->width,
+					.height = r->height,
+					/*
+					 * Either use the code from sink pad
+					 * or the current one.
+					 */
+					.code = ffmt ? ffmt->code :
+						       ffmts[pad]->code,
+					.field = ffmt ? ffmt->field :
+							ffmts[pad]->field,
+				},
+			};
+
+			asd->set_ffmt(sd, cfg, &fmt);
+			goto out_subdev_fmt_propagate;
+		}
+	}
+
+out_subdev_fmt_propagate:
+	kfree(ffmts);
+	kfree(crops);
+	kfree(compose);
+	return rval;
+}
+
+int ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd,
+				     struct v4l2_subdev_pad_config *cfg,
+				     struct v4l2_subdev_format *fmt)
+{
+	struct v4l2_mbus_framefmt *ffmt =
+		__ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which);
+
+	/* No propagation for non-zero pads. */
+	if (fmt->pad) {
+		struct v4l2_mbus_framefmt *sink_ffmt =
+			__ipu_isys_get_ffmt(sd, cfg, 0, fmt->which);
+
+		ffmt->width = sink_ffmt->width;
+		ffmt->height = sink_ffmt->height;
+		ffmt->code = sink_ffmt->code;
+		ffmt->field = sink_ffmt->field;
+
+		return 0;
+	}
+
+	ffmt->width = fmt->format.width;
+	ffmt->height = fmt->format.height;
+	ffmt->code = fmt->format.code;
+	ffmt->field = fmt->format.field;
+
+	return ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, NULL,
+					     IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT,
+					     fmt->pad, fmt->which);
+}
+
+int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_format *fmt)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+	struct v4l2_mbus_framefmt *ffmt =
+		__ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which);
+	u32 code = asd->supported_codes[fmt->pad][0];
+	unsigned int i;
+
+	WARN_ON(!mutex_is_locked(&asd->mutex));
+
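+	/*
+	 * Clamp the requested size to the supported range and fall back
+	 * to the pad's first supported media bus code if the requested
+	 * one is not supported.
+	 */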
+	fmt->format.width = clamp(fmt->format.width, IPU_ISYS_MIN_WIDTH,
+				  IPU_ISYS_MAX_WIDTH);
+	fmt->format.height = clamp(fmt->format.height,
+				   IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT);
+
+	for (i = 0; asd->supported_codes[fmt->pad][i]; i++) {
+		if (asd->supported_codes[fmt->pad][i] == fmt->format.code) {
+			code = asd->supported_codes[fmt->pad][i];
+			break;
+		}
+	}
+
+	fmt->format.code = code;
+
+	asd->set_ffmt(sd, cfg, fmt);
+
+	fmt->format = *ffmt;
+
+	return 0;
+}
+
+int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+	int rval;
+
+	mutex_lock(&asd->mutex);
+	rval = __ipu_isys_subdev_set_ffmt(sd, cfg, fmt);
+	mutex_unlock(&asd->mutex);
+
+	return rval;
+}
+
+int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+	mutex_lock(&asd->mutex);
+	fmt->format = *__ipu_isys_get_ffmt(sd, cfg, fmt->pad,
+					   fmt->which);
+	mutex_unlock(&asd->mutex);
+
+	return 0;
+}
+
+int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd,
+			    struct v4l2_subdev_pad_config *cfg,
+			    struct v4l2_subdev_selection *sel)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+	struct media_pad *pad = &asd->sd.entity.pads[sel->pad];
+	struct v4l2_rect *r, __r = { 0 };
+	unsigned int tgt;
+
+	if (!target_valid(sd, sel->target, sel->pad))
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		if (pad->flags & MEDIA_PAD_FL_SINK) {
+			struct v4l2_mbus_framefmt *ffmt =
+				__ipu_isys_get_ffmt(sd, cfg, sel->pad,
+						    sel->which);
+
+			__r.width = ffmt->width;
+			__r.height = ffmt->height;
+			r = &__r;
+			tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP;
+		} else {
+			/* 0 is the sink pad. */
+			r = __ipu_isys_get_selection(sd, cfg, sel->target, 0,
+						     sel->which);
+			tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP;
+		}
+
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+		if (pad->flags & MEDIA_PAD_FL_SINK) {
+			r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP,
+						     sel->pad, sel->which);
+			tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE;
+		} else {
+			r = __ipu_isys_get_selection(sd, cfg,
+						     V4L2_SEL_TGT_COMPOSE, 0,
+						     sel->which);
+			tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
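+	/* Clamp the selection rectangle against the parent chosen above. */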
+	sel->r.width = clamp(sel->r.width, IPU_ISYS_MIN_WIDTH, r->width);
+	sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT, r->height);
+	*__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad,
+				  sel->which) = sel->r;
+	return ipu_isys_subdev_fmt_propagate(sd, cfg, NULL, &sel->r, tgt,
+					     sel->pad, sel->which);
+}
+
+int ipu_isys_subdev_get_sel(struct v4l2_subdev *sd,
+			    struct v4l2_subdev_pad_config *cfg,
+			    struct v4l2_subdev_selection *sel)
+{
+	if (!target_valid(sd, sel->target, sel->pad))
+		return -EINVAL;
+
+	sel->r = *__ipu_isys_get_selection(sd, cfg, sel->target,
+					   sel->pad, sel->which);
+
+	return 0;
+}
+
+int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+				   struct v4l2_subdev_pad_config *cfg,
+				   struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+	const u32 *supported_codes = asd->supported_codes[code->pad];
+	u32 index;
+
+	for (index = 0; supported_codes[index]; index++) {
+		if (index == code->index) {
+			code->code = supported_codes[index];
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Besides validating the link, figure out the external pad and the
+ * ISYS FW ABI source.
+ */
+int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd,
+				  struct media_link *link,
+				  struct v4l2_subdev_format *source_fmt,
+				  struct v4l2_subdev_format *sink_fmt)
+{
+	struct v4l2_subdev *source_sd =
+	    media_entity_to_v4l2_subdev(link->source->entity);
+	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+						    struct ipu_isys_pipeline,
+						    pipe);
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+	if (!source_sd)
+		return -ENODEV;
+	if (strncmp(source_sd->name, IPU_ISYS_ENTITY_PREFIX,
+		    strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) {
+		/*
+		 * source_sd isn't ours, so the link source is the
+		 * external sub-device.
+		 */
+		ip->external = link->source;
+		ip->source = to_ipu_isys_subdev(sd)->source;
+		dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n",
+			sd->entity.name, ip->source);
+	} else if (source_sd->entity.num_pads == 1) {
+		/* All internal sources have a single pad. */
+		ip->external = link->source;
+		ip->source = to_ipu_isys_subdev(source_sd)->source;
+
+		dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n",
+			sd->entity.name, ip->source);
+	}
+
+	if (asd->isl_mode != IPU_ISL_OFF)
+		ip->isl_mode = asd->isl_mode;
+
+	return v4l2_subdev_link_validate_default(sd, link, source_fmt,
+						 sink_fmt);
+}
+
+int ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+	unsigned int i;
+
+	mutex_lock(&asd->mutex);
+
+	for (i = 0; i < asd->sd.entity.num_pads; i++) {
+		struct v4l2_mbus_framefmt *try_fmt =
+			v4l2_subdev_get_try_format(sd, fh->pad, i);
+		struct v4l2_rect *try_crop =
+			v4l2_subdev_get_try_crop(sd, fh->pad, i);
+		struct v4l2_rect *try_compose =
+			v4l2_subdev_get_try_compose(sd, fh->pad, i);
+
+		*try_fmt = asd->ffmt[i];
+		*try_crop = asd->crop[i];
+		*try_compose = asd->compose[i];
+	}
+
+	mutex_unlock(&asd->mutex);
+
+	return 0;
+}
+
+int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	return 0;
+}
+
+int ipu_isys_subdev_init(struct ipu_isys_subdev *asd,
+			 struct v4l2_subdev_ops *ops,
+			 unsigned int nr_ctrls,
+			 unsigned int num_pads,
+			 unsigned int num_source,
+			 unsigned int num_sink,
+			 unsigned int sd_flags)
+{
+	int rval = -EINVAL;
+
+	mutex_init(&asd->mutex);
+
+	v4l2_subdev_init(&asd->sd, ops);
+
+	asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | sd_flags;
+	asd->sd.owner = THIS_MODULE;
+	asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+	asd->nsources = num_source;
+	asd->nsinks = num_sink;
+
+	asd->pad = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+				sizeof(*asd->pad), GFP_KERNEL);
+
+	asd->ffmt = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+				 sizeof(*asd->ffmt), GFP_KERNEL);
+
+	asd->crop = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+				 sizeof(*asd->crop), GFP_KERNEL);
+
+	asd->compose = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+				    sizeof(*asd->compose), GFP_KERNEL);
+
+	asd->valid_tgts = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+				       sizeof(*asd->valid_tgts), GFP_KERNEL);
+	if (!asd->pad || !asd->ffmt || !asd->crop || !asd->compose ||
+	    !asd->valid_tgts) {
+		rval = -ENOMEM;
+		goto out_mutex_destroy;
+	}
+
+	rval = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad);
+	if (rval)
+		goto out_mutex_destroy;
+
+	if (asd->ctrl_init) {
+		rval = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls);
+		if (rval)
+			goto out_media_entity_cleanup;
+
+		asd->ctrl_init(&asd->sd);
+		if (asd->ctrl_handler.error) {
+			rval = asd->ctrl_handler.error;
+			goto out_v4l2_ctrl_handler_free;
+		}
+
+		asd->sd.ctrl_handler = &asd->ctrl_handler;
+	}
+
+	asd->source = -1;
+
+	return 0;
+
+out_v4l2_ctrl_handler_free:
+	v4l2_ctrl_handler_free(&asd->ctrl_handler);
+
+out_media_entity_cleanup:
+	media_entity_cleanup(&asd->sd.entity);
+
+out_mutex_destroy:
+	mutex_destroy(&asd->mutex);
+
+	return rval;
+}
+
+void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd)
+{
+	media_entity_cleanup(&asd->sd.entity);
+	v4l2_ctrl_handler_free(&asd->ctrl_handler);
+	mutex_destroy(&asd->mutex);
+}
diff --git a/drivers/media/pci/intel/ipu-isys-subdev.h b/drivers/media/pci/intel/ipu-isys-subdev.h
new file mode 100644
index 000000000000..35eb9d1d3cb7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-subdev.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_SUBDEV_H
+#define IPU_ISYS_SUBDEV_H
+
+#include <linux/mutex.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include "ipu-isys-queue.h"
+
+#define IPU_ISYS_MIPI_CSI2_TYPE_NULL	0x10
+#define IPU_ISYS_MIPI_CSI2_TYPE_BLANKING	0x11
+#define IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8	0x12
+#define IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8	0x1e
+#define IPU_ISYS_MIPI_CSI2_TYPE_YUV422_10	0x1f
+#define IPU_ISYS_MIPI_CSI2_TYPE_RGB565	0x22
+#define IPU_ISYS_MIPI_CSI2_TYPE_RGB888	0x24
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW6	0x28
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW7	0x29
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW8	0x2a
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW10	0x2b
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW12	0x2c
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW14	0x2d
+/* User-defined data types, i = 1..8 */
+#define IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(i)	(0x30 + (i) - 1)
+
+#define FMT_ENTRY (struct ipu_isys_fmt_entry [])
+
+enum isys_subdev_prop_tgt {
+	IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT,
+	IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP,
+	IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE,
+	IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE,
+	IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP,
+};
+
+#define IPU_ISYS_SUBDEV_PROP_TGT_NR_OF \
+	(IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP + 1)
+
+enum ipu_isl_mode {
+	IPU_ISL_OFF = 0,	/* SOC BE */
+	IPU_ISL_CSI2_BE,	/* RAW BE */
+};
+
+enum ipu_be_mode {
+	IPU_BE_RAW = 0,
+	IPU_BE_SOC
+};
+
+enum ipu_isys_subdev_pixelorder {
+	IPU_ISYS_SUBDEV_PIXELORDER_BGGR = 0,
+	IPU_ISYS_SUBDEV_PIXELORDER_GBRG,
+	IPU_ISYS_SUBDEV_PIXELORDER_GRBG,
+	IPU_ISYS_SUBDEV_PIXELORDER_RGGB,
+};
+
+struct ipu_isys;
+
+struct ipu_isys_subdev {
+	/* Serialise access to any other field in the struct */
+	struct mutex mutex;
+	struct v4l2_subdev sd;
+	struct ipu_isys *isys;
+	u32 const *const *supported_codes;
+	struct media_pad *pad;
+	struct v4l2_mbus_framefmt *ffmt;
+	struct v4l2_rect *crop;
+	struct v4l2_rect *compose;
+	unsigned int nsinks;
+	unsigned int nsources;
+	struct v4l2_ctrl_handler ctrl_handler;
+	void (*ctrl_init)(struct v4l2_subdev *sd);
+	void (*set_ffmt)(struct v4l2_subdev *sd,
+			 struct v4l2_subdev_pad_config *cfg,
+			 struct v4l2_subdev_format *fmt);
+	struct {
+		bool crop;
+		bool compose;
+	} *valid_tgts;
+	enum ipu_isl_mode isl_mode;
+	enum ipu_be_mode be_mode;
+	int source;	/* SSI stream source; -1 if unset */
+};
+
+#define to_ipu_isys_subdev(__sd) \
+	container_of(__sd, struct ipu_isys_subdev, sd)
+
+struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd,
+					       struct v4l2_subdev_pad_config
+					       *cfg,
+					       unsigned int pad,
+					       unsigned int which);
+
+unsigned int ipu_isys_mbus_code_to_bpp(u32 code);
+unsigned int ipu_isys_mbus_code_to_mipi(u32 code);
+u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code);
+
+enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code);
+
+int ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_pad_config *cfg,
+				  struct v4l2_mbus_framefmt *ffmt,
+				  struct v4l2_rect *r,
+				  enum isys_subdev_prop_tgt tgt,
+				  unsigned int pad, unsigned int which);
+
+int ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd,
+				     struct v4l2_subdev_pad_config *cfg,
+				     struct v4l2_subdev_format *fmt);
+int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_format *fmt);
+struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd,
+					   struct v4l2_subdev_pad_config *cfg,
+					   unsigned int target,
+					   unsigned int pad,
+					   unsigned int which);
+int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt);
+int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt);
+int ipu_isys_subdev_get_sel(struct v4l2_subdev *sd,
+			    struct v4l2_subdev_pad_config *cfg,
+			    struct v4l2_subdev_selection *sel);
+int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd,
+			    struct v4l2_subdev_pad_config *cfg,
+			    struct v4l2_subdev_selection *sel);
+int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+				   struct v4l2_subdev_pad_config *cfg,
+				   struct v4l2_subdev_mbus_code_enum
+				   *code);
+int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd,
+				  struct media_link *link,
+				  struct v4l2_subdev_format *source_fmt,
+				  struct v4l2_subdev_format *sink_fmt);
+
+int ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int ipu_isys_subdev_init(struct ipu_isys_subdev *asd,
+			 struct v4l2_subdev_ops *ops,
+			 unsigned int nr_ctrls,
+			 unsigned int num_pads,
+			 unsigned int num_source,
+			 unsigned int num_sink,
+			 unsigned int sd_flags);
+void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd);
+#endif /* IPU_ISYS_SUBDEV_H */
diff --git a/drivers/media/pci/intel/ipu-isys-tpg.c b/drivers/media/pci/intel/ipu-isys-tpg.c
new file mode 100644
index 000000000000..9c6855ff0cbc
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-tpg.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-isys.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-tpg.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-isys-csi2-reg.h"
+
+static const u32 tpg_supported_codes_pad[] = {
+	MEDIA_BUS_FMT_SBGGR8_1X8,
+	MEDIA_BUS_FMT_SGBRG8_1X8,
+	MEDIA_BUS_FMT_SGRBG8_1X8,
+	MEDIA_BUS_FMT_SRGGB8_1X8,
+	MEDIA_BUS_FMT_SBGGR10_1X10,
+	MEDIA_BUS_FMT_SGBRG10_1X10,
+	MEDIA_BUS_FMT_SGRBG10_1X10,
+	MEDIA_BUS_FMT_SRGGB10_1X10,
+	0,
+};
+
+static const u32 *tpg_supported_codes[] = {
+	tpg_supported_codes_pad,
+};
+
+static struct v4l2_subdev_internal_ops tpg_sd_internal_ops = {
+	.open = ipu_isys_subdev_open,
+	.close = ipu_isys_subdev_close,
+};
+
+static const struct v4l2_subdev_video_ops tpg_sd_video_ops = {
+	.s_stream = tpg_set_stream,
+};
+
+static int ipu_isys_tpg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct ipu_isys_tpg *tpg = container_of(container_of(ctrl->handler,
+							     struct
+							     ipu_isys_subdev,
+							     ctrl_handler),
+						struct ipu_isys_tpg, asd);
+
+	switch (ctrl->id) {
+	case V4L2_CID_HBLANK:
+		writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_HBLANK_CYC);
+		break;
+	case V4L2_CID_VBLANK:
+		writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_VBLANK_CYC);
+		break;
+	case V4L2_CID_TEST_PATTERN:
+		writel(ctrl->val, tpg->base + MIPI_GEN_REG_TPG_MODE);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops ipu_isys_tpg_ctrl_ops = {
+	.s_ctrl = ipu_isys_tpg_s_ctrl,
+};
+
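+/*
+ * Nominal TPG pixel rate, derived from the generator's pixels-per-clock,
+ * the ISYS frequency and the bits per pixel of the current format.
+ */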
+static s64 ipu_isys_tpg_rate(struct ipu_isys_tpg *tpg, unsigned int bpp)
+{
+	return MIPI_GEN_PPC * IPU_ISYS_FREQ / bpp;
+}
+
+static const char *const tpg_mode_items[] = {
+	"Ramp",
+	"Checkerboard",	/* Does not work, disabled. */
+	"Frame Based Colour",
+};
+
+static struct v4l2_ctrl_config tpg_mode = {
+	.ops = &ipu_isys_tpg_ctrl_ops,
+	.id = V4L2_CID_TEST_PATTERN,
+	.name = "Test Pattern",
+	.type = V4L2_CTRL_TYPE_MENU,
+	.min = 0,
+	.max = ARRAY_SIZE(tpg_mode_items) - 1,
+	.def = 0,
+	.menu_skip_mask = 0x2,
+	.qmenu = tpg_mode_items,
+};
+
+static const struct v4l2_ctrl_config csi2_header_cfg = {
+	.id = V4L2_CID_IPU_STORE_CSI2_HEADER,
+	.name = "Store CSI-2 Headers",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min = 0,
+	.max = 1,
+	.step = 1,
+	.def = 1,
+};
+
+static void ipu_isys_tpg_init_controls(struct v4l2_subdev *sd)
+{
+	struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd);
+	int hblank = 1024;
+	u64 default_pixel_rate;
+
+	tpg->hblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler,
+					&ipu_isys_tpg_ctrl_ops,
+					V4L2_CID_HBLANK, 8, 65535, 1, hblank);
+
+	tpg->vblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler,
+					&ipu_isys_tpg_ctrl_ops,
+					V4L2_CID_VBLANK, 8, 65535, 1, 1024);
+
+	default_pixel_rate = ipu_isys_tpg_rate(tpg, 8);
+	tpg->pixel_rate = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler,
+					    &ipu_isys_tpg_ctrl_ops,
+					    V4L2_CID_PIXEL_RATE,
+					    default_pixel_rate,
+					    default_pixel_rate,
+					    1, default_pixel_rate);
+	if (tpg->pixel_rate) {
+		tpg->pixel_rate->cur.val = default_pixel_rate;
+		tpg->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+	}
+
+	v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &tpg_mode, NULL);
+	tpg->store_csi2_header =
+		v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler,
+				     &csi2_header_cfg, NULL);
+}
+
+static void tpg_set_ffmt(struct v4l2_subdev *sd,
+			 struct v4l2_subdev_pad_config *cfg,
+			 struct v4l2_subdev_format *fmt)
+{
+	fmt->format.field = V4L2_FIELD_NONE;
+	*__ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which) = fmt->format;
+}
+
+static int ipu_isys_tpg_set_ffmt(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_format *fmt)
+{
+	struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd);
+	__u32 code = tpg->asd.ffmt[TPG_PAD_SOURCE].code;
+	unsigned int bpp = ipu_isys_mbus_code_to_bpp(code);
+	s64 tpg_rate = ipu_isys_tpg_rate(tpg, bpp);
+	int rval;
+
+	mutex_lock(&tpg->asd.mutex);
+	rval = __ipu_isys_subdev_set_ffmt(sd, cfg, fmt);
+	mutex_unlock(&tpg->asd.mutex);
+
+	if (rval || fmt->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return rval;
+
+	v4l2_ctrl_s_ctrl_int64(tpg->pixel_rate, tpg_rate);
+
+	return 0;
+}
+
+static const struct ipu_isys_pixelformat *
+ipu_isys_tpg_try_fmt(struct ipu_isys_video *av,
+		     struct v4l2_pix_format_mplane *mpix)
+{
+	struct media_link *link = list_first_entry(&av->vdev.entity.links,
+						   struct media_link, list);
+	struct v4l2_subdev *sd =
+		media_entity_to_v4l2_subdev(link->source->entity);
+	struct ipu_isys_tpg *tpg;
+
+	if (!sd)
+		return NULL;
+
+	tpg = to_ipu_isys_tpg(sd);
+
+	return ipu_isys_video_try_fmt_vid_mplane(av, mpix,
+		v4l2_ctrl_g_ctrl(tpg->store_csi2_header));
+}
+
+static const struct v4l2_subdev_pad_ops tpg_sd_pad_ops = {
+	.get_fmt = ipu_isys_subdev_get_ffmt,
+	.set_fmt = ipu_isys_tpg_set_ffmt,
+	.enum_mbus_code = ipu_isys_subdev_enum_mbus_code,
+};
+
+static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+			   struct v4l2_event_subscription *sub)
+{
+	switch (sub->type) {
+#ifdef IPU_TPG_FRAME_SYNC
+	case V4L2_EVENT_FRAME_SYNC:
+		return v4l2_event_subscribe(fh, sub, 10, NULL);
+#endif
+	case V4L2_EVENT_CTRL:
+		return v4l2_ctrl_subscribe_event(fh, sub);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* V4L2 subdev core operations */
+static const struct v4l2_subdev_core_ops tpg_sd_core_ops = {
+	.subscribe_event = subscribe_event,
+	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static struct v4l2_subdev_ops tpg_sd_ops = {
+	.core = &tpg_sd_core_ops,
+	.video = &tpg_sd_video_ops,
+	.pad = &tpg_sd_pad_ops,
+};
+
+static struct media_entity_operations tpg_entity_ops = {
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg)
+{
+	v4l2_device_unregister_subdev(&tpg->asd.sd);
+	ipu_isys_subdev_cleanup(&tpg->asd);
+	ipu_isys_video_cleanup(&tpg->av);
+}
+
+int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg,
+		      struct ipu_isys *isys,
+		      void __iomem *base, void __iomem *sel,
+		      unsigned int index)
+{
+	struct v4l2_subdev_format fmt = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.pad = TPG_PAD_SOURCE,
+		.format = {
+			   .width = 4096,
+			   .height = 3072,
+			   },
+	};
+	int rval;
+
+	tpg->isys = isys;
+	tpg->base = base;
+	tpg->sel = sel;
+	tpg->index = index;
+
+	tpg->asd.sd.entity.ops = &tpg_entity_ops;
+	tpg->asd.ctrl_init = ipu_isys_tpg_init_controls;
+	tpg->asd.isys = isys;
+
+	rval = ipu_isys_subdev_init(&tpg->asd, &tpg_sd_ops, 5,
+				    NR_OF_TPG_PADS,
+				    NR_OF_TPG_SOURCE_PADS,
+				    NR_OF_TPG_SINK_PADS,
+				    V4L2_SUBDEV_FL_HAS_EVENTS);
+	if (rval)
+		return rval;
+
+	tpg->asd.sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+	tpg->asd.pad[TPG_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+	tpg->asd.source = IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 + index;
+	tpg->asd.supported_codes = tpg_supported_codes;
+	tpg->asd.set_ffmt = tpg_set_ffmt;
+	ipu_isys_subdev_set_ffmt(&tpg->asd.sd, NULL, &fmt);
+
+	tpg->asd.sd.internal_ops = &tpg_sd_internal_ops;
+	snprintf(tpg->asd.sd.name, sizeof(tpg->asd.sd.name),
+		 IPU_ISYS_ENTITY_PREFIX " TPG %u", index);
+	v4l2_set_subdevdata(&tpg->asd.sd, &tpg->asd);
+	rval = v4l2_device_register_subdev(&isys->v4l2_dev, &tpg->asd.sd);
+	if (rval) {
+		dev_info(&isys->adev->dev, "can't register v4l2 subdev\n");
+		goto fail;
+	}
+
+	snprintf(tpg->av.vdev.name, sizeof(tpg->av.vdev.name),
+		 IPU_ISYS_ENTITY_PREFIX " TPG %u capture", index);
+	tpg->av.isys = isys;
+	tpg->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI;
+	tpg->av.pfmts = ipu_isys_pfmts_packed;
+	tpg->av.try_fmt_vid_mplane = ipu_isys_tpg_try_fmt;
+	tpg->av.prepare_fw_stream =
+	    ipu_isys_prepare_fw_cfg_default;
+	tpg->av.packed = true;
+	tpg->av.line_header_length = IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE;
+	tpg->av.line_footer_length = IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE;
+	tpg->av.aq.buf_prepare = ipu_isys_buf_prepare;
+	tpg->av.aq.fill_frame_buff_set_pin =
+	    ipu_isys_buffer_to_fw_frame_buff_pin;
+	tpg->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate;
+	tpg->av.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_video_buffer);
+
+	rval = ipu_isys_video_init(&tpg->av, &tpg->asd.sd.entity,
+				   TPG_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0);
+	if (rval) {
+		dev_info(&isys->adev->dev, "can't init video node\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	ipu_isys_tpg_cleanup(tpg);
+
+	return rval;
+}
diff --git a/drivers/media/pci/intel/ipu-isys-tpg.h b/drivers/media/pci/intel/ipu-isys-tpg.h
new file mode 100644
index 000000000000..332f087ed774
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-tpg.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_TPG_H
+#define IPU_ISYS_TPG_H
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+#include "ipu-isys-queue.h"
+
+struct ipu_isys_tpg_pdata;
+struct ipu_isys;
+
+#define TPG_PAD_SOURCE			0
+#define NR_OF_TPG_PADS			1
+#define NR_OF_TPG_SOURCE_PADS		1
+#define NR_OF_TPG_SINK_PADS		0
+#define NR_OF_TPG_STREAMS		1
+
+/*
+ * PPC is 4 pixels per clock for RAW8, RAW10 and RAW12.
+ * Source: FW validation test code.
+ */
+#define MIPI_GEN_PPC		4
+
+#define MIPI_GEN_REG_COM_ENABLE				0x0
+#define MIPI_GEN_REG_COM_DTYPE				0x4
+/* RAW8, RAW10 or RAW12 */
+#define MIPI_GEN_COM_DTYPE_RAW(n)			(((n) - 8) / 2)
+#define MIPI_GEN_REG_COM_VTYPE				0x8
+#define MIPI_GEN_REG_COM_VCHAN				0xc
+#define MIPI_GEN_REG_COM_WCOUNT				0x10
+#define MIPI_GEN_REG_PRBS_RSTVAL0			0x14
+#define MIPI_GEN_REG_PRBS_RSTVAL1			0x18
+#define MIPI_GEN_REG_SYNG_FREE_RUN			0x1c
+#define MIPI_GEN_REG_SYNG_PAUSE				0x20
+#define MIPI_GEN_REG_SYNG_NOF_FRAMES			0x24
+#define MIPI_GEN_REG_SYNG_NOF_PIXELS			0x28
+#define MIPI_GEN_REG_SYNG_NOF_LINES			0x2c
+#define MIPI_GEN_REG_SYNG_HBLANK_CYC			0x30
+#define MIPI_GEN_REG_SYNG_VBLANK_CYC			0x34
+#define MIPI_GEN_REG_SYNG_STAT_HCNT			0x38
+#define MIPI_GEN_REG_SYNG_STAT_VCNT			0x3c
+#define MIPI_GEN_REG_SYNG_STAT_FCNT			0x40
+#define MIPI_GEN_REG_SYNG_STAT_DONE			0x44
+#define MIPI_GEN_REG_TPG_MODE				0x48
+#define MIPI_GEN_REG_TPG_HCNT_MASK			0x4c
+#define MIPI_GEN_REG_TPG_VCNT_MASK			0x50
+#define MIPI_GEN_REG_TPG_XYCNT_MASK			0x54
+#define MIPI_GEN_REG_TPG_HCNT_DELTA			0x58
+#define MIPI_GEN_REG_TPG_VCNT_DELTA			0x5c
+#define MIPI_GEN_REG_TPG_R1				0x60
+#define MIPI_GEN_REG_TPG_G1				0x64
+#define MIPI_GEN_REG_TPG_B1				0x68
+#define MIPI_GEN_REG_TPG_R2				0x6c
+#define MIPI_GEN_REG_TPG_G2				0x70
+#define MIPI_GEN_REG_TPG_B2				0x74
+
+/*
+ * struct ipu_isys_tpg - test pattern generator sub-device
+ *
+ * @base: MMIO base of the MIPI generator registers
+ * @index: TPG instance index
+ */
+struct ipu_isys_tpg {
+	struct ipu_isys_tpg_pdata *pdata;
+	struct ipu_isys *isys;
+	struct ipu_isys_subdev asd;
+	struct ipu_isys_video av;
+
+	void __iomem *base;
+	void __iomem *sel;
+	unsigned int index;
+	int streaming;
+
+	struct v4l2_ctrl *hblank;
+	struct v4l2_ctrl *vblank;
+	struct v4l2_ctrl *pixel_rate;
+	struct v4l2_ctrl *store_csi2_header;
+};
+
+#define to_ipu_isys_tpg(sd)		\
+	container_of(to_ipu_isys_subdev(sd), \
+	struct ipu_isys_tpg, asd)
+#ifdef IPU_TPG_FRAME_SYNC
+void ipu_isys_tpg_sof_event(struct ipu_isys_tpg *tpg);
+void ipu_isys_tpg_eof_event(struct ipu_isys_tpg *tpg);
+#endif
+int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg,
+		      struct ipu_isys *isys,
+		      void __iomem *base, void __iomem *sel,
+		      unsigned int index);
+void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg);
+int tpg_set_stream(struct v4l2_subdev *sd, int enable);
+
+#endif /* IPU_ISYS_TPG_H */
diff --git a/drivers/media/pci/intel/ipu-isys-video.c b/drivers/media/pci/intel/ipu-isys-video.c
new file mode 100644
index 000000000000..709f1aa6dfad
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-video.c
@@ -0,0 +1,1698 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/init_task.h>
+#include <linux/kthread.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/compat.h>
+
+#include <uapi/linux/sched/types.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-cpd.h"
+#include "ipu-isys.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-trace.h"
+#include "ipu-fw-isys.h"
+#include "ipu-fw-com.h"
+
+/* use max resolution pixel rate by default */
+#define DEFAULT_PIXEL_RATE	(360000000ULL * 2 * 4 / 10)
+
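+/*
+ * Pixel format tables: each entry maps a V4L2 pixel format and media
+ * bus code to the matching firmware frame format; the numeric fields
+ * are the per-pixel bit widths used for line stride calculations.
+ */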
+const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[] = {
+	{V4L2_PIX_FMT_Y10, 16, 10, 0, MEDIA_BUS_FMT_Y10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_UYVY, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16,
+	 IPU_FW_ISYS_FRAME_FORMAT_UYVY},
+	{V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16,
+	 IPU_FW_ISYS_FRAME_FORMAT_YUYV},
+	{V4L2_PIX_FMT_NV16, 16, 16, 8, MEDIA_BUS_FMT_UYVY8_1X16,
+	 IPU_FW_ISYS_FRAME_FORMAT_NV16},
+	{V4L2_PIX_FMT_XRGB32, 32, 32, 0, MEDIA_BUS_FMT_RGB565_1X16,
+	 IPU_FW_ISYS_FRAME_FORMAT_RGBA888},
+	{V4L2_PIX_FMT_XBGR32, 32, 32, 0, MEDIA_BUS_FMT_RGB888_1X24,
+	 IPU_FW_ISYS_FRAME_FORMAT_RGBA888},
+	/* Raw bayer formats. */
+	{V4L2_PIX_FMT_SBGGR12, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGBRG12, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGRBG12, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SRGGB12, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SBGGR10, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGBRG10, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGRBG10, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SRGGB10, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{}
+};
+
+const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[] = {
+	{V4L2_PIX_FMT_Y10, 10, 10, 0, MEDIA_BUS_FMT_Y10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+#ifdef V4L2_PIX_FMT_Y210
+	{V4L2_PIX_FMT_Y210, 20, 20, 0, MEDIA_BUS_FMT_YUYV10_1X20,
+	 IPU_FW_ISYS_FRAME_FORMAT_YUYV},
+#endif
+	{V4L2_PIX_FMT_UYVY, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16,
+	 IPU_FW_ISYS_FRAME_FORMAT_UYVY},
+	{V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16,
+	 IPU_FW_ISYS_FRAME_FORMAT_YUYV},
+	{V4L2_PIX_FMT_RGB565, 16, 16, 0, MEDIA_BUS_FMT_RGB565_1X16,
+	 IPU_FW_ISYS_FRAME_FORMAT_RGB565},
+	{V4L2_PIX_FMT_BGR24, 24, 24, 0, MEDIA_BUS_FMT_RGB888_1X24,
+	 IPU_FW_ISYS_FRAME_FORMAT_RGBA888},
+#ifndef V4L2_PIX_FMT_SBGGR12P
+	{V4L2_PIX_FMT_SBGGR12, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SGBRG12, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SGRBG12, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SRGGB12, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+#else /* V4L2_PIX_FMT_SBGGR12P */
+	{V4L2_PIX_FMT_SBGGR12P, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SGBRG12P, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SGRBG12P, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SRGGB12P, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+#endif /* V4L2_PIX_FMT_SBGGR12P */
+	{V4L2_PIX_FMT_SBGGR10P, 10, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+	{V4L2_PIX_FMT_SGBRG10P, 10, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+	{V4L2_PIX_FMT_SGRBG10P, 10, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+	{V4L2_PIX_FMT_SRGGB10P, 10, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+	{V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{}
+};
+
+static int video_open(struct file *file)
+{
+	struct ipu_isys_video *av = video_drvdata(file);
+	struct ipu_isys *isys = av->isys;
+	struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev);
+	struct ipu_device *isp = adev->isp;
+	int rval;
+	const struct ipu_isys_internal_pdata *ipdata;
+
+	mutex_lock(&isys->mutex);
+
+	if (isys->reset_needed || isp->flr_done) {
+		mutex_unlock(&isys->mutex);
+		dev_warn(&isys->adev->dev, "isys power cycle required\n");
+		return -EIO;
+	}
+	mutex_unlock(&isys->mutex);
+
+	rval = pm_runtime_get_sync(&isys->adev->dev);
+	if (rval < 0) {
+		pm_runtime_put_noidle(&isys->adev->dev);
+		return rval;
+	}
+
+	rval = v4l2_fh_open(file);
+	if (rval)
+		goto out_power_down;
+
+	rval = v4l2_pipeline_pm_use(&av->vdev.entity, 1);
+	if (rval)
+		goto out_v4l2_fh_release;
+
+	mutex_lock(&isys->mutex);
+
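+	/*
+	 * Only the first opener configures the SPC and initializes the
+	 * firmware; subsequent opens just take a reference.
+	 */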
+	if (isys->video_opened++) {
+		/* Already open */
+		mutex_unlock(&isys->mutex);
+		return 0;
+	}
+
+	ipdata = isys->pdata->ipdata;
+	ipu_configure_spc(adev->isp,
+			  &ipdata->hw_variant,
+			  IPU_CPD_PKG_DIR_ISYS_SERVER_IDX,
+			  isys->pdata->base, isys->pkg_dir,
+			  isys->pkg_dir_dma_addr);
+
+	/*
+	 * Buffers could have been left on the wrong queue at the last
+	 * closure. Move them back to the empty buffer queue now.
+	 */
+	ipu_cleanup_fw_msg_bufs(isys);
+
+	if (isys->fwcom) {
+		/*
+		 * Something went wrong in the previous shutdown. As we are
+		 * now restarting the isys, we can safely delete the old
+		 * context.
+		 */
+		dev_err(&isys->adev->dev, "Clearing old context\n");
+		ipu_fw_isys_cleanup(isys);
+	}
+
+	rval = ipu_fw_isys_init(av->isys, ipdata->num_parallel_streams);
+	if (rval < 0)
+		goto out_lib_init;
+
+	mutex_unlock(&isys->mutex);
+
+	return 0;
+
+out_lib_init:
+	isys->video_opened--;
+	mutex_unlock(&isys->mutex);
+	v4l2_pipeline_pm_use(&av->vdev.entity, 0);
+
+out_v4l2_fh_release:
+	v4l2_fh_release(file);
+out_power_down:
+	pm_runtime_put(&isys->adev->dev);
+
+	return rval;
+}
+
+static int video_release(struct file *file)
+{
+	struct ipu_isys_video *av = video_drvdata(file);
+	int ret = 0;
+
+	vb2_fop_release(file);
+
+	mutex_lock(&av->isys->mutex);
+
+	if (!--av->isys->video_opened) {
+		ipu_fw_isys_close(av->isys);
+		if (av->isys->fwcom) {
+			av->isys->reset_needed = true;
+			ret = -EIO;
+		}
+	}
+
+	mutex_unlock(&av->isys->mutex);
+
+	v4l2_pipeline_pm_use(&av->vdev.entity, 0);
+
+	if (av->isys->reset_needed)
+		pm_runtime_put_sync(&av->isys->adev->dev);
+	else
+		pm_runtime_put(&av->isys->adev->dev);
+
+	return ret;
+}
+
+static struct media_pad *other_pad(struct media_pad *pad)
+{
+	struct media_link *link;
+
+	list_for_each_entry(link, &pad->entity->links, list) {
+		if ((link->flags & MEDIA_LNK_FL_LINK_TYPE)
+		    != MEDIA_LNK_FL_DATA_LINK)
+			continue;
+
+		return link->source == pad ? link->sink : link->source;
+	}
+
+	WARN_ON(1);
+	return NULL;
+}
+
+const struct ipu_isys_pixelformat *
+ipu_isys_get_pixelformat(struct ipu_isys_video *av, u32 pixelformat)
+{
+	struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]);
+	struct v4l2_subdev *sd;
+	const u32 *supported_codes;
+	const struct ipu_isys_pixelformat *pfmt;
+
+	if (!pad || !pad->entity) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	sd = media_entity_to_v4l2_subdev(pad->entity);
+	supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index];
+
+	for (pfmt = av->pfmts; pfmt->bpp; pfmt++) {
+		unsigned int i;
+
+		if (pfmt->pixelformat != pixelformat)
+			continue;
+
+		for (i = 0; supported_codes[i]; i++) {
+			if (pfmt->code == supported_codes[i])
+				return pfmt;
+		}
+	}
+
+	/* Not found. Get the default, i.e. the first defined one. */
+	for (pfmt = av->pfmts; pfmt->bpp; pfmt++) {
+		if (pfmt->code == *supported_codes)
+			return pfmt;
+	}
+
+	WARN_ON(1);
+	return NULL;
+}
+
+int ipu_isys_vidioc_querycap(struct file *file, void *fh,
+			     struct v4l2_capability *cap)
+{
+	struct ipu_isys_video *av = video_drvdata(file);
+
+	strlcpy(cap->driver, IPU_ISYS_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, av->isys->media_dev.model, sizeof(cap->card));
+	snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
+		 av->isys->media_dev.bus_info);
+	return 0;
+}
+
+int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh,
+			     struct v4l2_fmtdesc *f)
+{
+	struct ipu_isys_video *av = video_drvdata(file);
+	struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]);
+	struct v4l2_subdev *sd;
+	const u32 *supported_codes;
+	const struct ipu_isys_pixelformat *pfmt;
+	u32 index;
+
+	if (!pad || !pad->entity)
+		return -EINVAL;
+	sd = media_entity_to_v4l2_subdev(pad->entity);
+	supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index];
+
+	/* Walk the 0-terminated array for the f->index-th code. */
+	for (index = f->index; *supported_codes && index;
+	     index--, supported_codes++) {
+	}
+
+	if (!*supported_codes)
+		return -EINVAL;
+
+	f->flags = 0;
+
+	/* Code found */
+	for (pfmt = av->pfmts; pfmt->bpp; pfmt++)
+		if (pfmt->code == *supported_codes)
+			break;
+
+	if (!pfmt->bpp) {
+		dev_warn(&av->isys->adev->dev,
+			 "Format not found in mapping table.");
+		return -EINVAL;
+	}
+
+	f->pixelformat = pfmt->pixelformat;
+
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *fh,
+				       struct v4l2_format *fmt)
+{
+	struct ipu_isys_video *av = video_drvdata(file);
+
+	fmt->fmt.pix_mp = av->mpix;
+
+	return 0;
+}
+
+const struct ipu_isys_pixelformat *
+ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av,
+					  struct v4l2_pix_format_mplane *mpix)
+{
+	return ipu_isys_video_try_fmt_vid_mplane(av, mpix, 0);
+}
+
+const struct ipu_isys_pixelformat *
+ipu_isys_video_try_fmt_vid_mplane(struct ipu_isys_video *av,
+				  struct v4l2_pix_format_mplane *mpix,
+				  int store_csi2_header)
+{
+	const struct ipu_isys_pixelformat *pfmt =
+	    ipu_isys_get_pixelformat(av, mpix->pixelformat);
+
+	if (!pfmt)
+		return NULL;
+	mpix->pixelformat = pfmt->pixelformat;
+	mpix->num_planes = 1;
+
+	mpix->width = clamp(mpix->width, IPU_ISYS_MIN_WIDTH,
+			    IPU_ISYS_MAX_WIDTH);
+	mpix->height = clamp(mpix->height, IPU_ISYS_MIN_HEIGHT,
+			     IPU_ISYS_MAX_HEIGHT);
+
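+	/*
+	 * Line stride: unpacked formats use the storage bits per pixel
+	 * (planar ones their per-plane width); packed formats optionally
+	 * include the CSI-2 packet header and footer.
+	 */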
+	if (!av->packed)
+		mpix->plane_fmt[0].bytesperline =
+		    mpix->width * DIV_ROUND_UP(pfmt->bpp_planar ?
+					       pfmt->bpp_planar : pfmt->bpp,
+					       BITS_PER_BYTE);
+	else if (store_csi2_header)
+		mpix->plane_fmt[0].bytesperline =
+		    DIV_ROUND_UP(av->line_header_length +
+				 av->line_footer_length +
+				 (unsigned int)mpix->width * pfmt->bpp,
+				 BITS_PER_BYTE);
+	else
+		mpix->plane_fmt[0].bytesperline =
+		    DIV_ROUND_UP((unsigned int)mpix->width * pfmt->bpp,
+				 BITS_PER_BYTE);
+
+	mpix->plane_fmt[0].bytesperline = ALIGN(mpix->plane_fmt[0].bytesperline,
+						av->isys->line_align);
+
+	if (pfmt->bpp_planar)
+		mpix->plane_fmt[0].bytesperline =
+		    mpix->plane_fmt[0].bytesperline *
+		    pfmt->bpp / pfmt->bpp_planar;
+	/*
+	 * (height + 1) * bytesperline due to a hardware issue: the DMA unit
+	 * size is a power of two, and a line should be transferred in as few
+	 * units as possible. The result is that up to one line length of
+	 * extra data may be transferred to memory after the image.
+	 * Another limitation is the GDA allocation unit size. For low
+	 * resolutions it gives a bigger number. Use the larger value to
+	 * avoid memory corruption.
+	 */
+	mpix->plane_fmt[0].sizeimage =
+	    max(max(mpix->plane_fmt[0].sizeimage,
+		    mpix->plane_fmt[0].bytesperline * mpix->height +
+		    max(mpix->plane_fmt[0].bytesperline,
+			av->isys->pdata->ipdata->isys_dma_overshoot)), 1U);
+
+	memset(mpix->plane_fmt[0].reserved, 0,
+	       sizeof(mpix->plane_fmt[0].reserved));
+
+	if (mpix->field == V4L2_FIELD_ANY)
+		mpix->field = V4L2_FIELD_NONE;
+	/* Use defaults */
+	mpix->colorspace = V4L2_COLORSPACE_RAW;
+	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
+	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+	return pfmt;
+}
+
+static int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh,
+				       struct v4l2_format *f)
+{
+	struct ipu_isys_video *av = video_drvdata(file);
+
+	if (av->aq.vbq.streaming)
+		return -EBUSY;
+
+	av->pfmt = av->try_fmt_vid_mplane(av, &f->fmt.pix_mp);
+	av->mpix = f->fmt.pix_mp;
+
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *fh,
+					 struct v4l2_format *f)
+{
+	struct ipu_isys_video *av = video_drvdata(file);
+
+	av->try_fmt_vid_mplane(av, &f->fmt.pix_mp);
+
+	return 0;
+}
+
+static long ipu_isys_vidioc_private(struct file *file, void *fh,
+				    bool valid_prio, unsigned int cmd,
+				    void *arg)
+{
+	struct ipu_isys_video *av = video_drvdata(file);
+	int ret = 0;
+
+	switch (cmd) {
+	case VIDIOC_IPU_GET_DRIVER_VERSION:
+		*(u32 *)arg = IPU_DRIVER_VERSION;
+		break;
+
+	default:
+		ret = -ENOTTY;
+		dev_dbg(&av->isys->adev->dev, "unsupported private ioctl %x\n",
+			cmd);
+	}
+
+	return ret;
+}
+
+static int vidioc_enum_input(struct file *file, void *fh,
+			     struct v4l2_input *input)
+{
+	if (input->index > 0)
+		return -EINVAL;
+	strlcpy(input->name, "camera", sizeof(input->name));
+	input->type = V4L2_INPUT_TYPE_CAMERA;
+
+	return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
+{
+	*input = 0;
+
+	return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
+{
+	return input == 0 ? 0 : -EINVAL;
+}
+
+/*
+ * Return true if an entity directly connected to an Iunit entity is
+ * an image source for the ISP. This can be any external directly
+ * connected entity or any of the test pattern generators in the
+ * Iunit.
+ */
+static bool is_external(struct ipu_isys_video *av, struct media_entity *entity)
+{
+	struct v4l2_subdev *sd;
+	unsigned int i;
+
+	/* All video nodes are ours. */
+	if (!is_media_entity_v4l2_subdev(entity))
+		return false;
+
+	sd = media_entity_to_v4l2_subdev(entity);
+	if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX,
+		    strlen(IPU_ISYS_ENTITY_PREFIX)) != 0)
+		return true;
+
+	for (i = 0; i < av->isys->pdata->ipdata->tpg.ntpgs &&
+	     av->isys->tpg[i].isys; i++)
+		if (entity == &av->isys->tpg[i].asd.sd.entity)
+			return true;
+
+	return false;
+}
+
+static int link_validate(struct media_link *link)
+{
+	struct ipu_isys_video *av =
+	    container_of(link->sink, struct ipu_isys_video, pad);
+	/* All sub-devices connected to a video node are ours. */
+	struct ipu_isys_pipeline *ip =
+		to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	struct v4l2_subdev *sd;
+
+	if (!link->source->entity)
+		return -EINVAL;
+	sd = media_entity_to_v4l2_subdev(link->source->entity);
+	if (is_external(av, link->source->entity)) {
+		ip->external = media_entity_remote_pad(av->vdev.entity.pads);
+		ip->source = to_ipu_isys_subdev(sd)->source;
+	}
+
+	ip->nr_queues++;
+
+	return 0;
+}
+
+static void get_stream_opened(struct ipu_isys_video *av)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&av->isys->lock, flags);
+	av->isys->stream_opened++;
+	spin_unlock_irqrestore(&av->isys->lock, flags);
+}
+
+static void put_stream_opened(struct ipu_isys_video *av)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&av->isys->lock, flags);
+	av->isys->stream_opened--;
+	spin_unlock_irqrestore(&av->isys->lock, flags);
+}
+
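+/* Reserve the first free firmware stream handle for this pipeline. */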
+static int get_stream_handle(struct ipu_isys_video *av)
+{
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	unsigned int stream_handle;
+	unsigned long flags;
+
+	spin_lock_irqsave(&av->isys->lock, flags);
+	for (stream_handle = 0;
+	     stream_handle < IPU_ISYS_MAX_STREAMS; stream_handle++)
+		if (!av->isys->pipes[stream_handle])
+			break;
+	if (stream_handle == IPU_ISYS_MAX_STREAMS) {
+		spin_unlock_irqrestore(&av->isys->lock, flags);
+		return -EBUSY;
+	}
+	av->isys->pipes[stream_handle] = ip;
+	ip->stream_handle = stream_handle;
+	spin_unlock_irqrestore(&av->isys->lock, flags);
+	return 0;
+}
+
+static void put_stream_handle(struct ipu_isys_video *av)
+{
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	unsigned long flags;
+
+	spin_lock_irqsave(&av->isys->lock, flags);
+	av->isys->pipes[ip->stream_handle] = NULL;
+	ip->stream_handle = -1;
+	spin_unlock_irqrestore(&av->isys->lock, flags);
+}
+
+static int get_external_facing_format(struct ipu_isys_pipeline *ip,
+				      struct v4l2_subdev_format *format)
+{
+	struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip);
+	struct v4l2_subdev *sd;
+	struct media_pad *external_facing;
+
+	if (!ip->external->entity) {
+		WARN_ON(1);
+		return -ENODEV;
+	}
+	sd = media_entity_to_v4l2_subdev(ip->external->entity);
+	external_facing = (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX,
+			   strlen(IPU_ISYS_ENTITY_PREFIX)) == 0) ?
+			   ip->external :
+			   media_entity_remote_pad(ip->external);
+	if (WARN_ON(!external_facing)) {
+		dev_warn(&av->isys->adev->dev,
+			 "no external facing pad --- driver bug?\n");
+		return -EINVAL;
+	}
+
+	format->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	format->pad = 0;
+	sd = media_entity_to_v4l2_subdev(external_facing->entity);
+
+	return v4l2_subdev_call(sd, pad, get_fmt, NULL, format);
+}
+
+static void short_packet_queue_destroy(struct ipu_isys_pipeline *ip)
+{
+	struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip);
+	unsigned long attrs;
+	unsigned int i;
+
+	attrs = DMA_ATTR_NON_CONSISTENT;
+	if (!ip->short_packet_bufs)
+		return;
+	for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) {
+		if (ip->short_packet_bufs[i].buffer)
+			dma_free_attrs(&av->isys->adev->dev,
+				       ip->short_packet_buffer_size,
+				       ip->short_packet_bufs[i].buffer,
+				       ip->short_packet_bufs[i].dma_addr,
+				       attrs);
+	}
+	kfree(ip->short_packet_bufs);
+	ip->short_packet_bufs = NULL;
+}
+
+static int short_packet_queue_setup(struct ipu_isys_pipeline *ip)
+{
+	struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip);
+	struct v4l2_subdev_format source_fmt = { 0 };
+	unsigned long attrs;
+	unsigned int i;
+	int rval;
+	size_t buf_size;
+
+	INIT_LIST_HEAD(&ip->pending_interlaced_bufs);
+	ip->cur_field = V4L2_FIELD_TOP;
+
+	if (ip->isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) {
+		ip->short_packet_trace_index = 0;
+		return 0;
+	}
+
+	rval = get_external_facing_format(ip, &source_fmt);
+	if (rval)
+		return rval;
+	buf_size = IPU_ISYS_SHORT_PACKET_BUF_SIZE(source_fmt.format.height);
+	ip->short_packet_buffer_size = buf_size;
+	ip->num_short_packet_lines =
+	    IPU_ISYS_SHORT_PACKET_PKT_LINES(source_fmt.format.height);
+
+	/* Initialize short packet queue. */
+	INIT_LIST_HEAD(&ip->short_packet_incoming);
+	INIT_LIST_HEAD(&ip->short_packet_active);
+	attrs = DMA_ATTR_NON_CONSISTENT;
+
+	ip->short_packet_bufs =
+	    kzalloc(sizeof(struct ipu_isys_private_buffer) *
+		    IPU_ISYS_SHORT_PACKET_BUFFER_NUM, GFP_KERNEL);
+	if (!ip->short_packet_bufs)
+		return -ENOMEM;
+
+	for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) {
+		struct ipu_isys_private_buffer *buf = &ip->short_packet_bufs[i];
+
+		buf->index = (unsigned int)i;
+		buf->ip = ip;
+		buf->ib.type = IPU_ISYS_SHORT_PACKET_BUFFER;
+		buf->bytesused = buf_size;
+		buf->buffer = dma_alloc_attrs(&av->isys->adev->dev, buf_size,
+					      &buf->dma_addr, GFP_KERNEL,
+					      attrs);
+		if (!buf->buffer) {
+			short_packet_queue_destroy(ip);
+			return -ENOMEM;
+		}
+		list_add(&buf->ib.head, &ip->short_packet_incoming);
+	}
+
+	return 0;
+}
+
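+/*
+ * Add an extra input/output pin pair to the stream configuration so
+ * that the firmware delivers MIPI short packets (frame/line sync) to
+ * a driver-owned buffer. This is used for interlaced streams to keep
+ * track of the current field.
+ */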
+static void
+csi_short_packet_prepare_fw_cfg(struct ipu_isys_pipeline *ip,
+				struct ipu_fw_isys_stream_cfg_data_abi *cfg)
+{
+	int input_pin = cfg->nof_input_pins++;
+	int output_pin = cfg->nof_output_pins++;
+	struct ipu_fw_isys_input_pin_info_abi *input_info =
+	    &cfg->input_pins[input_pin];
+	struct ipu_fw_isys_output_pin_info_abi *output_info =
+	    &cfg->output_pins[output_pin];
+	struct ipu_isys *isys = ip->isys;
+
+	/*
+	 * Setting dt to IPU_ISYS_SHORT_PACKET_GENERAL_DT causes the
+	 * MIPI receiver to receive all MIPI short packets.
+	 */
+	input_info->dt = IPU_ISYS_SHORT_PACKET_GENERAL_DT;
+	input_info->input_res.width = IPU_ISYS_SHORT_PACKET_WIDTH;
+	input_info->input_res.height = ip->num_short_packet_lines;
+
+	ip->output_pins[output_pin].pin_ready =
+	    ipu_isys_queue_short_packet_ready;
+	ip->output_pins[output_pin].aq = NULL;
+	ip->short_packet_output_pin = output_pin;
+
+	output_info->input_pin_id = input_pin;
+	output_info->output_res.width = IPU_ISYS_SHORT_PACKET_WIDTH;
+	output_info->output_res.height = ip->num_short_packet_lines;
+	output_info->stride = IPU_ISYS_SHORT_PACKET_WIDTH *
+	    IPU_ISYS_SHORT_PACKET_UNITSIZE;
+	output_info->pt = IPU_ISYS_SHORT_PACKET_PT;
+	output_info->ft = IPU_ISYS_SHORT_PACKET_FT;
+	output_info->send_irq = 1;
+	memset(output_info->ts_offsets, 0, sizeof(output_info->ts_offsets));
+	output_info->s2m_pixel_soc_pixel_remapping =
+	    S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+	output_info->csi_be_soc_pixel_remapping =
+	    CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+	output_info->sensor_type = isys->sensor_info.sensor_metadata;
+	output_info->snoopable = true;
+	output_info->error_handling_enable = false;
+}
+
+void
+ipu_isys_prepare_fw_cfg_default(struct ipu_isys_video *av,
+				struct ipu_fw_isys_stream_cfg_data_abi *cfg)
+{
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	struct ipu_isys_queue *aq = &av->aq;
+	struct ipu_fw_isys_output_pin_info_abi *pin_info;
+	struct ipu_isys *isys = av->isys;
+	unsigned int type_index, type;
+	int pin = cfg->nof_output_pins++;
+
+	aq->fw_output = pin;
+	ip->output_pins[pin].pin_ready = ipu_isys_queue_buf_ready;
+	ip->output_pins[pin].aq = aq;
+
+	pin_info = &cfg->output_pins[pin];
+	pin_info->input_pin_id = 0;
+	pin_info->output_res.width = av->mpix.width;
+	pin_info->output_res.height = av->mpix.height;
+
+	if (!av->pfmt->bpp_planar)
+		pin_info->stride = av->mpix.plane_fmt[0].bytesperline;
+	else
+		pin_info->stride = ALIGN(DIV_ROUND_UP(av->mpix.width *
+						      av->pfmt->bpp_planar,
+						      BITS_PER_BYTE),
+					 av->isys->line_align);
+
+	pin_info->pt = aq->css_pin_type;
+	pin_info->ft = av->pfmt->css_pixelformat;
+	pin_info->send_irq = 1;
+	memset(pin_info->ts_offsets, 0, sizeof(pin_info->ts_offsets));
+	pin_info->s2m_pixel_soc_pixel_remapping =
+	    S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+	pin_info->csi_be_soc_pixel_remapping =
+	    CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+	cfg->vc = 0;
+
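+	/*
+	 * Pick the next sensor type from the per-VC range and wrap
+	 * back to the start of the range once the end is passed.
+	 */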
+	switch (pin_info->pt) {
+	/* non-snoopable sensor data to PSYS */
+	case IPU_FW_ISYS_PIN_TYPE_RAW_NS:
+		type_index = IPU_FW_ISYS_VC1_SENSOR_DATA;
+		pin_info->sensor_type = isys->sensor_types[type_index]++;
+		pin_info->snoopable = false;
+		pin_info->error_handling_enable = false;
+		type = isys->sensor_types[type_index];
+		if (type > isys->sensor_info.vc1_data_end)
+			isys->sensor_types[type_index] =
+				isys->sensor_info.vc1_data_start;
+
+		break;
+	/* snoopable META/Stats data to CPU */
+	case IPU_FW_ISYS_PIN_TYPE_METADATA_0:
+	case IPU_FW_ISYS_PIN_TYPE_METADATA_1:
+		pin_info->sensor_type = isys->sensor_info.sensor_metadata;
+		pin_info->snoopable = true;
+		pin_info->error_handling_enable = false;
+		break;
+	/* snoopable sensor data to CPU */
+	case IPU_FW_ISYS_PIN_TYPE_MIPI:
+	case IPU_FW_ISYS_PIN_TYPE_RAW_SOC:
+		type_index = IPU_FW_ISYS_VC0_SENSOR_DATA;
+		pin_info->sensor_type = isys->sensor_types[type_index]++;
+		pin_info->snoopable = true;
+		pin_info->error_handling_enable = false;
+		type = isys->sensor_types[type_index];
+		if (type > isys->sensor_info.vc0_data_end)
+			isys->sensor_types[type_index] =
+				isys->sensor_info.vc0_data_start;
+
+		break;
+	default:
+		dev_err(&av->isys->adev->dev,
+			"Unknown pin type, use metadata type as default\n");
+
+		pin_info->sensor_type = isys->sensor_info.sensor_metadata;
+		pin_info->snoopable = true;
+		pin_info->error_handling_enable = false;
+	}
+}
+
+static unsigned int ipu_isys_get_compression_scheme(u32 code)
+{
+	switch (code) {
+	case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+	case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+	case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+	case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+		return 3;
+	default:
+		return 0;
+	}
+}
+
+static unsigned int get_comp_format(u32 code)
+{
+	unsigned int predictor = 0;	/* currently hard coded */
+	unsigned int udt = ipu_isys_mbus_code_to_mipi(code);
+	unsigned int scheme = ipu_isys_get_compression_scheme(code);
+
+	/* if the data type is not user-defined, return here */
+	if (udt < IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1) ||
+	    udt > IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(8))
+		return 0;
+
+	/*
+	 * For each user-defined type (1..8) there is a configuration
+	 * bitfield for decompression.
+	 *
+	 * | bit 3     | bits 2:0 |
+	 * | predictor | scheme   |
+	 * compression schemes:
+	 * 000 = no compression
+	 * 001 = 10 - 6 - 10
+	 * 010 = 10 - 7 - 10
+	 * 011 = 10 - 8 - 10
+	 * 100 = 12 - 6 - 12
+	 * 101 = 12 - 7 - 12
+	 * 110 = 12 - 8 - 12
+	 */
+
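+	/*
+	 * Worked example (illustration only, assuming consecutive
+	 * user-defined type codes): for user-defined type 3 with
+	 * predictor 0 and scheme 011 (10-8-10), the bitfield 0x3 is
+	 * shifted by (3 - 1) * 4 = 8 bits, giving 0x300.
+	 */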
+	return ((predictor << 3) | scheme) <<
+	    ((udt - IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1)) * 4);
+}
+
+/* Create stream and start it using the CSS FW ABI. */
+static int start_stream_firmware(struct ipu_isys_video *av,
+				 struct ipu_isys_buffer_list *bl)
+{
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	struct device *dev = &av->isys->adev->dev;
+	struct v4l2_subdev_selection sel_fmt = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = V4L2_SEL_TGT_CROP,
+		.pad = CSI2_BE_PAD_SOURCE,
+	};
+	struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg;
+	struct isys_fw_msgs *msg = NULL;
+	struct ipu_fw_isys_frame_buff_set_abi *buf = NULL;
+	struct ipu_isys_queue *aq;
+	struct ipu_isys_video *isl_av = NULL;
+	struct v4l2_subdev_format source_fmt = { 0 };
+	struct v4l2_subdev *be_sd = NULL;
+	struct media_pad *source_pad = media_entity_remote_pad(&av->pad);
+	struct ipu_fw_isys_cropping_abi *crop;
+	enum ipu_fw_isys_send_type send_type;
+	int rval, rvalout, tout;
+
+	rval = get_external_facing_format(ip, &source_fmt);
+	if (rval)
+		return rval;
+
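+	/*
+	 * Get a message buffer from the firmware message pool; its DMA
+	 * address is handed to the firmware together with the command.
+	 */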
+	msg = ipu_get_fw_msg_buf(ip);
+	if (!msg)
+		return -ENOMEM;
+
+	stream_cfg = to_stream_cfg_msg_buf(msg);
+	stream_cfg->compfmt = get_comp_format(source_fmt.format.code);
+	stream_cfg->input_pins[0].input_res.width = source_fmt.format.width;
+	stream_cfg->input_pins[0].input_res.height = source_fmt.format.height;
+	stream_cfg->input_pins[0].dt =
+	    ipu_isys_mbus_code_to_mipi(source_fmt.format.code);
+	stream_cfg->input_pins[0].mapped_dt = N_IPU_FW_ISYS_MIPI_DATA_TYPE;
+	stream_cfg->input_pins[0].mipi_decompression =
+	    IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION;
+	stream_cfg->input_pins[0].capture_mode =
+		IPU_FW_ISYS_CAPTURE_MODE_REGULAR;
+	if (ip->csi2 && !v4l2_ctrl_g_ctrl(ip->csi2->store_csi2_header))
+		stream_cfg->input_pins[0].mipi_store_mode =
+		    IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER;
+	else if (ip->tpg && !v4l2_ctrl_g_ctrl(ip->tpg->store_csi2_header))
+		stream_cfg->input_pins[0].mipi_store_mode =
+		    IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER;
+
+	stream_cfg->src = ip->source;
+	stream_cfg->vc = 0;
+	stream_cfg->isl_use = ip->isl_mode;
+	stream_cfg->nof_input_pins = 1;
+	stream_cfg->sensor_type = IPU_FW_ISYS_SENSOR_MODE_NORMAL;
+
+	/*
+	 * Only CSI2-BE and SOC BE has the capability to do crop,
+	 * so get the crop info from csi2-be or csi2-be-soc.
+	 */
+	if (ip->csi2_be) {
+		be_sd = &ip->csi2_be->asd.sd;
+	} else if (ip->csi2_be_soc) {
+		be_sd = &ip->csi2_be_soc->asd.sd;
+		if (source_pad)
+			sel_fmt.pad = source_pad->index;
+	}
+	crop = &stream_cfg->crop;
+	if (be_sd &&
+	    !v4l2_subdev_call(be_sd, pad, get_selection, NULL, &sel_fmt)) {
+		crop->left_offset = sel_fmt.r.left;
+		crop->top_offset = sel_fmt.r.top;
+		crop->right_offset = sel_fmt.r.left + sel_fmt.r.width;
+		crop->bottom_offset = sel_fmt.r.top + sel_fmt.r.height;
+
+	} else {
+		crop->right_offset = source_fmt.format.width;
+		crop->bottom_offset = source_fmt.format.height;
+	}
+
+	/*
+	 * If the CSI-2 backend's video node is part of the pipeline,
+	 * it must be arranged first in the output pin list. This is
+	 * most probably a firmware requirement.
+	 */
+	if (ip->isl_mode == IPU_ISL_CSI2_BE)
+		isl_av = &ip->csi2_be->av;
+
+	if (isl_av) {
+		struct ipu_isys_queue *safe;
+
+		list_for_each_entry_safe(aq, safe, &ip->queues, node) {
+			struct ipu_isys_video *__av =
+			    ipu_isys_queue_to_video(aq);
+
+			if (__av != isl_av)
+				continue;
+
+			list_del(&aq->node);
+			list_add(&aq->node, &ip->queues);
+			break;
+		}
+	}
+
+	list_for_each_entry(aq, &ip->queues, node) {
+		struct ipu_isys_video *__av = ipu_isys_queue_to_video(aq);
+
+		__av->prepare_fw_stream(__av, stream_cfg);
+	}
+
+	if (ip->interlaced && ip->isys->short_packet_source ==
+	    IPU_ISYS_SHORT_PACKET_FROM_RECEIVER)
+		csi_short_packet_prepare_fw_cfg(ip, stream_cfg);
+
+	ipu_fw_isys_dump_stream_cfg(dev, stream_cfg);
+
+	ip->nr_output_pins = stream_cfg->nof_output_pins;
+
+	rval = get_stream_handle(av);
+	if (rval) {
+		dev_dbg(dev, "Can't get stream_handle\n");
+		return rval;
+	}
+
+	reinit_completion(&ip->stream_open_completion);
+
+	ipu_fw_isys_set_params(stream_cfg);
+
+	rval = ipu_fw_isys_complex_cmd(av->isys,
+				       ip->stream_handle,
+				       stream_cfg,
+				       to_dma_addr(msg),
+				       sizeof(*stream_cfg),
+				       IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN);
+	ipu_put_fw_mgs_buf(av->isys, (uintptr_t)stream_cfg);
+
+	if (rval < 0) {
+		dev_err(dev, "can't open stream (%d)\n", rval);
+		goto out_put_stream_handle;
+	}
+
+	get_stream_opened(av);
+
+	tout = wait_for_completion_timeout(&ip->stream_open_completion,
+					   IPU_LIB_CALL_TIMEOUT_JIFFIES);
+	if (!tout) {
+		dev_err(dev, "stream open time out\n");
+		rval = -ETIMEDOUT;
+		goto out_put_stream_opened;
+	}
+	if (ip->error) {
+		dev_err(dev, "stream open error: %d\n", ip->error);
+		rval = -EIO;
+		goto out_put_stream_opened;
+	}
+	dev_dbg(dev, "start stream: open complete\n");
+
+	if (bl) {
+		msg = ipu_get_fw_msg_buf(ip);
+		if (!msg) {
+			rval = -ENOMEM;
+			goto out_put_stream_opened;
+		}
+		buf = to_frame_msg_buf(msg);
+	}
+
+	if (bl) {
+		ipu_isys_buffer_to_fw_frame_buff(buf, ip, bl);
+		ipu_isys_buffer_list_queue(bl,
+					   IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+	}
+
+	reinit_completion(&ip->stream_start_completion);
+
+	if (bl) {
+		send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE;
+		ipu_fw_isys_dump_frame_buff_set(dev, buf,
+						stream_cfg->nof_output_pins);
+		rval = ipu_fw_isys_complex_cmd(av->isys,
+					       ip->stream_handle,
+					       buf, to_dma_addr(msg),
+					       sizeof(*buf),
+					       send_type);
+		ipu_put_fw_mgs_buf(av->isys, (uintptr_t)buf);
+	} else {
+		send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_START;
+		rval = ipu_fw_isys_simple_cmd(av->isys,
+					      ip->stream_handle,
+					      send_type);
+	}
+
+	if (rval < 0) {
+		dev_err(dev, "can't start streaming (%d)\n", rval);
+		goto out_stream_close;
+	}
+
+	tout = wait_for_completion_timeout(&ip->stream_start_completion,
+					   IPU_LIB_CALL_TIMEOUT_JIFFIES);
+	if (!tout) {
+		dev_err(dev, "stream start time out\n");
+		rval = -ETIMEDOUT;
+		goto out_stream_close;
+	}
+	if (ip->error) {
+		dev_err(dev, "stream start error: %d\n", ip->error);
+		rval = -EIO;
+		goto out_stream_close;
+	}
+	dev_dbg(dev, "start stream: complete\n");
+
+	return 0;
+
+out_stream_close:
+	reinit_completion(&ip->stream_close_completion);
+
+	rvalout = ipu_fw_isys_simple_cmd(av->isys,
+					 ip->stream_handle,
+					 IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE);
+	if (rvalout < 0) {
+		dev_dbg(dev, "can't close stream (%d)\n", rvalout);
+		goto out_put_stream_opened;
+	}
+
+	tout = wait_for_completion_timeout(&ip->stream_close_completion,
+					   IPU_LIB_CALL_TIMEOUT_JIFFIES);
+	if (!tout)
+		dev_err(dev, "stream close time out\n");
+	else if (ip->error)
+		dev_err(dev, "stream close error: %d\n", ip->error);
+	else
+		dev_dbg(dev, "stream close complete\n");
+
+out_put_stream_opened:
+	put_stream_opened(av);
+
+out_put_stream_handle:
+	put_stream_handle(av);
+	return rval;
+}
+
+static void stop_streaming_firmware(struct ipu_isys_video *av)
+{
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	struct device *dev = &av->isys->adev->dev;
+	int rval, tout;
+	enum ipu_fw_isys_send_type send_type =
+		IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH;
+
+	reinit_completion(&ip->stream_stop_completion);
+
+	rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle,
+				      send_type);
+
+	if (rval < 0) {
+		dev_err(dev, "can't stop stream (%d)\n", rval);
+		return;
+	}
+
+	tout = wait_for_completion_timeout(&ip->stream_stop_completion,
+					   IPU_LIB_CALL_TIMEOUT_JIFFIES);
+	if (!tout)
+		dev_err(dev, "stream stop time out\n");
+	else if (ip->error)
+		dev_err(dev, "stream stop error: %d\n", ip->error);
+	else
+		dev_dbg(dev, "stop stream: complete\n");
+}
+
+static void close_streaming_firmware(struct ipu_isys_video *av)
+{
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	struct device *dev = &av->isys->adev->dev;
+	int rval, tout;
+
+	reinit_completion(&ip->stream_close_completion);
+
+	rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle,
+				      IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE);
+	if (rval < 0) {
+		dev_err(dev, "can't close stream (%d)\n", rval);
+		return;
+	}
+
+	tout = wait_for_completion_timeout(&ip->stream_close_completion,
+					   IPU_LIB_CALL_TIMEOUT_JIFFIES);
+	if (!tout)
+		dev_err(dev, "stream close time out\n");
+	else if (ip->error)
+		dev_err(dev, "stream close error: %d\n", ip->error);
+	else
+		dev_dbg(dev, "close stream: complete\n");
+
+	put_stream_opened(av);
+	put_stream_handle(av);
+}
+
+void
+ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip,
+				void (*capture_done)
+				 (struct ipu_isys_pipeline *ip,
+				  struct ipu_fw_isys_resp_info_abi *resp))
+{
+	unsigned int i;
+
+	/*
+	 * Different instances may register the same function. Add it
+	 * only once.
+	 */
+	for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++)
+		if (ip->capture_done[i] == capture_done)
+			return;
+
+	for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) {
+		if (!ip->capture_done[i]) {
+			ip->capture_done[i] = capture_done;
+			return;
+		}
+	}
+	/*
+	 * Too many callbacks registered. Increasing the
+	 * IPU_NUM_CAPTURE_DONE constant is probably required.
+	 */
+	WARN_ON(1);
+}
+
+int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av,
+				     unsigned int state)
+{
+	struct ipu_isys *isys = av->isys;
+	struct device *dev = &isys->adev->dev;
+	struct ipu_isys_pipeline *ip;
+	struct media_graph graph;
+	struct media_entity *entity;
+	struct media_device *mdev = &av->isys->media_dev;
+	int rval;
+	unsigned int i;
+
+	dev_dbg(dev, "prepare stream: %d\n", state);
+
+	if (!state) {
+		ip = to_ipu_isys_pipeline(av->vdev.entity.pipe);
+
+		if (ip->interlaced && isys->short_packet_source ==
+		    IPU_ISYS_SHORT_PACKET_FROM_RECEIVER)
+			short_packet_queue_destroy(ip);
+		media_pipeline_stop(&av->vdev.entity);
+		media_entity_enum_cleanup(&ip->entity_enum);
+		return 0;
+	}
+
+	ip = &av->ip;
+
+	WARN_ON(ip->nr_streaming);
+	ip->has_sof = false;
+	ip->nr_queues = 0;
+	ip->external = NULL;
+	atomic_set(&ip->sequence, 0);
+	ip->isl_mode = IPU_ISL_OFF;
+
+	for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++)
+		ip->capture_done[i] = NULL;
+	ip->csi2_be = NULL;
+	ip->csi2_be_soc = NULL;
+	ip->csi2 = NULL;
+	ip->tpg = NULL;
+	ip->seq_index = 0;
+	memset(ip->seq, 0, sizeof(ip->seq));
+
+	WARN_ON(!list_empty(&ip->queues));
+	ip->interlaced = false;
+
+	rval = media_entity_enum_init(&ip->entity_enum, mdev);
+	if (rval)
+		return rval;
+
+	rval = media_pipeline_start(&av->vdev.entity, &ip->pipe);
+	if (rval < 0) {
+		dev_dbg(dev, "pipeline start failed\n");
+		goto out_enum_cleanup;
+	}
+
+	if (!ip->external) {
+		dev_err(dev, "no external entity set! Driver bug?\n");
+		rval = -EINVAL;
+		goto out_pipeline_stop;
+	}
+
+	rval = media_graph_walk_init(&graph, mdev);
+	if (rval)
+		goto out_pipeline_stop;
+
+	/* Gather all entities in the graph. */
+	mutex_lock(&mdev->graph_mutex);
+	media_graph_walk_start(&graph, &av->vdev.entity);
+	while ((entity = media_graph_walk_next(&graph)))
+		media_entity_enum_set(&ip->entity_enum, entity);
+
+	mutex_unlock(&mdev->graph_mutex);
+
+	media_graph_walk_cleanup(&graph);
+
+	if (ip->interlaced) {
+		rval = short_packet_queue_setup(ip);
+		if (rval) {
+			dev_err(&isys->adev->dev,
+				"Failed to setup short packet buffer.\n");
+			goto out_pipeline_stop;
+		}
+	}
+
+	dev_dbg(dev, "prepare stream: external entity %s\n",
+		ip->external->entity->name);
+
+	return 0;
+
+out_pipeline_stop:
+	media_pipeline_stop(&av->vdev.entity);
+
+out_enum_cleanup:
+	media_entity_enum_cleanup(&ip->entity_enum);
+
+	return rval;
+}
+
+static void configure_stream_watermark(struct ipu_isys_video *av)
+{
+	u32 vblank, hblank;
+	u64 pixel_rate;
+	int ret = 0;
+	struct v4l2_subdev *esd;
+	struct v4l2_ctrl *ctrl;
+	struct ipu_isys_pipeline *ip;
+	struct isys_iwake_watermark *iwake_watermark;
+	struct v4l2_control vb = { .id = V4L2_CID_VBLANK, .value = 0 };
+	struct v4l2_control hb = { .id = V4L2_CID_HBLANK, .value = 0 };
+
+	ip = to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	if (!ip->external->entity) {
+		WARN_ON(1);
+		return;
+	}
+	esd = media_entity_to_v4l2_subdev(ip->external->entity);
+
+	av->watermark->width = av->mpix.width;
+	av->watermark->height = av->mpix.height;
+
+	ret = v4l2_g_ctrl(esd->ctrl_handler, &vb);
+	if (!ret && vb.value >= 0)
+		vblank = vb.value;
+	else
+		vblank = 0;
+
+	ret = v4l2_g_ctrl(esd->ctrl_handler, &hb);
+	if (!ret && hb.value >= 0)
+		hblank = hb.value;
+	else
+		hblank = 0;
+
+	ctrl = v4l2_ctrl_find(esd->ctrl_handler, V4L2_CID_PIXEL_RATE);
+
+	if (!ctrl)
+		pixel_rate = DEFAULT_PIXEL_RATE;
+	else
+		pixel_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
+
+	av->watermark->vblank = vblank;
+	av->watermark->hblank = hblank;
+	av->watermark->pixel_rate = pixel_rate;
+	if (!pixel_rate) {
+		iwake_watermark = av->isys->iwake_watermark;
+		mutex_lock(&iwake_watermark->mutex);
+		iwake_watermark->force_iwake_disable = true;
+		mutex_unlock(&iwake_watermark->mutex);
+		WARN(1, "%s Invalid pixel_rate, disable iwake.\n", __func__);
+		return;
+	}
+}
+
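+/*
+ * Worked example (illustrative numbers only, assuming IPU6 with 2KB
+ * SRAM pages): width 1920, hblank 200 and pixel_rate 200MHz give
+ * pixels_per_line = 2120, line_time_ns = 2120000 / 200 = 10600,
+ * bytes_per_line = 3840, pages_per_line = DIV_ROUND_UP(3840, 2048) = 2,
+ * pb_bytes_per_line = 4096 and stream_data_rate = 4096000 / 10600,
+ * i.e. roughly 386 MB/s.
+ */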
+static void calculate_stream_datarate(struct video_stream_watermark *watermark)
+{
+	u64 pixels_per_line, bytes_per_line, line_time_ns;
+	u64 pages_per_line, pb_bytes_per_line, stream_data_rate;
+	u16 sram_granulrity_shift = (ipu_ver == IPU_VER_6) ?
+		IPU6_SRAM_GRANULRITY_SHIFT : IPU6SE_SRAM_GRANULRITY_SHIFT;
+	u16 sram_granulrity_size = (ipu_ver == IPU_VER_6) ?
+		IPU6_SRAM_GRANULRITY_SIZE : IPU6SE_SRAM_GRANULRITY_SIZE;
+
+	pixels_per_line = watermark->width + watermark->hblank;
+	line_time_ns =
+		pixels_per_line * 1000 / (watermark->pixel_rate / 1000000);
+	/* 2 bytes per Bayer pixel */
+	bytes_per_line = watermark->width << 1;
+	/* bytes to IS pixel buffer pages, rounded up to full pages */
+	pages_per_line = DIV_ROUND_UP(bytes_per_line,
+				      sram_granulrity_size);
+	pb_bytes_per_line = pages_per_line << sram_granulrity_shift;
+
+	/* data rate MB/s */
+	stream_data_rate = (pb_bytes_per_line * 1000) / line_time_ns;
+	watermark->stream_data_rate = stream_data_rate;
+}
+
+static void update_stream_watermark(struct ipu_isys_video *av, bool state)
+{
+	struct isys_iwake_watermark *iwake_watermark;
+
+	iwake_watermark = av->isys->iwake_watermark;
+	if (state) {
+		calculate_stream_datarate(av->watermark);
+		mutex_lock(&iwake_watermark->mutex);
+		list_add(&av->watermark->stream_node,
+			 &iwake_watermark->video_list);
+		mutex_unlock(&iwake_watermark->mutex);
+	} else {
+		av->watermark->stream_data_rate = 0;
+		mutex_lock(&iwake_watermark->mutex);
+		list_del(&av->watermark->stream_node);
+		mutex_unlock(&iwake_watermark->mutex);
+	}
+	update_watermark_setting(av->isys);
+}
+
+int ipu_isys_video_set_streaming(struct ipu_isys_video *av,
+				 unsigned int state,
+				 struct ipu_isys_buffer_list *bl)
+{
+	struct device *dev = &av->isys->adev->dev;
+	struct media_device *mdev = av->vdev.entity.graph_obj.mdev;
+	struct media_entity_enum entities;
+
+	struct media_entity *entity, *entity2;
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	struct v4l2_subdev *sd, *esd;
+	int rval = 0;
+
+	dev_dbg(dev, "set stream: %d\n", state);
+
+	if (!ip->external->entity) {
+		WARN_ON(1);
+		return -ENODEV;
+	}
+	esd = media_entity_to_v4l2_subdev(ip->external->entity);
+
+	if (state) {
+		rval = media_graph_walk_init(&ip->graph, mdev);
+		if (rval)
+			return rval;
+		rval = media_entity_enum_init(&entities, mdev);
+		if (rval)
+			goto out_media_entity_graph_init;
+	}
+
+	if (!state) {
+		stop_streaming_firmware(av);
+
+		/* stop external sub-device now. */
+		dev_info(dev, "stream off %s\n", ip->external->entity->name);
+
+		v4l2_subdev_call(esd, video, s_stream, state);
+	}
+
+	mutex_lock(&mdev->graph_mutex);
+
+	media_graph_walk_start(&ip->graph,
+			       &av->vdev.entity);
+
+	while ((entity = media_graph_walk_next(&ip->graph))) {
+		sd = media_entity_to_v4l2_subdev(entity);
+
+		/* Non-subdev nodes can be safely ignored here. */
+		if (!is_media_entity_v4l2_subdev(entity))
+			continue;
+
+		/* Don't start truly external devices quite yet. */
+		if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX,
+			    strlen(IPU_ISYS_ENTITY_PREFIX)) != 0 ||
+		    ip->external->entity == entity)
+			continue;
+
+		dev_dbg(dev, "s_stream %s entity %s\n", state ? "on" : "off",
+			entity->name);
+		rval = v4l2_subdev_call(sd, video, s_stream, state);
+		if (!state)
+			continue;
+		if (rval && rval != -ENOIOCTLCMD) {
+			mutex_unlock(&mdev->graph_mutex);
+			goto out_media_entity_stop_streaming;
+		}
+
+		media_entity_enum_set(&entities, entity);
+	}
+
+	mutex_unlock(&mdev->graph_mutex);
+
+	if (av->aq.css_pin_type == IPU_FW_ISYS_PIN_TYPE_RAW_SOC) {
+		if (state)
+			configure_stream_watermark(av);
+		update_stream_watermark(av, state);
+	}
+
+	/* Start the firmware stream or close it, depending on state. */
+	if (state) {
+		rval = start_stream_firmware(av, bl);
+		if (rval)
+			goto out_media_entity_stop_streaming;
+
+		dev_dbg(dev, "set stream: source %d, stream_handle %d\n",
+			ip->source, ip->stream_handle);
+
+		/* Start external sub-device now. */
+		dev_info(dev, "stream on %s\n", ip->external->entity->name);
+
+		rval = v4l2_subdev_call(esd, video, s_stream, state);
+		if (rval)
+			goto out_media_entity_stop_streaming_firmware;
+	} else {
+		close_streaming_firmware(av);
+	}
+
+	if (state)
+		media_entity_enum_cleanup(&entities);
+	else
+		media_graph_walk_cleanup(&ip->graph);
+	av->streaming = state;
+
+	return 0;
+
+out_media_entity_stop_streaming_firmware:
+	stop_streaming_firmware(av);
+
+out_media_entity_stop_streaming:
+	mutex_lock(&mdev->graph_mutex);
+
+	media_graph_walk_start(&ip->graph,
+			       &av->vdev.entity);
+
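+	/*
+	 * Walk the graph again and stop every entity that was already
+	 * started, up to but not including the one that failed.
+	 */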
+	while (state && (entity2 = media_graph_walk_next(&ip->graph)) &&
+	       entity2 != entity) {
+		sd = media_entity_to_v4l2_subdev(entity2);
+
+		if (!media_entity_enum_test(&entities, entity2))
+			continue;
+
+		v4l2_subdev_call(sd, video, s_stream, 0);
+	}
+
+	mutex_unlock(&mdev->graph_mutex);
+
+	media_entity_enum_cleanup(&entities);
+
+out_media_entity_graph_init:
+	media_graph_walk_cleanup(&ip->graph);
+
+	return rval;
+}
+
+#ifdef CONFIG_COMPAT
+static long ipu_isys_compat_ioctl(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	long ret = -ENOIOCTLCMD;
+	void __user *up = compat_ptr(arg);
+
+	/*
+	 * At present there are no private IOCTLs that need compat
+	 * handling.
+	 */
+	if (file->f_op->unlocked_ioctl)
+		ret = file->f_op->unlocked_ioctl(file, cmd, (unsigned long)up);
+
+	return ret;
+}
+#endif
+
+static const struct v4l2_ioctl_ops ioctl_ops_mplane = {
+	.vidioc_querycap = ipu_isys_vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap = ipu_isys_vidioc_enum_fmt,
+	.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane,
+	.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane,
+	.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+	.vidioc_reqbufs = vb2_ioctl_reqbufs,
+	.vidioc_create_bufs = vb2_ioctl_create_bufs,
+	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+	.vidioc_querybuf = vb2_ioctl_querybuf,
+	.vidioc_qbuf = vb2_ioctl_qbuf,
+	.vidioc_dqbuf = vb2_ioctl_dqbuf,
+	.vidioc_streamon = vb2_ioctl_streamon,
+	.vidioc_streamoff = vb2_ioctl_streamoff,
+	.vidioc_expbuf = vb2_ioctl_expbuf,
+	.vidioc_default = ipu_isys_vidioc_private,
+	.vidioc_enum_input = vidioc_enum_input,
+	.vidioc_g_input = vidioc_g_input,
+	.vidioc_s_input = vidioc_s_input,
+};
+
+static const struct media_entity_operations entity_ops = {
+	.link_validate = link_validate,
+};
+
+static const struct v4l2_file_operations isys_fops = {
+	.owner = THIS_MODULE,
+	.poll = vb2_fop_poll,
+	.unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = ipu_isys_compat_ioctl,
+#endif
+	.mmap = vb2_fop_mmap,
+	.open = video_open,
+	.release = video_release,
+};
+
+/*
+ * Initialise everything related to the video buffer queue, the video
+ * node and the related media entity. The caller is expected to assign
+ * the isys field and to set the name of the video device.
+ */
+int ipu_isys_video_init(struct ipu_isys_video *av,
+			struct media_entity *entity,
+			unsigned int pad, unsigned long pad_flags,
+			unsigned int flags)
+{
+	const struct v4l2_ioctl_ops *ioctl_ops = NULL;
+	int rval;
+
+	mutex_init(&av->mutex);
+	init_completion(&av->ip.stream_open_completion);
+	init_completion(&av->ip.stream_close_completion);
+	init_completion(&av->ip.stream_start_completion);
+	init_completion(&av->ip.stream_stop_completion);
+	INIT_LIST_HEAD(&av->ip.queues);
+	spin_lock_init(&av->ip.short_packet_queue_lock);
+	av->ip.isys = av->isys;
+
+	if (!av->watermark) {
+		av->watermark = kzalloc(sizeof(*av->watermark), GFP_KERNEL);
+		if (!av->watermark) {
+			rval = -ENOMEM;
+			goto out_mutex_destroy;
+		}
+	}
+
+	av->vdev.device_caps = V4L2_CAP_STREAMING;
+	if (pad_flags & MEDIA_PAD_FL_SINK) {
+		av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+		ioctl_ops = &ioctl_ops_mplane;
+		av->vdev.device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+		av->vdev.vfl_dir = VFL_DIR_RX;
+	} else {
+		av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+		av->vdev.vfl_dir = VFL_DIR_TX;
+		av->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+	}
+	rval = ipu_isys_queue_init(&av->aq);
+	if (rval)
+		goto out_mutex_destroy;
+
+	av->pad.flags = pad_flags | MEDIA_PAD_FL_MUST_CONNECT;
+	rval = media_entity_pads_init(&av->vdev.entity, 1, &av->pad);
+	if (rval)
+		goto out_ipu_isys_queue_cleanup;
+
+	av->vdev.entity.ops = &entity_ops;
+	av->vdev.release = video_device_release_empty;
+	av->vdev.fops = &isys_fops;
+	av->vdev.v4l2_dev = &av->isys->v4l2_dev;
+	if (!av->vdev.ioctl_ops)
+		av->vdev.ioctl_ops = ioctl_ops;
+	av->vdev.queue = &av->aq.vbq;
+	av->vdev.lock = &av->mutex;
+	set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags);
+	video_set_drvdata(&av->vdev, av);
+
+	mutex_lock(&av->mutex);
+
+	rval = video_register_device(&av->vdev, VFL_TYPE_GRABBER, -1);
+	if (rval)
+		goto out_media_entity_cleanup;
+
+	if (pad_flags & MEDIA_PAD_FL_SINK)
+		rval = media_create_pad_link(entity, pad,
+					     &av->vdev.entity, 0, flags);
+	else
+		rval = media_create_pad_link(&av->vdev.entity, 0, entity,
+					     pad, flags);
+	if (rval) {
+		dev_info(&av->isys->adev->dev, "can't create link\n");
+		goto out_media_entity_cleanup;
+	}
+
+	av->pfmt = av->try_fmt_vid_mplane(av, &av->mpix);
+
+	mutex_unlock(&av->mutex);
+
+	return rval;
+
+out_media_entity_cleanup:
+	video_unregister_device(&av->vdev);
+	mutex_unlock(&av->mutex);
+	media_entity_cleanup(&av->vdev.entity);
+
+out_ipu_isys_queue_cleanup:
+	ipu_isys_queue_cleanup(&av->aq);
+
+out_mutex_destroy:
+	kfree(av->watermark);
+	mutex_destroy(&av->mutex);
+
+	return rval;
+}
+
+void ipu_isys_video_cleanup(struct ipu_isys_video *av)
+{
+	kfree(av->watermark);
+	video_unregister_device(&av->vdev);
+	media_entity_cleanup(&av->vdev.entity);
+	mutex_destroy(&av->mutex);
+	ipu_isys_queue_cleanup(&av->aq);
+}
diff --git a/drivers/media/pci/intel/ipu-isys-video.h b/drivers/media/pci/intel/ipu-isys-video.h
new file mode 100644
index 000000000000..ebfc30a5fae9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys-video.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_VIDEO_H
+#define IPU_ISYS_VIDEO_H
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "ipu-isys-queue.h"
+
+#define IPU_ISYS_OUTPUT_PINS 11
+#define IPU_NUM_CAPTURE_DONE 2
+#define IPU_ISYS_MAX_PARALLEL_SOF 2
+
+struct ipu_isys;
+struct ipu_isys_csi2_be_soc;
+struct ipu_fw_isys_stream_cfg_data_abi;
+
+struct ipu_isys_pixelformat {
+	u32 pixelformat;
+	u32 bpp;
+	u32 bpp_packed;
+	u32 bpp_planar;
+	u32 code;
+	u32 css_pixelformat;
+};
+
+struct sequence_info {
+	unsigned int sequence;
+	u64 timestamp;
+};
+
+struct output_pin_data {
+	void (*pin_ready)(struct ipu_isys_pipeline *ip,
+			  struct ipu_fw_isys_resp_info_abi *info);
+	struct ipu_isys_queue *aq;
+};
+
+struct ipu_isys_pipeline {
+	struct media_pipeline pipe;
+	struct media_pad *external;
+	atomic_t sequence;
+	unsigned int seq_index;
+	struct sequence_info seq[IPU_ISYS_MAX_PARALLEL_SOF];
+	int source;	/* SSI stream source */
+	int stream_handle;	/* stream handle for CSS API */
+	unsigned int nr_output_pins;	/* How many firmware pins? */
+	enum ipu_isl_mode isl_mode;
+	struct ipu_isys_csi2_be *csi2_be;
+	struct ipu_isys_csi2_be_soc *csi2_be_soc;
+	struct ipu_isys_csi2 *csi2;
+	struct ipu_isys_tpg *tpg;
+	/*
+	 * Number of capture queues, write access serialised using struct
+	 * ipu_isys.stream_mutex
+	 */
+	int nr_queues;
+	int nr_streaming;	/* Number of capture queues streaming */
+	int streaming;	/* Has streaming been really started? */
+	struct list_head queues;
+	struct completion stream_open_completion;
+	struct completion stream_close_completion;
+	struct completion stream_start_completion;
+	struct completion stream_stop_completion;
+	struct ipu_isys *isys;
+
+	void (*capture_done[IPU_NUM_CAPTURE_DONE])
+	 (struct ipu_isys_pipeline *ip,
+	  struct ipu_fw_isys_resp_info_abi *resp);
+	struct output_pin_data output_pins[IPU_ISYS_OUTPUT_PINS];
+	bool has_sof;
+	bool interlaced;
+	int error;
+	struct ipu_isys_private_buffer *short_packet_bufs;
+	size_t short_packet_buffer_size;
+	unsigned int num_short_packet_lines;
+	unsigned int short_packet_output_pin;
+	unsigned int cur_field;
+	struct list_head short_packet_incoming;
+	struct list_head short_packet_active;
+	/* Serialize access to short packet active and incoming lists */
+	spinlock_t short_packet_queue_lock;
+	struct list_head pending_interlaced_bufs;
+	unsigned int short_packet_trace_index;
+	struct media_graph graph;
+	struct media_entity_enum entity_enum;
+};
+
+#define to_ipu_isys_pipeline(__pipe)				\
+	container_of((__pipe), struct ipu_isys_pipeline, pipe)
+
+struct video_stream_watermark {
+	u32 width;
+	u32 height;
+	u32 vblank;
+	u32 hblank;
+	u32 frame_rate;
+	u64 pixel_rate;
+	u64 stream_data_rate;
+	struct list_head stream_node;
+};
+
+struct ipu_isys_video {
+	/* Serialise access to other fields in the struct. */
+	struct mutex mutex;
+	struct media_pad pad;
+	struct video_device vdev;
+	struct v4l2_pix_format_mplane mpix;
+	const struct ipu_isys_pixelformat *pfmts;
+	const struct ipu_isys_pixelformat *pfmt;
+	struct ipu_isys_queue aq;
+	struct ipu_isys *isys;
+	struct ipu_isys_pipeline ip;
+	unsigned int streaming;
+	bool packed;
+	unsigned int line_header_length;	/* bits */
+	unsigned int line_footer_length;	/* bits */
+
+	struct video_stream_watermark *watermark;
+
+	const struct ipu_isys_pixelformat *
+		(*try_fmt_vid_mplane)(struct ipu_isys_video *av,
+				      struct v4l2_pix_format_mplane *mpix);
+	void (*prepare_fw_stream)(struct ipu_isys_video *av,
+				  struct ipu_fw_isys_stream_cfg_data_abi *cfg);
+};
+
+#define ipu_isys_queue_to_video(__aq) \
+	container_of(__aq, struct ipu_isys_video, aq)
+
+extern const struct ipu_isys_pixelformat ipu_isys_pfmts[];
+extern const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[];
+extern const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[];
+
+const struct ipu_isys_pixelformat *
+ipu_isys_get_pixelformat(struct ipu_isys_video *av, u32 pixelformat);
+
+int ipu_isys_vidioc_querycap(struct file *file, void *fh,
+			     struct v4l2_capability *cap);
+
+int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh,
+			     struct v4l2_fmtdesc *f);
+
+const struct ipu_isys_pixelformat *
+ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av,
+					  struct v4l2_pix_format_mplane *mpix);
+
+const struct ipu_isys_pixelformat *
+ipu_isys_video_try_fmt_vid_mplane(struct ipu_isys_video *av,
+				  struct v4l2_pix_format_mplane *mpix,
+				  int store_csi2_header);
+
+void
+ipu_isys_prepare_fw_cfg_default(struct ipu_isys_video *av,
+				struct ipu_fw_isys_stream_cfg_data_abi *cfg);
+int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av,
+				     unsigned int state);
+int ipu_isys_video_set_streaming(struct ipu_isys_video *av, unsigned int state,
+				 struct ipu_isys_buffer_list *bl);
+int ipu_isys_video_init(struct ipu_isys_video *av, struct media_entity *source,
+			unsigned int source_pad, unsigned long pad_flags,
+			unsigned int flags);
+void ipu_isys_video_cleanup(struct ipu_isys_video *av);
+void ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip,
+				     void (*capture_done)
+				      (struct ipu_isys_pipeline *ip,
+				       struct ipu_fw_isys_resp_info_abi *resp));
+
+#endif /* IPU_ISYS_VIDEO_H */
diff --git a/drivers/media/pci/intel/ipu-isys.c b/drivers/media/pci/intel/ipu-isys.c
new file mode 100644
index 000000000000..c1657ed95599
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys.c
@@ -0,0 +1,1435 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/version.h>
+
+#include <media/ipu-isys.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-async.h>
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-cpd.h"
+#include "ipu-mmu.h"
+#include "ipu-dma.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2.h"
+#include "ipu-isys-tpg.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-regs.h"
+#include "ipu-buttress.h"
+#include "ipu-platform.h"
+#include "ipu-platform-buttress-regs.h"
+
+#define ISYS_PM_QOS_VALUE	300
+
+#define IPU_BUTTRESS_FABIC_CONTROL	    0x68
+#define GDA_ENABLE_IWAKE_INDEX		    2
+#define GDA_IWAKE_THRESHOLD_INDEX           1
+#define GDA_IRQ_CRITICAL_THRESHOLD_INDEX    0
+
+/* LTR & DID values are at most 10 bits wide */
+#define LTR_DID_VAL_MAX		1023
+#define LTR_DEFAULT_VALUE	0x64503C19
+#define FILL_TIME_DEFAULT_VALUE 0xFFF0783C
+#define LTR_DID_PKGC_2R		20
+#define LTR_DID_PKGC_8		100
+#define LTR_SCALE_DEFAULT	5
+#define LTR_SCALE_1024NS	2
+#define REG_PKGC_PMON_CFG	0xB00
+
+#define VAL_PKGC_PMON_CFG_RESET 0x38
+#define VAL_PKGC_PMON_CFG_START 0x7
+
+#define IS_PIXEL_BUFFER_PAGES		0x80
+/* BIOS provides the driver with the LTR and threshold information for
+ * the IPU. The IS pixel buffer is 256KB and MaxSRAMSize is 200KB on
+ * IPU6.
+ */
+#define IPU6_MAX_SRAM_SIZE			(200 << 10)
+/* The IS pixel buffer is 128KB and MaxSRAMSize is 96KB on IPU6SE.
+ */
+#define IPU6SE_MAX_SRAM_SIZE			(96 << 10)
+/* When iwake mode is disabled, the critical threshold is statically
+ * set to 75% of the IS pixel buffer: criticalThreshold = (128 * 3) / 4.
+ */
+#define CRITICAL_THRESHOLD_IWAKE_DISABLE	(IS_PIXEL_BUFFER_PAGES * 3 / 4)
+
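+/*
+ * Bit layout of the IPU_BUTTRESS_FABIC_CONTROL register: LTR and DID
+ * values with their scales plus the power control override bits.
+ */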
+union fabric_ctrl {
+	struct {
+		u16 ltr_val   : 10;
+		u16 ltr_scale : 3;
+		u16 RSVD1     : 3;
+		u16 did_val   : 10;
+		u16 did_scale : 3;
+		u16 RSVD2     : 1;
+		u16 keep_power_in_D0   : 1;
+		u16 keep_power_override : 1;
+	} bits;
+	u32 value;
+};
+
+enum ltr_did_type {
+	LTR_IWAKE_ON,
+	LTR_IWAKE_OFF,
+	LTR_ISYS_ON,
+	LTR_ISYS_OFF,
+	LTR_TYPE_MAX
+};
+
+static int
+isys_complete_ext_device_registration(struct ipu_isys *isys,
+				      struct v4l2_subdev *sd,
+				      struct ipu_isys_csi2_config *csi2)
+{
+	unsigned int i;
+	int rval;
+
+	v4l2_set_subdev_hostdata(sd, csi2);
+
+	for (i = 0; i < sd->entity.num_pads; i++) {
+		if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+			break;
+	}
+
+	if (i == sd->entity.num_pads) {
+		dev_warn(&isys->adev->dev,
+			 "no source pad in external entity\n");
+		rval = -ENOENT;
+		goto unregister_subdev;
+	}
+
+	rval = media_create_pad_link(&sd->entity, i,
+				     &isys->csi2[csi2->port].asd.sd.entity,
+				     0, 0);
+	if (rval) {
+		dev_warn(&isys->adev->dev, "can't create link\n");
+		goto unregister_subdev;
+	}
+
+	isys->csi2[csi2->port].nlanes = csi2->nlanes;
+	return 0;
+
+unregister_subdev:
+	v4l2_device_unregister_subdev(sd);
+	return rval;
+}
+
+static void isys_unregister_subdevices(struct ipu_isys *isys)
+{
+	const struct ipu_isys_internal_tpg_pdata *tpg =
+	    &isys->pdata->ipdata->tpg;
+	const struct ipu_isys_internal_csi2_pdata *csi2 =
+	    &isys->pdata->ipdata->csi2;
+	unsigned int i;
+
+	ipu_isys_csi2_be_cleanup(&isys->csi2_be);
+	for (i = 0; i < NR_OF_CSI2_BE_SOC_DEV; i++)
+		ipu_isys_csi2_be_soc_cleanup(&isys->csi2_be_soc[i]);
+
+	for (i = 0; i < tpg->ntpgs; i++)
+		ipu_isys_tpg_cleanup(&isys->tpg[i]);
+
+	for (i = 0; i < csi2->nports; i++)
+		ipu_isys_csi2_cleanup(&isys->csi2[i]);
+}
+
+static int isys_register_subdevices(struct ipu_isys *isys)
+{
+	const struct ipu_isys_internal_tpg_pdata *tpg =
+	    &isys->pdata->ipdata->tpg;
+	const struct ipu_isys_internal_csi2_pdata *csi2 =
+	    &isys->pdata->ipdata->csi2;
+	struct ipu_isys_csi2_be_soc *csi2_be_soc;
+	unsigned int i, k;
+	int rval;
+
+	isys->csi2 = devm_kcalloc(&isys->adev->dev, csi2->nports,
+				  sizeof(*isys->csi2), GFP_KERNEL);
+	if (!isys->csi2) {
+		rval = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < csi2->nports; i++) {
+		rval = ipu_isys_csi2_init(&isys->csi2[i], isys,
+					  isys->pdata->base +
+					  csi2->offsets[i], i);
+		if (rval)
+			goto fail;
+
+		isys->isr_csi2_bits |= IPU_ISYS_UNISPART_IRQ_CSI2(i);
+	}
+
+	isys->tpg = devm_kcalloc(&isys->adev->dev, tpg->ntpgs,
+				 sizeof(*isys->tpg), GFP_KERNEL);
+	if (!isys->tpg) {
+		rval = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < tpg->ntpgs; i++) {
+		rval = ipu_isys_tpg_init(&isys->tpg[i], isys,
+					 isys->pdata->base +
+					 tpg->offsets[i],
+					 tpg->sels ? (isys->pdata->base +
+						      tpg->sels[i]) : NULL, i);
+		if (rval)
+			goto fail;
+	}
+
+	for (k = 0; k < NR_OF_CSI2_BE_SOC_DEV; k++) {
+		rval = ipu_isys_csi2_be_soc_init(&isys->csi2_be_soc[k],
+						 isys, k);
+		if (rval) {
+			dev_info(&isys->adev->dev,
+				 "can't register csi2 soc be device %d\n", k);
+			goto fail;
+		}
+	}
+
+	rval = ipu_isys_csi2_be_init(&isys->csi2_be, isys);
+	if (rval) {
+		dev_info(&isys->adev->dev,
+			 "can't register raw csi2 be device\n");
+		goto fail;
+	}
+
+	for (i = 0; i < csi2->nports; i++) {
+		rval = media_create_pad_link(&isys->csi2[i].asd.sd.entity,
+					     CSI2_PAD_SOURCE,
+					     &isys->csi2_be.asd.sd.entity,
+					     CSI2_BE_PAD_SINK, 0);
+		if (rval) {
+			dev_info(&isys->adev->dev,
+				 "can't create link csi2 <=> csi2_be\n");
+			goto fail;
+		}
+		for (k = 0; k < NR_OF_CSI2_BE_SOC_DEV; k++) {
+			csi2_be_soc = &isys->csi2_be_soc[k];
+			rval =
+			    media_create_pad_link(&isys->csi2[i].asd.sd.entity,
+						  CSI2_PAD_SOURCE,
+						  &csi2_be_soc->asd.sd.entity,
+						  CSI2_BE_SOC_PAD_SINK, 0);
+			if (rval) {
+				dev_info(&isys->adev->dev,
+					 "can't create link csi2->be_soc\n");
+				goto fail;
+			}
+		}
+	}
+
+	for (i = 0; i < tpg->ntpgs; i++) {
+		rval = media_create_pad_link(&isys->tpg[i].asd.sd.entity,
+					     TPG_PAD_SOURCE,
+					     &isys->csi2_be.asd.sd.entity,
+					     CSI2_BE_PAD_SINK, 0);
+		if (rval) {
+			dev_info(&isys->adev->dev,
+				 "can't create link between tpg and csi2_be\n");
+			goto fail;
+		}
+
+		for (k = 0; k < NR_OF_CSI2_BE_SOC_DEV; k++) {
+			csi2_be_soc = &isys->csi2_be_soc[k];
+			rval =
+			    media_create_pad_link(&isys->tpg[i].asd.sd.entity,
+						  TPG_PAD_SOURCE,
+						  &csi2_be_soc->asd.sd.entity,
+						  CSI2_BE_SOC_PAD_SINK, 0);
+			if (rval) {
+				dev_info(&isys->adev->dev,
+					 "can't create link tpg->be_soc\n");
+				goto fail;
+			}
+		}
+	}
+
+	return 0;
+
+fail:
+	isys_unregister_subdevices(isys);
+	return rval;
+}
+
+/* Read the ltrdid threshold values from the BIOS or system configuration. */
+static void get_lut_ltrdid(struct ipu_isys *isys, struct ltr_did *pltr_did)
+{
+	struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+	/* default values */
+	struct ltr_did ltrdid_default;
+
+	ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE;
+	ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE;
+
+	if (iwake_watermark->ltrdid.lut_ltr.value)
+		*pltr_did = iwake_watermark->ltrdid;
+	else
+		*pltr_did = ltrdid_default;
+}
+
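+/* Write a single iwake GDA register through the firmware proxy. */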
+static int set_iwake_register(struct ipu_isys *isys, u32 index, u32 value)
+{
+	int ret = 0;
+	u32 req_id = index;
+	u32 offset = 0;
+
+	ret = ipu_fw_isys_send_proxy_token(isys, req_id, index, offset, value);
+	if (ret)
+		dev_err(&isys->adev->dev, "write %d failed %d", index, ret);
+
+	return ret;
+}
+
+/*
+ * When the input system is powered up, before enabling any new sensor
+ * capture or after disabling any sensor capture, the following values
+ * need to be set:
+ * LTR_value = LTR(usec) from calculation;
+ * LTR_scale = 2;
+ * DID_value = DID(usec) from calculation;
+ * DID_scale = 2;
+ *
+ * When the input system is powered down, the LTR and DID values
+ * must be returned to the default values:
+ * LTR_value = 1023;
+ * LTR_scale = 5;
+ * DID_value = 1023;
+ * DID_scale = 2;
+ */
+static void set_iwake_ltrdid(struct ipu_isys *isys,
+			     u16 ltr,
+			     u16 did,
+			     enum ltr_did_type use)
+{
+	/* did_scale will be set to 2, i.e. a granularity of 1us */
+	u16 ltr_val, ltr_scale, did_val;
+	union fabric_ctrl fc;
+	struct ipu_device *isp = isys->adev->isp;
+
+	switch (use) {
+	case LTR_IWAKE_ON:
+		ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX);
+		did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX);
+		ltr_scale = (ltr == LTR_DID_VAL_MAX &&
+				did == LTR_DID_VAL_MAX) ?
+				LTR_SCALE_DEFAULT : LTR_SCALE_1024NS;
+		break;
+	case LTR_ISYS_ON:
+	case LTR_IWAKE_OFF:
+		ltr_val = LTR_DID_PKGC_2R;
+		did_val = LTR_DID_PKGC_2R;
+		ltr_scale = LTR_SCALE_1024NS;
+		break;
+	case LTR_ISYS_OFF:
+		ltr_val   = LTR_DID_VAL_MAX;
+		did_val   = LTR_DID_VAL_MAX;
+		ltr_scale = LTR_SCALE_DEFAULT;
+		break;
+	default:
+		return;
+	}
+
+	fc.value = readl(isp->base + IPU_BUTTRESS_FABIC_CONTROL);
+	fc.bits.ltr_val = ltr_val;
+	fc.bits.ltr_scale = ltr_scale;
+	fc.bits.did_val = did_val;
+	fc.bits.did_scale = 2;
+	writel(fc.value, isp->base + IPU_BUTTRESS_FABIC_CONTROL);
+}
+
+/* For debug purposes the SW driver may clear the GDA_ENABLE_IWAKE
+ * register before the FW configures the stream. Otherwise SW should
+ * not access this register.
+ */
+static int enable_iwake(struct ipu_isys *isys, bool enable)
+{
+	int ret = 0;
+	struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+
+	mutex_lock(&iwake_watermark->mutex);
+	if (iwake_watermark->iwake_enabled == enable) {
+		mutex_unlock(&iwake_watermark->mutex);
+		return ret;
+	}
+	ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable);
+	if (!ret)
+		iwake_watermark->iwake_enabled = enable;
+	mutex_unlock(&iwake_watermark->mutex);
+	return ret;
+}
+
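+/*
+ * Recompute the iwake LTR/DID values and the GDA thresholds from the
+ * summed data rate of all streaming video nodes. iwake is switched off
+ * when nothing is streaming or when it has been forced off.
+ */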
+void update_watermark_setting(struct ipu_isys *isys)
+{
+	struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+	struct list_head *stream_node;
+	struct video_stream_watermark *p_watermark;
+	struct ltr_did ltrdid;
+	u16 calc_fill_time_us = 0;
+	u16 ltr, did;
+	u32 iwake_threshold, iwake_critical_threshold;
+	u64 threshold_bytes;
+	u64 isys_pb_datarate_mbs = 0;
+	u16 sram_granulrity_shift = (ipu_ver == IPU_VER_6) ?
+		IPU6_SRAM_GRANULRITY_SHIFT : IPU6SE_SRAM_GRANULRITY_SHIFT;
+	int max_sram_size = (ipu_ver == IPU_VER_6) ?
+		IPU6_MAX_SRAM_SIZE : IPU6SE_MAX_SRAM_SIZE;
+
+	mutex_lock(&iwake_watermark->mutex);
+	if (iwake_watermark->force_iwake_disable) {
+		set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
+		set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+				   CRITICAL_THRESHOLD_IWAKE_DISABLE);
+		mutex_unlock(&iwake_watermark->mutex);
+		return;
+	}
+
+	if (list_empty(&iwake_watermark->video_list)) {
+		isys_pb_datarate_mbs = 0;
+	} else {
+		list_for_each(stream_node, &iwake_watermark->video_list)
+		{
+			p_watermark = list_entry(stream_node,
+						 struct video_stream_watermark,
+						 stream_node);
+			isys_pb_datarate_mbs += p_watermark->stream_data_rate;
+		}
+	}
+	mutex_unlock(&iwake_watermark->mutex);
+
+	if (!isys_pb_datarate_mbs) {
+		set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
+		mutex_lock(&iwake_watermark->mutex);
+		set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+				   CRITICAL_THRESHOLD_IWAKE_DISABLE);
+		mutex_unlock(&iwake_watermark->mutex);
+	} else {
+		/* iwake should be enabled by default according to the FW */
+		enable_iwake(isys, true);
+		calc_fill_time_us = (u16)(max_sram_size / isys_pb_datarate_mbs);
+		get_lut_ltrdid(isys, &ltrdid);
+
+		if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0)
+			ltr = 0;
+		else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1)
+			ltr = ltrdid.lut_ltr.bits.val0;
+		else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2)
+			ltr = ltrdid.lut_ltr.bits.val1;
+		else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3)
+			ltr = ltrdid.lut_ltr.bits.val2;
+		else
+			ltr = ltrdid.lut_ltr.bits.val3;
+
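+		/*
+		 * The remaining fill time, after subtracting the
+		 * latency tolerance, is used as the DID value.
+		 */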
+		did = calc_fill_time_us - ltr;
+
+		threshold_bytes = did * isys_pb_datarate_mbs;
+		/* calculate iwake threshold with 2KB granularity pages */
+		iwake_threshold =
+			max_t(u32, 1, threshold_bytes >> sram_granulrity_shift);
+
+		iwake_threshold = min_t(u32, iwake_threshold, max_sram_size);
+
+		/* Set the critical threshold halfway between the
+		 * iwake threshold and the full buffer.
+		 */
+		iwake_critical_threshold = iwake_threshold +
+			(IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2;
+
+		set_iwake_ltrdid(isys, ltr, did, LTR_IWAKE_ON);
+		mutex_lock(&iwake_watermark->mutex);
+		set_iwake_register(isys,
+				   GDA_IWAKE_THRESHOLD_INDEX, iwake_threshold);
+
+		set_iwake_register(isys,
+				   GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+				   iwake_critical_threshold);
+		mutex_unlock(&iwake_watermark->mutex);
+
+		writel(VAL_PKGC_PMON_CFG_RESET,
+		       isys->adev->isp->base + REG_PKGC_PMON_CFG);
+		writel(VAL_PKGC_PMON_CFG_START,
+		       isys->adev->isp->base + REG_PKGC_PMON_CFG);
+	}
+}
+
+static int isys_iwake_watermark_init(struct ipu_isys *isys)
+{
+	struct isys_iwake_watermark *iwake_watermark;
+
+	if (isys->iwake_watermark)
+		return 0;
+
+	iwake_watermark = devm_kzalloc(&isys->adev->dev,
+				       sizeof(*iwake_watermark), GFP_KERNEL);
+	if (!iwake_watermark)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&iwake_watermark->video_list);
+	mutex_init(&iwake_watermark->mutex);
+
+	iwake_watermark->ltrdid.lut_ltr.value = 0;
+	isys->iwake_watermark = iwake_watermark;
+	iwake_watermark->isys = isys;
+	iwake_watermark->iwake_enabled = false;
+	iwake_watermark->force_iwake_disable = false;
+	return 0;
+}
+
+static int isys_iwake_watermark_cleanup(struct ipu_isys *isys)
+{
+	struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+
+	if (!iwake_watermark)
+		return -EINVAL;
+	mutex_lock(&iwake_watermark->mutex);
+	list_del(&iwake_watermark->video_list);
+	mutex_unlock(&iwake_watermark->mutex);
+	mutex_destroy(&iwake_watermark->mutex);
+	isys->iwake_watermark = NULL;
+	return 0;
+}
+
+/* The .bound() notifier callback when a match is found */
+static int isys_notifier_bound(struct v4l2_async_notifier *notifier,
+			       struct v4l2_subdev *sd,
+			       struct v4l2_async_subdev *asd)
+{
+	struct ipu_isys *isys = container_of(notifier,
+					struct ipu_isys, notifier);
+	struct sensor_async_subdev *s_asd = container_of(asd,
+					struct sensor_async_subdev, asd);
+
+	dev_info(&isys->adev->dev, "bind %s nlanes is %d port is %d\n",
+		 sd->name, s_asd->csi2.nlanes, s_asd->csi2.port);
+	isys_complete_ext_device_registration(isys, sd, &s_asd->csi2);
+
+	return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static void isys_notifier_unbind(struct v4l2_async_notifier *notifier,
+				 struct v4l2_subdev *sd,
+				 struct v4l2_async_subdev *asd)
+{
+	struct ipu_isys *isys = container_of(notifier,
+					struct ipu_isys, notifier);
+
+	dev_info(&isys->adev->dev, "unbind %s\n", sd->name);
+}
+
+static int isys_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+	struct ipu_isys *isys = container_of(notifier,
+					struct ipu_isys, notifier);
+
+	dev_info(&isys->adev->dev, "All sensor registration completed.\n");
+
+	return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations isys_async_ops = {
+	.bound = isys_notifier_bound,
+	.unbind = isys_notifier_unbind,
+	.complete = isys_notifier_complete,
+};
+
+static int isys_fwnode_parse(struct device *dev,
+			     struct v4l2_fwnode_endpoint *vep,
+			     struct v4l2_async_subdev *asd)
+{
+	struct sensor_async_subdev *s_asd =
+			container_of(asd, struct sensor_async_subdev, asd);
+
+	s_asd->csi2.port = vep->base.port;
+	s_asd->csi2.nlanes = vep->bus.mipi_csi2.num_data_lanes;
+
+	return 0;
+}
+
+static int isys_notifier_init(struct ipu_isys *isys)
+{
+	struct ipu_device *isp = isys->adev->isp;
+	size_t asd_struct_size = sizeof(struct sensor_async_subdev);
+	int ret;
+
+	v4l2_async_notifier_init(&isys->notifier);
+	ret = v4l2_async_notifier_parse_fwnode_endpoints(&isp->pdev->dev,
+							 &isys->notifier,
+							 asd_struct_size,
+							 isys_fwnode_parse);
+
+	if (ret < 0)
+		return ret;
+
+	if (list_empty(&isys->notifier.asd_list))
+		return -ENODEV;	/* no endpoint */
+
+	isys->notifier.ops = &isys_async_ops;
+	ret = v4l2_async_notifier_register(&isys->v4l2_dev, &isys->notifier);
+	if (ret) {
+		dev_err(&isys->adev->dev,
+			"failed to register async notifier : %d\n", ret);
+		v4l2_async_notifier_cleanup(&isys->notifier);
+	}
+
+	return ret;
+}
+
+static const struct media_device_ops isys_mdev_ops = {
+	.link_notify = v4l2_pipeline_link_notify,
+};
+
+static int isys_register_devices(struct ipu_isys *isys)
+{
+	int rval;
+
+	isys->media_dev.dev = &isys->adev->dev;
+	isys->media_dev.ops = &isys_mdev_ops;
+	strlcpy(isys->media_dev.model,
+		IPU_MEDIA_DEV_MODEL_NAME, sizeof(isys->media_dev.model));
+	snprintf(isys->media_dev.bus_info, sizeof(isys->media_dev.bus_info),
+		 "pci:%s", dev_name(isys->adev->dev.parent->parent));
+	strlcpy(isys->v4l2_dev.name, isys->media_dev.model,
+		sizeof(isys->v4l2_dev.name));
+
+	media_device_init(&isys->media_dev);
+
+	rval = media_device_register(&isys->media_dev);
+	if (rval < 0) {
+		dev_info(&isys->adev->dev, "can't register media device\n");
+		goto out_media_device_unregister;
+	}
+
+	isys->v4l2_dev.mdev = &isys->media_dev;
+
+	rval = v4l2_device_register(&isys->adev->dev, &isys->v4l2_dev);
+	if (rval < 0) {
+		dev_info(&isys->adev->dev, "can't register v4l2 device\n");
+		goto out_media_device_unregister;
+	}
+
+	rval = isys_register_subdevices(isys);
+	if (rval)
+		goto out_v4l2_device_unregister;
+	rval = isys_notifier_init(isys);
+	if (rval)
+		goto out_isys_unregister_subdevices;
+	rval = v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+	if (rval)
+		goto out_isys_unregister_subdevices;
+
+	return 0;
+
+out_isys_unregister_subdevices:
+	isys_unregister_subdevices(isys);
+
+out_v4l2_device_unregister:
+	v4l2_device_unregister(&isys->v4l2_dev);
+
+out_media_device_unregister:
+	media_device_unregister(&isys->media_dev);
+	media_device_cleanup(&isys->media_dev);
+
+	return rval;
+}
+
+static void isys_unregister_devices(struct ipu_isys *isys)
+{
+	isys_unregister_subdevices(isys);
+	v4l2_device_unregister(&isys->v4l2_dev);
+	media_device_unregister(&isys->media_dev);
+	media_device_cleanup(&isys->media_dev);
+}
+
+#ifdef CONFIG_PM
+static int isys_runtime_pm_resume(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_device *isp = adev->isp;
+	struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+	unsigned long flags;
+	int ret;
+
+	if (!isys)
+		return 0;
+
+	ret = ipu_mmu_hw_init(adev->mmu);
+	if (ret)
+		return ret;
+
+	ipu_trace_restore(dev);
+
+	pm_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE);
+
+	ret = ipu_buttress_start_tsc_sync(isp);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&isys->power_lock, flags);
+	isys->power = 1;
+	spin_unlock_irqrestore(&isys->power_lock, flags);
+
+	if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) {
+		mutex_lock(&isys->short_packet_tracing_mutex);
+		isys->short_packet_tracing_count = 0;
+		mutex_unlock(&isys->short_packet_tracing_mutex);
+	}
+	isys_setup_hw(isys);
+
+	set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON);
+	return 0;
+}
+
+static int isys_runtime_pm_suspend(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+	unsigned long flags;
+
+	if (!isys)
+		return 0;
+
+	spin_lock_irqsave(&isys->power_lock, flags);
+	isys->power = 0;
+	spin_unlock_irqrestore(&isys->power_lock, flags);
+
+	ipu_trace_stop(dev);
+	mutex_lock(&isys->mutex);
+	isys->reset_needed = false;
+	mutex_unlock(&isys->mutex);
+
+	pm_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+	ipu_mmu_hw_cleanup(adev->mmu);
+
+	set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF);
+	return 0;
+}
+
+static int isys_suspend(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+
+	/* If stream is open, refuse to suspend */
+	if (isys->stream_opened)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int isys_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops isys_pm_ops = {
+	.runtime_suspend = isys_runtime_pm_suspend,
+	.runtime_resume = isys_runtime_pm_resume,
+	.suspend = isys_suspend,
+	.resume = isys_resume,
+};
+
+#define ISYS_PM_OPS (&isys_pm_ops)
+#else
+#define ISYS_PM_OPS NULL
+#endif
+
+static void isys_remove(struct ipu_bus_device *adev)
+{
+	struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+	struct ipu_device *isp = adev->isp;
+	struct isys_fw_msgs *fwmsg, *safe;
+
+	dev_info(&adev->dev, "removed\n");
+#ifdef CONFIG_DEBUG_FS
+	if (isp->ipu_dir)
+		debugfs_remove_recursive(isys->debugfsdir);
+#endif
+
+	list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) {
+		dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs),
+			       fwmsg, fwmsg->dma_addr, 0);
+	}
+
+	list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) {
+		dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs),
+			       fwmsg, fwmsg->dma_addr, 0);
+	}
+
+	isys_iwake_watermark_cleanup(isys);
+
+	ipu_trace_uninit(&adev->dev);
+	isys_unregister_devices(isys);
+	pm_qos_remove_request(&isys->pm_qos);
+
+	if (!isp->secure_mode) {
+		ipu_cpd_free_pkg_dir(adev, isys->pkg_dir,
+				     isys->pkg_dir_dma_addr,
+				     isys->pkg_dir_size);
+		ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt);
+		release_firmware(isys->fw);
+	}
+
+	mutex_destroy(&isys->stream_mutex);
+	mutex_destroy(&isys->mutex);
+
+	if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) {
+		u32 trace_size = IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE;
+		unsigned long attrs;
+
+		attrs = DMA_ATTR_NON_CONSISTENT;
+		dma_free_attrs(&adev->dev, trace_size,
+			       isys->short_packet_trace_buffer,
+			       isys->short_packet_trace_buffer_dma_addr, attrs);
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int ipu_isys_icache_prefetch_get(void *data, u64 *val)
+{
+	struct ipu_isys *isys = data;
+
+	*val = isys->icache_prefetch;
+	return 0;
+}
+
+static int ipu_isys_icache_prefetch_set(void *data, u64 val)
+{
+	struct ipu_isys *isys = data;
+
+	if (val != !!val)
+		return -EINVAL;
+
+	isys->icache_prefetch = val;
+
+	return 0;
+}
+
+static int isys_iwake_control_get(void *data, u64 *val)
+{
+	struct ipu_isys *isys = data;
+	struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+
+	mutex_lock(&iwake_watermark->mutex);
+	*val = isys->iwake_watermark->force_iwake_disable;
+	mutex_unlock(&iwake_watermark->mutex);
+	return 0;
+}
+
+static int isys_iwake_control_set(void *data, u64 val)
+{
+	struct ipu_isys *isys = data;
+	struct isys_iwake_watermark *iwake_watermark;
+
+	if (val != !!val)
+		return -EINVAL;
+	/* If stream is open, refuse to set iwake */
+	if (isys->stream_opened)
+		return -EBUSY;
+
+	iwake_watermark = isys->iwake_watermark;
+	mutex_lock(&iwake_watermark->mutex);
+	isys->iwake_watermark->force_iwake_disable = !!val;
+	mutex_unlock(&iwake_watermark->mutex);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(isys_icache_prefetch_fops,
+			ipu_isys_icache_prefetch_get,
+			ipu_isys_icache_prefetch_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(isys_iwake_control_fops,
+			isys_iwake_control_get,
+			isys_iwake_control_set, "%llu\n");
+
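+/*
+ * Both attributes land in debugfs; the exact path depends on where the IPU
+ * root directory (isp->ipu_dir) is created. An illustrative session:
+ *
+ *	echo 1 > /sys/kernel/debug/<ipu root>/isys/icache_prefetch
+ *	cat /sys/kernel/debug/<ipu root>/isys/iwake_disable
+ */
+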
+static int ipu_isys_init_debugfs(struct ipu_isys *isys)
+{
+	struct dentry *file;
+	struct dentry *dir;
+#ifdef IPU_ISYS_GPC
+	int ret;
+#endif
+
+	dir = debugfs_create_dir("isys", isys->adev->isp->ipu_dir);
+	if (IS_ERR(dir))
+		return -ENOMEM;
+
+	file = debugfs_create_file("icache_prefetch", 0600,
+				   dir, isys, &isys_icache_prefetch_fops);
+	if (IS_ERR(file))
+		goto err;
+
+	file = debugfs_create_file("iwake_disable", 0600,
+				   dir, isys, &isys_iwake_control_fops);
+	if (IS_ERR(file))
+		goto err;
+
+	isys->debugfsdir = dir;
+
+#ifdef IPU_ISYS_GPC
+	ret = ipu_isys_gpc_init_debugfs(isys);
+	if (ret)
+		return ret;
+#endif
+
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENOMEM;
+}
+#endif
+
+static int alloc_fw_msg_bufs(struct ipu_isys *isys, int amount)
+{
+	dma_addr_t dma_addr;
+	struct isys_fw_msgs *addr;
+	unsigned int i;
+	unsigned long flags;
+
+	for (i = 0; i < amount; i++) {
+		addr = dma_alloc_attrs(&isys->adev->dev,
+				       sizeof(struct isys_fw_msgs),
+				       &dma_addr, GFP_KERNEL,
+				       0);
+		if (!addr)
+			break;
+		addr->dma_addr = dma_addr;
+
+		spin_lock_irqsave(&isys->listlock, flags);
+		list_add(&addr->head, &isys->framebuflist);
+		spin_unlock_irqrestore(&isys->listlock, flags);
+	}
+	if (i == amount)
+		return 0;
+	spin_lock_irqsave(&isys->listlock, flags);
+	while (!list_empty(&isys->framebuflist)) {
+		addr = list_first_entry(&isys->framebuflist,
+					struct isys_fw_msgs, head);
+		list_del(&addr->head);
+		spin_unlock_irqrestore(&isys->listlock, flags);
+		dma_free_attrs(&isys->adev->dev,
+			       sizeof(struct isys_fw_msgs),
+			       addr, addr->dma_addr,
+			       0);
+		spin_lock_irqsave(&isys->listlock, flags);
+	}
+	spin_unlock_irqrestore(&isys->listlock, flags);
+	return -ENOMEM;
+}
+
+struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip)
+{
+	struct ipu_isys_video *pipe_av =
+	    container_of(ip, struct ipu_isys_video, ip);
+	struct ipu_isys *isys;
+	struct isys_fw_msgs *msg;
+	unsigned long flags;
+
+	isys = pipe_av->isys;
+
+	spin_lock_irqsave(&isys->listlock, flags);
+	if (list_empty(&isys->framebuflist)) {
+		spin_unlock_irqrestore(&isys->listlock, flags);
+		dev_dbg(&isys->adev->dev, "Frame list empty - allocating more\n");
+
+		alloc_fw_msg_bufs(isys, 5);
+
+		spin_lock_irqsave(&isys->listlock, flags);
+		if (list_empty(&isys->framebuflist)) {
+			spin_unlock_irqrestore(&isys->listlock, flags);
+			dev_err(&isys->adev->dev, "Frame list empty\n");
+			return NULL;
+		}
+	}
+	msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head);
+	list_move(&msg->head, &isys->framebuflist_fw);
+	spin_unlock_irqrestore(&isys->listlock, flags);
+	memset(&msg->fw_msg, 0, sizeof(msg->fw_msg));
+
+	return msg;
+}
+
+void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys)
+{
+	struct isys_fw_msgs *fwmsg, *fwmsg0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&isys->listlock, flags);
+	list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head)
+		list_move(&fwmsg->head, &isys->framebuflist);
+	spin_unlock_irqrestore(&isys->listlock, flags);
+}
+
+void ipu_put_fw_mgs_buf(struct ipu_isys *isys, u64 data)
+{
+	struct isys_fw_msgs *msg;
+	unsigned long flags;
+	u64 *ptr = (u64 *)(unsigned long)data;
+
+	if (!ptr)
+		return;
+
+	spin_lock_irqsave(&isys->listlock, flags);
+	msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy);
+	list_move(&msg->head, &isys->framebuflist);
+	spin_unlock_irqrestore(&isys->listlock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_put_fw_mgs_buf);
+
+static int isys_probe(struct ipu_bus_device *adev)
+{
+	struct ipu_isys *isys;
+	struct ipu_device *isp = adev->isp;
+	const struct firmware *fw;
+	int rval = 0;
+
+	isys = devm_kzalloc(&adev->dev, sizeof(*isys), GFP_KERNEL);
+	if (!isys)
+		return -ENOMEM;
+
+	rval = ipu_mmu_hw_init(adev->mmu);
+	if (rval)
+		return rval;
+
+	/* By default, short packets are captured from the CSI-2 receiver. */
+	isys->short_packet_source = IPU_ISYS_SHORT_PACKET_FROM_RECEIVER;
+	isys->adev = adev;
+	isys->pdata = adev->pdata;
+
+	/* initial streamID for different sensor types */
+	if (ipu_ver == IPU_VER_6) {
+		isys->sensor_info.vc1_data_start =
+			IPU6_FW_ISYS_VC1_SENSOR_DATA_START;
+		isys->sensor_info.vc1_data_end =
+			IPU6_FW_ISYS_VC1_SENSOR_DATA_END;
+		isys->sensor_info.vc0_data_start =
+			IPU6_FW_ISYS_VC0_SENSOR_DATA_START;
+		isys->sensor_info.vc0_data_end =
+			IPU6_FW_ISYS_VC0_SENSOR_DATA_END;
+		isys->sensor_info.vc1_pdaf_start =
+			IPU6_FW_ISYS_VC1_SENSOR_PDAF_START;
+		isys->sensor_info.vc1_pdaf_end =
+			IPU6_FW_ISYS_VC1_SENSOR_PDAF_END;
+		isys->sensor_info.sensor_metadata =
+			IPU6_FW_ISYS_SENSOR_METADATA;
+
+		isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_DATA] =
+			IPU6_FW_ISYS_VC1_SENSOR_DATA_START;
+		isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_PDAF] =
+			IPU6_FW_ISYS_VC1_SENSOR_PDAF_START;
+		isys->sensor_types[IPU_FW_ISYS_VC0_SENSOR_DATA] =
+			IPU6_FW_ISYS_VC0_SENSOR_DATA_START;
+	} else if (ipu_ver == IPU_VER_6SE) {
+		isys->sensor_info.vc1_data_start =
+			IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START;
+		isys->sensor_info.vc1_data_end =
+			IPU6SE_FW_ISYS_VC1_SENSOR_DATA_END;
+		isys->sensor_info.vc0_data_start =
+			IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START;
+		isys->sensor_info.vc0_data_end =
+			IPU6SE_FW_ISYS_VC0_SENSOR_DATA_END;
+		isys->sensor_info.vc1_pdaf_start =
+			IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START;
+		isys->sensor_info.vc1_pdaf_end =
+			IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_END;
+		isys->sensor_info.sensor_metadata =
+			IPU6SE_FW_ISYS_SENSOR_METADATA;
+
+		isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_DATA] =
+			IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START;
+		isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_PDAF] =
+			IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START;
+		isys->sensor_types[IPU_FW_ISYS_VC0_SENSOR_DATA] =
+			IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START;
+	}
+
+	INIT_LIST_HEAD(&isys->requests);
+
+	spin_lock_init(&isys->lock);
+	spin_lock_init(&isys->power_lock);
+	isys->power = 0;
+
+	mutex_init(&isys->mutex);
+	mutex_init(&isys->stream_mutex);
+	mutex_init(&isys->lib_mutex);
+
+	spin_lock_init(&isys->listlock);
+	INIT_LIST_HEAD(&isys->framebuflist);
+	INIT_LIST_HEAD(&isys->framebuflist_fw);
+
+	dev_dbg(&adev->dev, "isys probe %p %p\n", adev, &adev->dev);
+	ipu_bus_set_drvdata(adev, isys);
+
+	isys->line_align = IPU_ISYS_2600_MEM_LINE_ALIGN;
+	isys->icache_prefetch = 0;
+
+#ifndef CONFIG_PM
+	isys_setup_hw(isys);
+#endif
+
+	if (!isp->secure_mode) {
+		fw = isp->cpd_fw;
+		rval = ipu_buttress_map_fw_image(adev, fw, &isys->fw_sgt);
+		if (rval)
+			goto release_firmware;
+
+		isys->pkg_dir =
+		    ipu_cpd_create_pkg_dir(adev, isp->cpd_fw->data,
+					   sg_dma_address(isys->fw_sgt.sgl),
+					   &isys->pkg_dir_dma_addr,
+					   &isys->pkg_dir_size);
+		if (!isys->pkg_dir) {
+			rval = -ENOMEM;
+			goto remove_shared_buffer;
+		}
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	/* debugfs failure is not fatal. */
+	ipu_isys_init_debugfs(isys);
+#endif
+
+	ipu_trace_init(adev->isp, isys->pdata->base, &adev->dev,
+		       isys_trace_blocks);
+
+	pm_qos_add_request(&isys->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+	alloc_fw_msg_bufs(isys, 20);
+
+	rval = isys_register_devices(isys);
+	if (rval)
+		goto out_remove_pkg_dir_shared_buffer;
+	rval = isys_iwake_watermark_init(isys);
+	if (rval)
+		goto out_unregister_devices;
+
+	ipu_mmu_hw_cleanup(adev->mmu);
+
+	return 0;
+
+out_unregister_devices:
+	isys_iwake_watermark_cleanup(isys);
+	isys_unregister_devices(isys);
+out_remove_pkg_dir_shared_buffer:
+	if (!isp->secure_mode)
+		ipu_cpd_free_pkg_dir(adev, isys->pkg_dir,
+				     isys->pkg_dir_dma_addr,
+				     isys->pkg_dir_size);
+remove_shared_buffer:
+	if (!isp->secure_mode)
+		ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt);
+release_firmware:
+	if (!isp->secure_mode)
+		release_firmware(isys->fw);
+	ipu_trace_uninit(&adev->dev);
+
+	mutex_destroy(&isys->mutex);
+	mutex_destroy(&isys->stream_mutex);
+
+	if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT)
+		mutex_destroy(&isys->short_packet_tracing_mutex);
+
+	ipu_mmu_hw_cleanup(adev->mmu);
+
+	return rval;
+}
+
+struct fwmsg {
+	int type;
+	char *msg;
+	bool valid_ts;
+};
+
+static const struct fwmsg fw_msg[] = {
+	{IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0},
+	{IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0},
+	{IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0},
+	{IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+	 "STREAM_START_AND_CAPTURE_ACK", 0},
+	{IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0},
+	{IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0},
+	{IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1},
+	{IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0},
+	{IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+	 "STREAM_START_AND_CAPTURE_DONE", 1},
+	{IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1},
+	{IPU_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1},
+	{IPU_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1},
+	{IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1},
+	{-1, "UNKNOWN MESSAGE", 0},
+};
+
+static int resp_type_to_index(int type)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_msg); i++)
+		if (fw_msg[i].type == type)
+			return i;
+
+	return i - 1;
+}
+
+int isys_isr_one(struct ipu_bus_device *adev)
+{
+	struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+	struct ipu_fw_isys_resp_info_abi resp_data;
+	struct ipu_fw_isys_resp_info_abi *resp;
+	struct ipu_isys_pipeline *pipe;
+	u64 ts;
+	unsigned int i;
+
+	if (!isys->fwcom)
+		return 0;
+
+	resp = ipu_fw_isys_get_resp(isys->fwcom, IPU_BASE_MSG_RECV_QUEUES,
+				    &resp_data);
+	if (!resp)
+		return 1;
+
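+	/* The firmware reports the 64-bit timestamp as two 32-bit halves. */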
+	ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0];
+
+	if (resp->error_info.error == IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION)
+		/* Suspension is a special case: not enough buffers */
+		dev_dbg(&adev->dev,
+			"hostlib: error resp %02d %s, stream %u, error SUSPENSION, details %d, timestamp 0x%16.16llx, pin %d\n",
+			resp->type,
+			fw_msg[resp_type_to_index(resp->type)].msg,
+			resp->stream_handle,
+			resp->error_info.error_details,
+			fw_msg[resp_type_to_index(resp->type)].valid_ts ?
+			ts : 0, resp->pin_id);
+	else if (resp->error_info.error)
+		dev_dbg(&adev->dev,
+			"hostlib: error resp %02d %s, stream %u, error %d, details %d, timestamp 0x%16.16llx, pin %d\n",
+			resp->type,
+			fw_msg[resp_type_to_index(resp->type)].msg,
+			resp->stream_handle,
+			resp->error_info.error, resp->error_info.error_details,
+			fw_msg[resp_type_to_index(resp->type)].valid_ts ?
+			ts : 0, resp->pin_id);
+	else
+		dev_dbg(&adev->dev,
+			"hostlib: resp %02d %s, stream %u, timestamp 0x%16.16llx, pin %d\n",
+			resp->type,
+			fw_msg[resp_type_to_index(resp->type)].msg,
+			resp->stream_handle,
+			fw_msg[resp_type_to_index(resp->type)].valid_ts ?
+			ts : 0, resp->pin_id);
+
+	if (resp->stream_handle >= IPU_ISYS_MAX_STREAMS) {
+		dev_err(&adev->dev, "bad stream handle %u\n",
+			resp->stream_handle);
+		goto leave;
+	}
+
+	pipe = isys->pipes[resp->stream_handle];
+	if (!pipe) {
+		dev_err(&adev->dev, "no pipeline for stream %u\n",
+			resp->stream_handle);
+		goto leave;
+	}
+	pipe->error = resp->error_info.error;
+
+	switch (resp->type) {
+	case IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE:
+		ipu_put_fw_mgs_buf(ipu_bus_get_drvdata(adev), resp->buf_id);
+		complete(&pipe->stream_open_completion);
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK:
+		complete(&pipe->stream_close_completion);
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK:
+		complete(&pipe->stream_start_completion);
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK:
+		ipu_put_fw_mgs_buf(ipu_bus_get_drvdata(adev), resp->buf_id);
+		complete(&pipe->stream_start_completion);
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK:
+		complete(&pipe->stream_stop_completion);
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK:
+		complete(&pipe->stream_stop_completion);
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY:
+		if (resp->pin_id < IPU_ISYS_OUTPUT_PINS &&
+		    pipe->output_pins[resp->pin_id].pin_ready)
+			pipe->output_pins[resp->pin_id].pin_ready(pipe, resp);
+		else
+			dev_err(&adev->dev,
+				"%d:No data pin ready handler for pin id %d\n",
+				resp->stream_handle, resp->pin_id);
+		if (pipe->csi2)
+			ipu_isys_csi2_error(pipe->csi2);
+
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK:
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE:
+	case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE:
+		if (pipe->interlaced) {
+			struct ipu_isys_buffer *ib, *ib_safe;
+			struct list_head list;
+			unsigned long flags;
+			unsigned int *ts = resp->timestamp;
+
+			if (pipe->isys->short_packet_source ==
+			    IPU_ISYS_SHORT_PACKET_FROM_TUNIT)
+				pipe->cur_field =
+				    ipu_isys_csi2_get_current_field(pipe, ts);
+
+			/*
+			 * Move the pending buffers to a local temp list.
+			 * Then we do not need to handle the lock during
+			 * the loop.
+			 */
+			spin_lock_irqsave(&pipe->short_packet_queue_lock,
+					  flags);
+			list_cut_position(&list,
+					  &pipe->pending_interlaced_bufs,
+					  pipe->pending_interlaced_bufs.prev);
+			spin_unlock_irqrestore(&pipe->short_packet_queue_lock,
+					       flags);
+
+			list_for_each_entry_safe(ib, ib_safe, &list, head) {
+				struct vb2_buffer *vb;
+
+				vb = ipu_isys_buffer_to_vb2_buffer(ib);
+				to_vb2_v4l2_buffer(vb)->field = pipe->cur_field;
+				list_del(&ib->head);
+
+				ipu_isys_queue_buf_done(ib);
+			}
+		}
+		for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++)
+			if (pipe->capture_done[i])
+				pipe->capture_done[i](pipe, resp);
+
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_FRAME_SOF:
+		if (pipe->csi2)
+			ipu_isys_csi2_sof_event(pipe->csi2);
+
+#ifdef IPU_TPG_FRAME_SYNC
+		if (pipe->tpg)
+			ipu_isys_tpg_sof_event(pipe->tpg);
+#endif
+		pipe->seq[pipe->seq_index].sequence =
+		    atomic_read(&pipe->sequence) - 1;
+		pipe->seq[pipe->seq_index].timestamp = ts;
+		dev_dbg(&adev->dev,
+			"sof: handle %d: (index %u), timestamp 0x%16.16llx\n",
+			resp->stream_handle,
+			pipe->seq[pipe->seq_index].sequence, ts);
+		pipe->seq_index = (pipe->seq_index + 1)
+		    % IPU_ISYS_MAX_PARALLEL_SOF;
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_FRAME_EOF:
+		if (pipe->csi2)
+			ipu_isys_csi2_eof_event(pipe->csi2);
+
+#ifdef IPU_TPG_FRAME_SYNC
+		if (pipe->tpg)
+			ipu_isys_tpg_eof_event(pipe->tpg);
+#endif
+
+		dev_dbg(&adev->dev,
+			"eof: handle %d: (index %u), timestamp 0x%16.16llx\n",
+			resp->stream_handle,
+			pipe->seq[pipe->seq_index].sequence, ts);
+		break;
+	case IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY:
+		break;
+	default:
+		dev_err(&adev->dev, "%d:unknown response type %u\n",
+			resp->stream_handle, resp->type);
+		break;
+	}
+
+leave:
+	ipu_fw_isys_put_resp(isys->fwcom, IPU_BASE_MSG_RECV_QUEUES);
+	return 0;
+}
+
+static void isys_isr_poll(struct ipu_bus_device *adev)
+{
+	struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+
+	if (!isys->fwcom) {
+		dev_dbg(&isys->adev->dev,
+			"got interrupt but device not configured yet\n");
+		return;
+	}
+
+	mutex_lock(&isys->mutex);
+	isys_isr(adev);
+	mutex_unlock(&isys->mutex);
+}
+
+int ipu_isys_isr_run(void *ptr)
+{
+	struct ipu_isys *isys = ptr;
+
+	while (!kthread_should_stop()) {
+		usleep_range(500, 1000);
+		if (isys->stream_opened)
+			isys_isr_poll(isys->adev);
+	}
+
+	return 0;
+}
+
+static struct ipu_bus_driver isys_driver = {
+	.probe = isys_probe,
+	.remove = isys_remove,
+	.isr = isys_isr,
+	.wanted = IPU_ISYS_NAME,
+	.drv = {
+		.name = IPU_ISYS_NAME,
+		.owner = THIS_MODULE,
+		.pm = ISYS_PM_OPS,
+	},
+};
+
+module_ipu_bus_driver(isys_driver);
+
+static const struct pci_device_id ipu_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)},
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, ipu_pci_tbl);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus at linux.intel.com>");
+MODULE_AUTHOR("Samu Onkalo <samu.onkalo at intel.com>");
+MODULE_AUTHOR("Jouni Högander <jouni.hogander at intel.com>");
+MODULE_AUTHOR("Jouni Ukkonen <jouni.ukkonen at intel.com>");
+MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng at intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu at intel.com>");
+MODULE_AUTHOR("Renwei Wu <renwei.wu at intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao at intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding at intel.com>");
+MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang at intel.com>");
+MODULE_AUTHOR("Leifu Zhao <leifu.zhao at intel.com>");
+MODULE_AUTHOR("Xia Wu <xia.wu at intel.com>");
+MODULE_AUTHOR("Kun Jiang <kun.jiang at intel.com>");
+MODULE_AUTHOR("Yu Xia <yu.y.xia at intel.com>");
+MODULE_AUTHOR("Jerry Hu <jerry.w.hu at intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu input system driver");
diff --git a/drivers/media/pci/intel/ipu-isys.h b/drivers/media/pci/intel/ipu-isys.h
new file mode 100644
index 000000000000..5d82b934b453
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-isys.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_H
+#define IPU_ISYS_H
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+
+#include <media/v4l2-device.h>
+#include <media/media-device.h>
+
+#include <uapi/linux/ipu-isys.h>
+
+#include "ipu.h"
+#include "ipu-isys-media.h"
+#include "ipu-isys-csi2.h"
+#include "ipu-isys-csi2-be.h"
+#include "ipu-isys-tpg.h"
+#include "ipu-isys-video.h"
+#include "ipu-pdata.h"
+#include "ipu-fw-isys.h"
+#include "ipu-platform-isys.h"
+
+#define IPU_ISYS_2600_MEM_LINE_ALIGN	64
+
+/* for TPG */
+#define IPU_ISYS_FREQ		533000000UL
+
+/*
+ * Current message queue configuration. These must be big enough
+ * so that they never get full. Queues are located in system memory.
+ */
+#define IPU_ISYS_SIZE_RECV_QUEUE 40
+#define IPU_ISYS_SIZE_SEND_QUEUE 40
+#define IPU_ISYS_SIZE_PROXY_RECV_QUEUE 5
+#define IPU_ISYS_SIZE_PROXY_SEND_QUEUE 5
+#define IPU_ISYS_NUM_RECV_QUEUE 1
+
+/*
+ * Device close takes some time from the last ack message to the actual
+ * stopping of the SP processor. As long as the SP processor runs, we can't
+ * proceed with the cleanup of resources.
+ */
+#define IPU_ISYS_OPEN_TIMEOUT_US		1000
+#define IPU_ISYS_OPEN_RETRY		1000
+#define IPU_ISYS_TURNOFF_DELAY_US		1000
+#define IPU_ISYS_TURNOFF_TIMEOUT		1000
+#define IPU_LIB_CALL_TIMEOUT_JIFFIES \
+	msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS)
+
+#define IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE	32
+#define IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE	32
+
+#define IPU_ISYS_MIN_WIDTH		1U
+#define IPU_ISYS_MIN_HEIGHT		1U
+#define IPU_ISYS_MAX_WIDTH		16384U
+#define IPU_ISYS_MAX_HEIGHT		16384U
+
+#define NR_OF_CSI2_BE_SOC_DEV 8
+
+/* the threshold granularity is 2KB on IPU6 */
+#define IPU6_SRAM_GRANULRITY_SHIFT	11
+#define IPU6_SRAM_GRANULRITY_SIZE	2048
+/* the threshold granularity is 1KB on IPU6SE */
+#define IPU6SE_SRAM_GRANULRITY_SHIFT	10
+#define IPU6SE_SRAM_GRANULRITY_SIZE	1024
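+
+/* In both cases *_SRAM_GRANULRITY_SIZE == 1 << *_SRAM_GRANULRITY_SHIFT. */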
+
+struct task_struct;
+
+struct ltr_did {
+	union {
+		u32 value;
+		struct {
+			u8 val0;
+			u8 val1;
+			u8 val2;
+			u8 val3;
+		} bits;
+	} lut_ltr;
+	union {
+		u32 value;
+		struct {
+			u8 th0;
+			u8 th1;
+			u8 th2;
+			u8 th3;
+		} bits;
+	} lut_fill_time;
+};
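+
+/*
+ * Illustrative layout note (assuming the little-endian targets this driver
+ * is built for): writing lut_ltr.value = 0x44332211 yields bits.val0 ==
+ * 0x11, bits.val1 == 0x22, bits.val2 == 0x33 and bits.val3 == 0x44.
+ */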
+
+struct isys_iwake_watermark {
+	bool iwake_enabled;
+	bool force_iwake_disable;
+	u32 iwake_threshold;
+	u64 isys_pixelbuffer_datarate;
+	struct ltr_did ltrdid;
+	struct mutex mutex; /* protect whole struct */
+	struct ipu_isys *isys;
+	struct list_head video_list;
+};
+
+struct ipu_isys_sensor_info {
+	unsigned int vc1_data_start;
+	unsigned int vc1_data_end;
+	unsigned int vc0_data_start;
+	unsigned int vc0_data_end;
+	unsigned int vc1_pdaf_start;
+	unsigned int vc1_pdaf_end;
+	unsigned int sensor_metadata;
+};
+
+/*
+ * struct ipu_isys
+ *
+ * @media_dev: Media device
+ * @v4l2_dev: V4L2 device
+ * @adev: ISYS bus device
+ * @power: Is ISYS powered on or not?
+ * @isr_csi2_bits: Which bits does the ISR handle?
+ * @power_lock: Serialise access to power (power state in general)
+ * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers
+ * @lock: serialise access to pipes
+ * @pipes: pipelines per stream ID
+ * @fwcom: fw communication layer private pointer
+ *         or optional external library private pointer
+ * @line_align: line alignment in memory
+ * @reset_needed: isys requires a d0i0 -> d0i3 transition
+ * @video_opened: total number of opened file handles on video nodes
+ * @mutex: serialise access to isys video open/release related operations
+ * @stream_mutex: serialise stream start and stop, queueing requests
+ * @lib_mutex: optional external library mutex
+ * @pdata: platform data pointer
+ * @csi2: CSI-2 receivers
+ * @tpg: test pattern generators
+ * @csi2_be: CSI-2 back-ends
+ * @fw: ISYS firmware binary (non-secure firmware)
+ * @fw_sgt: fw scatterlist
+ * @pkg_dir: host pointer to pkg_dir
+ * @pkg_dir_dma_addr: I/O virtual address for pkg_dir
+ * @pkg_dir_size: size of pkg_dir in bytes
+ * @short_packet_source: select short packet capture mode
+ */
+struct ipu_isys {
+	struct media_device media_dev;
+	struct v4l2_device v4l2_dev;
+	struct ipu_bus_device *adev;
+
+	int power;
+	spinlock_t power_lock;	/* Serialise access to power */
+	u32 isr_csi2_bits;
+	u32 csi2_rx_ctrl_cached;
+	spinlock_t lock;	/* Serialise access to pipes */
+	struct ipu_isys_pipeline *pipes[IPU_ISYS_MAX_STREAMS];
+	void *fwcom;
+	unsigned int line_align;
+	bool reset_needed;
+	bool icache_prefetch;
+	bool csi2_cse_ipc_not_supported;
+	unsigned int video_opened;
+	unsigned int stream_opened;
+	struct ipu_isys_sensor_info sensor_info;
+	unsigned int sensor_types[N_IPU_FW_ISYS_SENSOR_TYPE];
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfsdir;
+#endif
+	struct mutex mutex;	/* Serialise isys video open/release related */
+	struct mutex stream_mutex;	/* Stream start, stop, queueing reqs */
+	struct mutex lib_mutex;	/* Serialise optional external library mutex */
+
+	struct ipu_isys_pdata *pdata;
+
+	struct ipu_isys_csi2 *csi2;
+	struct ipu_isys_tpg *tpg;
+	struct ipu_isys_csi2_be csi2_be;
+	struct ipu_isys_csi2_be_soc csi2_be_soc[NR_OF_CSI2_BE_SOC_DEV];
+	const struct firmware *fw;
+	struct sg_table fw_sgt;
+
+	u64 *pkg_dir;
+	dma_addr_t pkg_dir_dma_addr;
+	unsigned int pkg_dir_size;
+
+	struct list_head requests;
+	struct pm_qos_request pm_qos;
+	unsigned int short_packet_source;
+	struct ipu_isys_csi2_monitor_message *short_packet_trace_buffer;
+	dma_addr_t short_packet_trace_buffer_dma_addr;
+	unsigned int short_packet_tracing_count;
+	struct mutex short_packet_tracing_mutex;	/* For tracing count */
+	u64 tsc_timer_base;
+	u64 tunit_timer_base;
+	spinlock_t listlock;	/* Protect framebuflist */
+	struct list_head framebuflist;
+	struct list_head framebuflist_fw;
+	struct v4l2_async_notifier notifier;
+	struct isys_iwake_watermark *iwake_watermark;
+};
+
+void update_watermark_setting(struct ipu_isys *isys);
+
+struct isys_fw_msgs {
+	union {
+		u64 dummy;
+		struct ipu_fw_isys_frame_buff_set_abi frame;
+		struct ipu_fw_isys_stream_cfg_data_abi stream;
+	} fw_msg;
+	struct list_head head;
+	dma_addr_t dma_addr;
+};
+
+#define to_frame_msg_buf(a) (&(a)->fw_msg.frame)
+#define to_stream_cfg_msg_buf(a) (&(a)->fw_msg.stream)
+#define to_dma_addr(a) ((a)->dma_addr)
+
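+/*
+ * Usage sketch for the message buffer helpers (fill_frame() and
+ * send_to_fw() are illustrative placeholders, not functions of this
+ * driver):
+ *
+ *	struct isys_fw_msgs *msg = ipu_get_fw_msg_buf(ip);
+ *
+ *	if (!msg)
+ *		return -ENOMEM;
+ *	fill_frame(to_frame_msg_buf(msg));
+ *	send_to_fw(to_dma_addr(msg));
+ */
+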
+struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip);
+void ipu_put_fw_mgs_buf(struct ipu_isys *isys, u64 data);
+void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys);
+
+extern const struct v4l2_ioctl_ops ipu_isys_ioctl_ops;
+
+void isys_setup_hw(struct ipu_isys *isys);
+int isys_isr_one(struct ipu_bus_device *adev);
+int ipu_isys_isr_run(void *ptr);
+irqreturn_t isys_isr(struct ipu_bus_device *adev);
+#ifdef IPU_ISYS_GPC
+int ipu_isys_gpc_init_debugfs(struct ipu_isys *isys);
+#endif
+
+#endif /* IPU_ISYS_H */
diff --git a/drivers/media/pci/intel/ipu-mmu.c b/drivers/media/pci/intel/ipu-mmu.c
new file mode 100644
index 000000000000..f570d5e08eef
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-mmu.c
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <asm/cacheflush.h>
+
+#include <linux/device.h>
+#include <linux/iova.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+
+#include "ipu.h"
+#include "ipu-platform.h"
+#include "ipu-dma.h"
+#include "ipu-mmu.h"
+#include "ipu-platform-regs.h"
+
+#define ISP_PAGE_SHIFT		12
+#define ISP_PAGE_SIZE		BIT(ISP_PAGE_SHIFT)
+#define ISP_PAGE_MASK		(~(ISP_PAGE_SIZE - 1))
+
+#define ISP_L1PT_SHIFT		22
+#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_SHIFT) - 1))
+
+#define ISP_L2PT_SHIFT		12
+#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))
+
+#define ISP_L1PT_PTES           1024
+#define ISP_L2PT_PTES           1024
+
+#define ISP_PADDR_SHIFT		12
+
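+/*
+ * Index arithmetic implied by the masks above (illustrative): the ISP uses
+ * a two-level page table over a 32-bit IOVA: 10 bits of L1 index, 10 bits
+ * of L2 index and a 12-bit page offset. For iova 0x12345678:
+ *
+ *	l1_idx = iova >> ISP_L1PT_SHIFT                   = 0x48
+ *	l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT = 0x345
+ *	offset = iova & ~ISP_PAGE_MASK                    = 0x678
+ */
+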
+#define REG_TLB_INVALIDATE	0x0000
+
+#define REG_L1_PHYS		0x0004	/* 27-bit pfn */
+#define REG_INFO		0x0008
+
+/* The range of stream ID i in L1 cache is from 0 to 15 */
+#define MMUV2_REG_L1_STREAMID(i)	(0x0c + ((i) * 4))
+
+/* The range of stream ID i in L2 cache is from 0 to 15 */
+#define MMUV2_REG_L2_STREAMID(i)	(0x4c + ((i) * 4))
+
+/* ZLW Enable for each stream in L1 MMU AT where i : 0..15 */
+#define MMUV2_AT_REG_L1_ZLW_EN_SID(i)		(0x100 + ((i) * 0x20))
+
+/* ZLW 1D mode Enable for each stream in L1 MMU AT where i : 0..15 */
+#define MMUV2_AT_REG_L1_ZLW_1DMODE_SID(i)	(0x100 + ((i) * 0x20) + 0x0004)
+
+/* Set ZLW insertion N pages ahead per stream 1D where i : 0..15 */
+#define MMUV2_AT_REG_L1_ZLW_INS_N_AHEAD_SID(i)	(0x100 + ((i) * 0x20) + 0x0008)
+
+/* ZLW 2D mode Enable for each stream in L1 MMU AT where i : 0..15 */
+#define MMUV2_AT_REG_L1_ZLW_2DMODE_SID(i)	(0x100 + ((i) * 0x20) + 0x0010)
+
+/* ZLW Insertion for each stream in L1 MMU AT where i : 0..15 */
+#define MMUV2_AT_REG_L1_ZLW_INSERTION(i)	(0x100 + ((i) * 0x20) + 0x000c)
+
+#define MMUV2_AT_REG_L1_FW_ZLW_FIFO		(0x100 + \
+			(IPU_MMU_MAX_TLB_L1_STREAMS * 0x20) + 0x003c)
+
+/* FW ZLW has priority - needed for ZLW invalidations */
+#define MMUV2_AT_REG_L1_FW_ZLW_PRIO		(0x100 + \
+			(IPU_MMU_MAX_TLB_L1_STREAMS * 0x20))
+
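+/*
+ * Worked example (illustrative): each L1 stream owns a 0x20-byte register
+ * window starting at 0x100, so MMUV2_AT_REG_L1_ZLW_INSERTION(3) resolves
+ * to 0x100 + 3 * 0x20 + 0xc = 0x16c.
+ */
+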
+#define TBL_PHYS_ADDR(a)	((phys_addr_t)(a) << ISP_PADDR_SHIFT)
+#define TBL_VIRT_ADDR(a)	phys_to_virt(TBL_PHYS_ADDR(a))
+
+static void zlw_invalidate(struct ipu_mmu *mmu, struct ipu_mmu_hw *mmu_hw)
+{
+	unsigned int retry = 0;
+	unsigned int i, j;
+	int ret;
+
+	for (i = 0; i < mmu_hw->nr_l1streams; i++) {
+		/* We need to invalidate only the zlw enabled stream IDs */
+		if (mmu_hw->l1_zlw_en[i]) {
+			/*
+			 * Maximum 16 blocks per L1 stream
+			 * Write trash buffer iova offset to the FW_ZLW
+			 * register. This will trigger pre-fetching of next 16
+			 * pages from the page table. So we need to increment
+			 * iova address by 16 * 4K to trigger the next 16 pages.
+			 * Once this loop is completed, the L1 cache will be
+			 * filled with trash buffer translation.
+			 *
+			 * TODO: Instead of maximum 16 blocks, use the allocated
+			 * block size
+			 */
+			for (j = 0; j < mmu_hw->l1_block_sz[i]; j++)
+				writel(mmu->iova_addr_trash +
+					   j * MMUV2_TRASH_L1_BLOCK_OFFSET,
+					   mmu_hw->base +
+					   MMUV2_AT_REG_L1_ZLW_INSERTION(i));
+
+			/*
+			 * Now we need to fill the L2 cache entry. L2 cache
+			 * entries will be automatically updated, based on the
+			 * L1 entry. The above loop for L1 will update only one
+			 * of the two entries in L2 as the L1 is under 4MB
+			 * range. To force the other entry in L2 to update, we
+			 * just need to trigger another pre-fetch which is
+			 * outside the above 4MB range.
+			 */
+			writel(mmu->iova_addr_trash +
+				   MMUV2_TRASH_L2_BLOCK_OFFSET,
+				   mmu_hw->base +
+				   MMUV2_AT_REG_L1_ZLW_INSERTION(0));
+		}
+	}
+
+	/*
+	 * Wait until AT is ready. FIFO read should return 2 when AT is ready.
+	 * Retry value of 1000 is just guesswork to avoid looping forever.
+	 */
+	do {
+		if (retry > 1000) {
+			dev_err(mmu->dev, "zlw invalidation failed\n");
+			return;
+		}
+		ret = readl(mmu_hw->base + MMUV2_AT_REG_L1_FW_ZLW_FIFO);
+		retry++;
+	} while (ret != 2);
+}
+
+static void tlb_invalidate(struct ipu_mmu *mmu)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mmu->ready_lock, flags);
+	if (!mmu->ready) {
+		spin_unlock_irqrestore(&mmu->ready_lock, flags);
+		return;
+	}
+
+	for (i = 0; i < mmu->nr_mmus; i++) {
+		/*
+		 * To avoid the HW bug induced deadlock in some of the IPU4
+		 * MMUs on successive invalidate calls, we need to first do a
+		 * read to the page table base before writing the invalidate
+		 * register. MMUs which need to implement this WA will have
+		 * the insert_read_before_invalidate flag set to true.
+		 * Disregard the return value of the read.
+		 */
+		if (mmu->mmu_hw[i].insert_read_before_invalidate)
+			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+		/* Normal invalidate or zlw invalidate */
+		if (mmu->mmu_hw[i].zlw_invalidate) {
+			/* trash buffer must be mapped by now, just in case! */
+			WARN_ON(!mmu->iova_addr_trash);
+
+			zlw_invalidate(mmu, &mmu->mmu_hw[i]);
+		} else {
+			writel(0xffffffff, mmu->mmu_hw[i].base +
+				   REG_TLB_INVALIDATE);
+		}
+	}
+	spin_unlock_irqrestore(&mmu->ready_lock, flags);
+}
+
+#ifdef DEBUG
+static void page_table_dump(struct ipu_mmu_info *mmu_info)
+{
+	u32 l1_idx;
+
+	pr_debug("begin IOMMU page table dump\n");
+
+	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+		u32 l2_idx;
+		u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
+
+		if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl)
+			continue;
+		pr_debug("l1 entry %u; iovas 0x%8.8x--0x%8.8x, at %p\n",
+			 l1_idx, iova, iova + ISP_PAGE_SIZE,
+			 (void *)TBL_PHYS_ADDR(mmu_info->pgtbl[l1_idx]));
+
+		for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
+			u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
+			u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);
+
+			if (l2_pt[l2_idx] == mmu_info->dummy_page)
+				continue;
+
+			pr_debug("\tl2 entry %u; iova 0x%8.8x, phys %p\n",
+				 l2_idx, iova2,
+				 (void *)TBL_PHYS_ADDR(l2_pt[l2_idx]));
+		}
+	}
+
+	pr_debug("end IOMMU page table dump\n");
+}
+#endif /* DEBUG */
+
+static u32 *alloc_page_table(struct ipu_mmu_info *mmu_info, bool l1)
+{
+	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+	int i;
+
+	if (!pt)
+		return NULL;
+
+	pr_debug("get_zeroed_page() == %p\n", pt);
+
+	for (i = 0; i < ISP_L1PT_PTES; i++)
+		pt[i] = l1 ? mmu_info->dummy_l2_tbl : mmu_info->dummy_page;
+
+	return pt;
+}
+
+static int l2_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
+		  phys_addr_t paddr, size_t size)
+{
+	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+	u32 l1_entry = mmu_info->pgtbl[l1_idx];
+	u32 *l2_pt;
+	u32 iova_start = iova;
+	unsigned int l2_idx;
+	unsigned long flags;
+
+	pr_debug("mapping l2 page table for l1 index %u (iova %8.8x)\n",
+		 l1_idx, (u32)iova);
+
+	spin_lock_irqsave(&mmu_info->lock, flags);
+	if (l1_entry == mmu_info->dummy_l2_tbl) {
+		u32 *l2_virt = alloc_page_table(mmu_info, false);
+
+		if (!l2_virt) {
+			spin_unlock_irqrestore(&mmu_info->lock, flags);
+			return -ENOMEM;
+		}
+
+		l1_entry = virt_to_phys(l2_virt) >> ISP_PADDR_SHIFT;
+		pr_debug("allocated page for l1_idx %u\n", l1_idx);
+
+		if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl) {
+			mmu_info->pgtbl[l1_idx] = l1_entry;
+#ifdef CONFIG_X86
+			clflush_cache_range(&mmu_info->pgtbl[l1_idx],
+					    sizeof(mmu_info->pgtbl[l1_idx]));
+#endif /* CONFIG_X86 */
+		} else {
+			free_page((unsigned long)TBL_VIRT_ADDR(l1_entry));
+		}
+	}
+
+	l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
+
+	pr_debug("l2_pt at %p\n", l2_pt);
+
+	paddr = ALIGN(paddr, ISP_PAGE_SIZE);
+
+	l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+
+	pr_debug("l2_idx %u, phys 0x%8.8x\n", l2_idx, l2_pt[l2_idx]);
+	if (l2_pt[l2_idx] != mmu_info->dummy_page) {
+		spin_unlock_irqrestore(&mmu_info->lock, flags);
+		return -EBUSY;
+	}
+
+	l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;
+
+#ifdef CONFIG_X86
+	clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
+#endif /* CONFIG_X86 */
+	spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+	pr_debug("l2 index %u mapped as 0x%8.8x\n", l2_idx, l2_pt[l2_idx]);
+
+	return 0;
+}
+
+static int __ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
+			 phys_addr_t paddr, size_t size)
+{
+	u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
+	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);
+
+	pr_debug("mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
+		 iova_start, iova_end, size, paddr);
+
+	return l2_map(mmu_info, iova_start, paddr, size);
+}
+
+static size_t l2_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
+		       phys_addr_t dummy, size_t size)
+{
+	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+	u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
+	u32 iova_start = iova;
+	unsigned int l2_idx;
+	size_t unmapped = 0;
+
+	pr_debug("unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
+		 l1_idx, iova);
+
+	if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl)
+		return -EINVAL;
+
+	pr_debug("l2_pt at %p\n", l2_pt);
+
+	for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+	     (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
+	     < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+		unsigned long flags;
+
+		pr_debug("l2 index %u unmapped, was 0x%10.10llx\n",
+			 l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
+		spin_lock_irqsave(&mmu_info->lock, flags);
+		l2_pt[l2_idx] = mmu_info->dummy_page;
+		spin_unlock_irqrestore(&mmu_info->lock, flags);
+#ifdef CONFIG_X86
+		clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
+#endif /* CONFIG_X86 */
+		unmapped++;
+	}
+
+	return unmapped << ISP_PAGE_SHIFT;
+}
+
+static size_t __ipu_mmu_unmap(struct ipu_mmu_info *mmu_info,
+			      unsigned long iova, size_t size)
+{
+	return l2_unmap(mmu_info, iova, 0, size);
+}
+
+static int allocate_trash_buffer(struct ipu_mmu *mmu)
+{
+	unsigned int n_pages = PAGE_ALIGN(IPU_MMUV2_TRASH_RANGE) >> PAGE_SHIFT;
+	struct iova *iova;
+	u32 iova_addr;
+	unsigned int i;
+	int ret;
+
+	/* Allocate 8MB in iova range */
+	iova = alloc_iova(&mmu->dmap->iovad, n_pages,
+			  mmu->dmap->mmu_info->aperture_end >> PAGE_SHIFT, 0);
+	if (!iova) {
+		dev_err(mmu->dev, "cannot allocate iova range for trash\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Map the 8MB iova address range to the same physical trash page
+	 * mmu->trash_page, which was already reserved at probe time.
+	 */
+	iova_addr = iova->pfn_lo;
+	for (i = 0; i < n_pages; i++) {
+		ret = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
+				  page_to_phys(mmu->trash_page), PAGE_SIZE);
+		if (ret) {
+			dev_err(mmu->dev,
+				"mapping trash buffer range failed\n");
+			goto out_unmap;
+		}
+
+		iova_addr++;
+	}
+
+	/* save the address for the ZLW invalidation */
+	mmu->iova_addr_trash = iova->pfn_lo << PAGE_SHIFT;
+	dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
+		mmu->mmid, (unsigned int)mmu->iova_addr_trash);
+	return 0;
+
+out_unmap:
+	ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
+		      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+	__free_iova(&mmu->dmap->iovad, iova);
+	return ret;
+}
+
+int ipu_mmu_hw_init(struct ipu_mmu *mmu)
+{
+	unsigned int i;
+	unsigned long flags;
+	struct ipu_mmu_info *mmu_info;
+
+	dev_dbg(mmu->dev, "mmu hw init\n");
+
+	mmu_info = mmu->dmap->mmu_info;
+
+	/* Initialise each MMU HW block */
+	for (i = 0; i < mmu->nr_mmus; i++) {
+		struct ipu_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
+		unsigned int j;
+		u16 block_addr;
+
+		/* Write page table address per MMU */
+		writel((phys_addr_t)virt_to_phys(mmu_info->pgtbl)
+			   >> ISP_PADDR_SHIFT,
+			   mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+		/* Set info bits per MMU */
+		writel(mmu->mmu_hw[i].info_bits,
+		       mmu->mmu_hw[i].base + REG_INFO);
+
+		/* Configure MMU TLB stream configuration for L1 */
+		for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
+		     block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
+			if (block_addr > IPU_MAX_LI_BLOCK_ADDR) {
+				dev_err(mmu->dev, "invalid L1 configuration\n");
+				return -EINVAL;
+			}
+
+			/* Write the block start address for each stream */
+			writel(block_addr, mmu_hw->base +
+				   mmu_hw->l1_stream_id_reg_offset + 4 * j);
+		}
+
+		/* Configure MMU TLB stream configuration for L2 */
+		for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
+		     block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
+			if (block_addr > IPU_MAX_L2_BLOCK_ADDR) {
+				dev_err(mmu->dev, "invalid L2 configuration\n");
+				return -EINVAL;
+			}
+
+			writel(block_addr, mmu_hw->base +
+				   mmu_hw->l2_stream_id_reg_offset + 4 * j);
+		}
+	}
+
+	/*
+	 * Allocate 1 page of physical memory for the trash buffer.
+	 */
+	if (!mmu->trash_page) {
+		mmu->trash_page = alloc_page(GFP_KERNEL);
+		if (!mmu->trash_page) {
+			dev_err(mmu->dev, "insufficient memory for trash buffer\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* Allocate trash buffer, if not allocated. Only once per MMU */
+	if (!mmu->iova_addr_trash) {
+		int ret;
+
+		ret = allocate_trash_buffer(mmu);
+		if (ret) {
+			__free_page(mmu->trash_page);
+			mmu->trash_page = NULL;
+			dev_err(mmu->dev, "trash buffer allocation failed\n");
+			return ret;
+		}
+	}
+
+	spin_lock_irqsave(&mmu->ready_lock, flags);
+	mmu->ready = true;
+	spin_unlock_irqrestore(&mmu->ready_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipu_mmu_hw_init);
+
+struct ipu_mmu_info *ipu_mmu_alloc(struct ipu_device *isp)
+{
+	struct ipu_mmu_info *mmu_info;
+	void *ptr;
+
+	mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
+	if (!mmu_info)
+		return NULL;
+
+	mmu_info->aperture_start = 0;
+	mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
+				      IPU_MMU_ADDRESS_BITS :
+				      IPU_MMU_ADDRESS_BITS_NON_SECURE);
+	mmu_info->pgsize_bitmap = SZ_4K;
+
+	ptr = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+	if (!ptr)
+		goto err_mem;
+
+	mmu_info->dummy_page = virt_to_phys(ptr) >> ISP_PAGE_SHIFT;
+
+	ptr = alloc_page_table(mmu_info, false);
+	if (!ptr)
+		goto err_free_dummy_page;
+
+	mmu_info->dummy_l2_tbl = virt_to_phys(ptr) >> ISP_PAGE_SHIFT;
+
+	/*
+	 * We always map the L1 page table (a single page), as well as
+	 * the L2 page tables.
+	 */
+	mmu_info->pgtbl = alloc_page_table(mmu_info, true);
+	if (!mmu_info->pgtbl)
+		goto err_free_dummy_l2;
+
+	spin_lock_init(&mmu_info->lock);
+
+	pr_debug("domain initialised\n");
+
+	return mmu_info;
+
+err_free_dummy_l2:
+	free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_l2_tbl));
+err_free_dummy_page:
+	free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_page));
+err_mem:
+	kfree(mmu_info);
+
+	return NULL;
+}
+
+int ipu_mmu_hw_cleanup(struct ipu_mmu *mmu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mmu->ready_lock, flags);
+	mmu->ready = false;
+	spin_unlock_irqrestore(&mmu->ready_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipu_mmu_hw_cleanup);
+
+static struct ipu_dma_mapping *alloc_dma_mapping(struct ipu_device *isp)
+{
+	struct ipu_dma_mapping *dmap;
+
+	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
+	if (!dmap)
+		return NULL;
+
+	dmap->mmu_info = ipu_mmu_alloc(isp);
+	if (!dmap->mmu_info) {
+		kfree(dmap);
+		return NULL;
+	}
+	init_iova_domain(&dmap->iovad, SZ_4K, 1);
+	dmap->mmu_info->dmap = dmap;
+
+	kref_init(&dmap->ref);
+
+	pr_debug("alloc mapping\n");
+
+	iova_cache_get();
+
+	return dmap;
+}
+
+phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info,
+				 dma_addr_t iova)
+{
+	u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[iova >> ISP_L1PT_SHIFT]);
+
+	return (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT]
+	    << ISP_PAGE_SHIFT;
+}
+
+/*
+ * The following functions are implemented based on
+ * drivers/iommu/iommu.c, starting from iommu_pgsize().
+ */
+static size_t ipu_mmu_pgsize(unsigned long pgsize_bitmap,
+			     unsigned long addr_merge, size_t size)
+{
+	unsigned int pgsize_idx;
+	size_t pgsize;
+
+	/* Max page size that still fits into 'size' */
+	pgsize_idx = __fls(size);
+
+	/* need to consider alignment requirements ? */
+	if (likely(addr_merge)) {
+		/* Max page size allowed by address */
+		unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+	}
+
+	/* build a mask of acceptable page sizes */
+	pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+	/* throw away page sizes not supported by the hardware */
+	pgsize &= pgsize_bitmap;
+
+	/* make sure we're still sane */
+	WARN_ON(!pgsize);
+
+	/* pick the biggest page */
+	pgsize_idx = __fls(pgsize);
+	pgsize = 1UL << pgsize_idx;
+
+	return pgsize;
+}
+
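+/*
+ * Note: ipu_mmu_alloc() sets pgsize_bitmap to SZ_4K, so only bit 12
+ * survives the bitmap mask and ipu_mmu_pgsize() effectively always
+ * returns a 4 KiB page size for suitably aligned requests.
+ */
+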
+/* drivers/iommu/iommu.c/iommu_unmap() */
+size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
+		     size_t size)
+{
+	size_t unmapped_page, unmapped = 0;
+	unsigned int min_pagesz;
+
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+	/*
+	 * The virtual address, as well as the size of the mapping, must be
+	 * aligned (at least) to the size of the smallest page supported
+	 * by the hardware
+	 */
+	if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+		       iova, size, min_pagesz);
+		return -EINVAL;
+	}
+
+	/*
+	 * Keep iterating until we either unmap 'size' bytes (or more)
+	 * or we hit an area that isn't mapped.
+	 */
+	while (unmapped < size) {
+		size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
+						iova, size - unmapped);
+
+		unmapped_page = __ipu_mmu_unmap(mmu_info, iova, pgsize);
+		if (!unmapped_page)
+			break;
+
+		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
+			 iova, unmapped_page);
+
+		iova += unmapped_page;
+		unmapped += unmapped_page;
+	}
+
+	return unmapped;
+}
+
+/* drivers/iommu/iommu.c/iommu_map() */
+int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
+		phys_addr_t paddr, size_t size)
+{
+	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
+	size_t orig_size = size;
+	int ret = 0;
+
+	if (mmu_info->pgsize_bitmap == 0UL)
+		return -ENODEV;
+
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+	/*
+	 * both the virtual address and the physical one, as well as
+	 * the size of the mapping, must be aligned (at least) to the
+	 * size of the smallest page supported by the hardware
+	 */
+	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+		       iova, &paddr, size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
+
+	while (size) {
+		size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
+					       iova | paddr, size);
+
+		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+			 iova, &paddr, pgsize);
+
+		ret = __ipu_mmu_map(mmu_info, iova, paddr, pgsize);
+		if (ret)
+			break;
+
+		iova += pgsize;
+		paddr += pgsize;
+		size -= pgsize;
+	}
+
+	/* unroll mapping in case something went wrong */
+	if (ret)
+		ipu_mmu_unmap(mmu_info, orig_iova, orig_size - size);
+
+	return ret;
+}
+
+static void ipu_mmu_destroy(struct ipu_mmu *mmu)
+{
+	struct ipu_dma_mapping *dmap = mmu->dmap;
+	struct ipu_mmu_info *mmu_info = dmap->mmu_info;
+	struct iova *iova;
+	u32 l1_idx;
+
+	if (mmu->iova_addr_trash) {
+		iova = find_iova(&dmap->iovad,
+				 mmu->iova_addr_trash >> PAGE_SHIFT);
+		if (iova) {
+			/* unmap and free the trash buffer iova */
+			ipu_mmu_unmap(mmu_info, iova->pfn_lo << PAGE_SHIFT,
+				      (iova->pfn_hi - iova->pfn_lo + 1) <<
+				      PAGE_SHIFT);
+			__free_iova(&dmap->iovad, iova);
+		} else {
+			dev_err(mmu->dev, "trash buffer iova not found.\n");
+		}
+
+		mmu->iova_addr_trash = 0;
+	}
+
+	if (mmu->trash_page)
+		__free_page(mmu->trash_page);
+
+	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++)
+		if (mmu_info->pgtbl[l1_idx] != mmu_info->dummy_l2_tbl)
+			free_page((unsigned long)
+				  TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]));
+
+	free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_page));
+	free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_l2_tbl));
+	free_page((unsigned long)mmu_info->pgtbl);
+	kfree(mmu_info);
+}
+
+struct ipu_mmu *ipu_mmu_init(struct device *dev,
+			     void __iomem *base, int mmid,
+			     const struct ipu_hw_variants *hw)
+{
+	struct ipu_mmu *mmu;
+	struct ipu_mmu_pdata *pdata;
+	struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
+	unsigned int i;
+
+	if (hw->nr_mmus > IPU_MMU_MAX_DEVICES)
+		return ERR_PTR(-EINVAL);
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < hw->nr_mmus; i++) {
+		struct ipu_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
+		const struct ipu_mmu_hw *src_mmu = &hw->mmu_hw[i];
+
+		if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS ||
+		    src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS)
+			return ERR_PTR(-EINVAL);
+
+		*pdata_mmu = *src_mmu;
+		pdata_mmu->base = base + src_mmu->offset;
+	}
+
+	mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
+	if (!mmu)
+		return ERR_PTR(-ENOMEM);
+
+	mmu->mmid = mmid;
+	mmu->mmu_hw = pdata->mmu_hw;
+	mmu->nr_mmus = hw->nr_mmus;
+	mmu->tlb_invalidate = tlb_invalidate;
+	mmu->ready = false;
+	INIT_LIST_HEAD(&mmu->vma_list);
+	spin_lock_init(&mmu->ready_lock);
+
+	mmu->dmap = alloc_dma_mapping(isp);
+	if (!mmu->dmap) {
+		dev_err(dev, "can't alloc dma mapping\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return mmu;
+}
+EXPORT_SYMBOL(ipu_mmu_init);
+
+void ipu_mmu_cleanup(struct ipu_mmu *mmu)
+{
+	struct ipu_dma_mapping *dmap = mmu->dmap;
+
+	ipu_mmu_destroy(mmu);
+	mmu->dmap = NULL;
+	iova_cache_put();
+	put_iova_domain(&dmap->iovad);
+	kfree(dmap);
+}
+EXPORT_SYMBOL(ipu_mmu_cleanup);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus at linux.intel.com>");
+MODULE_AUTHOR("Samu Onkalo <samu.onkalo at intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu mmu driver");
diff --git a/drivers/media/pci/intel/ipu-mmu.h b/drivers/media/pci/intel/ipu-mmu.h
new file mode 100644
index 000000000000..d810024d37fe
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-mmu.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_MMU_H
+#define IPU_MMU_H
+
+#include <linux/dma-mapping.h>
+
+#include "ipu.h"
+#include "ipu-pdata.h"
+
+#define ISYS_MMID 1
+#define PSYS_MMID 0
+
+/*
+ * @pgtbl: virtual address of the l1 page table (one page)
+ */
+struct ipu_mmu_info {
+	u32 __iomem *pgtbl;
+	dma_addr_t aperture_start;
+	dma_addr_t aperture_end;
+	unsigned long pgsize_bitmap;
+
+	spinlock_t lock;	/* Serialize access to users */
+	struct ipu_dma_mapping *dmap;
+	u32 dummy_l2_tbl;
+	u32 dummy_page;
+};
+
+/*
+ * @pgtbl: physical address of the l1 page table
+ */
+struct ipu_mmu {
+	struct list_head node;
+
+	struct ipu_mmu_hw *mmu_hw;
+	unsigned int nr_mmus;
+	int mmid;
+
+	phys_addr_t pgtbl;
+	struct device *dev;
+
+	struct ipu_dma_mapping *dmap;
+	struct list_head vma_list;
+
+	struct page *trash_page;
+	dma_addr_t iova_addr_trash;
+
+	bool ready;
+	spinlock_t ready_lock;	/* Serialize access to bool ready */
+
+	void (*tlb_invalidate)(struct ipu_mmu *mmu);
+};
+
+struct ipu_mmu *ipu_mmu_init(struct device *dev,
+			     void __iomem *base, int mmid,
+			     const struct ipu_hw_variants *hw);
+void ipu_mmu_cleanup(struct ipu_mmu *mmu);
+int ipu_mmu_hw_init(struct ipu_mmu *mmu);
+int ipu_mmu_hw_cleanup(struct ipu_mmu *mmu);
+int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
+		phys_addr_t paddr, size_t size);
+size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
+		     size_t size);
+phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info,
+				 dma_addr_t iova);
+#endif
diff --git a/drivers/media/pci/intel/ipu-pdata.h b/drivers/media/pci/intel/ipu-pdata.h
new file mode 100644
index 000000000000..575899746323
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-pdata.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_PDATA_H
+#define IPU_PDATA_H
+
+#define IPU_MMU_NAME IPU_NAME "-mmu"
+#define IPU_ISYS_CSI2_NAME IPU_NAME "-csi2"
+#define IPU_ISYS_NAME IPU_NAME "-isys"
+#define IPU_PSYS_NAME IPU_NAME "-psys"
+#define IPU_BUTTRESS_NAME IPU_NAME "-buttress"
+
+#define IPU_MMU_MAX_DEVICES		4
+#define IPU_MMU_ADDRESS_BITS		32
+/* The firmware is accessible within the first 2 GiB only in non-secure mode. */
+#define IPU_MMU_ADDRESS_BITS_NON_SECURE	31
+
+#define IPU_MMU_MAX_TLB_L1_STREAMS	32
+#define IPU_MMU_MAX_TLB_L2_STREAMS	32
+#define IPU_MAX_LI_BLOCK_ADDR		128
+#define IPU_MAX_L2_BLOCK_ADDR		64
+
+#define IPU_ISYS_MAX_CSI2_LEGACY_PORTS	4
+#define IPU_ISYS_MAX_CSI2_COMBO_PORTS	2
+
+#define IPU_MAX_FRAME_COUNTER	0xff
+
+/*
+ * To maximize IOSF utilization, the IPU needs to send requests in bursts.
+ * At the DMA interface with the buttress, there are CDC FIFOs with burst
+ * collection capability. CDC FIFO burst collectors have a configurable
+ * threshold that is set based on the outcome of performance measurements.
+ *
+ * isys has 3 ports with IOSF interface for VC0, VC1 and VC2
+ * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2
+ *
+ * Threshold values are pre-defined and are arrived at after performance
+ * evaluations on a type of IPU4
+ */
+#define IPU_MAX_VC_IOSF_PORTS		4
+
+/*
+ * The IPU must configure the correct arbitration mechanism for the IOSF VC
+ * requests. There are two options for each of VC0 and VC1: 0 means
+ * rearbitrate on stall and 1 means stall until the request is completed.
+ */
+#define IPU_BTRS_ARB_MODE_TYPE_REARB	0
+#define IPU_BTRS_ARB_MODE_TYPE_STALL	1
+
+/* Currently chosen arbitration mechanism for VC0 */
+#define IPU_BTRS_ARB_STALL_MODE_VC0	\
+			IPU_BTRS_ARB_MODE_TYPE_REARB
+
+/* Currently chosen arbitration mechanism for VC1 */
+#define IPU_BTRS_ARB_STALL_MODE_VC1	\
+			IPU_BTRS_ARB_MODE_TYPE_REARB
+
+struct ipu_isys_subdev_pdata;
+
+/*
+ * MMU Invalidation HW bug workaround by ZLW mechanism
+ *
+ * IPU4 MMUV2 has a bug in the invalidation mechanism which might result in
+ * wrong translation or replication of the translation. This will cause data
+ * corruption. So we cannot directly use the MMU V2 invalidation registers
+ * to invalidate the MMU. Instead, whenever an invalidate is called, we need to
+ * clear the TLB by evicting all the valid translations by filling it with trash
+ * buffer (which is guaranteed not to be used by any other processes). ZLW is
+ * used to fill the L1 and L2 caches with the trash buffer translations. ZLW,
+ * or zero-length write, is a pre-fetch mechanism that brings pages into the
+ * L1 and L2 caches in advance without triggering any memory operations.
+ *
+ * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream
+ * One L1 block has 16 entries, hence points to 16 * 4K pages
+ * L2 -> 16 streams and 32 blocks, 2 blocks per stream
+ * One L2 block maps to 1024 L1 entries, hence points to 4MB address range
+ * 2 blocks per L2 stream means 1 stream points to an 8MB range
+ *
+ * As we need to clear the caches and 8MB is the biggest cache size, we need
+ * a trash buffer which spans an 8MB address range. As the trash buffer is
+ * not used for any memory transactions, only a minimal amount of physical
+ * memory is needed. So we reserve an 8MB IOVA address range but back it
+ * with only one page of physical memory; every page of the 8MB IOVA range
+ * is then mapped to that same physical memory page.
+ */
+/* One L2 entry maps 1024 L1 entries and one L1 entry per page */
+#define IPU_MMUV2_L2_RANGE		(1024 * PAGE_SIZE)
+/* Max L2 blocks per stream */
+#define IPU_MMUV2_MAX_L2_BLOCKS		2
+/* Max L1 blocks per stream */
+#define IPU_MMUV2_MAX_L1_BLOCKS		16
+#define IPU_MMUV2_TRASH_RANGE		(IPU_MMUV2_L2_RANGE * \
+						 IPU_MMUV2_MAX_L2_BLOCKS)
+/* Entries per L1 block */
+#define MMUV2_ENTRIES_PER_L1_BLOCK		16
+#define MMUV2_TRASH_L1_BLOCK_OFFSET		(MMUV2_ENTRIES_PER_L1_BLOCK * \
+						 PAGE_SIZE)
+#define MMUV2_TRASH_L2_BLOCK_OFFSET		IPU_MMUV2_L2_RANGE
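+
+/*
+ * Worked example of the sizes above (assuming 4 KiB pages): one L2 block
+ * maps 1024 L1 entries, so IPU_MMUV2_L2_RANGE = 1024 * 4 KiB = 4 MiB, and
+ * with IPU_MMUV2_MAX_L2_BLOCKS = 2 the trash buffer must cover
+ * IPU_MMUV2_TRASH_RANGE = 2 * 4 MiB = 8 MiB of IOVA space, all of it
+ * backed by a single physical page.
+ */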
+
+/*
+ * In some of the IPU4 MMUs, there is provision to configure the L1 and L2
+ * page table caches. Both the L1 and L2 caches are divided into multiple
+ * sections called streams. There are at most 16 streams for each cache.
+ * Each of these sections is subdivided into multiple blocks. When
+ * nr_l1streams = 0 and nr_l2streams = 0, the MMU is of type MMU_V1 and
+ * does not support L1/L2 page table caches.
+ *
+ * The L1 per-stream block sizes are configurable and vary per use case.
+ * L2 has constant block sizes - 2 blocks per stream.
+ *
+ * MMU1 supports pre-fetching of the pages to reduce cache lookup misses. To
+ * enable the pre-fetching, the MMU1 AT (Address Translator) device registers
+ * need to be configured.
+ *
+ * There are four types of memory accesses which require ZLW configuration.
+ * ZLW (Zero Length Write) is a mechanism to enable VT-d pre-fetching on the
+ * IOMMU.
+ *
+ * 1. Sequential Access or 1D mode
+ *	Set ZLW_EN -> 1
+ *	Set ZLW_PAGE_CROSS_1D -> 1
+ *	Set ZLW_N to "N" pages so that the ZLW will be inserted N pages
+ *		  ahead, where N is pre-defined and hardcoded in the
+ *		  platform data
+ *	Set ZLW_2D -> 0
+ *
+ * 2. ZLW 2D mode
+ *	Set ZLW_EN -> 1
+ *	Set ZLW_PAGE_CROSS_1D -> 1
+ *	Set ZLW_N -> 0
+ *	Set ZLW_2D -> 1
+ *
+ * 3. ZLW Enable (no 1D or 2D mode)
+ *	Set ZLW_EN -> 1
+ *	Set ZLW_PAGE_CROSS_1D -> 0
+ *	Set ZLW_N -> 0
+ *	Set ZLW_2D -> 0
+ *
+ * 4. ZLW disable
+ *	Set ZLW_EN -> 0
+ *	Set ZLW_PAGE_CROSS_1D -> 0
+ *	Set ZLW_N -> 0
+ *	Set ZLW_2D -> 0
+ *
+ * To configure the ZLW for the above memory accesses, four registers are
+ * available. Hence, to track these four settings, we have the following
+ * entries in struct ipu_mmu_hw. Each of these entries is per stream and
+ * available only for the L1 streams.
+ *
+ * a. l1_zlw_en -> Tracks whether ZLW is enabled per stream (ZLW_EN)
+ * b. l1_zlw_1d_mode -> Tracks 1D mode per stream; the ZLW is inserted at
+ *			the page boundary
+ * c. l1_ins_zlw_ahead_pages -> Tracks how far in advance the ZLW needs to
+ *			be inserted: insert the ZLW request N pages ahead of
+ *			the address
+ * d. l1_zlw_2d_mode -> Tracks 2D mode per stream (ZLW_2D)
+ *
+ * Currently the L1/L2 streams, blocks, AT ZLW configurations etc. are
+ * pre-defined per the use-case-specific calculations. Any change to this
+ * pre-defined table has to happen in sync with the IPU4 FW.
+ */
+struct ipu_mmu_hw {
+	union {
+		unsigned long offset;
+		void __iomem *base;
+	};
+	unsigned int info_bits;
+	u8 nr_l1streams;
+	/*
+	 * L1 has a variable number of blocks per stream - 64 blocks in total
+	 * and a maximum of 16 blocks per stream. Configurable via the block
+	 * start address per stream; the block start address is calculated
+	 * from the block size.
+	 */
+	u8 l1_block_sz[IPU_MMU_MAX_TLB_L1_STREAMS];
+	/* Whether ZLW is enabled in each stream */
+	bool l1_zlw_en[IPU_MMU_MAX_TLB_L1_STREAMS];
+	bool l1_zlw_1d_mode[IPU_MMU_MAX_TLB_L1_STREAMS];
+	u8 l1_ins_zlw_ahead_pages[IPU_MMU_MAX_TLB_L1_STREAMS];
+	bool l1_zlw_2d_mode[IPU_MMU_MAX_TLB_L1_STREAMS];
+
+	u32 l1_stream_id_reg_offset;
+	u32 l2_stream_id_reg_offset;
+
+	u8 nr_l2streams;
+	/*
+	 * L2 has a fixed 2 blocks per stream. The block address is calculated
+	 * from the block size.
+	 */
+	u8 l2_block_sz[IPU_MMU_MAX_TLB_L2_STREAMS];
+	/* flag to track if WA is needed for successive invalidate HW bug */
+	bool insert_read_before_invalidate;
+	/* flag to track if zlw based mmu invalidation is needed */
+	bool zlw_invalidate;
+};
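+
+/*
+ * Illustrative (hypothetical) platform-data fragment showing how the ZLW
+ * fields above could be populated for the 1D mode described earlier, with
+ * stream 0 prefetching three pages ahead. The real per-stream values are
+ * use-case specific and must stay in sync with the IPU4 FW tables:
+ *
+ *	.l1_zlw_en		= { true,  ... },
+ *	.l1_zlw_1d_mode		= { true,  ... },
+ *	.l1_ins_zlw_ahead_pages	= { 3, ... },
+ *	.l1_zlw_2d_mode		= { false, ... },
+ */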
+
+struct ipu_mmu_pdata {
+	unsigned int nr_mmus;
+	struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES];
+	int mmid;
+};
+
+struct ipu_isys_csi2_pdata {
+	void __iomem *base;
+};
+
+#define IPU_EV_AUTO 0xff
+
+struct ipu_isys_internal_csi2_pdata {
+	unsigned int nports;
+	unsigned int *offsets;
+};
+
+struct ipu_isys_internal_tpg_pdata {
+	unsigned int ntpgs;
+	unsigned int *offsets;
+	unsigned int *sels;
+};
+
+/*
+ * One place to handle all the IPU HW variations
+ */
+struct ipu_hw_variants {
+	unsigned long offset;
+	unsigned int nr_mmus;
+	struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES];
+	u8 cdc_fifos;
+	u8 cdc_fifo_threshold[IPU_MAX_VC_IOSF_PORTS];
+	u32 dmem_offset;
+	u32 spc_offset;	/* SPC offset from psys base */
+};
+
+struct ipu_isys_internal_pdata {
+	struct ipu_isys_internal_csi2_pdata csi2;
+	struct ipu_isys_internal_tpg_pdata tpg;
+	struct ipu_hw_variants hw_variant;
+	u32 num_parallel_streams;
+	u32 isys_dma_overshoot;
+};
+
+struct ipu_isys_pdata {
+	void __iomem *base;
+	const struct ipu_isys_internal_pdata *ipdata;
+};
+
+struct ipu_psys_internal_pdata {
+	struct ipu_hw_variants hw_variant;
+};
+
+struct ipu_psys_pdata {
+	void __iomem *base;
+	const struct ipu_psys_internal_pdata *ipdata;
+};
+
+#endif
diff --git a/drivers/media/pci/intel/ipu-psys-compat32.c b/drivers/media/pci/intel/ipu-psys-compat32.c
new file mode 100644
index 000000000000..5283c85bd57b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-psys-compat32.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/compat.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+
+#include <uapi/linux/ipu-psys.h>
+
+#include "ipu-psys.h"
+
+static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long ret = -ENOTTY;
+
+	if (file->f_op->unlocked_ioctl)
+		ret = file->f_op->unlocked_ioctl(file, cmd, arg);
+
+	return ret;
+}
+
+struct ipu_psys_buffer32 {
+	u64 len;
+	union {
+		int fd;
+		compat_uptr_t userptr;
+		u64 reserved;
+	} base;
+	u32 data_offset;
+	u32 bytes_used;
+	u32 flags;
+	u32 reserved[2];
+} __packed;
+
+struct ipu_psys_command32 {
+	u64 issue_id;
+	u64 user_token;
+	u32 priority;
+	compat_uptr_t pg_manifest;
+	compat_uptr_t buffers;
+	int pg;
+	u32 pg_manifest_size;
+	u32 bufcount;
+	u32 min_psys_freq;
+	u32 frame_counter;
+	u32 reserved[2];
+} __packed;
+
+struct ipu_psys_manifest32 {
+	u32 index;
+	u32 size;
+	compat_uptr_t manifest;
+	u32 reserved[5];
+} __packed;
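+
+/*
+ * In the structs above, user pointers from the 32-bit ABI are carried as
+ * compat_uptr_t and must be widened with compat_ptr() before use. A
+ * minimal sketch (not part of the driver):
+ *
+ *	compat_uptr_t up32;
+ *
+ *	if (get_user(up32, &up->manifest))
+ *		return -EFAULT;
+ *	kp->manifest = compat_ptr(up32);
+ */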
+
+static int
+get_ipu_psys_command32(struct ipu_psys_command *kp,
+		       struct ipu_psys_command32 __user *up)
+{
+	compat_uptr_t pgm, bufs;
+	bool access_ok;
+
+	access_ok = access_ok(up, sizeof(struct ipu_psys_command32));
+	if (!access_ok || get_user(kp->issue_id, &up->issue_id) ||
+	    get_user(kp->user_token, &up->user_token) ||
+	    get_user(kp->priority, &up->priority) ||
+	    get_user(pgm, &up->pg_manifest) ||
+	    get_user(bufs, &up->buffers) ||
+	    get_user(kp->pg, &up->pg) ||
+	    get_user(kp->pg_manifest_size, &up->pg_manifest_size) ||
+	    get_user(kp->bufcount, &up->bufcount) ||
+	    get_user(kp->min_psys_freq, &up->min_psys_freq) ||
+	    get_user(kp->frame_counter, &up->frame_counter)
+	    )
+		return -EFAULT;
+
+	kp->pg_manifest = compat_ptr(pgm);
+	kp->buffers = compat_ptr(bufs);
+
+	return 0;
+}
+
+static int
+get_ipu_psys_buffer32(struct ipu_psys_buffer *kp,
+		      struct ipu_psys_buffer32 __user *up)
+{
+	compat_uptr_t ptr;
+	bool access_ok;
+
+	access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32));
+	if (!access_ok || get_user(kp->len, &up->len) ||
+	    get_user(ptr, &up->base.userptr) ||
+	    get_user(kp->data_offset, &up->data_offset) ||
+	    get_user(kp->bytes_used, &up->bytes_used) ||
+	    get_user(kp->flags, &up->flags))
+		return -EFAULT;
+
+	kp->base.userptr = compat_ptr(ptr);
+
+	return 0;
+}
+
+static int
+put_ipu_psys_buffer32(struct ipu_psys_buffer *kp,
+		      struct ipu_psys_buffer32 __user *up)
+{
+	bool access_ok;
+
+	access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32));
+	if (!access_ok || put_user(kp->len, &up->len) ||
+	    put_user(kp->base.fd, &up->base.fd) ||
+	    put_user(kp->data_offset, &up->data_offset) ||
+	    put_user(kp->bytes_used, &up->bytes_used) ||
+	    put_user(kp->flags, &up->flags))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int
+get_ipu_psys_manifest32(struct ipu_psys_manifest *kp,
+			struct ipu_psys_manifest32 __user *up)
+{
+	compat_uptr_t ptr;
+	bool access_ok;
+
+	access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32));
+	if (!access_ok || get_user(kp->index, &up->index) ||
+	    get_user(kp->size, &up->size) || get_user(ptr, &up->manifest))
+		return -EFAULT;
+
+	kp->manifest = compat_ptr(ptr);
+
+	return 0;
+}
+
+static int
+put_ipu_psys_manifest32(struct ipu_psys_manifest *kp,
+			struct ipu_psys_manifest32 __user *up)
+{
+	compat_uptr_t ptr = (u32)((unsigned long)kp->manifest);
+	bool access_ok;
+
+	access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32));
+	if (!access_ok || put_user(kp->index, &up->index) ||
+	    put_user(kp->size, &up->size) || put_user(ptr, &up->manifest))
+		return -EFAULT;
+
+	return 0;
+}
+
+#define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32)
+#define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32)
+#define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32)
+#define IPU_IOC_CMD_CANCEL32 _IOWR('A', 8, struct ipu_psys_command32)
+#define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32)
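+
+/*
+ * These differ from the native commands only in the argument struct size
+ * encoded in the ioctl number (the native IPU_IOC_GETBUF is presumably
+ * _IOWR('A', 4, struct ipu_psys_buffer)), which is why the handler below
+ * first maps each 32-bit command number to its native counterpart.
+ */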
+
+long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	union {
+		struct ipu_psys_buffer buf;
+		struct ipu_psys_command cmd;
+		struct ipu_psys_event ev;
+		struct ipu_psys_manifest m;
+	} karg;
+	int compatible_arg = 1;
+	int err = 0;
+	void __user *up = compat_ptr(arg);
+
+	switch (cmd) {
+	case IPU_IOC_GETBUF32:
+		cmd = IPU_IOC_GETBUF;
+		break;
+	case IPU_IOC_PUTBUF32:
+		cmd = IPU_IOC_PUTBUF;
+		break;
+	case IPU_IOC_QCMD32:
+		cmd = IPU_IOC_QCMD;
+		break;
+	case IPU_IOC_GET_MANIFEST32:
+		cmd = IPU_IOC_GET_MANIFEST;
+		break;
+	}
+
+	switch (cmd) {
+	case IPU_IOC_GETBUF:
+	case IPU_IOC_PUTBUF:
+		err = get_ipu_psys_buffer32(&karg.buf, up);
+		compatible_arg = 0;
+		break;
+	case IPU_IOC_QCMD:
+		err = get_ipu_psys_command32(&karg.cmd, up);
+		compatible_arg = 0;
+		break;
+	case IPU_IOC_GET_MANIFEST:
+		err = get_ipu_psys_manifest32(&karg.m, up);
+		compatible_arg = 0;
+		break;
+	}
+	if (err)
+		return err;
+
+	if (compatible_arg) {
+		err = native_ioctl(file, cmd, (unsigned long)up);
+	} else {
+		mm_segment_t old_fs = get_fs();
+
+		set_fs(KERNEL_DS);
+		err = native_ioctl(file, cmd, (unsigned long)&karg);
+		set_fs(old_fs);
+	}
+
+	if (err)
+		return err;
+
+	switch (cmd) {
+	case IPU_IOC_GETBUF:
+		err = put_ipu_psys_buffer32(&karg.buf, up);
+		break;
+	case IPU_IOC_GET_MANIFEST:
+		err = put_ipu_psys_manifest32(&karg.m, up);
+		break;
+	}
+	return err;
+}
+EXPORT_SYMBOL_GPL(ipu_psys_compat_ioctl32);
diff --git a/drivers/media/pci/intel/ipu-psys.c b/drivers/media/pci/intel/ipu-psys.c
new file mode 100644
index 000000000000..c8370659d61a
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-psys.c
@@ -0,0 +1,1632 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/init_task.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <linux/poll.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+
+#include <uapi/linux/ipu-psys.h>
+
+#include "ipu.h"
+#include "ipu-mmu.h"
+#include "ipu-bus.h"
+#include "ipu-platform.h"
+#include "ipu-buttress.h"
+#include "ipu-cpd.h"
+#include "ipu-fw-psys.h"
+#include "ipu-psys.h"
+#include "ipu-platform-psys.h"
+#include "ipu-platform-regs.h"
+#include "ipu-fw-com.h"
+
+static bool async_fw_init;
+module_param(async_fw_init, bool, 0664);
+MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization");
+
+#define IPU_PSYS_NUM_DEVICES		4
+#define IPU_PSYS_AUTOSUSPEND_DELAY	2000
+
+#ifdef CONFIG_PM
+static int psys_runtime_pm_resume(struct device *dev);
+static int psys_runtime_pm_suspend(struct device *dev);
+#else
+#define pm_runtime_dont_use_autosuspend(d)
+#define pm_runtime_use_autosuspend(d)
+#define pm_runtime_set_autosuspend_delay(d, f)	0
+#define pm_runtime_get_sync(d)			0
+#define pm_runtime_put(d)			0
+#define pm_runtime_put_sync(d)			0
+#define pm_runtime_put_noidle(d)		0
+#define pm_runtime_put_autosuspend(d)		0
+#endif
+
+static dev_t ipu_psys_dev_t;
+static DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES);
+static DEFINE_MUTEX(ipu_psys_mutex);
+
+static struct fw_init_task {
+	struct delayed_work work;
+	struct ipu_psys *psys;
+} fw_init_task;
+
+static void ipu_psys_remove(struct ipu_bus_device *adev);
+
+static struct bus_type ipu_psys_bus = {
+	.name = IPU_PSYS_NAME,
+};
+
+struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size)
+{
+	struct ipu_psys_pg *kpg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&psys->pgs_lock, flags);
+	list_for_each_entry(kpg, &psys->pgs, list) {
+		if (!kpg->pg_size && kpg->size >= pg_size) {
+			kpg->pg_size = pg_size;
+			spin_unlock_irqrestore(&psys->pgs_lock, flags);
+			return kpg;
+		}
+	}
+	spin_unlock_irqrestore(&psys->pgs_lock, flags);
+	/* no large enough buffer available, allocate a new one */
+	kpg = kzalloc(sizeof(*kpg), GFP_KERNEL);
+	if (!kpg)
+		return NULL;
+
+	kpg->pg = dma_alloc_attrs(&psys->adev->dev, pg_size,
+				  &kpg->pg_dma_addr, GFP_KERNEL, 0);
+	if (!kpg->pg) {
+		kfree(kpg);
+		return NULL;
+	}
+
+	kpg->pg_size = pg_size;
+	kpg->size = pg_size;
+	spin_lock_irqsave(&psys->pgs_lock, flags);
+	list_add(&kpg->list, &psys->pgs);
+	spin_unlock_irqrestore(&psys->pgs_lock, flags);
+
+	return kpg;
+}
+
+static int ipu_psys_unmapbuf_with_lock(int fd, struct ipu_psys_fh *fh,
+				       struct ipu_psys_kbuffer *kbuf);
+struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd)
+{
+	struct ipu_psys_kbuffer *kbuf;
+
+	list_for_each_entry(kbuf, &fh->bufmap, list) {
+		if (kbuf->fd == fd)
+			return kbuf;
+	}
+
+	return NULL;
+}
+
+struct ipu_psys_kbuffer *
+ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr)
+{
+	struct ipu_psys_kbuffer *kbuffer;
+
+	list_for_each_entry(kbuffer, &fh->bufmap, list) {
+		if (kbuffer->kaddr == kaddr)
+			return kbuffer;
+	}
+
+	return NULL;
+}
+
+static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach)
+{
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+	int npages, array_size;
+	struct page **pages;
+	struct sg_table *sgt;
+	int nr = 0;
+	int ret = -ENOMEM;
+
+	start = (unsigned long)attach->userptr;
+	end = PAGE_ALIGN(start + attach->len);
+	npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT;
+	array_size = npages * sizeof(struct page *);
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return -ENOMEM;
+
+	if (attach->npages != 0) {
+		pages = attach->pages;
+		npages = attach->npages;
+		attach->vma_is_io = 1;
+		goto skip_pages;
+	}
+
+	pages = kvzalloc(array_size, GFP_KERNEL);
+	if (!pages)
+		goto free_sgt;
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, start);
+	if (!vma) {
+		ret = -EFAULT;
+		goto error_up_read;
+	}
+
+	/*
+	 * For buffers from Gralloc, VM_PFNMAP is expected,
+	 * but VM_IO is set instead. Possibly a bug in Gralloc.
+	 */
+	attach->vma_is_io = vma->vm_flags & (VM_IO | VM_PFNMAP);
+
+	if (attach->vma_is_io) {
+		unsigned long io_start = start;
+
+		if (vma->vm_end < start + attach->len) {
+			dev_err(attach->dev,
+				"vma at %lu is too small for %llu bytes\n",
+				start, attach->len);
+			ret = -EFAULT;
+			goto error_up_read;
+		}
+
+		for (nr = 0; nr < npages; nr++, io_start += PAGE_SIZE) {
+			unsigned long pfn;
+
+			ret = follow_pfn(vma, io_start, &pfn);
+			if (ret)
+				goto error_up_read;
+			pages[nr] = pfn_to_page(pfn);
+		}
+	} else {
+		nr = get_user_pages(start & PAGE_MASK, npages,
+				    FOLL_WRITE,
+				    pages, NULL);
+		if (nr < npages)
+			goto error_up_read;
+	}
+	up_read(&current->mm->mmap_sem);
+
+	attach->pages = pages;
+	attach->npages = npages;
+
+skip_pages:
+	ret = sg_alloc_table_from_pages(sgt, pages, npages,
+					start & ~PAGE_MASK, attach->len,
+					GFP_KERNEL);
+	if (ret < 0)
+		goto error;
+
+	attach->sgt = sgt;
+
+	return 0;
+
+error_up_read:
+	up_read(&current->mm->mmap_sem);
+error:
+	if (!attach->vma_is_io)
+		while (nr > 0)
+			put_page(pages[--nr]);
+
+	kvfree(pages);
+free_sgt:
+	kfree(sgt);
+
+	dev_err(attach->dev, "failed to get userpages:%d\n", ret);
+
+	return ret;
+}
+
+static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach)
+{
+	if (!attach || !attach->userptr || !attach->sgt)
+		return;
+
+	if (!attach->vma_is_io) {
+		int i = attach->npages;
+
+		while (--i >= 0) {
+			set_page_dirty_lock(attach->pages[i]);
+			put_page(attach->pages[i]);
+		}
+	}
+
+	kvfree(attach->pages);
+
+	sg_free_table(attach->sgt);
+	kfree(attach->sgt);
+	attach->sgt = NULL;
+}
+
+static int ipu_dma_buf_attach(struct dma_buf *dbuf,
+			      struct dma_buf_attachment *attach)
+{
+	struct ipu_psys_kbuffer *kbuf = dbuf->priv;
+	struct ipu_dma_buf_attach *ipu_attach;
+
+	ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL);
+	if (!ipu_attach)
+		return -ENOMEM;
+
+	ipu_attach->len = kbuf->len;
+	ipu_attach->userptr = kbuf->userptr;
+
+	attach->priv = ipu_attach;
+	return 0;
+}
+
+static void ipu_dma_buf_detach(struct dma_buf *dbuf,
+			       struct dma_buf_attachment *attach)
+{
+	struct ipu_dma_buf_attach *ipu_attach = attach->priv;
+
+	kfree(ipu_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach,
+					enum dma_data_direction dir)
+{
+	struct ipu_dma_buf_attach *ipu_attach = attach->priv;
+	unsigned long attrs;
+	int ret;
+
+	ret = ipu_psys_get_userpages(ipu_attach);
+	if (ret)
+		return NULL;
+
+	attrs = DMA_ATTR_SKIP_CPU_SYNC;
+	ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl,
+			       ipu_attach->sgt->orig_nents, dir, attrs);
+	if (ret < ipu_attach->sgt->orig_nents) {
+		ipu_psys_put_userpages(ipu_attach);
+		dev_dbg(attach->dev, "buf map failed\n");
+
+		return ERR_PTR(-EIO);
+	}
+
+	/*
+	 * Initial cache flush to avoid writing dirty pages for buffers which
+	 * are later marked as IPU_BUFFER_FLAG_NO_FLUSH.
+	 */
+	dma_sync_sg_for_device(attach->dev, ipu_attach->sgt->sgl,
+			       ipu_attach->sgt->orig_nents, DMA_BIDIRECTIONAL);
+
+	return ipu_attach->sgt;
+}
+
+static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach,
+			      struct sg_table *sg, enum dma_data_direction dir)
+{
+	struct ipu_dma_buf_attach *ipu_attach = attach->priv;
+
+	dma_unmap_sg(attach->dev, sg->sgl, sg->orig_nents, dir);
+	ipu_psys_put_userpages(ipu_attach);
+}
+
+static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
+{
+	return -ENOTTY;
+}
+
+static void ipu_dma_buf_release(struct dma_buf *buf)
+{
+	struct ipu_psys_kbuffer *kbuf = buf->priv;
+
+	if (!kbuf)
+		return;
+
+	if (kbuf->db_attach) {
+		dev_dbg(kbuf->db_attach->dev,
+			"releasing buffer %d\n", kbuf->fd);
+		ipu_psys_put_userpages(kbuf->db_attach->priv);
+	}
+	kfree(kbuf);
+}
+
+static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
+					enum dma_data_direction dir)
+{
+	return -ENOTTY;
+}
+
+static void *ipu_dma_buf_vmap(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+	struct ipu_dma_buf_attach *ipu_attach;
+
+	if (list_empty(&dmabuf->attachments))
+		return NULL;
+
+	attach = list_last_entry(&dmabuf->attachments,
+				 struct dma_buf_attachment, node);
+	ipu_attach = attach->priv;
+
+	if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)
+		return NULL;
+
+	return vm_map_ram(ipu_attach->pages,
+			  ipu_attach->npages, 0, PAGE_KERNEL);
+}
+
+static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+	struct dma_buf_attachment *attach;
+	struct ipu_dma_buf_attach *ipu_attach;
+
+	if (WARN_ON(list_empty(&dmabuf->attachments)))
+		return;
+
+	attach = list_last_entry(&dmabuf->attachments,
+				 struct dma_buf_attachment, node);
+	ipu_attach = attach->priv;
+
+	if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages))
+		return;
+
+	vm_unmap_ram(vaddr, ipu_attach->npages);
+}
+
+struct dma_buf_ops ipu_dma_buf_ops = {
+	.attach = ipu_dma_buf_attach,
+	.detach = ipu_dma_buf_detach,
+	.map_dma_buf = ipu_dma_buf_map,
+	.unmap_dma_buf = ipu_dma_buf_unmap,
+	.release = ipu_dma_buf_release,
+	.begin_cpu_access = ipu_dma_buf_begin_cpu_access,
+	.mmap = ipu_dma_buf_mmap,
+	.vmap = ipu_dma_buf_vmap,
+	.vunmap = ipu_dma_buf_vunmap,
+};
+
+static int ipu_psys_open(struct inode *inode, struct file *file)
+{
+	struct ipu_psys *psys = inode_to_ipu_psys(inode);
+	struct ipu_device *isp = psys->adev->isp;
+	struct ipu_psys_fh *fh;
+	int rval;
+
+	if (isp->flr_done)
+		return -EIO;
+
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	if (!fh)
+		return -ENOMEM;
+
+	fh->psys = psys;
+
+	file->private_data = fh;
+
+	mutex_init(&fh->mutex);
+	INIT_LIST_HEAD(&fh->bufmap);
+	init_waitqueue_head(&fh->wait);
+
+	rval = ipu_psys_fh_init(fh);
+	if (rval)
+		goto open_failed;
+
+	mutex_lock(&psys->mutex);
+	list_add_tail(&fh->list, &psys->fhs);
+	mutex_unlock(&psys->mutex);
+
+	return 0;
+
+open_failed:
+	mutex_destroy(&fh->mutex);
+	kfree(fh);
+	return rval;
+}
+
+static int ipu_psys_release(struct inode *inode, struct file *file)
+{
+	struct ipu_psys *psys = inode_to_ipu_psys(inode);
+	struct ipu_psys_fh *fh = file->private_data;
+	struct ipu_psys_kbuffer *kbuf, *kbuf0;
+	struct dma_buf_attachment *db_attach;
+
+	mutex_lock(&fh->mutex);
+	/* clean up buffers */
+	if (!list_empty(&fh->bufmap)) {
+		list_for_each_entry_safe(kbuf, kbuf0, &fh->bufmap, list) {
+			list_del(&kbuf->list);
+			db_attach = kbuf->db_attach;
+
+			/* Unmap and release buffers */
+			if (kbuf->dbuf && db_attach) {
+				struct dma_buf *dbuf;
+
+				kbuf->valid = false;
+				dma_buf_vunmap(kbuf->dbuf, kbuf->kaddr);
+				dma_buf_unmap_attachment(db_attach,
+							 kbuf->sgt,
+							 DMA_BIDIRECTIONAL);
+				dma_buf_detach(kbuf->dbuf, db_attach);
+				dbuf = kbuf->dbuf;
+				kbuf->dbuf = NULL;
+				db_attach = NULL;
+				kbuf->db_attach = NULL;
+				dma_buf_put(dbuf);
+			} else {
+				if (db_attach)
+					ipu_psys_put_userpages(db_attach->priv);
+				kfree(kbuf);
+			}
+		}
+	}
+	mutex_unlock(&fh->mutex);
+
+	mutex_lock(&psys->mutex);
+	list_del(&fh->list);
+
+	mutex_unlock(&psys->mutex);
+	ipu_psys_fh_deinit(fh);
+
+	mutex_lock(&psys->mutex);
+	if (list_empty(&psys->fhs))
+		psys->power_gating = 0;
+	mutex_unlock(&psys->mutex);
+	mutex_destroy(&fh->mutex);
+	kfree(fh);
+
+	return 0;
+}
+
+static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh)
+{
+	struct ipu_psys_kbuffer *kbuf;
+	struct ipu_psys *psys = fh->psys;
+
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dbuf;
+	int ret;
+
+	if (!buf->base.userptr) {
+		dev_err(&psys->adev->dev, "Buffer allocation not supported\n");
+		return -EINVAL;
+	}
+
+	kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	kbuf->len = buf->len;
+	kbuf->userptr = buf->base.userptr;
+	kbuf->flags = buf->flags;
+
+	exp_info.ops = &ipu_dma_buf_ops;
+	exp_info.size = kbuf->len;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = kbuf;
+
+	dbuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dbuf)) {
+		kfree(kbuf);
+		return PTR_ERR(dbuf);
+	}
+
+	ret = dma_buf_fd(dbuf, 0);
+	if (ret < 0) {
+		kfree(kbuf);
+		dma_buf_put(dbuf);
+		return ret;
+	}
+
+	kbuf->fd = ret;
+	buf->base.fd = ret;
+	kbuf->flags = buf->flags &= ~IPU_BUFFER_FLAG_USERPTR;
+	kbuf->flags = buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE;
+
+	mutex_lock(&fh->mutex);
+	list_add_tail(&kbuf->list, &fh->bufmap);
+	mutex_unlock(&fh->mutex);
+
+	dev_dbg(&psys->adev->dev, "IOC_GETBUF: userptr %p size %lu to fd %d",
+		buf->base.userptr, buf->len, buf->base.fd);
+
+	return 0;
+}
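+
+/*
+ * Illustrative user-space flow for the above (a sketch with hypothetical
+ * values, not part of the driver):
+ *
+ *	struct ipu_psys_buffer b = {
+ *		.len = length,
+ *		.base.userptr = ptr,
+ *		.flags = IPU_BUFFER_FLAG_USERPTR,
+ *	};
+ *
+ *	ioctl(psys_fd, IPU_IOC_GETBUF, &b);	   // export ptr as a dmabuf fd
+ *	ioctl(psys_fd, IPU_IOC_MAPBUF, b.base.fd); // map it for the IPU
+ */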
+
+static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh)
+{
+	return 0;
+}
+
+int ipu_psys_mapbuf_with_lock(int fd, struct ipu_psys_fh *fh,
+			      struct ipu_psys_kbuffer *kbuf)
+{
+	struct ipu_psys *psys = fh->psys;
+	struct dma_buf *dbuf;
+	int ret;
+
+	dbuf = dma_buf_get(fd);
+	if (IS_ERR(dbuf))
+		return -EINVAL;
+
+	if (!kbuf) {
+		/*
+		 * This fd wasn't generated by ipu_psys_getbuf; it is a new
+		 * fd. Create a new kbuf item for it and add the kbuf to the
+		 * bufmap list.
+		 */
+		kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
+		if (!kbuf) {
+			/* drop the reference taken by dma_buf_get() above */
+			dma_buf_put(dbuf);
+			return -ENOMEM;
+		}
+
+		list_add_tail(&kbuf->list, &fh->bufmap);
+	}
+
+	/* the fd is valid and was found, but the buffer needs a remap */
+	if (kbuf->dbuf && (kbuf->dbuf != dbuf || kbuf->len != dbuf->size)) {
+		dev_dbg(&psys->adev->dev,
+			"dmabuf fd %d with kbuf %p changed, need remap.\n",
+			fd, kbuf);
+		ret = ipu_psys_unmapbuf_with_lock(fd, fh, kbuf);
+		if (ret) {
+			dma_buf_put(dbuf);
+			return ret;
+		}
+
+		kbuf = ipu_psys_lookup_kbuffer(fh, fd);
+		/* the external dmabuf changed */
+		if (!kbuf) {
+			kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
+			if (!kbuf) {
+				dma_buf_put(dbuf);
+				return -ENOMEM;
+			}
+			list_add_tail(&kbuf->list, &fh->bufmap);
+		}
+	}
+
+	if (kbuf->sgt) {
+		dev_dbg(&psys->adev->dev, "fd %d has been mapped!\n", fd);
+		dma_buf_put(dbuf);
+		goto mapbuf_end;
+	}
+
+	kbuf->dbuf = dbuf;
+
+	if (kbuf->len == 0)
+		kbuf->len = kbuf->dbuf->size;
+
+	kbuf->fd = fd;
+
+	kbuf->db_attach = dma_buf_attach(kbuf->dbuf, &psys->adev->dev);
+	if (IS_ERR(kbuf->db_attach)) {
+		ret = PTR_ERR(kbuf->db_attach);
+		goto error_put;
+	}
+
+	kbuf->sgt = dma_buf_map_attachment(kbuf->db_attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(kbuf->sgt)) {
+		ret = -EINVAL;
+		kbuf->sgt = NULL;
+		dev_dbg(&psys->adev->dev, "map attachment failed\n");
+		goto error_detach;
+	}
+
+	kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl);
+
+	kbuf->kaddr = dma_buf_vmap(kbuf->dbuf);
+	if (!kbuf->kaddr) {
+		ret = -EINVAL;
+		goto error_unmap;
+	}
+
+	dev_dbg(&psys->adev->dev, "%s kbuf %p fd %d with len %lu mapped\n",
+		__func__, kbuf, fd, kbuf->len);
+mapbuf_end:
+
+	kbuf->valid = true;
+
+	return 0;
+
+error_unmap:
+	dma_buf_unmap_attachment(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL);
+error_detach:
+	dma_buf_detach(kbuf->dbuf, kbuf->db_attach);
+	kbuf->db_attach = NULL;
+error_put:
+	list_del(&kbuf->list);
+
+	if (!kbuf->userptr)
+		kfree(kbuf);
+
+	dma_buf_put(dbuf);
+
+	dev_err(&psys->adev->dev, "%s failed\n", __func__);
+	return ret;
+}
+
+static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh)
+{
+	long ret;
+	struct ipu_psys_kbuffer *kbuf;
+
+	mutex_lock(&fh->mutex);
+	kbuf = ipu_psys_lookup_kbuffer(fh, fd);
+	ret = ipu_psys_mapbuf_with_lock(fd, fh, kbuf);
+	mutex_unlock(&fh->mutex);
+
+	dev_dbg(&fh->psys->adev->dev, "IOC_MAPBUF ret %ld\n", ret);
+
+	return ret;
+}
+
+static int ipu_psys_unmapbuf_with_lock(int fd, struct ipu_psys_fh *fh,
+				       struct ipu_psys_kbuffer *kbuf)
+{
+	struct ipu_psys *psys = fh->psys;
+	struct dma_buf *dmabuf;
+
+	if (!kbuf || fd != kbuf->fd) {
+		dev_err(&psys->adev->dev, "invalid kbuffer\n");
+		return -EINVAL;
+	}
+
+	/* From now on it is not safe to use this kbuffer */
+	kbuf->valid = false;
+
+	dma_buf_vunmap(kbuf->dbuf, kbuf->kaddr);
+	dma_buf_unmap_attachment(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL);
+
+	dma_buf_detach(kbuf->dbuf, kbuf->db_attach);
+
+	dmabuf = kbuf->dbuf;
+
+	kbuf->db_attach = NULL;
+	kbuf->dbuf = NULL;
+	kbuf->sgt = NULL;
+
+	list_del(&kbuf->list);
+
+	if (!kbuf->userptr)
+		kfree(kbuf);
+
+	dma_buf_put(dmabuf);
+
+	dev_dbg(&psys->adev->dev, "%s fd %d unmapped\n", __func__, fd);
+
+	return 0;
+}
+
+static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh)
+{
+	struct ipu_psys_kbuffer *kbuf;
+	long ret;
+
+	mutex_lock(&fh->mutex);
+	kbuf = ipu_psys_lookup_kbuffer(fh, fd);
+	if (!kbuf) {
+		dev_err(&fh->psys->adev->dev,
+			"buffer with fd %d not found\n", fd);
+		mutex_unlock(&fh->mutex);
+		return -EINVAL;
+	}
+	ret = ipu_psys_unmapbuf_with_lock(fd, fh, kbuf);
+	mutex_unlock(&fh->mutex);
+
+	dev_dbg(&fh->psys->adev->dev, "IOC_UNMAPBUF\n");
+
+	return ret;
+}
+
+static unsigned int ipu_psys_poll(struct file *file,
+				  struct poll_table_struct *wait)
+{
+	struct ipu_psys_fh *fh = file->private_data;
+	struct ipu_psys *psys = fh->psys;
+	unsigned int res = 0;
+
+	dev_dbg(&psys->adev->dev, "ipu psys poll\n");
+
+	poll_wait(file, &fh->wait, wait);
+
+	if (ipu_get_completed_kcmd(fh))
+		res = POLLIN;
+
+	dev_dbg(&psys->adev->dev, "ipu psys poll res %u\n", res);
+
+	return res;
+}
+
+static long ipu_get_manifest(struct ipu_psys_manifest *manifest,
+			     struct ipu_psys_fh *fh)
+{
+	struct ipu_psys *psys = fh->psys;
+	struct ipu_device *isp = psys->adev->isp;
+	struct ipu_cpd_client_pkg_hdr *client_pkg;
+	u32 entries;
+	void *host_fw_data;
+	dma_addr_t dma_fw_data;
+	u32 client_pkg_offset;
+
+	host_fw_data = (void *)isp->cpd_fw->data;
+	dma_fw_data = sg_dma_address(psys->fw_sgt.sgl);
+
+	entries = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir);
+	if (!manifest || manifest->index > entries - 1) {
+		dev_err(&psys->adev->dev, "invalid argument\n");
+		return -EINVAL;
+	}
+
+	if (!ipu_cpd_pkg_dir_get_size(psys->pkg_dir, manifest->index) ||
+	    ipu_cpd_pkg_dir_get_type(psys->pkg_dir, manifest->index) <
+	    IPU_CPD_PKG_DIR_CLIENT_PG_TYPE) {
+		dev_dbg(&psys->adev->dev, "invalid pkg dir entry\n");
+		return -ENOENT;
+	}
+
+	client_pkg_offset = ipu_cpd_pkg_dir_get_address(psys->pkg_dir,
+							manifest->index);
+	client_pkg_offset -= dma_fw_data;
+
+	client_pkg = host_fw_data + client_pkg_offset;
+	manifest->size = client_pkg->pg_manifest_size;
+
+	if (!manifest->manifest)
+		return 0;
+
+	if (copy_to_user(manifest->manifest,
+			 (uint8_t *)client_pkg + client_pkg->pg_manifest_offs,
+			 manifest->size)) {
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long ipu_psys_ioctl(struct file *file, unsigned int cmd,
+			   unsigned long arg)
+{
+	union {
+		struct ipu_psys_buffer buf;
+		struct ipu_psys_command cmd;
+		struct ipu_psys_event ev;
+		struct ipu_psys_capability caps;
+		struct ipu_psys_manifest m;
+	} karg;
+	struct ipu_psys_fh *fh = file->private_data;
+	long err = 0;
+	void __user *up = (void __user *)arg;
+	bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF);
+
+	if (copy) {
+		if (_IOC_SIZE(cmd) > sizeof(karg))
+			return -ENOTTY;
+
+		if (_IOC_DIR(cmd) & _IOC_WRITE) {
+			err = copy_from_user(&karg, up, _IOC_SIZE(cmd));
+			if (err)
+				return -EFAULT;
+		}
+	}
+
+	switch (cmd) {
+	case IPU_IOC_MAPBUF:
+		err = ipu_psys_mapbuf(arg, fh);
+		break;
+	case IPU_IOC_UNMAPBUF:
+		err = ipu_psys_unmapbuf(arg, fh);
+		break;
+	case IPU_IOC_QUERYCAP:
+		karg.caps = fh->psys->caps;
+		break;
+	case IPU_IOC_GETBUF:
+		err = ipu_psys_getbuf(&karg.buf, fh);
+		break;
+	case IPU_IOC_PUTBUF:
+		err = ipu_psys_putbuf(&karg.buf, fh);
+		break;
+	case IPU_IOC_QCMD:
+		err = ipu_psys_kcmd_new(&karg.cmd, fh);
+		break;
+	case IPU_IOC_DQEVENT:
+		err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags);
+		break;
+	case IPU_IOC_GET_MANIFEST:
+		err = ipu_get_manifest(&karg.m, fh);
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	if (err)
+		return err;
+
+	if (copy && _IOC_DIR(cmd) & _IOC_READ)
+		if (copy_to_user(up, &karg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+
+	return 0;
+}
+
+static const struct file_operations ipu_psys_fops = {
+	.open = ipu_psys_open,
+	.release = ipu_psys_release,
+	.unlocked_ioctl = ipu_psys_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = ipu_psys_compat_ioctl32,
+#endif
+	.poll = ipu_psys_poll,
+	.owner = THIS_MODULE,
+};
+
+static void ipu_psys_dev_release(struct device *dev)
+{
+}
+
+#ifdef CONFIG_PM
+static int psys_runtime_pm_resume(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+	unsigned long flags;
+	int retval;
+
+	if (!psys)
+		return 0;
+
+	retval = ipu_mmu_hw_init(adev->mmu);
+	if (retval)
+		return retval;
+
+	/*
+	 * In runtime autosuspend mode, if psys is already powered on, there
+	 * is no need to resume it again.
+	 */
+	spin_lock_irqsave(&psys->power_lock, flags);
+	if (psys->power) {
+		spin_unlock_irqrestore(&psys->power_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&psys->power_lock, flags);
+
+	if (async_fw_init && !psys->fwcom) {
+		dev_err(dev,
+			"%s: asynchronous firmware init not finished, skipping\n",
+			__func__);
+		return 0;
+	}
+
+	if (!ipu_buttress_auth_done(adev->isp)) {
+		dev_dbg(dev, "%s: not yet authenticated, skipping\n", __func__);
+		return 0;
+	}
+
+	ipu_psys_setup_hw(psys);
+
+	ipu_psys_subdomains_power(psys, 1);
+	ipu_trace_restore(&psys->adev->dev);
+
+	ipu_configure_spc(adev->isp,
+			  &psys->pdata->ipdata->hw_variant,
+			  IPU_CPD_PKG_DIR_PSYS_SERVER_IDX,
+			  psys->pdata->base, psys->pkg_dir,
+			  psys->pkg_dir_dma_addr);
+
+	retval = ipu_fw_psys_open(psys);
+	if (retval) {
+		dev_err(&psys->adev->dev, "Failed to open abi.\n");
+		return retval;
+	}
+
+	spin_lock_irqsave(&psys->power_lock, flags);
+	psys->power = 1;
+	spin_unlock_irqrestore(&psys->power_lock, flags);
+
+	return 0;
+}
+
+static int psys_runtime_pm_suspend(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+	unsigned long flags;
+	int rval;
+
+	if (!psys)
+		return 0;
+
+	if (!psys->power)
+		return 0;
+
+	spin_lock_irqsave(&psys->power_lock, flags);
+	psys->power = 0;
+	spin_unlock_irqrestore(&psys->power_lock, flags);
+
+	/*
+	 * We can trace the failure, but it is better not to return an error.
+	 * At suspend we are progressing towards the psys power-gated state.
+	 * Any hang / failure inside psys will soon be forgotten.
+	 */
+	rval = ipu_fw_psys_close(psys);
+	if (rval)
+		dev_err(dev, "Device close failure: %d\n", rval);
+
+	ipu_psys_subdomains_power(psys, 0);
+
+	ipu_mmu_hw_cleanup(adev->mmu);
+
+	return 0;
+}
+
+/*
+ * The following PM callbacks are needed to enable runtime PM in IPU PCI
+ * device resume; otherwise runtime PM can't work in PCI resume from the
+ * S3 state.
+ */
+static int psys_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int psys_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops psys_pm_ops = {
+	.runtime_suspend = psys_runtime_pm_suspend,
+	.runtime_resume = psys_runtime_pm_resume,
+	.suspend = psys_suspend,
+	.resume = psys_resume,
+};
+
+#define PSYS_PM_OPS (&psys_pm_ops)
+#else
+#define PSYS_PM_OPS NULL
+#endif
+
+static int cpd_fw_reload(struct ipu_device *isp)
+{
+	struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys);
+	int rval;
+
+	if (!isp->secure_mode) {
+		dev_warn(&isp->pdev->dev,
+			 "CPD firmware reload was only supported for secure mode.\n");
+		return -EINVAL;
+	}
+
+	if (isp->cpd_fw) {
+		ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir,
+				     psys->pkg_dir_dma_addr,
+				     psys->pkg_dir_size);
+
+		ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt);
+		release_firmware(isp->cpd_fw);
+		isp->cpd_fw = NULL;
+		dev_info(&isp->pdev->dev, "Old FW removed\n");
+	}
+
+	rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name,
+			      &isp->pdev->dev);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Requesting firmware(%s) failed\n",
+			isp->cpd_fw_name);
+		return rval;
+	}
+
+	rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
+					 isp->cpd_fw->size);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Failed to validate cpd file\n");
+		goto out_release_firmware;
+	}
+
+	rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw, &psys->fw_sgt);
+	if (rval)
+		goto out_release_firmware;
+
+	psys->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys,
+					       isp->cpd_fw->data,
+					       sg_dma_address(psys->fw_sgt.sgl),
+					       &psys->pkg_dir_dma_addr,
+					       &psys->pkg_dir_size);
+
+	if (!psys->pkg_dir) {
+		rval = -EINVAL;
+		goto out_unmap_fw_image;
+	}
+
+	isp->pkg_dir = psys->pkg_dir;
+	isp->pkg_dir_dma_addr = psys->pkg_dir_dma_addr;
+	isp->pkg_dir_size = psys->pkg_dir_size;
+
+	if (!isp->secure_mode)
+		return 0;
+
+	rval = ipu_fw_authenticate(isp, 1);
+	if (rval)
+		goto out_free_pkg_dir;
+
+	return 0;
+
+out_free_pkg_dir:
+	ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir,
+			     psys->pkg_dir_dma_addr, psys->pkg_dir_size);
+out_unmap_fw_image:
+	ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt);
+out_release_firmware:
+	release_firmware(isp->cpd_fw);
+	isp->cpd_fw = NULL;
+
+	return rval;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int ipu_psys_icache_prefetch_sp_get(void *data, u64 *val)
+{
+	struct ipu_psys *psys = data;
+
+	*val = psys->icache_prefetch_sp;
+	return 0;
+}
+
+static int ipu_psys_icache_prefetch_sp_set(void *data, u64 val)
+{
+	struct ipu_psys *psys = data;
+
+	if (val != !!val)
+		return -EINVAL;
+
+	psys->icache_prefetch_sp = val;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops,
+			ipu_psys_icache_prefetch_sp_get,
+			ipu_psys_icache_prefetch_sp_set, "%llu\n");
+
+static int ipu_psys_icache_prefetch_isp_get(void *data, u64 *val)
+{
+	struct ipu_psys *psys = data;
+
+	*val = psys->icache_prefetch_isp;
+	return 0;
+}
+
+static int ipu_psys_icache_prefetch_isp_set(void *data, u64 val)
+{
+	struct ipu_psys *psys = data;
+
+	if (val != !!val)
+		return -EINVAL;
+
+	psys->icache_prefetch_isp = val;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops,
+			ipu_psys_icache_prefetch_isp_get,
+			ipu_psys_icache_prefetch_isp_set, "%llu\n");
+
+static int ipu_psys_init_debugfs(struct ipu_psys *psys)
+{
+	struct dentry *file;
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("psys", psys->adev->isp->ipu_dir);
+	if (IS_ERR(dir))
+		return -ENOMEM;
+
+	file = debugfs_create_file("icache_prefetch_sp", 0600,
+				   dir, psys, &psys_icache_prefetch_sp_fops);
+	if (IS_ERR(file))
+		goto err;
+
+	file = debugfs_create_file("icache_prefetch_isp", 0600,
+				   dir, psys, &psys_icache_prefetch_isp_fops);
+	if (IS_ERR(file))
+		goto err;
+
+	psys->debugfsdir = dir;
+
+#ifdef IPU_PSYS_GPC
+	if (ipu_psys_gpc_init_debugfs(psys))
+		return -ENOMEM;
+#endif
+
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENOMEM;
+}
+#endif
+
+static int ipu_psys_sched_cmd(void *ptr)
+{
+	struct ipu_psys *psys = ptr;
+	size_t pending = 0;
+
+	while (1) {
+		wait_event_interruptible(psys->sched_cmd_wq,
+					 (kthread_should_stop() ||
+					  (pending =
+					   atomic_read(&psys->wakeup_count))));
+
+		if (kthread_should_stop())
+			break;
+
+		if (pending == 0)
+			continue;
+
+		mutex_lock(&psys->mutex);
+		atomic_set(&psys->wakeup_count, 0);
+		ipu_psys_run_next(psys);
+		mutex_unlock(&psys->mutex);
+	}
+
+	return 0;
+}
+
+static void start_sp(struct ipu_bus_device *adev)
+{
+	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+	void __iomem *spc_regs_base = psys->pdata->base +
+	    psys->pdata->ipdata->hw_variant.spc_offset;
+	u32 val = 0;
+
+	val |= IPU_PSYS_SPC_STATUS_START |
+	    IPU_PSYS_SPC_STATUS_RUN |
+	    IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+	val |= psys->icache_prefetch_sp ?
+	    IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0;
+	writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+}
+
+static int query_sp(struct ipu_bus_device *adev)
+{
+	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+	void __iomem *spc_regs_base = psys->pdata->base +
+	    psys->pdata->ipdata->hw_variant.spc_offset;
+	u32 val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+
+	/* return true when READY == 1, START == 0 */
+	val &= IPU_PSYS_SPC_STATUS_READY | IPU_PSYS_SPC_STATUS_START;
+
+	return val == IPU_PSYS_SPC_STATUS_READY;
+}
+
+static int ipu_psys_fw_init(struct ipu_psys *psys)
+{
+	unsigned int size;
+	struct ipu_fw_syscom_queue_config *queue_cfg;
+	struct ipu_fw_syscom_queue_config fw_psys_event_queue_cfg[] = {
+		{
+			IPU_FW_PSYS_EVENT_QUEUE_SIZE,
+			sizeof(struct ipu_fw_psys_event)
+		}
+	};
+	struct ipu_fw_psys_srv_init server_init = {
+		.ddr_pkg_dir_address = 0,
+		.host_ddr_pkg_dir = NULL,
+		.pkg_dir_size = 0,
+		.icache_prefetch_sp = psys->icache_prefetch_sp,
+		.icache_prefetch_isp = psys->icache_prefetch_isp,
+	};
+	struct ipu_fw_com_cfg fwcom = {
+		.num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID,
+		.output = fw_psys_event_queue_cfg,
+		.specific_addr = &server_init,
+		.specific_size = sizeof(server_init),
+		.cell_start = start_sp,
+		.cell_ready = query_sp,
+		.buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET,
+	};
+	int i;
+
+	size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+	if (ipu_ver == IPU_VER_6)
+		size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+
+	queue_cfg = devm_kzalloc(&psys->adev->dev, sizeof(*queue_cfg) * size,
+				 GFP_KERNEL);
+	if (!queue_cfg)
+		return -ENOMEM;
+
+	for (i = 0; i < size; i++) {
+		queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE;
+		queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd);
+	}
+
+	fwcom.input = queue_cfg;
+	fwcom.num_input_queues = size;
+	fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset;
+
+	psys->fwcom = ipu_fw_com_prepare(&fwcom, psys->adev, psys->pdata->base);
+	if (!psys->fwcom) {
+		dev_err(&psys->adev->dev, "psys fw com prepare failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void run_fw_init_work(struct work_struct *work)
+{
+	struct fw_init_task *task = (struct fw_init_task *)work;
+	struct ipu_psys *psys = task->psys;
+	int rval;
+
+	rval = ipu_psys_fw_init(psys);
+
+	if (rval) {
+		dev_err(&psys->adev->dev, "FW init failed(%d)\n", rval);
+		ipu_psys_remove(psys->adev);
+	} else {
+		dev_info(&psys->adev->dev, "FW init done\n");
+	}
+}
+
+static int ipu_psys_probe(struct ipu_bus_device *adev)
+{
+	struct ipu_device *isp = adev->isp;
+	struct ipu_psys_pg *kpg, *kpg0;
+	struct ipu_psys *psys;
+	unsigned int minor;
+	int i, rval = -E2BIG;
+
+	rval = ipu_mmu_hw_init(adev->mmu);
+	if (rval)
+		return rval;
+
+	mutex_lock(&ipu_psys_mutex);
+
+	minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0);
+	if (minor == IPU_PSYS_NUM_DEVICES) {
+		dev_err(&adev->dev, "too many devices\n");
+		goto out_unlock;
+	}
+
+	psys = devm_kzalloc(&adev->dev, sizeof(*psys), GFP_KERNEL);
+	if (!psys) {
+		rval = -ENOMEM;
+		goto out_unlock;
+	}
+
+	psys->adev = adev;
+	psys->pdata = adev->pdata;
+	psys->icache_prefetch_sp = 0;
+
+	psys->power_gating = 0;
+
+	ipu_trace_init(adev->isp, psys->pdata->base, &adev->dev,
+		       psys_trace_blocks);
+
+	cdev_init(&psys->cdev, &ipu_psys_fops);
+	psys->cdev.owner = ipu_psys_fops.owner;
+
+	rval = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu_psys_dev_t), minor), 1);
+	if (rval) {
+		dev_err(&adev->dev, "cdev_add failed (%d)\n", rval);
+		goto out_unlock;
+	}
+
+	set_bit(minor, ipu_psys_devices);
+
+	spin_lock_init(&psys->power_lock);
+	spin_lock_init(&psys->pgs_lock);
+	psys->power = 0;
+	psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS;
+
+	mutex_init(&psys->mutex);
+	INIT_LIST_HEAD(&psys->fhs);
+	INIT_LIST_HEAD(&psys->pgs);
+	INIT_LIST_HEAD(&psys->started_kcmds_list);
+	INIT_WORK(&psys->watchdog_work, ipu_psys_watchdog_work);
+
+	init_waitqueue_head(&psys->sched_cmd_wq);
+	atomic_set(&psys->wakeup_count, 0);
+	/*
+	 * Create a thread to schedule commands sent to the IPU firmware.
+	 * The thread reduces the coupling between the command scheduler
+	 * and the queueing of commands from user space to the driver.
+	 */
+	psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys,
+					     "psys_sched_cmd");
+
+	if (IS_ERR(psys->sched_cmd_thread)) {
+		psys->sched_cmd_thread = NULL;
+		mutex_destroy(&psys->mutex);
+		goto out_unlock;
+	}
+
+	ipu_bus_set_drvdata(adev, psys);
+
+	rval = ipu_psys_resource_pool_init(&psys->resource_pool_started);
+	if (rval < 0) {
+		dev_err(&psys->dev,
+			"unable to alloc process group resources\n");
+		goto out_mutex_destroy;
+	}
+
+	rval = ipu_psys_resource_pool_init(&psys->resource_pool_running);
+	if (rval < 0) {
+		dev_err(&psys->dev,
+			"unable to alloc process group resources\n");
+		goto out_resources_started_free;
+	}
+
+	ipu6_psys_hw_res_variant_init();
+	psys->pkg_dir = isp->pkg_dir;
+	psys->pkg_dir_dma_addr = isp->pkg_dir_dma_addr;
+	psys->pkg_dir_size = isp->pkg_dir_size;
+	psys->fw_sgt = isp->fw_sgt;
+
+	/* allocate and map memory for process groups */
+	for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) {
+		kpg = kzalloc(sizeof(*kpg), GFP_KERNEL);
+		if (!kpg)
+			goto out_free_pgs;
+		kpg->pg = dma_alloc_attrs(&adev->dev,
+					  IPU_PSYS_PG_MAX_SIZE,
+					  &kpg->pg_dma_addr,
+					  GFP_KERNEL, 0);
+		if (!kpg->pg) {
+			kfree(kpg);
+			goto out_free_pgs;
+		}
+		kpg->size = IPU_PSYS_PG_MAX_SIZE;
+		list_add(&kpg->list, &psys->pgs);
+	}
+
+	psys->caps.pg_count = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir);
+
+	dev_info(&adev->dev, "pkg_dir entry count:%d\n", psys->caps.pg_count);
+	if (async_fw_init) {
+		INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task,
+				  run_fw_init_work);
+		fw_init_task.psys = psys;
+		schedule_delayed_work((struct delayed_work *)&fw_init_task, 0);
+	} else {
+		rval = ipu_psys_fw_init(psys);
+	}
+
+	if (rval) {
+		dev_err(&adev->dev, "FW init failed(%d)\n", rval);
+		goto out_free_pgs;
+	}
+
+	psys->dev.parent = &adev->dev;
+	psys->dev.bus = &ipu_psys_bus;
+	psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor);
+	psys->dev.release = ipu_psys_dev_release;
+	dev_set_name(&psys->dev, "ipu-psys%d", minor);
+	rval = device_register(&psys->dev);
+	if (rval < 0) {
+		dev_err(&psys->dev, "psys device_register failed\n");
+		goto out_release_fw_com;
+	}
+
+	/* Add the hw stepping information to caps */
+	strlcpy(psys->caps.dev_model, IPU_MEDIA_DEV_MODEL_NAME,
+		sizeof(psys->caps.dev_model));
+
+	pm_runtime_set_autosuspend_delay(&psys->adev->dev,
+					 IPU_PSYS_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(&psys->adev->dev);
+	pm_runtime_mark_last_busy(&psys->adev->dev);
+
+	mutex_unlock(&ipu_psys_mutex);
+
+#ifdef CONFIG_DEBUG_FS
+	/* Debug fs failure is not fatal. */
+	ipu_psys_init_debugfs(psys);
+#endif
+
+	adev->isp->cpd_fw_reload = &cpd_fw_reload;
+
+	dev_info(&adev->dev, "psys probe minor: %d\n", minor);
+
+	ipu_mmu_hw_cleanup(adev->mmu);
+
+	return 0;
+
+out_release_fw_com:
+	ipu_fw_com_release(psys->fwcom, 1);
+out_free_pgs:
+	list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) {
+		dma_free_attrs(&adev->dev, kpg->size, kpg->pg,
+			       kpg->pg_dma_addr, 0);
+		kfree(kpg);
+	}
+
+	ipu_psys_resource_pool_cleanup(&psys->resource_pool_running);
+out_resources_started_free:
+	ipu_psys_resource_pool_cleanup(&psys->resource_pool_started);
+out_mutex_destroy:
+	mutex_destroy(&psys->mutex);
+	cdev_del(&psys->cdev);
+	if (psys->sched_cmd_thread) {
+		kthread_stop(psys->sched_cmd_thread);
+		psys->sched_cmd_thread = NULL;
+	}
+out_unlock:
+	/* Safe to call even if ipu_trace_init() was not called */
+	ipu_trace_uninit(&adev->dev);
+	mutex_unlock(&ipu_psys_mutex);
+
+	ipu_mmu_hw_cleanup(adev->mmu);
+
+	return rval;
+}
+
+static void ipu_psys_remove(struct ipu_bus_device *adev)
+{
+	struct ipu_device *isp = adev->isp;
+	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+	struct ipu_psys_pg *kpg, *kpg0;
+
+#ifdef CONFIG_DEBUG_FS
+	if (isp->ipu_dir)
+		debugfs_remove_recursive(psys->debugfsdir);
+#endif
+
+	flush_workqueue(IPU_PSYS_WORK_QUEUE);
+
+	if (psys->sched_cmd_thread) {
+		kthread_stop(psys->sched_cmd_thread);
+		psys->sched_cmd_thread = NULL;
+	}
+
+	pm_runtime_dont_use_autosuspend(&psys->adev->dev);
+
+	mutex_lock(&ipu_psys_mutex);
+
+	list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) {
+		dma_free_attrs(&adev->dev, kpg->size, kpg->pg,
+			       kpg->pg_dma_addr, 0);
+		kfree(kpg);
+	}
+
+	if (psys->fwcom && ipu_fw_com_release(psys->fwcom, 1))
+		dev_err(&adev->dev, "fw com release failed.\n");
+
+	isp->pkg_dir = NULL;
+	isp->pkg_dir_dma_addr = 0;
+	isp->pkg_dir_size = 0;
+
+	ipu_cpd_free_pkg_dir(adev, psys->pkg_dir,
+			     psys->pkg_dir_dma_addr, psys->pkg_dir_size);
+
+	ipu_buttress_unmap_fw_image(adev, &psys->fw_sgt);
+
+	kfree(psys->server_init);
+	kfree(psys->syscom_config);
+
+	ipu_trace_uninit(&adev->dev);
+
+	ipu_psys_resource_pool_cleanup(&psys->resource_pool_started);
+	ipu_psys_resource_pool_cleanup(&psys->resource_pool_running);
+
+	device_unregister(&psys->dev);
+
+	clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices);
+	cdev_del(&psys->cdev);
+
+	mutex_unlock(&ipu_psys_mutex);
+
+	mutex_destroy(&psys->mutex);
+
+	dev_info(&adev->dev, "removed\n");
+}
+
+static irqreturn_t psys_isr_threaded(struct ipu_bus_device *adev)
+{
+	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+	void __iomem *base = psys->pdata->base;
+	u32 status;
+	int r;
+
+	mutex_lock(&psys->mutex);
+#ifdef CONFIG_PM
+	if (!READ_ONCE(psys->power)) {
+		mutex_unlock(&psys->mutex);
+		return IRQ_NONE;
+	}
+	r = pm_runtime_get_sync(&psys->adev->dev);
+	if (r < 0) {
+		pm_runtime_put(&psys->adev->dev);
+		mutex_unlock(&psys->mutex);
+		return IRQ_NONE;
+	}
+#endif
+
+	status = readl(base + IPU_REG_PSYS_GPDEV_IRQ_STATUS);
+	writel(status, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR);
+
+	if (status & IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0)) {
+		writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0));
+		ipu_psys_handle_events(psys);
+	}
+
+	pm_runtime_mark_last_busy(&psys->adev->dev);
+	pm_runtime_put_autosuspend(&psys->adev->dev);
+	mutex_unlock(&psys->mutex);
+
+	return status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static struct ipu_bus_driver ipu_psys_driver = {
+	.probe = ipu_psys_probe,
+	.remove = ipu_psys_remove,
+	.isr_threaded = psys_isr_threaded,
+	.wanted = IPU_PSYS_NAME,
+	.drv = {
+		.name = IPU_PSYS_NAME,
+		.owner = THIS_MODULE,
+		.pm = PSYS_PM_OPS,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+
+static int __init ipu_psys_init(void)
+{
+	int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0,
+				       IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME);
+	if (rval) {
+		pr_err("can't alloc psys chrdev region (%d)\n", rval);
+		return rval;
+	}
+
+	rval = bus_register(&ipu_psys_bus);
+	if (rval) {
+		pr_warn("can't register psys bus (%d)\n", rval);
+		goto out_bus_register;
+	}
+
+	ipu_bus_register_driver(&ipu_psys_driver);
+
+	return rval;
+
+out_bus_register:
+	unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES);
+
+	return rval;
+}
+
+static void __exit ipu_psys_exit(void)
+{
+	ipu_bus_unregister_driver(&ipu_psys_driver);
+	bus_unregister(&ipu_psys_bus);
+	unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES);
+}
+
+static const struct pci_device_id ipu_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)},
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, ipu_pci_tbl);
+
+module_init(ipu_psys_init);
+module_exit(ipu_psys_exit);
+
+MODULE_AUTHOR("Antti Laakso <antti.laakso at intel.com>");
+MODULE_AUTHOR("Bin Han <bin.b.han at intel.com>");
+MODULE_AUTHOR("Renwei Wu <renwei.wu at intel.com>");
+MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng at intel.com>");
+MODULE_AUTHOR("Xia Wu <xia.wu at intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao at intel.com>");
+MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang at intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding at intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel IPU processing system driver");
diff --git a/drivers/media/pci/intel/ipu-psys.h b/drivers/media/pci/intel/ipu-psys.h
new file mode 100644
index 000000000000..b227c2bc7415
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-psys.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_PSYS_H
+#define IPU_PSYS_H
+
+#include <linux/cdev.h>
+#include <linux/workqueue.h>
+
+#include "ipu.h"
+#include "ipu-pdata.h"
+#include "ipu-fw-psys.h"
+#include "ipu-platform-psys.h"
+
+#define IPU_PSYS_PG_POOL_SIZE 16
+#define IPU_PSYS_PG_MAX_SIZE 2048
+#define IPU_MAX_PSYS_CMD_BUFFERS 32
+#define IPU_PSYS_EVENT_CMD_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS
+#define IPU_PSYS_EVENT_FRAGMENT_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS
+#define IPU_PSYS_CLOSE_TIMEOUT_US   50
+#define IPU_PSYS_CLOSE_TIMEOUT (100000 / IPU_PSYS_CLOSE_TIMEOUT_US)
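+/* i.e. about 100 ms of total close budget, polled in 50 us steps */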
+#define IPU_PSYS_WORK_QUEUE		system_power_efficient_wq
+#define IPU_MAX_RESOURCES 128
+
+/* Opaque structure. Do not access fields. */
+struct ipu_resource {
+	u32 id;
+	int elements;	/* Number of elements available for allocation */
+	unsigned long *bitmap;	/* Allocation bitmap, a bit for each element */
+};
+
+enum ipu_resource_type {
+	IPU_RESOURCE_DEV_CHN = 0,
+	IPU_RESOURCE_EXT_MEM,
+	IPU_RESOURCE_DFM
+};
+
+/* Allocation of resource(s) */
+/* Opaque structure. Do not access fields. */
+struct ipu_resource_alloc {
+	enum ipu_resource_type type;
+	struct ipu_resource *resource;
+	int elements;
+	int pos;
+};
+
+/*
+ * This struct represents all of the currently allocated
+ * resources from the IPU model. It is also used for allocating
+ * resources for the next set of PGs to be run on the IPU
+ * (i.e. those PGs which are not yet running and which do not
+ * yet reserve real IPU resources).
+ * A larger array is used to cover the existing resource quantity.
+ */
+
+/* the resource size may need to be expanded for a new resource model */
+struct ipu_psys_resource_pool {
+	u32 cells;	/* Bitmask of cells allocated */
+	struct ipu_resource dev_channels[16];
+	struct ipu_resource ext_memory[32];
+	struct ipu_resource dfms[16];
+	DECLARE_BITMAP(cmd_queues, 32);
+};
+
+/*
+ * This struct keeps track of the resources allocated for a specific PG.
+ * It is used for freeing up resources from struct ipu_psys_resources
+ * when the PG is released from the IPU4 (or the model of the IPU4).
+ */
+struct ipu_psys_resource_alloc {
+	u32 cells;	/* Bitmask of cells needed */
+	struct ipu_resource_alloc
+	 resource_alloc[IPU_MAX_RESOURCES];
+	int resources;
+};
+
+struct task_struct;
+struct ipu_psys {
+	struct ipu_psys_capability caps;
+	struct cdev cdev;
+	struct device dev;
+
+	struct mutex mutex;	/* Psys various */
+	int power;
+	bool icache_prefetch_sp;
+	bool icache_prefetch_isp;
+	spinlock_t power_lock;	/* Serialize access to power */
+	spinlock_t pgs_lock;	/* Protect pgs list access */
+	struct list_head fhs;
+	struct list_head pgs;
+	struct list_head started_kcmds_list;
+	struct ipu_psys_pdata *pdata;
+	struct ipu_bus_device *adev;
+	struct ia_css_syscom_context *dev_ctx;
+	struct ia_css_syscom_config *syscom_config;
+	struct ia_css_psys_server_init *server_init;
+	struct task_struct *sched_cmd_thread;
+	struct work_struct watchdog_work;
+	wait_queue_head_t sched_cmd_wq;
+	atomic_t wakeup_count;  /* Psys schedule thread wakeup count */
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfsdir;
+#endif
+
+	/* Resources needed to be managed for process groups */
+	struct ipu_psys_resource_pool resource_pool_running;
+	struct ipu_psys_resource_pool resource_pool_started;
+
+	const struct firmware *fw;
+	struct sg_table fw_sgt;
+	u64 *pkg_dir;
+	dma_addr_t pkg_dir_dma_addr;
+	unsigned int pkg_dir_size;
+	unsigned long timeout;
+
+	int active_kcmds, started_kcmds;
+	void *fwcom;
+
+	int power_gating;
+};
+
+struct ipu_psys_fh {
+	struct ipu_psys *psys;
+	struct mutex mutex;	/* Protects bufmap & kcmds fields */
+	struct list_head list;
+	struct list_head bufmap;
+	wait_queue_head_t wait;
+	struct ipu_psys_scheduler sched;
+};
+
+struct ipu_psys_pg {
+	struct ipu_fw_psys_process_group *pg;
+	size_t size;
+	size_t pg_size;
+	dma_addr_t pg_dma_addr;
+	struct list_head list;
+	struct ipu_psys_resource_alloc resource_alloc;
+};
+
+struct ipu_psys_kcmd {
+	struct ipu_psys_fh *fh;
+	struct list_head list;
+	struct ipu_psys_buffer_set *kbuf_set;
+	enum ipu_psys_cmd_state state;
+	void *pg_manifest;
+	size_t pg_manifest_size;
+	struct ipu_psys_kbuffer **kbufs;
+	struct ipu_psys_buffer *buffers;
+	size_t nbuffers;
+	struct ipu_fw_psys_process_group *pg_user;
+	struct ipu_psys_pg *kpg;
+	u64 user_token;
+	u64 issue_id;
+	u32 priority;
+	u32 kernel_enable_bitmap[4];
+	u32 terminal_enable_bitmap[4];
+	u32 routing_enable_bitmap[4];
+	u32 rbm[5];
+	struct ipu_buttress_constraint constraint;
+	struct ipu_psys_event ev;
+	struct timer_list watchdog;
+};
+
+struct ipu_dma_buf_attach {
+	struct device *dev;
+	u64 len;
+	void *userptr;
+	struct sg_table *sgt;
+	bool vma_is_io;
+	struct page **pages;
+	size_t npages;
+};
+
+struct ipu_psys_kbuffer {
+	u64 len;
+	void *userptr;
+	u32 flags;
+	int fd;
+	void *kaddr;
+	struct list_head list;
+	dma_addr_t dma_addr;
+	struct sg_table *sgt;
+	struct dma_buf_attachment *db_attach;
+	struct dma_buf *dbuf;
+	bool valid;	/* True when buffer is usable */
+};
+
+#define inode_to_ipu_psys(inode) \
+	container_of((inode)->i_cdev, struct ipu_psys, cdev)
+
+#ifdef CONFIG_COMPAT
+long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd,
+			     unsigned long arg);
+#endif
+
+void ipu_psys_setup_hw(struct ipu_psys *psys);
+void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on);
+void ipu_psys_handle_events(struct ipu_psys *psys);
+int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh);
+void ipu_psys_run_next(struct ipu_psys *psys);
+void ipu_psys_watchdog_work(struct work_struct *work);
+struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size);
+struct ipu_psys_kbuffer *
+ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd);
+int ipu_psys_mapbuf_with_lock(int fd, struct ipu_psys_fh *fh,
+			      struct ipu_psys_kbuffer *kbuf);
+struct ipu_psys_kbuffer *
+ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr);
+#ifdef IPU_PSYS_GPC
+int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys);
+#endif
+int ipu_psys_resource_pool_init(struct ipu_psys_resource_pool *pool);
+void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool *pool);
+struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh);
+long ipu_ioctl_dqevent(struct ipu_psys_event *event,
+		       struct ipu_psys_fh *fh, unsigned int f_flags);
+
+#endif /* IPU_PSYS_H */
diff --git a/drivers/media/pci/intel/ipu-trace.c b/drivers/media/pci/intel/ipu-trace.c
new file mode 100644
index 000000000000..10e4d5c68619
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-trace.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2014 - 2020 Intel Corporation
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ipu.h"
+#include "ipu-platform-regs.h"
+#include "ipu-trace.h"
+
+/* Input data processing states */
+enum config_file_parse_states {
+	STATE_FILL = 0,
+	STATE_COMMENT,
+	STATE_COMPLETE,
+};
+
+struct trace_register_range {
+	u32 start;
+	u32 end;
+};
+
+static u16 trace_unit_template[] = TRACE_REG_CREATE_TUN_REGISTER_LIST;
+static u16 trace_monitor_template[] = TRACE_REG_CREATE_TM_REGISTER_LIST;
+static u16 trace_gpc_template[] = TRACE_REG_CREATE_GPC_REGISTER_LIST;
+
+static struct trace_register_range trace_csi2_range_template[] = {
+	{
+	 .start = TRACE_REG_CSI2_TM_RESET_REG_IDX,
+	 .end = TRACE_REG_CSI2_TM_IRQ_ENABLE_REG_ID_N(7)
+	},
+	{
+	 .start = TRACE_REG_END_MARK,
+	 .end = TRACE_REG_END_MARK
+	}
+};
+
+static struct trace_register_range trace_csi2_3ph_range_template[] = {
+	{
+	 .start = TRACE_REG_CSI2_3PH_TM_RESET_REG_IDX,
+	 .end = TRACE_REG_CSI2_3PH_TM_IRQ_ENABLE_REG_ID_N(7)
+	},
+	{
+	 .start = TRACE_REG_END_MARK,
+	 .end = TRACE_REG_END_MARK
+	}
+};
+
+static struct trace_register_range trace_sig2cio_range_template[] = {
+	{
+	 .start = TRACE_REG_SIG2CIO_ADDRESS,
+	 .end = (TRACE_REG_SIG2CIO_STATUS + 8 * TRACE_REG_SIG2CIO_SIZE_OF)
+	},
+	{
+	 .start = TRACE_REG_END_MARK,
+	 .end = TRACE_REG_END_MARK
+	}
+};
+
+#define LINE_MAX_LEN			128
+#define MEMORY_RING_BUFFER_SIZE		(SZ_1M * 32)
+#define TRACE_MESSAGE_SIZE		16
+/*
+ * The trace unit appears to sometimes write outside the given buffer.
+ * To avoid memory corruption, one extra guard page is reserved at the
+ * end of the buffer. The extra area is also read back since it may
+ * contain valid data.
+ */
+#define MEMORY_RING_BUFFER_GUARD	PAGE_SIZE
+#define MEMORY_RING_BUFFER_OVERREAD	MEMORY_RING_BUFFER_GUARD
+#define MAX_TRACE_REGISTERS		200
+#define TRACE_CONF_DUMP_BUFFER_SIZE	(MAX_TRACE_REGISTERS * 2 * 32)
+
+#define IPU_TRACE_TIME_RETRY	5
+
+struct config_value {
+	u32 reg;
+	u32 value;
+};
+
+struct ipu_trace_buffer {
+	dma_addr_t dma_handle;
+	void *memory_buffer;
+};
+
+struct ipu_subsystem_trace_config {
+	u32 offset;
+	void __iomem *base;
+	struct ipu_trace_buffer memory;	/* ring buffer */
+	struct device *dev;
+	struct ipu_trace_block *blocks;
+	unsigned int fill_level;	/* Number of regs in config table below */
+	bool running;
+	/* Cached register values  */
+	struct config_value config[MAX_TRACE_REGISTERS];
+};
+
+/*
+ * State of the input data processing is kept in this structure.
+ * Only one user is supported at a time.
+ */
+struct buf_state {
+	char line_buffer[LINE_MAX_LEN];
+	enum config_file_parse_states state;
+	int offset;	/* Offset into line_buffer */
+};
+
+struct ipu_trace {
+	struct mutex lock; /* Protect ipu trace operations */
+	bool open;
+	char *conf_dump_buffer;
+	int size_conf_dump;
+	struct buf_state buffer_state;
+
+	struct ipu_subsystem_trace_config isys;
+	struct ipu_subsystem_trace_config psys;
+};
+
+static void __ipu_trace_restore(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_device *isp = adev->isp;
+	struct ipu_trace *trace = isp->trace;
+	struct config_value *config;
+	struct ipu_subsystem_trace_config *sys = adev->trace_cfg;
+	struct ipu_trace_block *blocks;
+	u32 mapped_trace_buffer;
+	void __iomem *addr = NULL;
+	int i;
+
+	if (trace->open) {
+		dev_info(dev, "Trace control file open. Skipping update\n");
+		return;
+	}
+
+	if (!sys)
+		return;
+
+	/* leave if no trace configuration for this subsystem */
+	if (sys->fill_level == 0)
+		return;
+
+	/* Find trace unit base address */
+	blocks = sys->blocks;
+	while (blocks->type != IPU_TRACE_BLOCK_END) {
+		if (blocks->type == IPU_TRACE_BLOCK_TUN) {
+			addr = sys->base + blocks->offset;
+			break;
+		}
+		blocks++;
+	}
+	if (!addr)
+		return;
+
+	if (!sys->memory.memory_buffer) {
+		sys->memory.memory_buffer =
+		    dma_alloc_attrs(dev, MEMORY_RING_BUFFER_SIZE +
+				    MEMORY_RING_BUFFER_GUARD,
+				    &sys->memory.dma_handle,
+				    GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
+	}
+
+	if (!sys->memory.memory_buffer) {
+		dev_err(dev, "No memory for tracing. Trace unit disabled\n");
+		return;
+	}
+
+	config = sys->config;
+	mapped_trace_buffer = sys->memory.dma_handle;
+
+	/* ring buffer base */
+	writel(mapped_trace_buffer, addr + TRACE_REG_TUN_DRAM_BASE_ADDR);
+
+	/* ring buffer end */
+	writel(mapped_trace_buffer + MEMORY_RING_BUFFER_SIZE -
+		   TRACE_MESSAGE_SIZE, addr + TRACE_REG_TUN_DRAM_END_ADDR);
+
+	/* Infobits for ddr trace */
+	writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY,
+	       addr + TRACE_REG_TUN_DDR_INFO_VAL);
+
+	/* Find trace timer reset address */
+	addr = NULL;
+	blocks = sys->blocks;
+	while (blocks->type != IPU_TRACE_BLOCK_END) {
+		if (blocks->type == IPU_TRACE_TIMER_RST) {
+			addr = sys->base + blocks->offset;
+			break;
+		}
+		blocks++;
+	}
+	if (!addr) {
+		dev_err(dev, "No trace reset addr\n");
+		return;
+	}
+
+	/* Remove reset from trace timers */
+	writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, addr);
+
+	/* Register config received from userspace */
+	for (i = 0; i < sys->fill_level; i++) {
+		dev_dbg(dev,
+			"Trace restore: reg 0x%08x, value 0x%08x\n",
+			config[i].reg, config[i].value);
+		writel(config[i].value, isp->base + config[i].reg);
+	}
+
+	sys->running = true;
+}
+
+void ipu_trace_restore(struct device *dev)
+{
+	struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace;
+
+	if (!trace)
+		return;
+
+	mutex_lock(&trace->lock);
+	__ipu_trace_restore(dev);
+	mutex_unlock(&trace->lock);
+}
+EXPORT_SYMBOL_GPL(ipu_trace_restore);
+
+static void __ipu_trace_stop(struct device *dev)
+{
+	struct ipu_subsystem_trace_config *sys =
+	    to_ipu_bus_device(dev)->trace_cfg;
+	struct ipu_trace_block *blocks;
+
+	if (!sys)
+		return;
+
+	if (!sys->running)
+		return;
+	sys->running = false;
+
+	/* Turn off all the gpc blocks */
+	blocks = sys->blocks;
+	while (blocks->type != IPU_TRACE_BLOCK_END) {
+		if (blocks->type == IPU_TRACE_BLOCK_GPC) {
+			writel(0, sys->base + blocks->offset +
+				   TRACE_REG_GPC_OVERALL_ENABLE);
+		}
+		blocks++;
+	}
+
+	/* Turn off all the trace monitors */
+	blocks = sys->blocks;
+	while (blocks->type != IPU_TRACE_BLOCK_END) {
+		if (blocks->type == IPU_TRACE_BLOCK_TM) {
+			writel(0, sys->base + blocks->offset +
+				   TRACE_REG_TM_TRACE_ENABLE_NPK);
+
+			writel(0, sys->base + blocks->offset +
+				   TRACE_REG_TM_TRACE_ENABLE_DDR);
+		}
+		blocks++;
+	}
+
+	/* Turn off trace units */
+	blocks = sys->blocks;
+	while (blocks->type != IPU_TRACE_BLOCK_END) {
+		if (blocks->type == IPU_TRACE_BLOCK_TUN) {
+			writel(0, sys->base + blocks->offset +
+				   TRACE_REG_TUN_DDR_ENABLE);
+			writel(0, sys->base + blocks->offset +
+				   TRACE_REG_TUN_NPK_ENABLE);
+		}
+		blocks++;
+	}
+}
+
+void ipu_trace_stop(struct device *dev)
+{
+	struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace;
+
+	if (!trace)
+		return;
+
+	mutex_lock(&trace->lock);
+	__ipu_trace_stop(dev);
+	mutex_unlock(&trace->lock);
+}
+EXPORT_SYMBOL_GPL(ipu_trace_stop);
+
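+/*
+ * A register is valid when @reg equals @base plus one of the offsets
+ * in the TRACE_REG_END_MARK-terminated @template list.
+ */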
+static int validate_register(u32 base, u32 reg, u16 *template)
+{
+	int i = 0;
+
+	while (template[i] != TRACE_REG_END_MARK) {
+		if (template[i] + base != reg) {
+			i++;
+			continue;
+		}
+		/* This is a valid register */
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int validate_register_range(u32 base, u32 reg,
+				   struct trace_register_range *template)
+{
+	unsigned int i = 0;
+
+	if (!IS_ALIGNED(reg, sizeof(u32)))
+		return -EINVAL;
+
+	while (template[i].start != TRACE_REG_END_MARK) {
+		if ((reg < template[i].start + base) ||
+		    (reg > template[i].end + base)) {
+			i++;
+			continue;
+		}
+		/* This is a valid register */
+		return 0;
+	}
+	return -EINVAL;
+}
+
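+/*
+ * Validate a "reg = value" pair from user space: map the address to a
+ * subsystem (isys or psys), find the trace block it falls in, check it
+ * against that block's register template and, if valid, cache it in the
+ * config table so that __ipu_trace_restore() can program it to HW.
+ */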
+static int update_register_cache(struct ipu_device *isp, u32 reg, u32 value)
+{
+	struct ipu_trace *dctrl = isp->trace;
+	const struct ipu_trace_block *blocks;
+	struct ipu_subsystem_trace_config *sys;
+	struct device *dev;
+	u32 base = 0;
+	u16 *template = NULL;
+	struct trace_register_range *template_range = NULL;
+	int i, range;
+	int rval = -EINVAL;
+
+	if (dctrl->isys.offset == dctrl->psys.offset) {
+		/* For the IPU with uniform address space */
+		if (reg >= IPU_ISYS_OFFSET &&
+		    reg < IPU_ISYS_OFFSET + TRACE_REG_MAX_ISYS_OFFSET)
+			sys = &dctrl->isys;
+		else if (reg >= IPU_PSYS_OFFSET &&
+			 reg < IPU_PSYS_OFFSET + TRACE_REG_MAX_PSYS_OFFSET)
+			sys = &dctrl->psys;
+		else
+			goto error;
+	} else {
+		if (dctrl->isys.offset &&
+		    reg >= dctrl->isys.offset &&
+		    reg < dctrl->isys.offset + TRACE_REG_MAX_ISYS_OFFSET)
+			sys = &dctrl->isys;
+		else if (dctrl->psys.offset &&
+			 reg >= dctrl->psys.offset &&
+			 reg < dctrl->psys.offset + TRACE_REG_MAX_PSYS_OFFSET)
+			sys = &dctrl->psys;
+		else
+			goto error;
+	}
+
+	blocks = sys->blocks;
+	dev = sys->dev;
+
+	/* Check registers block by block */
+	i = 0;
+	while (blocks[i].type != IPU_TRACE_BLOCK_END) {
+		base = blocks[i].offset + sys->offset;
+		if ((reg >= base && reg < base + TRACE_REG_MAX_BLOCK_SIZE))
+			break;
+		i++;
+	}
+
+	range = 0;
+	switch (blocks[i].type) {
+	case IPU_TRACE_BLOCK_TUN:
+		template = trace_unit_template;
+		break;
+	case IPU_TRACE_BLOCK_TM:
+		template = trace_monitor_template;
+		break;
+	case IPU_TRACE_BLOCK_GPC:
+		template = trace_gpc_template;
+		break;
+	case IPU_TRACE_CSI2:
+		range = 1;
+		template_range = trace_csi2_range_template;
+		break;
+	case IPU_TRACE_CSI2_3PH:
+		range = 1;
+		template_range = trace_csi2_3ph_range_template;
+		break;
+	case IPU_TRACE_SIG2CIOS:
+		range = 1;
+		template_range = trace_sig2cio_range_template;
+		break;
+	default:
+		goto error;
+	}
+
+	if (range)
+		rval = validate_register_range(base, reg, template_range);
+	else
+		rval = validate_register(base, reg, template);
+
+	if (rval)
+		goto error;
+
+	if (sys->fill_level < MAX_TRACE_REGISTERS) {
+		dev_dbg(dev,
+			"Trace reg addr 0x%08x value 0x%08x\n", reg, value);
+		sys->config[sys->fill_level].reg = reg;
+		sys->config[sys->fill_level].value = value;
+		sys->fill_level++;
+	} else {
+		rval = -ENOMEM;
+		goto error;
+	}
+	return 0;
+error:
+	dev_info(&isp->pdev->dev,
+		 "Trace register address 0x%08x ignored as invalid register\n",
+		 reg);
+	return rval;
+}
+
+/*
+ * We don't know how much data is received at a time, so process the
+ * given data character by character.
+ * Fill the line buffer until either
+ * 1) a newline is received -> go to decode
+ * or
+ * 2) line_buffer is full -> ignore the rest of the line, then try to decode
+ * or
+ * 3) a comment mark is found -> ignore the rest of the line, then try to
+ *    decode the data received before the comment mark
+ *
+ * The decode phase looks for "reg = value" pairs and validates them.
+ */
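+/*
+ * Example input accepted by the parser (the addresses below are
+ * illustrative only; each pair is validated against the per-block
+ * register templates before it is cached):
+ *
+ *   # comment until end of line
+ *   0x001d0000 = 0x00000001
+ *   0x001d0004 = 0x0        # trailing comments are allowed too
+ */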
+static int process_buffer(struct ipu_device *isp,
+			  char *buffer, int size, struct buf_state *state)
+{
+	int i, ret;
+	int curr_state = state->state;
+	u32 reg, value;
+
+	for (i = 0; i < size; i++) {
+		/*
+		 * Comment mark in any position turns on comment mode
+		 * until end of line
+		 */
+		if (curr_state != STATE_COMMENT && buffer[i] == '#') {
+			state->line_buffer[state->offset] = '\0';
+			curr_state = STATE_COMMENT;
+			continue;
+		}
+
+		switch (curr_state) {
+		case STATE_COMMENT:
+			/* Only new line can break this mode */
+			if (buffer[i] == '\n')
+				curr_state = STATE_COMPLETE;
+			break;
+		case STATE_FILL:
+			state->line_buffer[state->offset] = buffer[i];
+			state->offset++;
+
+			if (state->offset >= sizeof(state->line_buffer) - 1) {
+				/* Line buffer full - ignore rest */
+				state->line_buffer[state->offset] = '\0';
+				curr_state = STATE_COMMENT;
+				break;
+			}
+
+			if (buffer[i] == '\n') {
+				state->line_buffer[state->offset] = '\0';
+				curr_state = STATE_COMPLETE;
+			}
+			break;
+		default:
+			state->offset = 0;
+			state->line_buffer[state->offset] = '\0';
+			curr_state = STATE_COMMENT;
+		}
+
+		if (curr_state == STATE_COMPLETE) {
+			ret = sscanf(state->line_buffer, "%x = %x",
+				     &reg, &value);
+			if (ret == 2)
+				update_register_cache(isp, reg, value);
+
+			state->offset = 0;
+			curr_state = STATE_FILL;
+		}
+	}
+	state->state = curr_state;
+	return 0;
+}
+
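+/*
+ * Render the cached register configuration of both subsystems as
+ * "0xreg = 0xvalue" lines into conf_dump_buffer for reads from the
+ * traceconf debugfs file.
+ */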
+static void traceconf_dump(struct ipu_device *isp)
+{
+	struct ipu_subsystem_trace_config *sys[2] = {
+		&isp->trace->isys,
+		&isp->trace->psys
+	};
+	int i, j, rem_size;
+	char *out;
+
+	isp->trace->size_conf_dump = 0;
+	out = isp->trace->conf_dump_buffer;
+	rem_size = TRACE_CONF_DUMP_BUFFER_SIZE;
+
+	for (j = 0; j < ARRAY_SIZE(sys); j++) {
+		for (i = 0; i < sys[j]->fill_level && rem_size > 0; i++) {
+			int bytes_print;
+			int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n",
+					 sys[j]->config[i].reg,
+					 sys[j]->config[i].value);
+
+			bytes_print = min(n, rem_size - 1);
+			rem_size -= bytes_print;
+			out += bytes_print;
+		}
+	}
+	isp->trace->size_conf_dump = out - isp->trace->conf_dump_buffer;
+}
+
+static void clear_trace_buffer(struct ipu_subsystem_trace_config *sys)
+{
+	if (!sys->memory.memory_buffer)
+		return;
+
+	memset(sys->memory.memory_buffer, 0, MEMORY_RING_BUFFER_SIZE +
+	       MEMORY_RING_BUFFER_OVERREAD);
+
+	dma_sync_single_for_device(sys->dev,
+				   sys->memory.dma_handle,
+				   MEMORY_RING_BUFFER_SIZE +
+				   MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE);
+}
+
+static int traceconf_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	struct ipu_device *isp;
+
+	if (!inode->i_private)
+		return -EACCES;
+
+	isp = inode->i_private;
+
+	ret = mutex_trylock(&isp->trace->lock);
+	if (!ret)
+		return -EBUSY;
+
+	if (isp->trace->open) {
+		mutex_unlock(&isp->trace->lock);
+		return -EBUSY;
+	}
+
+	file->private_data = isp;
+	isp->trace->open = 1;
+	if (file->f_mode & FMODE_WRITE) {
+		/*
+		 * TBD: Allocate a temp buffer for processing and push the
+		 * validated buffer to the active config.
+		 */
+
+		/* Forget old config if opened for write */
+		isp->trace->isys.fill_level = 0;
+		isp->trace->psys.fill_level = 0;
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		isp->trace->conf_dump_buffer =
+		    vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE);
+		if (!isp->trace->conf_dump_buffer) {
+			isp->trace->open = 0;
+			mutex_unlock(&isp->trace->lock);
+			return -ENOMEM;
+		}
+		traceconf_dump(isp);
+	}
+	mutex_unlock(&isp->trace->lock);
+	return 0;
+}
+
+static ssize_t traceconf_read(struct file *file, char __user *buf,
+			      size_t len, loff_t *ppos)
+{
+	struct ipu_device *isp = file->private_data;
+
+	return simple_read_from_buffer(buf, len, ppos,
+				       isp->trace->conf_dump_buffer,
+				       isp->trace->size_conf_dump);
+}
+
+static ssize_t traceconf_write(struct file *file, const char __user *buf,
+			       size_t len, loff_t *ppos)
+{
+	struct ipu_device *isp = file->private_data;
+	char buffer[64];
+	ssize_t bytes, count;
+	loff_t pos = *ppos;
+
+	if (*ppos < 0)
+		return -EINVAL;
+
+	count = min(len, sizeof(buffer));
+	bytes = copy_from_user(buffer, buf, count);
+	if (bytes == count)
+		return -EFAULT;
+
+	count -= bytes;
+	mutex_lock(&isp->trace->lock);
+	process_buffer(isp, buffer, count, &isp->trace->buffer_state);
+	mutex_unlock(&isp->trace->lock);
+	*ppos = pos + count;
+
+	return count;
+}
+
+static int traceconf_release(struct inode *inode, struct file *file)
+{
+	struct ipu_device *isp = file->private_data;
+	struct device *psys_dev = isp->psys ? &isp->psys->dev : NULL;
+	struct device *isys_dev = isp->isys ? &isp->isys->dev : NULL;
+	int pm_rval = -EINVAL;
+
+	/*
+	 * Turn devices on outside trace->lock mutex. PM transition may
+	 * cause call to function which tries to take the same lock.
+	 * Also do this before trace->open is set back to 0 to avoid
+	 * double restore (one here and one in pm transition). We can't
+	 * rely purely on the restore done by pm call backs since trace
+	 * configuration can occur in any phase compared to other activity.
+	 */
+
+	if (file->f_mode & FMODE_WRITE) {
+		if (isys_dev)
+			pm_rval = pm_runtime_get_sync(isys_dev);
+
+		if (pm_rval >= 0) {
+			/* ISYS ok or missing */
+			if (psys_dev)
+				pm_rval = pm_runtime_get_sync(psys_dev);
+
+			if (pm_rval < 0) {
+				pm_runtime_put_noidle(psys_dev);
+				if (isys_dev)
+					pm_runtime_put(isys_dev);
+			}
+		} else if (isys_dev) {
+			/* Balance the failed pm_runtime_get_sync() above */
+			pm_runtime_put_noidle(isys_dev);
+		}
+	}
+
+	mutex_lock(&isp->trace->lock);
+	isp->trace->open = 0;
+	vfree(isp->trace->conf_dump_buffer);
+	isp->trace->conf_dump_buffer = NULL;
+
+	if (pm_rval >= 0) {
+		/* Update new cfg to HW */
+		if (isys_dev) {
+			__ipu_trace_stop(isys_dev);
+			clear_trace_buffer(isp->isys->trace_cfg);
+			__ipu_trace_restore(isys_dev);
+		}
+
+		if (psys_dev) {
+			__ipu_trace_stop(psys_dev);
+			clear_trace_buffer(isp->psys->trace_cfg);
+			__ipu_trace_restore(psys_dev);
+		}
+	}
+
+	mutex_unlock(&isp->trace->lock);
+
+	if (pm_rval >= 0) {
+		/* Again - this must be done with trace->lock not taken */
+		if (psys_dev)
+			pm_runtime_put(psys_dev);
+		if (isys_dev)
+			pm_runtime_put(isys_dev);
+	}
+	return 0;
+}
+
+static const struct file_operations ipu_traceconf_fops = {
+	.owner = THIS_MODULE,
+	.open = traceconf_open,
+	.release = traceconf_release,
+	.read = traceconf_read,
+	.write = traceconf_write,
+	.llseek = no_llseek,
+};
+
+static int gettrace_open(struct inode *inode, struct file *file)
+{
+	struct ipu_subsystem_trace_config *sys = inode->i_private;
+
+	if (!sys)
+		return -EACCES;
+
+	if (!sys->memory.memory_buffer)
+		return -EACCES;
+
+	dma_sync_single_for_cpu(sys->dev,
+				sys->memory.dma_handle,
+				MEMORY_RING_BUFFER_SIZE +
+				MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE);
+
+	file->private_data = sys;
+	return 0;
+}
+
+static ssize_t gettrace_read(struct file *file, char __user *buf,
+			     size_t len, loff_t *ppos)
+{
+	struct ipu_subsystem_trace_config *sys = file->private_data;
+
+	return simple_read_from_buffer(buf, len, ppos,
+				       sys->memory.memory_buffer,
+				       MEMORY_RING_BUFFER_SIZE +
+				       MEMORY_RING_BUFFER_OVERREAD);
+}
+
+static ssize_t gettrace_write(struct file *file, const char __user *buf,
+			      size_t len, loff_t *ppos)
+{
+	struct ipu_subsystem_trace_config *sys = file->private_data;
+	static const char str[] = "clear";
+	char buffer[sizeof(str)] = { 0 };
+	ssize_t ret;
+
+	ret = simple_write_to_buffer(buffer, sizeof(buffer), ppos, buf, len);
+	if (ret < 0)
+		return ret;
+
+	if (ret < sizeof(str) - 1)
+		return -EINVAL;
+
+	if (!strncmp(str, buffer, sizeof(str) - 1)) {
+		clear_trace_buffer(sys);
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static int gettrace_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations ipu_gettrace_fops = {
+	.owner = THIS_MODULE,
+	.open = gettrace_open,
+	.release = gettrace_release,
+	.read = gettrace_read,
+	.write = gettrace_write,
+	.llseek = no_llseek,
+};
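+
+/*
+ * Illustrative user-space usage, assuming debugfs is mounted at
+ * /sys/kernel/debug and the IPU directory is named "intel-ipu" (the
+ * device name set in the PCI probe; the exact path may differ):
+ *
+ *   cat trace.conf > /sys/kernel/debug/intel-ipu/traceconf
+ *   cat /sys/kernel/debug/intel-ipu/getisystrace > isys-trace.bin
+ *   echo clear > /sys/kernel/debug/intel-ipu/getisystrace
+ */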
+
+int ipu_trace_init(struct ipu_device *isp, void __iomem *base,
+		   struct device *dev, struct ipu_trace_block *blocks)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_trace *trace = isp->trace;
+	struct ipu_subsystem_trace_config *sys;
+	int ret = 0;
+
+	if (!isp->trace)
+		return 0;
+
+	mutex_lock(&isp->trace->lock);
+
+	if (dev == &isp->isys->dev) {
+		sys = &trace->isys;
+	} else if (dev == &isp->psys->dev) {
+		sys = &trace->psys;
+	} else {
+		ret = -EINVAL;
+		goto leave;
+	}
+
+	adev->trace_cfg = sys;
+	sys->dev = dev;
+	sys->offset = base - isp->base;	/* sub system offset */
+	sys->base = base;
+	sys->blocks = blocks;
+
+leave:
+	mutex_unlock(&isp->trace->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_trace_init);
+
+void ipu_trace_uninit(struct device *dev)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+	struct ipu_device *isp = adev->isp;
+	struct ipu_trace *trace = isp->trace;
+	struct ipu_subsystem_trace_config *sys = adev->trace_cfg;
+
+	if (!trace || !sys)
+		return;
+
+	mutex_lock(&trace->lock);
+
+	if (sys->memory.memory_buffer)
+		dma_free_attrs(sys->dev,
+			       MEMORY_RING_BUFFER_SIZE +
+			       MEMORY_RING_BUFFER_GUARD,
+			       sys->memory.memory_buffer,
+			       sys->memory.dma_handle, DMA_ATTR_NON_CONSISTENT);
+
+	sys->dev = NULL;
+	sys->memory.memory_buffer = NULL;
+
+	mutex_unlock(&trace->lock);
+}
+EXPORT_SYMBOL_GPL(ipu_trace_uninit);
+
+int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir)
+{
+	struct dentry *files[3];
+	int i = 0;
+
+	files[i] = debugfs_create_file("traceconf", 0644,
+				       dir, isp, &ipu_traceconf_fops);
+	if (!files[i])
+		return -ENOMEM;
+	i++;
+
+	files[i] = debugfs_create_file("getisystrace", 0444,
+				       dir,
+				       &isp->trace->isys, &ipu_gettrace_fops);
+
+	if (!files[i])
+		goto error;
+	i++;
+
+	files[i] = debugfs_create_file("getpsystrace", 0444,
+				       dir,
+				       &isp->trace->psys, &ipu_gettrace_fops);
+	if (!files[i])
+		goto error;
+
+	return 0;
+
+error:
+	for (; i > 0; i--)
+		debugfs_remove(files[i - 1]);
+	return -ENOMEM;
+}
+
+int ipu_trace_add(struct ipu_device *isp)
+{
+	isp->trace = devm_kzalloc(&isp->pdev->dev,
+				  sizeof(struct ipu_trace), GFP_KERNEL);
+	if (!isp->trace)
+		return -ENOMEM;
+
+	mutex_init(&isp->trace->lock);
+
+	return 0;
+}
+
+void ipu_trace_release(struct ipu_device *isp)
+{
+	if (!isp->trace)
+		return;
+	mutex_destroy(&isp->trace->lock);
+}
+
+MODULE_AUTHOR("Samu Onkalo <samu.onkalo at intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu trace support");
diff --git a/drivers/media/pci/intel/ipu-trace.h b/drivers/media/pci/intel/ipu-trace.h
new file mode 100644
index 000000000000..f66b63aacc9a
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-trace.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 - 2020 Intel Corporation */
+
+#ifndef IPU_TRACE_H
+#define IPU_TRACE_H
+#include <linux/debugfs.h>
+
+#define TRACE_REG_MAX_BLOCK_SIZE	0x0fff
+
+#define TRACE_REG_END_MARK 0xffff
+
+#define TRACE_REG_CMD_TYPE_D64		0x0
+#define TRACE_REG_CMD_TYPE_D64M		0x1
+#define TRACE_REG_CMD_TYPE_D64TS	0x2
+#define TRACE_REG_CMD_TYPE_D64MTS	0x3
+
+/* Trace unit register offsets */
+#define TRACE_REG_TUN_DDR_ENABLE        0x000
+#define TRACE_REG_TUN_NPK_ENABLE	0x004
+#define TRACE_REG_TUN_DDR_INFO_VAL	0x008
+#define TRACE_REG_TUN_NPK_ADDR		0x00C
+#define TRACE_REG_TUN_DRAM_BASE_ADDR	0x010
+#define TRACE_REG_TUN_DRAM_END_ADDR	0x014
+#define TRACE_REG_TUN_LOCAL_TIMER0	0x018
+#define TRACE_REG_TUN_LOCAL_TIMER1	0x01C
+#define TRACE_REG_TUN_WR_PTR		0x020
+#define TRACE_REG_TUN_RD_PTR		0x024
+
+#define TRACE_REG_CREATE_TUN_REGISTER_LIST {	\
+	TRACE_REG_TUN_DDR_ENABLE,		\
+	TRACE_REG_TUN_NPK_ENABLE,		\
+	TRACE_REG_TUN_DDR_INFO_VAL,	        \
+	TRACE_REG_TUN_NPK_ADDR,			\
+	TRACE_REG_END_MARK			\
+}
+
+/*
+ * Following registers are left out on purpose:
+ * TUN_LOCAL_TIMER0, TUN_LOCAL_TIMER1, TUN_DRAM_BASE_ADDR
+ * TUN_DRAM_END_ADDR, TUN_WR_PTR, TUN_RD_PTR
+ */
+
+/* Trace monitor register offsets */
+#define TRACE_REG_TM_TRACE_ADDR_A		0x0900
+#define TRACE_REG_TM_TRACE_ADDR_B		0x0904
+#define TRACE_REG_TM_TRACE_ADDR_C		0x0908
+#define TRACE_REG_TM_TRACE_ADDR_D		0x090c
+#define TRACE_REG_TM_TRACE_ENABLE_NPK		0x0910
+#define TRACE_REG_TM_TRACE_ENABLE_DDR		0x0914
+#define TRACE_REG_TM_TRACE_PER_PC		0x0918
+#define TRACE_REG_TM_TRACE_PER_BRANCH		0x091c
+#define TRACE_REG_TM_TRACE_HEADER		0x0920
+#define TRACE_REG_TM_TRACE_CFG			0x0924
+#define TRACE_REG_TM_TRACE_LOST_PACKETS		0x0928
+#define TRACE_REG_TM_TRACE_LP_CLEAR		0x092c
+#define TRACE_REG_TM_TRACE_LMRUN_MASK		0x0930
+#define TRACE_REG_TM_TRACE_LMRUN_PC_LOW		0x0934
+#define TRACE_REG_TM_TRACE_LMRUN_PC_HIGH	0x0938
+#define TRACE_REG_TM_TRACE_MMIO_SEL		0x093c
+#define TRACE_REG_TM_TRACE_MMIO_WP0_LOW		0x0940
+#define TRACE_REG_TM_TRACE_MMIO_WP1_LOW		0x0944
+#define TRACE_REG_TM_TRACE_MMIO_WP2_LOW		0x0948
+#define TRACE_REG_TM_TRACE_MMIO_WP3_LOW		0x094c
+#define TRACE_REG_TM_TRACE_MMIO_WP0_HIGH	0x0950
+#define TRACE_REG_TM_TRACE_MMIO_WP1_HIGH	0x0954
+#define TRACE_REG_TM_TRACE_MMIO_WP2_HIGH	0x0958
+#define TRACE_REG_TM_TRACE_MMIO_WP3_HIGH	0x095c
+#define TRACE_REG_TM_FWTRACE_FIRST		0x0A00
+#define TRACE_REG_TM_FWTRACE_MIDDLE		0x0A04
+#define TRACE_REG_TM_FWTRACE_LAST		0x0A08
+
+#define TRACE_REG_CREATE_TM_REGISTER_LIST {	\
+	TRACE_REG_TM_TRACE_ADDR_A,		\
+	TRACE_REG_TM_TRACE_ADDR_B,		\
+	TRACE_REG_TM_TRACE_ADDR_C,		\
+	TRACE_REG_TM_TRACE_ADDR_D,		\
+	TRACE_REG_TM_TRACE_ENABLE_NPK,		\
+	TRACE_REG_TM_TRACE_ENABLE_DDR,		\
+	TRACE_REG_TM_TRACE_PER_PC,		\
+	TRACE_REG_TM_TRACE_PER_BRANCH,		\
+	TRACE_REG_TM_TRACE_HEADER,		\
+	TRACE_REG_TM_TRACE_CFG,			\
+	TRACE_REG_TM_TRACE_LOST_PACKETS,	\
+	TRACE_REG_TM_TRACE_LP_CLEAR,		\
+	TRACE_REG_TM_TRACE_LMRUN_MASK,		\
+	TRACE_REG_TM_TRACE_LMRUN_PC_LOW,	\
+	TRACE_REG_TM_TRACE_LMRUN_PC_HIGH,	\
+	TRACE_REG_TM_TRACE_MMIO_SEL,		\
+	TRACE_REG_TM_TRACE_MMIO_WP0_LOW,	\
+	TRACE_REG_TM_TRACE_MMIO_WP1_LOW,	\
+	TRACE_REG_TM_TRACE_MMIO_WP2_LOW,	\
+	TRACE_REG_TM_TRACE_MMIO_WP3_LOW,	\
+	TRACE_REG_TM_TRACE_MMIO_WP0_HIGH,	\
+	TRACE_REG_TM_TRACE_MMIO_WP1_HIGH,	\
+	TRACE_REG_TM_TRACE_MMIO_WP2_HIGH,	\
+	TRACE_REG_TM_TRACE_MMIO_WP3_HIGH,	\
+	TRACE_REG_END_MARK			\
+}
+
+/*
+ * Following exists only in (I)SP address space:
+ * TM_FWTRACE_FIRST, TM_FWTRACE_MIDDLE, TM_FWTRACE_LAST
+ */
+
+#define TRACE_REG_GPC_RESET			0x000
+#define TRACE_REG_GPC_OVERALL_ENABLE		0x004
+#define TRACE_REG_GPC_TRACE_HEADER		0x008
+#define TRACE_REG_GPC_TRACE_ADDRESS		0x00C
+#define TRACE_REG_GPC_TRACE_NPK_EN		0x010
+#define TRACE_REG_GPC_TRACE_DDR_EN		0x014
+#define TRACE_REG_GPC_TRACE_LPKT_CLEAR		0x018
+#define TRACE_REG_GPC_TRACE_LPKT		0x01C
+
+#define TRACE_REG_GPC_ENABLE_ID0		0x020
+#define TRACE_REG_GPC_ENABLE_ID1		0x024
+#define TRACE_REG_GPC_ENABLE_ID2		0x028
+#define TRACE_REG_GPC_ENABLE_ID3		0x02c
+
+#define TRACE_REG_GPC_VALUE_ID0			0x030
+#define TRACE_REG_GPC_VALUE_ID1			0x034
+#define TRACE_REG_GPC_VALUE_ID2			0x038
+#define TRACE_REG_GPC_VALUE_ID3			0x03c
+
+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID0	0x040
+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID1	0x044
+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID2	0x048
+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID3	0x04c
+
+#define TRACE_REG_GPC_CNT_START_SELECT_ID0	0x050
+#define TRACE_REG_GPC_CNT_START_SELECT_ID1	0x054
+#define TRACE_REG_GPC_CNT_START_SELECT_ID2	0x058
+#define TRACE_REG_GPC_CNT_START_SELECT_ID3	0x05c
+
+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID0	0x060
+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID1	0x064
+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID2	0x068
+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID3	0x06c
+
+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID0	0x070
+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID1	0x074
+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID2	0x078
+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID3	0x07c
+
+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0	0x080
+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1	0x084
+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2	0x088
+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3	0x08c
+
+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0	0x090
+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1	0x094
+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2	0x098
+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3	0x09c
+
+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0	0x0a0
+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1	0x0a4
+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2	0x0a8
+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3	0x0ac
+
+#define TRACE_REG_GPC_IRQ_ENABLE_ID0		0x0b0
+#define TRACE_REG_GPC_IRQ_ENABLE_ID1		0x0b4
+#define TRACE_REG_GPC_IRQ_ENABLE_ID2		0x0b8
+#define TRACE_REG_GPC_IRQ_ENABLE_ID3		0x0bc
+
+#define TRACE_REG_CREATE_GPC_REGISTER_LIST {	\
+	TRACE_REG_GPC_RESET,			\
+	TRACE_REG_GPC_OVERALL_ENABLE,		\
+	TRACE_REG_GPC_TRACE_HEADER,		\
+	TRACE_REG_GPC_TRACE_ADDRESS,		\
+	TRACE_REG_GPC_TRACE_NPK_EN,		\
+	TRACE_REG_GPC_TRACE_DDR_EN,		\
+	TRACE_REG_GPC_TRACE_LPKT_CLEAR,		\
+	TRACE_REG_GPC_TRACE_LPKT,		\
+	TRACE_REG_GPC_ENABLE_ID0,		\
+	TRACE_REG_GPC_ENABLE_ID1,		\
+	TRACE_REG_GPC_ENABLE_ID2,		\
+	TRACE_REG_GPC_ENABLE_ID3,		\
+	TRACE_REG_GPC_VALUE_ID0,		\
+	TRACE_REG_GPC_VALUE_ID1,		\
+	TRACE_REG_GPC_VALUE_ID2,		\
+	TRACE_REG_GPC_VALUE_ID3,		\
+	TRACE_REG_GPC_CNT_INPUT_SELECT_ID0,	\
+	TRACE_REG_GPC_CNT_INPUT_SELECT_ID1,	\
+	TRACE_REG_GPC_CNT_INPUT_SELECT_ID2,	\
+	TRACE_REG_GPC_CNT_INPUT_SELECT_ID3,	\
+	TRACE_REG_GPC_CNT_START_SELECT_ID0,	\
+	TRACE_REG_GPC_CNT_START_SELECT_ID1,	\
+	TRACE_REG_GPC_CNT_START_SELECT_ID2,	\
+	TRACE_REG_GPC_CNT_START_SELECT_ID3,	\
+	TRACE_REG_GPC_CNT_STOP_SELECT_ID0,	\
+	TRACE_REG_GPC_CNT_STOP_SELECT_ID1,	\
+	TRACE_REG_GPC_CNT_STOP_SELECT_ID2,	\
+	TRACE_REG_GPC_CNT_STOP_SELECT_ID3,	\
+	TRACE_REG_GPC_CNT_MSG_SELECT_ID0,	\
+	TRACE_REG_GPC_CNT_MSG_SELECT_ID1,	\
+	TRACE_REG_GPC_CNT_MSG_SELECT_ID2,	\
+	TRACE_REG_GPC_CNT_MSG_SELECT_ID3,	\
+	TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0,	\
+	TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1,	\
+	TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2,	\
+	TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3,	\
+	TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0,	\
+	TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1,	\
+	TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2,	\
+	TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3,	\
+	TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0,	\
+	TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1,	\
+	TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2,	\
+	TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3,	\
+	TRACE_REG_GPC_IRQ_ENABLE_ID0,		\
+	TRACE_REG_GPC_IRQ_ENABLE_ID1,		\
+	TRACE_REG_GPC_IRQ_ENABLE_ID2,		\
+	TRACE_REG_GPC_IRQ_ENABLE_ID3,		\
+	TRACE_REG_END_MARK			\
+}
+
+/* CSI2 legacy receiver trace registers */
+#define TRACE_REG_CSI2_TM_RESET_REG_IDX			   0x0000
+#define TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX	   0x0004
+#define TRACE_REG_CSI2_TM_TRACE_HEADER_REG_IDX		   0x0008
+#define TRACE_REG_CSI2_TM_TRACE_ADDRESS_REG_IDX		   0x000c
+#define TRACE_REG_CSI2_TM_TRACE_HEADER_VAL		   0xf
+#define TRACE_REG_CSI2_TM_TRACE_ADDRESS_VAL		   0x100218
+#define TRACE_REG_CSI2_TM_MONITOR_ID			   0x8
+
+/* 0 <= n <= 3 */
+#define TRACE_REG_CSI2_TM_TRACE_NPK_EN_REG_IDX_P(n)	(0x0010 + (n) * 4)
+#define TRACE_REG_CSI2_TM_TRACE_DDR_EN_REG_IDX_P(n)	(0x0020 + (n) * 4)
+#define TRACE_CSI2_TM_EVENT_FE(vc)			(BIT(0) << ((vc) * 6))
+#define TRACE_CSI2_TM_EVENT_FS(vc)			(BIT(1) << ((vc) * 6))
+#define TRACE_CSI2_TM_EVENT_PE(vc)			(BIT(2) << ((vc) * 6))
+#define TRACE_CSI2_TM_EVENT_PS(vc)			(BIT(3) << ((vc) * 6))
+#define TRACE_CSI2_TM_EVENT_LE(vc)			(BIT(4) << ((vc) * 6))
+#define TRACE_CSI2_TM_EVENT_LS(vc)			(BIT(5) << ((vc) * 6))
+
+#define TRACE_REG_CSI2_TM_TRACE_LPKT_CLEAR_REG_IDX	   0x0030
+#define TRACE_REG_CSI2_TM_TRACE_LPKT_REG_IDX		   0x0034
+
+/* 0 <= n <= 7 */
+#define TRACE_REG_CSI2_TM_ENABLE_REG_ID_N(n)		   (0x0038 + (n) * 4)
+#define TRACE_REG_CSI2_TM_VALUE_REG_ID_N(n)		   (0x0058 + (n) * 4)
+#define TRACE_REG_CSI2_TM_CNT_INPUT_SELECT_REG_ID_N(n)	   (0x0078 + (n) * 4)
+#define TRACE_REG_CSI2_TM_CNT_START_SELECT_REG_ID_N(n)	   (0x0098 + (n) * 4)
+#define TRACE_REG_CSI2_TM_CNT_STOP_SELECT_REG_ID_N(n)	   (0x00b8 + (n) * 4)
+#define TRACE_REG_CSI2_TM_IRQ_TRIGGER_VALUE_REG_ID_N(n)	   (0x00d8 + (n) * 4)
+#define TRACE_REG_CSI2_TM_IRQ_TIMER_SELECT_REG_ID_N(n)	   (0x00f8 + (n) * 4)
+#define TRACE_REG_CSI2_TM_IRQ_ENABLE_REG_ID_N(n)	   (0x0118 + (n) * 4)
+
+/* CSI2_3PH combo receiver trace registers */
+#define TRACE_REG_CSI2_3PH_TM_RESET_REG_IDX		   0x0000
+#define TRACE_REG_CSI2_3PH_TM_OVERALL_ENABLE_REG_IDX	   0x0004
+#define TRACE_REG_CSI2_3PH_TM_TRACE_HEADER_REG_IDX	   0x0008
+#define TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_REG_IDX	   0x000c
+#define TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_VAL		   0x100258
+#define TRACE_REG_CSI2_3PH_TM_MONITOR_ID		   0x9
+
+/* 0 <= n <= 5 */
+#define TRACE_REG_CSI2_3PH_TM_TRACE_NPK_EN_REG_IDX_P(n)   (0x0010 + (n) * 4)
+#define TRACE_REG_CSI2_3PH_TM_TRACE_DDR_EN_REG_IDX_P(n)   (0x0028 + (n) * 4)
+
+#define TRACE_REG_CSI2_3PH_TM_TRACE_LPKT_CLEAR_REG_IDX	   0x0040
+#define TRACE_REG_CSI2_3PH_TM_TRACE_LPKT_REG_IDX	   0x0044
+
+/* 0 <= n <= 7 */
+#define TRACE_REG_CSI2_3PH_TM_ENABLE_REG_ID_N(n)	     (0x0048 + (n) * 4)
+#define TRACE_REG_CSI2_3PH_TM_VALUE_REG_ID_N(n)		     (0x0068 + (n) * 4)
+#define TRACE_REG_CSI2_3PH_TM_CNT_INPUT_SELECT_REG_ID_N(n)   (0x0088 + (n) * 4)
+#define TRACE_REG_CSI2_3PH_TM_CNT_START_SELECT_REG_ID_N(n)   (0x00a8 + (n) * 4)
+#define TRACE_REG_CSI2_3PH_TM_CNT_STOP_SELECT_REG_ID_N(n)    (0x00c8 + (n) * 4)
+#define TRACE_REG_CSI2_3PH_TM_IRQ_TRIGGER_VALUE_REG_ID_N(n)  (0x00e8 + (n) * 4)
+#define TRACE_REG_CSI2_3PH_TM_IRQ_TIMER_SELECT_REG_ID_N(n)   (0x0108 + (n) * 4)
+#define TRACE_REG_CSI2_3PH_TM_IRQ_ENABLE_REG_ID_N(n)	     (0x0128 + (n) * 4)
+
+/* SIG2CIO trace monitors */
+#define TRACE_REG_SIG2CIO_ADDRESS			   0x0000
+#define TRACE_REG_SIG2CIO_WDATA				   0x0004
+#define TRACE_REG_SIG2CIO_MASK				   0x0008
+#define TRACE_REG_SIG2CIO_GROUP_CFG			   0x000c
+#define TRACE_REG_SIG2CIO_STICKY			   0x0010
+#define TRACE_REG_SIG2CIO_RST_STICKY			   0x0014
+#define TRACE_REG_SIG2CIO_MANUAL_RST_STICKY		   0x0018
+#define TRACE_REG_SIG2CIO_STATUS			   0x001c
+/* Size of one SIG2CIO block */
+#define TRACE_REG_SIG2CIO_SIZE_OF			   0x0020
+
+struct ipu_trace;
+struct ipu_subsystem_trace_config;
+
+enum ipu_trace_block_type {
+	IPU_TRACE_BLOCK_TUN = 0,	/* Trace unit */
+	IPU_TRACE_BLOCK_TM,	/* Trace monitor */
+	IPU_TRACE_BLOCK_GPC,	/* General purpose control */
+	IPU_TRACE_CSI2,	/* CSI2 legacy receiver */
+	IPU_TRACE_CSI2_3PH,	/* CSI2 combo receiver */
+	IPU_TRACE_SIG2CIOS,
+	IPU_TRACE_TIMER_RST,	/* Trace reset control timer */
+	IPU_TRACE_BLOCK_END	/* End of list */
+};
+
+struct ipu_trace_block {
+	u32 offset;	/* Offset to block inside subsystem */
+	enum ipu_trace_block_type type;
+};
+
+int ipu_trace_add(struct ipu_device *isp);
+int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir);
+void ipu_trace_release(struct ipu_device *isp);
+int ipu_trace_init(struct ipu_device *isp, void __iomem *base,
+		   struct device *dev, struct ipu_trace_block *blocks);
+void ipu_trace_restore(struct device *dev);
+void ipu_trace_uninit(struct device *dev);
+void ipu_trace_stop(struct device *dev);
+#endif
diff --git a/drivers/media/pci/intel/ipu.c b/drivers/media/pci/intel/ipu.c
new file mode 100644
index 000000000000..18b1ba01ebd6
--- /dev/null
+++ b/drivers/media/pci/intel/ipu.c
@@ -0,0 +1,798 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+
+#include "ipu.h"
+#include "ipu-buttress.h"
+#include "ipu-platform.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-cpd.h"
+#include "ipu-pdata.h"
+#include "ipu-bus.h"
+#include "ipu-mmu.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-isys-csi2-reg.h"
+#include "ipu-trace.h"
+
+#define IPU_PCI_BAR		0
+enum ipu_version ipu_ver;
+EXPORT_SYMBOL(ipu_ver);
+
+static struct ipu_bus_device *ipu_isys_init(struct pci_dev *pdev,
+					    struct device *parent,
+					    struct ipu_buttress_ctrl *ctrl,
+					    void __iomem *base,
+					    const struct ipu_isys_internal_pdata
+					    *ipdata,
+					    unsigned int nr)
+{
+	struct ipu_bus_device *isys;
+	struct ipu_isys_pdata *pdata;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->base = base;
+	pdata->ipdata = ipdata;
+
+	isys = ipu_bus_add_device(pdev, parent, pdata, ctrl,
+				  IPU_ISYS_NAME, nr);
+	if (IS_ERR(isys))
+		return ERR_CAST(isys);
+
+	isys->mmu = ipu_mmu_init(&pdev->dev, base, ISYS_MMID,
+				 &ipdata->hw_variant);
+	if (IS_ERR(isys->mmu))
+		return ERR_CAST(isys->mmu);
+
+	isys->mmu->dev = &isys->dev;
+
+	return isys;
+}
+
+static struct ipu_bus_device *ipu_psys_init(struct pci_dev *pdev,
+					    struct device *parent,
+					    struct ipu_buttress_ctrl *ctrl,
+					    void __iomem *base,
+					    const struct ipu_psys_internal_pdata
+					    *ipdata, unsigned int nr)
+{
+	struct ipu_bus_device *psys;
+	struct ipu_psys_pdata *pdata;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->base = base;
+	pdata->ipdata = ipdata;
+
+	psys = ipu_bus_add_device(pdev, parent, pdata, ctrl,
+				  IPU_PSYS_NAME, nr);
+	if (IS_ERR(psys))
+		return ERR_CAST(psys);
+
+	psys->mmu = ipu_mmu_init(&pdev->dev, base, PSYS_MMID,
+				 &ipdata->hw_variant);
+	if (IS_ERR(psys->mmu))
+		return ERR_CAST(psys->mmu);
+
+	psys->mmu->dev = &psys->dev;
+
+	return psys;
+}
+
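+/*
+ * Handler for the "authenticate" debugfs attribute: re-run firmware
+ * authentication. Valid only in secure mode; psys must be runtime
+ * resumed while the buttress authentication runs.
+ */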
+int ipu_fw_authenticate(void *data, u64 val)
+{
+	struct ipu_device *isp = data;
+	int ret;
+
+	if (!isp->secure_mode)
+		return -EINVAL;
+
+	ret = ipu_buttress_reset_authentication(isp);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "Failed to reset authentication!\n");
+		return ret;
+	}
+
+	ret = pm_runtime_get_sync(&isp->psys->dev);
+	if (ret < 0) {
+		dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret);
+		return ret;
+	}
+
+	ret = ipu_buttress_authenticate(isp);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "FW authentication failed\n");
+		return ret;
+	}
+
+	pm_runtime_put(&isp->psys->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipu_fw_authenticate);
+DEFINE_SIMPLE_ATTRIBUTE(authenticate_fops, NULL, ipu_fw_authenticate, "%llu\n");
+
+#ifdef CONFIG_DEBUG_FS
+static int resume_ipu_bus_device(struct ipu_bus_device *adev)
+{
+	struct device *dev = &adev->dev;
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (!pm || !pm->resume)
+		return -EIO;
+
+	return pm->resume(dev);
+}
+
+static int suspend_ipu_bus_device(struct ipu_bus_device *adev)
+{
+	struct device *dev = &adev->dev;
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (!pm || !pm->suspend)
+		return -EIO;
+
+	return pm->suspend(dev);
+}
+
+static int force_suspend_get(void *data, u64 *val)
+{
+	struct ipu_device *isp = data;
+	struct ipu_buttress *b = &isp->buttress;
+
+	*val = b->force_suspend;
+	return 0;
+}
+
+static int force_suspend_set(void *data, u64 val)
+{
+	struct ipu_device *isp = data;
+	struct ipu_buttress *b = &isp->buttress;
+	int ret = 0;
+
+	if (val == b->force_suspend)
+		return 0;
+
+	if (val) {
+		b->force_suspend = 1;
+		ret = suspend_ipu_bus_device(isp->psys);
+		if (ret) {
+			dev_err(&isp->pdev->dev, "Failed to suspend psys\n");
+			return ret;
+		}
+		ret = suspend_ipu_bus_device(isp->isys);
+		if (ret) {
+			dev_err(&isp->pdev->dev, "Failed to suspend isys\n");
+			return ret;
+		}
+		ret = pci_set_power_state(isp->pdev, PCI_D3hot);
+		if (ret) {
+			dev_err(&isp->pdev->dev,
+				"Failed to suspend IUnit PCI device\n");
+			return ret;
+		}
+	} else {
+		ret = pci_set_power_state(isp->pdev, PCI_D0);
+		if (ret) {
+			dev_err(&isp->pdev->dev,
+				"Failed to suspend IUnit PCI device\n");
+			return ret;
+		}
+		ret = resume_ipu_bus_device(isp->isys);
+		if (ret) {
+			dev_err(&isp->pdev->dev, "Failed to resume isys\n");
+			return ret;
+		}
+		ret = resume_ipu_bus_device(isp->psys);
+		if (ret) {
+			dev_err(&isp->pdev->dev, "Failed to resume psys\n");
+			return ret;
+		}
+		b->force_suspend = 0;
+	}
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(force_suspend_fops, force_suspend_get,
+			force_suspend_set, "%llu\n");
+/*
+ * The debugfs interface for reloading the cpd fw exists only for debug
+ * purposes, and it must not be used while either isys or psys is in use.
+ */
+static int cpd_fw_reload(void *data, u64 val)
+{
+	struct ipu_device *isp = data;
+	int rval = -EINVAL;
+
+	if (isp->cpd_fw_reload)
+		rval = isp->cpd_fw_reload(isp);
+
+	return rval;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cpd_fw_fops, NULL, cpd_fw_reload, "%llu\n");
+
+static int ipu_init_debugfs(struct ipu_device *isp)
+{
+	struct dentry *file;
+	struct dentry *dir;
+
+	dir = debugfs_create_dir(pci_name(isp->pdev), NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	file = debugfs_create_file("force_suspend", 0700, dir, isp,
+				   &force_suspend_fops);
+	if (!file)
+		goto err;
+	file = debugfs_create_file("authenticate", 0700, dir, isp,
+				   &authenticate_fops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("cpd_fw_reload", 0700, dir, isp,
+				   &cpd_fw_fops);
+	if (!file)
+		goto err;
+
+	if (ipu_trace_debugfs_add(isp, dir))
+		goto err;
+
+	isp->ipu_dir = dir;
+
+	if (ipu_buttress_debugfs_init(isp))
+		goto err;
+
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENOMEM;
+}
+
+static void ipu_remove_debugfs(struct ipu_device *isp)
+{
+	/*
+	 * Since the isys and psys debugfs dirs are created under the ipu
+	 * root dir, set the dentry to NULL to avoid duplicate removal.
+	 */
+	debugfs_remove_recursive(isp->ipu_dir);
+	isp->ipu_dir = NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int ipu_pci_config_setup(struct pci_dev *dev)
+{
+	u16 pci_command;
+	int rval = pci_enable_msi(dev);
+
+	if (rval) {
+		dev_err(&dev->dev, "Failed to enable msi (%d)\n", rval);
+		return rval;
+	}
+
+	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
+	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+	    PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(dev, PCI_COMMAND, pci_command);
+
+	return 0;
+}
+
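+/*
+ * Program the buttress arbitration stall-mode bits for virtual channels
+ * 0 and 1 according to the compile-time IPU_BTRS_ARB_STALL_MODE_VC*
+ * selections.
+ */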
+static void ipu_configure_vc_mechanism(struct ipu_device *isp)
+{
+	u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
+
+	if (IPU_BTRS_ARB_STALL_MODE_VC0 == IPU_BTRS_ARB_MODE_TYPE_STALL)
+		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
+	else
+		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
+
+	if (IPU_BTRS_ARB_STALL_MODE_VC1 == IPU_BTRS_ARB_MODE_TYPE_STALL)
+		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
+	else
+		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
+
+	writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL);
+}
+
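+/*
+ * Request the CPD firmware and make sure the returned data lives in
+ * vmalloc space, copying it into a vmalloc'd shadow buffer when it
+ * does not; the later firmware mapping code appears to rely on that.
+ */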
+int request_cpd_fw(const struct firmware **firmware_p, const char *name,
+		   struct device *device)
+{
+	const struct firmware *fw;
+	struct firmware *tmp;
+	int ret;
+
+	ret = request_firmware(&fw, name, device);
+	if (ret)
+		return ret;
+
+	if (is_vmalloc_addr(fw->data)) {
+		*firmware_p = fw;
+	} else {
+		tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+		if (!tmp) {
+			release_firmware(fw);
+			return -ENOMEM;
+		}
+		tmp->size = fw->size;
+		tmp->data = vmalloc(fw->size);
+		if (!tmp->data) {
+			kfree(tmp);
+			release_firmware(fw);
+			return -ENOMEM;
+		}
+		memcpy((void *)tmp->data, fw->data, fw->size);
+		*firmware_p = tmp;
+		release_firmware(fw);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(request_cpd_fw);
+
+static int ipu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct ipu_device *isp;
+	phys_addr_t phys;
+	void __iomem *const *iomap;
+	void __iomem *isys_base = NULL;
+	void __iomem *psys_base = NULL;
+	struct ipu_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
+	unsigned int dma_mask = IPU_DMA_MASK;
+	int rval;
+
+	isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+	if (!isp)
+		return -ENOMEM;
+
+	dev_set_name(&pdev->dev, "intel-ipu");
+	isp->pdev = pdev;
+	INIT_LIST_HEAD(&isp->devices);
+
+	rval = pcim_enable_device(pdev);
+	if (rval) {
+		dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n",
+			rval);
+		return rval;
+	}
+
+	dev_info(&pdev->dev, "Device 0x%x (rev: 0x%x)\n",
+		 pdev->device, pdev->revision);
+
+	phys = pci_resource_start(pdev, IPU_PCI_BAR);
+
+	rval = pcim_iomap_regions(pdev,
+				  1 << IPU_PCI_BAR,
+				  pci_name(pdev));
+	if (rval) {
+		dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n",
+			rval);
+		return rval;
+	}
+	dev_info(&pdev->dev, "physical base address 0x%llx\n", phys);
+
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap) {
+		dev_err(&pdev->dev, "Failed to iomap table (%d)\n", rval);
+		return -ENODEV;
+	}
+
+	isp->base = iomap[IPU_PCI_BAR];
+	dev_info(&pdev->dev, "mapped as: 0x%p\n", isp->base);
+
+	pci_set_drvdata(pdev, isp);
+	pci_set_master(pdev);
+
+	switch (id->device) {
+	case IPU6_PCI_ID:
+		ipu_ver = IPU_VER_6;
+		isp->cpd_fw_name = IPU6_FIRMWARE_NAME;
+		break;
+	case IPU6SE_PCI_ID:
+		ipu_ver = IPU_VER_6SE;
+		isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME;
+		break;
+	default:
+		WARN(1, "Unsupported IPU device\n");
+		return -ENODEV;
+	}
+
+	ipu_internal_pdata_init();
+
+	isys_base = isp->base + isys_ipdata.hw_variant.offset;
+	psys_base = isp->base + psys_ipdata.hw_variant.offset;
+
+	dev_dbg(&pdev->dev, "isys_base: 0x%lx\n", (unsigned long)isys_base);
+	dev_dbg(&pdev->dev, "psys_base: 0x%lx\n", (unsigned long)psys_base);
+
+	rval = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+	if (!rval)
+		rval = pci_set_consistent_dma_mask(pdev,
+						   DMA_BIT_MASK(dma_mask));
+	if (rval) {
+		dev_err(&pdev->dev, "Failed to set DMA mask (%d)\n", rval);
+		return rval;
+	}
+
+	rval = ipu_pci_config_setup(pdev);
+	if (rval)
+		return rval;
+
+	rval = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+					 ipu_buttress_isr,
+					 ipu_buttress_isr_threaded,
+					 IRQF_SHARED, IPU_NAME, isp);
+	if (rval) {
+		dev_err(&pdev->dev, "Requesting irq failed(%d)\n", rval);
+		return rval;
+	}
+
+	rval = ipu_buttress_init(isp);
+	if (rval)
+		return rval;
+
+	dev_info(&pdev->dev, "cpd file name: %s\n", isp->cpd_fw_name);
+
+	rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name, &pdev->dev);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Requesting signed firmware failed\n");
+		return rval;
+	}
+
+	rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
+					 isp->cpd_fw->size);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Failed to validate cpd\n");
+		goto out_ipu_bus_del_devices;
+	}
+
+	rval = ipu_trace_add(isp);
+	if (rval)
+		dev_err(&pdev->dev, "Trace support not available\n");
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	/*
+	 * NOTE Device hierarchy below is important to ensure proper
+	 * runtime suspend and resume order.
+	 * Also registration order is important to ensure proper
+	 * suspend and resume order during system
+	 * suspend. Registration order is as follows:
+	 * isys->psys
+	 */
+	isys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*isys_ctrl), GFP_KERNEL);
+	if (!isys_ctrl) {
+		rval = -ENOMEM;
+		goto out_ipu_bus_del_devices;
+	}
+
+	/* Init buttress control with default values based on the HW */
+	memcpy(isys_ctrl, &isys_buttress_ctrl, sizeof(*isys_ctrl));
+
+	isp->isys = ipu_isys_init(pdev, &pdev->dev,
+				  isys_ctrl, isys_base,
+				  &isys_ipdata,
+				  0);
+	if (IS_ERR(isp->isys)) {
+		rval = PTR_ERR(isp->isys);
+		goto out_ipu_bus_del_devices;
+	}
+
+	psys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*psys_ctrl), GFP_KERNEL);
+	if (!psys_ctrl) {
+		rval = -ENOMEM;
+		goto out_ipu_bus_del_devices;
+	}
+
+	/* Init buttress control with default values based on the HW */
+	memcpy(psys_ctrl, &psys_buttress_ctrl, sizeof(*psys_ctrl));
+
+	isp->psys = ipu_psys_init(pdev, &isp->isys->dev,
+				  psys_ctrl, psys_base,
+				  &psys_ipdata, 0);
+	if (IS_ERR(isp->psys)) {
+		rval = PTR_ERR(isp->psys);
+		goto out_ipu_bus_del_devices;
+	}
+
+	rval = pm_runtime_get_sync(&isp->psys->dev);
+	if (rval < 0) {
+		dev_err(&isp->psys->dev, "Failed to get runtime PM\n");
+		goto out_ipu_bus_del_devices;
+	}
+
+	rval = ipu_mmu_hw_init(isp->psys->mmu);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Failed to set mmu hw\n");
+		goto out_ipu_bus_del_devices;
+	}
+
+	rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw,
+					 &isp->fw_sgt);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "failed to map fw image\n");
+		goto out_ipu_bus_del_devices;
+	}
+
+	isp->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys,
+					      isp->cpd_fw->data,
+					      sg_dma_address(isp->fw_sgt.sgl),
+					      &isp->pkg_dir_dma_addr,
+					      &isp->pkg_dir_size);
+	if (!isp->pkg_dir) {
+		rval = -ENOMEM;
+		dev_err(&isp->pdev->dev, "failed to create pkg dir\n");
+		goto out_ipu_bus_del_devices;
+	}
+
+	rval = ipu_buttress_authenticate(isp);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n",
+			rval);
+		goto out_ipu_bus_del_devices;
+	}
+
+	ipu_mmu_hw_cleanup(isp->psys->mmu);
+	pm_runtime_put(&isp->psys->dev);
+
+#ifdef CONFIG_DEBUG_FS
+	rval = ipu_init_debugfs(isp);
+	if (rval) {
+		dev_err(&pdev->dev, "Failed to initialize debugfs");
+		goto out_ipu_bus_del_devices;
+	}
+#endif
+
+	/* Configure the arbitration mechanisms for VC requests */
+	ipu_configure_vc_mechanism(isp);
+
+	dev_info(&pdev->dev, "IPU driver version %d.%d\n", IPU_MAJOR_VERSION,
+		 IPU_MINOR_VERSION);
+
+	return 0;
+
+out_ipu_bus_del_devices:
+	if (isp->pkg_dir) {
+		ipu_cpd_free_pkg_dir(isp->psys, isp->pkg_dir,
+				     isp->pkg_dir_dma_addr,
+				     isp->pkg_dir_size);
+		ipu_buttress_unmap_fw_image(isp->psys, &isp->fw_sgt);
+		isp->pkg_dir = NULL;
+	}
+	if (isp->psys && isp->psys->mmu)
+		ipu_mmu_cleanup(isp->psys->mmu);
+	if (isp->isys && isp->isys->mmu)
+		ipu_mmu_cleanup(isp->isys->mmu);
+	if (isp->psys)
+		pm_runtime_put(&isp->psys->dev);
+	ipu_bus_del_devices(pdev);
+	ipu_buttress_exit(isp);
+	release_firmware(isp->cpd_fw);
+
+	return rval;
+}
+
+static void ipu_pci_remove(struct pci_dev *pdev)
+{
+	struct ipu_device *isp = pci_get_drvdata(pdev);
+
+#ifdef CONFIG_DEBUG_FS
+	ipu_remove_debugfs(isp);
+#endif
+	ipu_trace_release(isp);
+
+	ipu_bus_del_devices(pdev);
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	ipu_buttress_exit(isp);
+
+	release_firmware(isp->cpd_fw);
+
+	ipu_mmu_cleanup(isp->psys->mmu);
+	ipu_mmu_cleanup(isp->isys->mmu);
+}
+
+static void ipu_pci_reset_prepare(struct pci_dev *pdev)
+{
+	struct ipu_device *isp = pci_get_drvdata(pdev);
+
+	dev_warn(&pdev->dev, "FLR prepare\n");
+	pm_runtime_forbid(&isp->pdev->dev);
+	isp->flr_done = true;
+}
+
+static void ipu_pci_reset_done(struct pci_dev *pdev)
+{
+	struct ipu_device *isp = pci_get_drvdata(pdev);
+
+	ipu_buttress_restore(isp);
+	if (isp->secure_mode)
+		ipu_buttress_reset_authentication(isp);
+
+	ipu_bus_flr_recovery();
+	isp->ipc_reinit = true;
+	pm_runtime_allow(&isp->pdev->dev);
+
+	dev_warn(&pdev->dev, "FLR completed\n");
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * The PCI base driver code requires the driver to provide these callbacks
+ * to enable PCI device level PM state transitions (D0<->D3).
+ */
+static int ipu_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ipu_device *isp = pci_get_drvdata(pdev);
+
+	isp->flr_done = false;
+
+	return 0;
+}
+
+static int ipu_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ipu_device *isp = pci_get_drvdata(pdev);
+	struct ipu_buttress *b = &isp->buttress;
+	int rval;
+
+	/* Configure the arbitration mechanisms for VC requests */
+	ipu_configure_vc_mechanism(isp);
+
+	ipu_buttress_set_secure_mode(isp);
+	isp->secure_mode = ipu_buttress_get_secure_mode(isp);
+	dev_info(dev, "IPU in %s mode\n",
+		 isp->secure_mode ? "secure" : "non-secure");
+
+	ipu_buttress_restore(isp);
+
+	rval = ipu_buttress_ipc_reset(isp, &b->cse);
+	if (rval)
+		dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n");
+
+	rval = pm_runtime_get_sync(&isp->psys->dev);
+	if (rval < 0) {
+		dev_err(&isp->psys->dev, "Failed to get runtime PM\n");
+		return 0;
+	}
+
+	rval = ipu_buttress_authenticate(isp);
+	if (rval)
+		dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n",
+			rval);
+
+	pm_runtime_put(&isp->psys->dev);
+
+	return 0;
+}
+
+static int ipu_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ipu_device *isp = pci_get_drvdata(pdev);
+	int rval;
+
+	ipu_configure_vc_mechanism(isp);
+	ipu_buttress_restore(isp);
+
+	if (isp->ipc_reinit) {
+		struct ipu_buttress *b = &isp->buttress;
+
+		isp->ipc_reinit = false;
+		rval = ipu_buttress_ipc_reset(isp, &b->cse);
+		if (rval)
+			dev_err(&isp->pdev->dev,
+				"IPC reset protocol failed!\n");
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops ipu_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(&ipu_suspend, &ipu_resume)
+	SET_RUNTIME_PM_OPS(&ipu_suspend,	/* Same as in suspend flow */
+			   &ipu_runtime_resume,
+			   NULL)
+};
+
+#define IPU_PM (&ipu_pm_ops)
+#else
+#define IPU_PM NULL
+#endif
+
+static const struct pci_device_id ipu_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)},
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, ipu_pci_tbl);
+
+static const struct pci_error_handlers pci_err_handlers = {
+	.reset_prepare = ipu_pci_reset_prepare,
+	.reset_done = ipu_pci_reset_done,
+};
+
+static struct pci_driver ipu_pci_driver = {
+	.name = IPU_NAME,
+	.id_table = ipu_pci_tbl,
+	.probe = ipu_pci_probe,
+	.remove = ipu_pci_remove,
+	.driver = {
+		   .pm = IPU_PM,
+		   },
+	.err_handler = &pci_err_handlers,
+};
+
+static int __init ipu_init(void)
+{
+	int rval = ipu_bus_register();
+
+	if (rval) {
+		pr_warn("can't register ipu bus (%d)\n", rval);
+		return rval;
+	}
+
+	rval = pci_register_driver(&ipu_pci_driver);
+	if (rval) {
+		pr_warn("can't register pci driver (%d)\n", rval);
+		goto out_pci_register_driver;
+	}
+
+	return 0;
+
+out_pci_register_driver:
+	ipu_bus_unregister();
+
+	return rval;
+}
+
+static void __exit ipu_exit(void)
+{
+	pci_unregister_driver(&ipu_pci_driver);
+	ipu_bus_unregister();
+}
+
+module_init(ipu_init);
+module_exit(ipu_exit);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus at linux.intel.com>");
+MODULE_AUTHOR("Jouni Högander <jouni.hogander at intel.com>");
+MODULE_AUTHOR("Antti Laakso <antti.laakso at intel.com>");
+MODULE_AUTHOR("Samu Onkalo <samu.onkalo at intel.com>");
+MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng at intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu at intel.com>");
+MODULE_AUTHOR("Renwei Wu <renwei.wu at intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao at intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding at intel.com>");
+MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang at intel.com>");
+MODULE_AUTHOR("Leifu Zhao <leifu.zhao at intel.com>");
+MODULE_AUTHOR("Xia Wu <xia.wu at intel.com>");
+MODULE_AUTHOR("Kun Jiang <kun.jiang at intel.com>");
+MODULE_AUTHOR("Intel");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu pci driver");
diff --git a/drivers/media/pci/intel/ipu.h b/drivers/media/pci/intel/ipu.h
new file mode 100644
index 000000000000..67b0561105e2
--- /dev/null
+++ b/drivers/media/pci/intel/ipu.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_H
+#define IPU_H
+
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <uapi/linux/media.h>
+#include <linux/version.h>
+
+#include "ipu-pdata.h"
+#include "ipu-bus.h"
+#include "ipu-buttress.h"
+#include "ipu-trace.h"
+
+#define IPU6_PCI_ID	0x9a19
+#define IPU6SE_PCI_ID	0x4e19
+
+enum ipu_version {
+	IPU_VER_INVALID = 0,
+	IPU_VER_6,
+	IPU_VER_6SE,
+};
+
+/*
+ * IPU version definitions to reflect the IPU driver changes.
+ * Both ISYS and PSYS share the same version.
+ */
+#define IPU_MAJOR_VERSION 1
+#define IPU_MINOR_VERSION 0
+#define IPU_DRIVER_VERSION (IPU_MAJOR_VERSION << 16 | IPU_MINOR_VERSION)
+
+/* Processing system frequency: 25 MHz x ratio; legal values [8, 32] */
+#define PS_FREQ_CTL_DEFAULT_RATIO	0x12
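+/* e.g. the default ratio 0x12 (18) gives 25 MHz * 18 = 450 MHz */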
+
+/* Input system frequency: 1600 MHz / divisor; legal values [2, 8] */
+#define IS_FREQ_SOURCE			1600000000
+#define IS_FREQ_CTL_DIVISOR		0x4
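+/* e.g. the default divisor 0x4 gives 1600 MHz / 4 = 400 MHz */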
+
+/*
+ * ISYS DMA can overshoot. For higher resolutions the over-allocation is
+ * one line, but it must be at minimum 1024 bytes. The value could differ
+ * between versions / generations, so it is provided via platform data.
+ */
+#define IPU_ISYS_OVERALLOC_MIN		1024
+
+/*
+ * The GDA has 128 physical pages; the page size is 2K for IPU6 and 1K
+ * for other generations.
+ */
+#define IPU_DEVICE_GDA_NR_PAGES		128
+
+/*
+ * Virtualization factor to calculate the available virtual pages.
+ */
+#define IPU_DEVICE_GDA_VIRT_FACTOR	32
+
+struct pci_dev;
+struct list_head;
+struct firmware;
+
+#define NR_OF_MMU_RESOURCES			2
+
+struct ipu_device {
+	struct pci_dev *pdev;
+	struct list_head devices;
+	struct ipu_bus_device *isys;
+	struct ipu_bus_device *psys;
+	struct ipu_buttress buttress;
+
+	const struct firmware *cpd_fw;
+	const char *cpd_fw_name;
+	u64 *pkg_dir;
+	dma_addr_t pkg_dir_dma_addr;
+	unsigned int pkg_dir_size;
+	struct sg_table fw_sgt;
+
+	void __iomem *base;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *ipu_dir;
+#endif
+	struct ipu_trace *trace;
+	bool flr_done;
+	bool ipc_reinit;
+	bool secure_mode;
+
+	int (*cpd_fw_reload)(struct ipu_device *isp);
+};
+
+#define IPU_DMA_MASK	39
+#define IPU_LIB_CALL_TIMEOUT_MS		2000
+#define IPU_PSYS_CMD_TIMEOUT_MS	2000
+#define IPU_PSYS_OPEN_TIMEOUT_US	   50
+#define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US)
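
These constants suggest open polls for readiness in 50 us steps for up to 10 ms. A minimal sketch of such a loop, assuming a hypothetical ipu_psys_ready() predicate:

	/* sketch: poll until ready, at most IPU_PSYS_OPEN_RETRY times */
	static int ipu_psys_wait_ready(struct ipu_psys *psys)
	{
		unsigned int retry = IPU_PSYS_OPEN_RETRY;

		do {
			if (ipu_psys_ready(psys))	/* hypothetical */
				return 0;
			usleep_range(IPU_PSYS_OPEN_TIMEOUT_US,
				     IPU_PSYS_OPEN_TIMEOUT_US + 10);
		} while (--retry);

		return -ETIMEDOUT;
	}
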
+
+int ipu_fw_authenticate(void *data, u64 val);
+void ipu_configure_spc(struct ipu_device *isp,
+		       const struct ipu_hw_variants *hw_variant,
+		       int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+		       dma_addr_t pkg_dir_dma_addr);
+int request_cpd_fw(const struct firmware **firmware_p, const char *name,
+		   struct device *device);
+extern enum ipu_version ipu_ver;
+void ipu_internal_pdata_init(void);
+
+#endif /* IPU_H */
diff --git a/drivers/media/pci/intel/ipu6/Makefile b/drivers/media/pci/intel/ipu6/Makefile
new file mode 100644
index 000000000000..0c45215ad991
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/Makefile
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2017 - 2020 Intel Corporation.
+
+ifneq ($(EXTERNAL_BUILD), 1)
+srcpath := $(srctree)
+endif
+
+ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 -DIPU_TPG_FRAME_SYNC -DIPU_PSYS_GPC \
+		-DIPU_ISYS_GPC
+
+intel-ipu6-objs				+= ../ipu.o \
+					   ../ipu-bus.o \
+					   ../ipu-dma.o \
+					   ../ipu-mmu.o \
+					   ../ipu-buttress.o \
+					   ../ipu-trace.o \
+					   ../ipu-cpd.o \
+					   ipu6.o \
+					   ../ipu-fw-com.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU)		+= intel-ipu6.o
+
+intel-ipu6-isys-objs			+= ../ipu-isys.o \
+					   ../ipu-isys-csi2.o \
+					   ipu6-isys.o \
+					   ipu6-isys-phy.o \
+					   ipu6-isys-csi2.o \
+					   ipu6-isys-gpc.o \
+					   ../ipu-isys-csi2-be-soc.o \
+					   ../ipu-isys-csi2-be.o \
+					   ../ipu-fw-isys.o \
+					   ../ipu-isys-video.o \
+					   ../ipu-isys-queue.o \
+					   ../ipu-isys-subdev.o \
+					   ../ipu-isys-tpg.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU)		+= intel-ipu6-isys.o
+
+intel-ipu6-psys-objs			+= ../ipu-psys.o \
+					   ipu6-psys.o \
+					   ipu-resources.o \
+					   ipu6-psys-gpc.o \
+					   ipu6-l-scheduler.o \
+					   ipu6-ppg.o
+
+intel-ipu6-psys-objs			+= ipu-fw-resources.o \
+					   ipu6-fw-resources.o \
+					   ipu6se-fw-resources.o \
+					   ../ipu-fw-psys.o
+
+ifeq ($(CONFIG_COMPAT),y)
+intel-ipu6-psys-objs			+= ../ipu-psys-compat32.o
+endif
+
+obj-$(CONFIG_VIDEO_INTEL_IPU)		+= intel-ipu6-psys.o
+
+ccflags-y += -I$(srcpath)/$(src)/../../../../../include/
+ccflags-y += -I$(srcpath)/$(src)/../
+ccflags-y += -I$(srcpath)/$(src)/
diff --git a/drivers/media/pci/intel/ipu6/ipu-fw-resources.c b/drivers/media/pci/intel/ipu6/ipu-fw-resources.c
new file mode 100644
index 000000000000..ab663ead07ad
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu-fw-resources.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2019 Intel Corporation
+
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include "ipu-psys.h"
+#include "ipu-fw-psys.h"
+#include "ipu6-platform-resources.h"
+#include "ipu6se-platform-resources.h"
+
+/********** Generic resource handling **********/
+
+/*
+ * The extension library gives byte offsets to its internal structures;
+ * use those offsets to update the fields. Without the extension library,
+ * access the structures directly.
+ */
+static const struct ipu6_psys_hw_res_variant *var = &hw_var;
+
+int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index,
+				    u8 value)
+{
+	struct ipu_fw_psys_process_group *parent =
+		(struct ipu_fw_psys_process_group *)((char *)ptr +
+						      ptr->parent_offset);
+
+	ptr->cells[index] = value;
+	parent->resource_bitmap |= 1 << value;
+
+	return 0;
+}
+
+u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index)
+{
+	return ptr->cells[index];
+}
+
+int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr)
+{
+	struct ipu_fw_psys_process_group *parent;
+	u8 cell_id = ipu_fw_psys_get_process_cell_id(ptr, 0);
+	int retval = -1;
+	u8 value;
+
+	parent = (struct ipu_fw_psys_process_group *)((char *)ptr +
+						       ptr->parent_offset);
+
+	value = var->cell_num;
+	if ((1 << cell_id) != 0 &&
+	    ((1 << cell_id) & parent->resource_bitmap)) {
+		ipu_fw_psys_set_process_cell_id(ptr, 0, value);
+		parent->resource_bitmap &= ~(1 << cell_id);
+		retval = 0;
+	}
+
+	return retval;
+}
+
+int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+				 u16 value)
+{
+	if (var->set_proc_dev_chn)
+		return var->set_proc_dev_chn(ptr, offset, value);
+
+	WARN(1, "ipu6 psys res var is not initialised correctly.");
+	return 0;
+}
+
+int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+				    u16 id, u32 bitmap,
+				    u32 active_bitmap)
+{
+	if (var->set_proc_dfm_bitmap)
+		return var->set_proc_dfm_bitmap(ptr, id, bitmap,
+						active_bitmap);
+
+	WARN(1, "ipu6 psys res var is not initialised correctly.");
+	return 0;
+}
+
+int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+				    u16 type_id, u16 mem_id, u16 offset)
+{
+	if (var->set_proc_ext_mem)
+		return var->set_proc_ext_mem(ptr, type_id, mem_id, offset);
+
+	WARN(1, "ipu6 psys res var is not initialised correctly.");
+	return 0;
+}
+
+int ipu_fw_psys_get_program_manifest_by_process(
+	struct ipu_fw_generic_program_manifest *gen_pm,
+	const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+	struct ipu_fw_psys_process *process)
+{
+	if (var->get_pgm_by_proc)
+		return var->get_pgm_by_proc(gen_pm, pg_manifest, process);
+
+	WARN(1, "ipu6 psys res var is not initialised correctly.");
+	return 0;
+}
+
diff --git a/drivers/media/pci/intel/ipu6/ipu-platform-buttress-regs.h b/drivers/media/pci/intel/ipu6/ipu-platform-buttress-regs.h
new file mode 100644
index 000000000000..e91524c8e7f7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu-platform-buttress-regs.h
@@ -0,0 +1,315 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_BUTTRESS_REGS_H
+#define IPU_PLATFORM_BUTTRESS_REGS_H
+
+/* IS_WORKPOINT_REQ */
+#define IPU_BUTTRESS_REG_IS_FREQ_CTL		0x34
+/* PS_WORKPOINT_REQ */
+#define IPU_BUTTRESS_REG_PS_FREQ_CTL		0x38
+
+#define IPU_BUTTRESS_IS_FREQ_RATIO_MASK	0xff
+#define IPU_BUTTRESS_PS_FREQ_RATIO_MASK	0xff
+
+#define IPU_IS_FREQ_MAX		533
+#define IPU_IS_FREQ_MIN		200
+#define IPU_PS_FREQ_MAX		450
+#define IPU_IS_FREQ_RATIO_BASE		25
+#define IPU_PS_FREQ_RATIO_BASE		25
+#define IPU_BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK	0xff
+#define IPU_BUTTRESS_PS_FREQ_CTL_DIVISOR_MASK	0xff
+
+/* should be tuned for real silicon */
+#define IPU_IS_FREQ_CTL_DEFAULT_RATIO	0x08
+#define IPU_PS_FREQ_CTL_DEFAULT_RATIO	0x10
+
+#define IPU_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO	0x10
+#define IPU_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO	0x0708
+
+#define IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT	3
+#define IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK	\
+	(0x3 << IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT)
+
+#define IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT	6
+#define IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK	\
+	(0x3 << IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT)
+
+#define IPU_BUTTRESS_PWR_STATE_DN_DONE		0x0
+#define IPU_BUTTRESS_PWR_STATE_UP_PROCESS	0x1
+#define IPU_BUTTRESS_PWR_STATE_DN_PROCESS	0x2
+#define IPU_BUTTRESS_PWR_STATE_UP_DONE		0x3
+
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_0	0x270
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_1	0x274
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_2	0x278
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_3	0x27c
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_4	0x280
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_5	0x284
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_6	0x288
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_7	0x28c
+
+#define BUTTRESS_REG_WDT			0x8
+#define BUTTRESS_REG_BTRS_CTRL			0xc
+#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0	BIT(0)
+#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1	BIT(1)
+
+#define BUTTRESS_REG_FW_RESET_CTL	0x30
+#define BUTTRESS_FW_RESET_CTL_START	BIT(0)
+#define BUTTRESS_FW_RESET_CTL_DONE	BIT(1)
+
+#define BUTTRESS_REG_IS_FREQ_CTL	0x34
+
+#define BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK	0xf
+
+#define BUTTRESS_REG_PS_FREQ_CTL	0x38
+
+#define BUTTRESS_PS_FREQ_CTL_RATIO_MASK		0xff
+
+#define BUTTRESS_FREQ_CTL_START		BIT(31)
+#define BUTTRESS_FREQ_CTL_START_SHIFT		31
+#define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT	8
+#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK	(0xff << 8)
+
+#define BUTTRESS_REG_PWR_STATE	0x5c
+
+#define BUTTRESS_PWR_STATE_IS_PWR_SHIFT	3
+#define BUTTRESS_PWR_STATE_IS_PWR_MASK	(0x3 << 3)
+
+#define BUTTRESS_PWR_STATE_PS_PWR_SHIFT	6
+#define BUTTRESS_PWR_STATE_PS_PWR_MASK	(0x3 << 6)
+
+#define BUTTRESS_PWR_STATE_RESET		0x0
+#define BUTTRESS_PWR_STATE_PWR_ON_DONE		0x1
+#define BUTTRESS_PWR_STATE_PWR_RDY		0x3
+#define BUTTRESS_PWR_STATE_PWR_IDLE		0x4
+
+#define BUTTRESS_PWR_STATE_HH_STATUS_SHIFT	11
+#define BUTTRESS_PWR_STATE_HH_STATUS_MASK	(0x3 << 11)
+
+enum {
+	BUTTRESS_PWR_STATE_HH_STATE_IDLE,
+	BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS,
+	BUTTRESS_PWR_STATE_HH_STATE_DONE,
+	BUTTRESS_PWR_STATE_HH_STATE_ERR,
+};
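
Fields in BUTTRESS_REG_PWR_STATE are extracted with the usual mask-and-shift pattern; for instance, a sketch of reading the handshake state, assuming a mapped register base as in struct ipu_device:

	/* sketch: decode the HH handshake state from the power-state reg */
	u32 pwr = readl(isp->base + BUTTRESS_REG_PWR_STATE);
	u32 hh = (pwr & BUTTRESS_PWR_STATE_HH_STATUS_MASK) >>
		 BUTTRESS_PWR_STATE_HH_STATUS_SHIFT;

	if (hh == BUTTRESS_PWR_STATE_HH_STATE_ERR)
		dev_err(&isp->pdev->dev, "power handshake failed\n");
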
+
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT	19
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK	(0xf << 19)
+
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE			0x0
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP		0x1
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK		0x2
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK		0x3
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES		0x4
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1		0x5
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2		0x6
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES	0x7
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP	0x8
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT			0x9
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY			0xa
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED		0xb
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3		0xc
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD		0xd
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT		0xe
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0		0xf
+
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT	24
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK	(0x1f << 24)
+
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE			0x0
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY	0x1
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH	0x2
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD	0x3
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH	0x4
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO		0x5
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP		0x6
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK		0x7
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES		0x8
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1		0x9
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2		0xa
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES	0xb
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT		0xc
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT		0xd
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP			0xf
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED		0x10
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3		0x11
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK		0x12
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND		0x13
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4		0x14
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP		0x15
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK		0x16
+
+#define BUTTRESS_REG_SECURITY_CTL	0x300
+
+#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE	BIT(16)
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_SHIFT		0
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK		0x1f
+
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE		0x1
+#define BUTTRESS_SECURITY_CTL_AUTH_DONE			0x2
+#define BUTTRESS_SECURITY_CTL_AUTH_FAILED			0x8
+
+#define BUTTRESS_REG_SENSOR_FREQ_CTL	0x16c
+
+#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_DEFAULT(i) \
+					(0x1b << ((i) * 10))
+#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_SHIFT(i)	((i) * 10)
+#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_MASK(i) \
+					(0x1ff << ((i) * 10))
+
+#define BUTTRESS_SENSOR_CLK_FREQ_6P75MHZ	0x176
+#define BUTTRESS_SENSOR_CLK_FREQ_8MHZ		0x164
+#define BUTTRESS_SENSOR_CLK_FREQ_9P6MHZ		0x2
+#define BUTTRESS_SENSOR_CLK_FREQ_12MHZ		0x1b2
+#define BUTTRESS_SENSOR_CLK_FREQ_13P6MHZ	0x1ac
+#define BUTTRESS_SENSOR_CLK_FREQ_14P4MHZ	0x1cc
+#define BUTTRESS_SENSOR_CLK_FREQ_15P8MHZ	0x1a6
+#define BUTTRESS_SENSOR_CLK_FREQ_16P2MHZ	0xca
+#define BUTTRESS_SENSOR_CLK_FREQ_17P3MHZ	0x12e
+#define BUTTRESS_SENSOR_CLK_FREQ_18P6MHZ	0x1c0
+#define BUTTRESS_SENSOR_CLK_FREQ_19P2MHZ	0x0
+#define BUTTRESS_SENSOR_CLK_FREQ_24MHZ		0xb2
+#define BUTTRESS_SENSOR_CLK_FREQ_26MHZ		0xae
+#define BUTTRESS_SENSOR_CLK_FREQ_27MHZ		0x196
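
Selecting one of these codes for sensor clock output i then combines the code with the per-sensor field macros defined above; a sketch, assuming a plain read-modify-write is sufficient:

	/* sketch: program 24 MHz on oscillator output 'i' */
	u32 val = readl(isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL);

	val &= ~BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_MASK(i);
	val |= BUTTRESS_SENSOR_CLK_FREQ_24MHZ <<
	       BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_SHIFT(i);
	writel(val, isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL);
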
+
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FB_RATIO_MASK		0xff
+#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_SHIFT		8
+#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_MASK		(0x2 << 8)
+#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_SHIFT		10
+#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_MASK		(0x2 << 10)
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FORCE_OFF_SHIFT		12
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_SHIFT		14
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_MASK		(0x2 << 14)
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_SHIFT		16
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_MASK		(0x2 << 16)
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_SHIFT	18
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_MASK	(0x2 << 18)
+#define BUTTRESS_SENSOR_FREQ_CTL_START_SHIFT			31
+
+#define BUTTRESS_REG_SENSOR_CLK_CTL	0x170
+
+/* 0 <= i <= 2 */
+#define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(i)		((i) * 2)
+#define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_SEL_SHIFT(i)	((i) * 2 + 1)
+
+#define BUTTRESS_REG_FW_SOURCE_BASE_LO	0x78
+#define BUTTRESS_REG_FW_SOURCE_BASE_HI	0x7C
+#define BUTTRESS_REG_FW_SOURCE_SIZE	0x80
+
+#define BUTTRESS_REG_ISR_STATUS		0x90
+#define BUTTRESS_REG_ISR_ENABLED_STATUS	0x94
+#define BUTTRESS_REG_ISR_ENABLE		0x98
+#define BUTTRESS_REG_ISR_CLEAR		0x9C
+
+#define BUTTRESS_ISR_IS_IRQ			BIT(0)
+#define BUTTRESS_ISR_PS_IRQ			BIT(1)
+#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE	BIT(2)
+#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH	BIT(3)
+#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING	BIT(4)
+#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING	BIT(5)
+#define BUTTRESS_ISR_CSE_CSR_SET		BIT(6)
+#define BUTTRESS_ISR_ISH_CSR_SET		BIT(7)
+#define BUTTRESS_ISR_SPURIOUS_CMP		BIT(8)
+#define BUTTRESS_ISR_WATCHDOG_EXPIRED		BIT(9)
+#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ		BIT(10)
+#define BUTTRESS_ISR_SAI_VIOLATION		BIT(11)
+#define BUTTRESS_ISR_HW_ASSERTION		BIT(12)
+
+#define BUTTRESS_REG_IU2CSEDB0	0x100
+
+#define BUTTRESS_IU2CSEDB0_BUSY		BIT(31)
+#define BUTTRESS_IU2CSEDB0_SHORT_FORMAT_SHIFT	27
+#define BUTTRESS_IU2CSEDB0_CLIENT_ID_SHIFT	10
+#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL	2
+
+#define BUTTRESS_REG_IU2CSEDATA0	0x104
+
+#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD		1
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN		2
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE		3
+#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH	16
+
+#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE			1
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE			2
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE		4
+#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE	16
+
+#define BUTTRESS_REG_IU2CSECSR		0x108
+
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1		BIT(0)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2		BIT(1)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE	BIT(2)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ		BIT(3)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID			BIT(4)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ		BIT(5)
+
+#define BUTTRESS_REG_CSE2IUDB0		0x304
+#define BUTTRESS_REG_CSE2IUCSR		0x30C
+#define BUTTRESS_REG_CSE2IUDATA0	0x308
+
+/* 0x20 == NACK, 0xf == unknown command */
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK      0xf20
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK 0xffff
+
+#define BUTTRESS_REG_ISH2IUCSR		0x50
+#define BUTTRESS_REG_ISH2IUDB0		0x54
+#define BUTTRESS_REG_ISH2IUDATA0	0x58
+
+#define BUTTRESS_REG_IU2ISHDB0		0x10C
+#define BUTTRESS_REG_IU2ISHDATA0	0x110
+#define BUTTRESS_REG_IU2ISHDATA1	0x114
+#define BUTTRESS_REG_IU2ISHCSR		0x118
+
+#define BUTTRESS_REG_ISH_START_DETECT		0x198
+#define BUTTRESS_REG_ISH_START_DETECT_MASK	0x19C
+
+#define BUTTRESS_REG_FABRIC_CMD	0x88
+
+#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC	BIT(0)
+#define BUTTRESS_FABRIC_CMD_IS_DRAIN		BIT(4)
+
+#define BUTTRESS_REG_TSW_CTL		0x120
+#define BUTTRESS_TSW_CTL_SOFT_RESET	BIT(8)
+
+#define BUTTRESS_REG_TSC_LO	0x164
+#define BUTTRESS_REG_TSC_HI	0x168
+
+#define BUTTRESS_REG_CSI2_PORT_CONFIG_AB		0x200
+#define BUTTRESS_CSI2_PORT_CONFIG_AB_MUX_MASK		0x1f
+#define BUTTRESS_CSI2_PORT_CONFIG_AB_COMBO_SHIFT_B0	16
+
+#define BUTTRESS_REG_PS_FREQ_CAPABILITIES			0xf7498
+
+#define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_SHIFT	24
+#define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_MASK	(0xff << 24)
+#define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_SHIFT		16
+#define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_MASK		(0xff << 16)
+#define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_SHIFT	8
+#define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_MASK	(0xff << 8)
+#define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_SHIFT		0
+#define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_MASK		(0xff)
+
+#define BUTTRESS_IRQS		(BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING |	\
+				 BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE |	\
+				 BUTTRESS_ISR_IS_IRQ |			\
+				 BUTTRESS_ISR_PS_IRQ)
+
+#define IPU6SE_ISYS_PHY_0_BASE		0x10000
+
+/* only use BB0, BB2, BB4, and BB6 on PHY0 */
+#define IPU6SE_ISYS_PHY_BB_NUM		4
+
+/* offset from PHY base */
+#define PHY_CSI_CFG			0xc0
+#define PHY_CSI_RCOMP_CONTROL		0xc8
+#define PHY_CSI_BSCAN_EXCLUDE		0xd8
+
+#define PHY_CPHY_DLL_OVRD(x)		(0x100 + 0x100 * (x))
+#define PHY_DPHY_DLL_OVRD(x)		(0x14c + 0x100 * (x))
+#define PHY_CPHY_RX_CONTROL1(x)		(0x110 + 0x100 * (x))
+#define PHY_CPHY_RX_CONTROL2(x)		(0x114 + 0x100 * (x))
+#define PHY_DPHY_CFG(x)			(0x148 + 0x100 * (x))
+#define PHY_BB_AFE_CONFIG(x)		(0x174 + 0x100 * (x))
+
+#endif /* IPU_PLATFORM_BUTTRESS_REGS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu-platform-isys-csi2-reg.h b/drivers/media/pci/intel/ipu6/ipu-platform-isys-csi2-reg.h
new file mode 100644
index 000000000000..80f7ac0b0c7f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu-platform-isys-csi2-reg.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_ISYS_CSI2_REG_H
+#define IPU_PLATFORM_ISYS_CSI2_REG_H
+
+#define CSI_REG_BASE			0x220000
+#define CSI_REG_BASE_PORT(id)		((id) * 0x1000)
+
+#define IPU_CSI_PORT_A_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(0))
+#define IPU_CSI_PORT_B_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(1))
+#define IPU_CSI_PORT_C_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(2))
+#define IPU_CSI_PORT_D_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(3))
+#define IPU_CSI_PORT_E_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(4))
+#define IPU_CSI_PORT_F_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(5))
+#define IPU_CSI_PORT_G_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(6))
+#define IPU_CSI_PORT_H_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(7))
+
+/* CSI Port General Purpose Registers */
+#define CSI_REG_PORT_GPREG_SRST                 0x0
+#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST    0x4
+#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL    0x8
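
Every per-port register address is the CSI base plus the port stride plus the register offset; a sketch of the addressing, assuming isys_base is the mapped ISYS base:

	/* sketch: port-control register address for CSI port 'port' */
	void __iomem *reg = isys_base + CSI_REG_BASE +
			    CSI_REG_BASE_PORT(port) +
			    CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL;
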
+
+/*
+ * Port IRQs mapping events:
+ * IRQ0 - CSI_FE event
+ * IRQ1 - CSI_SYNC
+ * IRQ2 - S2M_SIDS0TO7
+ * IRQ3 - S2M_SIDS8TO15
+ */
+#define CSI_PORT_REG_BASE_IRQ_CSI               0x80
+#define CSI_PORT_REG_BASE_IRQ_CSI_SYNC          0xA0
+#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS0TOS7     0xC0
+#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS8TOS15    0xE0
+
+#define CSI_PORT_REG_BASE_IRQ_EDGE_OFFSET	0x0
+#define CSI_PORT_REG_BASE_IRQ_MASK_OFFSET	0x4
+#define CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET	0x8
+#define CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET	0xc
+#define CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET	0x10
+#define CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET	0x14
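
Each of the four IRQ blocks is a small bank of six registers at its own base; acknowledging pending CSI events would then look roughly like this sketch:

	/* sketch: read and clear pending IRQs in the CSI_FE block */
	void __iomem *irq = port_base + CSI_PORT_REG_BASE_IRQ_CSI;
	u32 status = readl(irq + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);

	writel(status, irq + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
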
+
+#define IPU6SE_CSI_RX_ERROR_IRQ_MASK		0x7ffff
+#define IPU6_CSI_RX_ERROR_IRQ_MASK		0xfffff
+
+#define CSI_RX_NUM_ERRORS_IN_IRQ		20
+#define CSI_RX_NUM_IRQ				32
+
+#define IPU_CSI_RX_IRQ_FS_VC		1
+#define IPU_CSI_RX_IRQ_FE_VC		2
+
+/* PPI2CSI */
+#define CSI_REG_PPI2CSI_ENABLE                  0x200
+#define CSI_REG_PPI2CSI_CONFIG_PPI_INTF         0x204
+#define PPI_INTF_CONFIG_NOF_ENABLED_DATALANES_SHIFT	3
+#define PPI_INTF_CONFIG_RX_AUTO_CLKGATING_SHIFT		5
+#define CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE      0x208
+
+enum CSI_PPI2CSI_CTRL {
+	CSI_PPI2CSI_DISABLE = 0,
+	CSI_PPI2CSI_ENABLE = 1,
+};
+
+/* CSI_FE */
+#define CSI_REG_CSI_FE_ENABLE                   0x280
+#define CSI_REG_CSI_FE_MODE                     0x284
+#define CSI_REG_CSI_FE_MUX_CTRL                 0x288
+#define CSI_REG_CSI_FE_SYNC_CNTR_SEL            0x290
+
+enum CSI_FE_ENABLE_TYPE {
+	CSI_FE_DISABLE = 0,
+	CSI_FE_ENABLE = 1,
+};
+
+enum CSI_FE_MODE_TYPE {
+	CSI_FE_DPHY_MODE = 0,
+	CSI_FE_CPHY_MODE = 1,
+};
+
+enum CSI_FE_INPUT_SELECTOR {
+	CSI_SENSOR_INPUT = 0,
+	CSI_MIPIGEN_INPUT = 1,
+};
+
+enum CSI_FE_SYNC_CNTR_SEL_TYPE {
+	CSI_CNTR_SENSOR_LINE_ID = (1 << 0),
+	CSI_CNTR_INT_LINE_PKT_ID = ~CSI_CNTR_SENSOR_LINE_ID,
+	CSI_CNTR_SENSOR_FRAME_ID = (1 << 1),
+	CSI_CNTR_INT_FRAME_PKT_ID = ~CSI_CNTR_SENSOR_FRAME_ID,
+};
+
+/* MIPI_PKT_GEN */
+#define CSI_REG_PIXGEN_COM_BASE_OFFSET		0x300
+
+#define IPU_CSI_PORT_A_PIXGEN_ADDR_OFFSET	\
+	(CSI_REG_BASE + CSI_REG_BASE_PORT(0) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_B_PIXGEN_ADDR_OFFSET	\
+	(CSI_REG_BASE + CSI_REG_BASE_PORT(1) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_C_PIXGEN_ADDR_OFFSET	\
+	(CSI_REG_BASE + CSI_REG_BASE_PORT(2) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_D_PIXGEN_ADDR_OFFSET	\
+	(CSI_REG_BASE + CSI_REG_BASE_PORT(3) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_E_PIXGEN_ADDR_OFFSET	\
+	(CSI_REG_BASE + CSI_REG_BASE_PORT(4) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_F_PIXGEN_ADDR_OFFSET	\
+	(CSI_REG_BASE + CSI_REG_BASE_PORT(5) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_G_PIXGEN_ADDR_OFFSET	\
+	(CSI_REG_BASE + CSI_REG_BASE_PORT(6) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_H_PIXGEN_ADDR_OFFSET	\
+	(CSI_REG_BASE + CSI_REG_BASE_PORT(7) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+
+#define CSI_REG_PIXGEN_COM_ENABLE_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x300)
+#define CSI_REG_PIXGEN_COM_DTYPE_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x304)
+#define CSI_REG_PIXGEN_COM_VTYPE_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x308)
+#define CSI_REG_PIXGEN_COM_VCHAN_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x30C)
+#define CSI_REG_PIXGEN_COM_WCOUNT_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x310)
+/* PRBS */
+#define CSI_REG_PIXGEN_PRBS_RSTVAL_REG0_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x314)
+#define CSI_REG_PIXGEN_PRBS_RSTVAL_REG1_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x318)
+/* SYNC_GENERATOR_CONFIG */
+#define CSI_REG_PIXGEN_SYNG_FREE_RUN_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x31C)
+#define CSI_REG_PIXGEN_SYNG_PAUSE_REG_IDX(id)		\
+	(CSI_REG_BASE_PORT(id) + 0x320)
+#define CSI_REG_PIXGEN_SYNG_NOF_FRAME_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x324)
+#define CSI_REG_PIXGEN_SYNG_NOF_PIXEL_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x328)
+#define CSI_REG_PIXGEN_SYNG_NOF_LINE_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x32C)
+#define CSI_REG_PIXGEN_SYNG_HBLANK_CYC_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x330)
+#define CSI_REG_PIXGEN_SYNG_VBLANK_CYC_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x334)
+#define CSI_REG_PIXGEN_SYNG_STAT_HCNT_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x338)
+#define CSI_REG_PIXGEN_SYNG_STAT_VCNT_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x33C)
+#define CSI_REG_PIXGEN_SYNG_STAT_FCNT_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x340)
+#define CSI_REG_PIXGEN_SYNG_STAT_DONE_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x344)
+/* TPG */
+#define CSI_REG_PIXGEN_TPG_MODE_REG_IDX(id)		\
+	(CSI_REG_BASE_PORT(id) + 0x348)
+/* TPG: mask_cfg */
+#define CSI_REG_PIXGEN_TPG_HCNT_MASK_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x34C)
+#define CSI_REG_PIXGEN_TPG_VCNT_MASK_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x350)
+#define CSI_REG_PIXGEN_TPG_XYCNT_MASK_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x354)
+/* TPG: delta_cfg */
+#define CSI_REG_PIXGEN_TPG_HCNT_DELTA_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x358)
+#define CSI_REG_PIXGEN_TPG_VCNT_DELTA_REG_IDX(id)	\
+	(CSI_REG_BASE_PORT(id) + 0x35C)
+/* TPG: color_cfg */
+#define CSI_REG_PIXGEN_TPG_R1_REG_IDX(id)	(CSI_REG_BASE_PORT(id) + 0x360)
+#define CSI_REG_PIXGEN_TPG_G1_REG_IDX(id)	(CSI_REG_BASE_PORT(id) + 0x364)
+#define CSI_REG_PIXGEN_TPG_B1_REG_IDX(id)	(CSI_REG_BASE_PORT(id) + 0x368)
+#define CSI_REG_PIXGEN_TPG_R2_REG_IDX(id)	(CSI_REG_BASE_PORT(id) + 0x36C)
+#define CSI_REG_PIXGEN_TPG_G2_REG_IDX(id)	(CSI_REG_BASE_PORT(id) + 0x370)
+#define CSI_REG_PIXGEN_TPG_B2_REG_IDX(id)	(CSI_REG_BASE_PORT(id) + 0x374)
+
+#define CSI_REG_PIXGEN_PRBS_RSTVAL_REG0	CSI_REG_PIXGEN_PRBS_RSTVAL_REG0_IDX(0)
+#define CSI_REG_PIXGEN_PRBS_RSTVAL_REG1	CSI_REG_PIXGEN_PRBS_RSTVAL_REG1_IDX(0)
+#define CSI_REG_PIXGEN_COM_ENABLE_REG	CSI_REG_PIXGEN_COM_ENABLE_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_MODE_REG	CSI_REG_PIXGEN_TPG_MODE_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_R1_REG	CSI_REG_PIXGEN_TPG_R1_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_G1_REG	CSI_REG_PIXGEN_TPG_G1_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_B1_REG	CSI_REG_PIXGEN_TPG_B1_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_R2_REG	CSI_REG_PIXGEN_TPG_R2_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_G2_REG	CSI_REG_PIXGEN_TPG_G2_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_B2_REG	CSI_REG_PIXGEN_TPG_B2_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_HCNT_MASK_REG CSI_REG_PIXGEN_TPG_HCNT_MASK_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_VCNT_MASK_REG CSI_REG_PIXGEN_TPG_VCNT_MASK_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_XYCNT_MASK_REG	\
+	CSI_REG_PIXGEN_TPG_XYCNT_MASK_REG_IDX(0)
+
+#define CSI_REG_PIXGEN_SYNG_NOF_FRAME_REG	\
+	CSI_REG_PIXGEN_SYNG_NOF_FRAME_REG_IDX(0)
+#define CSI_REG_PIXGEN_SYNG_NOF_LINE_REG	\
+	CSI_REG_PIXGEN_SYNG_NOF_LINE_REG_IDX(0)
+#define CSI_REG_PIXGEN_SYNG_HBLANK_CYC_REG	\
+	CSI_REG_PIXGEN_SYNG_HBLANK_CYC_REG_IDX(0)
+#define CSI_REG_PIXGEN_SYNG_VBLANK_CYC_REG	\
+	CSI_REG_PIXGEN_SYNG_VBLANK_CYC_REG_IDX(0)
+#define CSI_REG_PIXGEN_SYNG_NOF_PIXEL_REG	\
+	CSI_REG_PIXGEN_SYNG_NOF_PIXEL_REG_IDX(0)
+#define CSI_REG_PIXGEN_COM_WCOUNT_REG	CSI_REG_PIXGEN_COM_WCOUNT_REG_IDX(0)
+#define CSI_REG_PIXGEN_COM_DTYPE_REG	CSI_REG_PIXGEN_COM_DTYPE_REG_IDX(0)
+#define CSI_REG_PIXGEN_COM_VTYPE_REG	CSI_REG_PIXGEN_COM_VTYPE_REG_IDX(0)
+#define CSI_REG_PIXGEN_COM_VCHAN_REG	CSI_REG_PIXGEN_COM_VCHAN_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_HCNT_DELTA_REG	\
+	CSI_REG_PIXGEN_TPG_HCNT_DELTA_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_VCNT_DELTA_REG	\
+	CSI_REG_PIXGEN_TPG_VCNT_DELTA_REG_IDX(0)
+
+/* CSI HUB General Purpose Registers */
+#define CSI_REG_HUB_GPREG_SRST			(CSI_REG_BASE + 0x18000)
+#define CSI_REG_HUB_GPREG_SLV_REG_SRST		(CSI_REG_BASE + 0x18004)
+#define CSI_REG_HUB_GPREG_PHY_CONTROL(id)	\
+	(CSI_REG_BASE + 0x18008 + (id) * 0x8)
+#define CSI_REG_HUB_GPREG_PHY_CONTROL_RESET		BIT(4)
+#define CSI_REG_HUB_GPREG_PHY_CONTROL_PWR_EN		BIT(0)
+#define CSI_REG_HUB_GPREG_PHY_STATUS(id)	\
+	(CSI_REG_BASE + 0x1800c + (id) * 0x8)
+#define CSI_REG_HUB_GPREG_PHY_STATUS_POWER_ACK		BIT(0)
+#define CSI_REG_HUB_GPREG_PHY_STATUS_PHY_READY		BIT(4)
+
+#define CSI_REG_HUB_DRV_ACCESS_PORT(id)	(CSI_REG_BASE + 0x18018 + (id) * 4)
+#define CSI_REG_HUB_FW_ACCESS_PORT(id)	(CSI_REG_BASE + 0x17000 + (id) * 4)
+
+enum CSI_PORT_CLK_GATING_SWITCH {
+	CSI_PORT_CLK_GATING_OFF = 0,
+	CSI_PORT_CLK_GATING_ON = 1,
+};
+
+#define CSI_REG_BASE_HUB_IRQ                        0x18200
+
+#define IPU_NUM_OF_DLANE_REG_PORT0      2
+#define IPU_NUM_OF_DLANE_REG_PORT1      4
+#define IPU_NUM_OF_DLANE_REG_PORT2      4
+#define IPU_NUM_OF_DLANE_REG_PORT3      2
+#define IPU_NUM_OF_DLANE_REG_PORT4      2
+#define IPU_NUM_OF_DLANE_REG_PORT5      4
+#define IPU_NUM_OF_DLANE_REG_PORT6      4
+#define IPU_NUM_OF_DLANE_REG_PORT7      2
+
+/* IPU6SE supports 2 SIPs, 2 ports per SIP, i.e. 4 ports, 0..3:
+ * sip0 - ports 0, 1
+ * sip1 - ports 2, 3
+ * Ports 0 and 2 support 4 data lanes, 1 and 3 support 2 data lanes.
+ * All offsets are relative to the ISYS base address.
+ */
+
+#define CSI2_HUB_GPREG_SIP_SRST(sip)			(0x238038 + (sip) * 4)
+#define CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip)		(0x238050 + (sip) * 4)
+
+#define CSI2_HUB_GPREG_DPHY_TIMER_INCR			(0x238040)
+#define CSI2_HUB_GPREG_HPLL_FREQ			(0x238044)
+#define CSI2_HUB_GPREG_IS_CLK_RATIO			(0x238048)
+#define CSI2_HUB_GPREG_HPLL_FREQ_ISCLK_RATE_OVERRIDE	(0x23804c)
+#define CSI2_HUB_GPREG_PORT_CLKGATING_DISABLE		(0x238058)
+#define CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL		(0x23805c)
+#define CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL		(0x238088)
+#define CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL		(0x2380a4)
+#define CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL		(0x2380d0)
+
+#define CSI2_SIP_TOP_CSI_RX_BASE(sip)		(0x23805c + (sip) * 0x48)
+#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port)	(0x23805c + ((port) / 2) * 0x48)
+#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port)	(0x238088 + ((port) / 2) * 0x48)
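
Given that even ports sit in RX bank A and odd ports in bank B, one way to pick the RX base for an arbitrary IPU6SE port is the sketch below (the parity split is inferred from the two macros above):

	/* sketch: RX register base for an IPU6SE CSI port (0..3) */
	static u32 csi_rx_port_base(unsigned int port)
	{
		return (port & 1) ? CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) :
				    CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port);
	}
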
+
+/* offset from port base */
+#define CSI2_SIP_TOP_CSI_RX_PORT_CONTROL		(0x0)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE	(0x4)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE	(0x8)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(lane)	(0xc + (lane) * 8)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(lane)	(0x10 + (lane) * 8)
+
+#endif /* IPU_PLATFORM_ISYS_CSI2_REG_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu-platform-isys.h b/drivers/media/pci/intel/ipu6/ipu-platform-isys.h
new file mode 100644
index 000000000000..886947870387
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu-platform-isys.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_ISYS_H
+#define IPU_PLATFORM_ISYS_H
+
+#define IPU_ISYS_ENTITY_PREFIX		"Intel IPU6"
+
+/*
+ * The FW supports at most 16 streams.
+ */
+#define IPU_ISYS_MAX_STREAMS		16
+
+#define ISYS_UNISPART_IRQS	(IPU_ISYS_UNISPART_IRQ_SW |	\
+				 IPU_ISYS_UNISPART_IRQ_CSI0 |	\
+				 IPU_ISYS_UNISPART_IRQ_CSI1)
+
+#endif
diff --git a/drivers/media/pci/intel/ipu6/ipu-platform-psys.h b/drivers/media/pci/intel/ipu6/ipu-platform-psys.h
new file mode 100644
index 000000000000..e8483e9a325b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu-platform-psys.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_PSYS_H
+#define IPU_PLATFORM_PSYS_H
+
+#include "ipu-psys.h"
+#include <uapi/linux/ipu-psys.h>
+
+#define IPU_PSYS_BUF_SET_POOL_SIZE 8
+#define IPU_PSYS_BUF_SET_MAX_SIZE 1024
+
+struct ipu_fw_psys_buffer_set;
+
+enum ipu_psys_cmd_state {
+	KCMD_STATE_PPG_NEW,
+	KCMD_STATE_PPG_START,
+	KCMD_STATE_PPG_ENQUEUE,
+	KCMD_STATE_PPG_STOP,
+	KCMD_STATE_PPG_COMPLETE
+};
+
+struct ipu_psys_scheduler {
+	struct list_head ppgs;
+	struct mutex bs_mutex;  /* Protects buf_set field */
+	struct list_head buf_sets;
+};
+
+enum ipu_psys_ppg_state {
+	PPG_STATE_START = (1 << 0),
+	PPG_STATE_STARTING = (1 << 1),
+	PPG_STATE_STARTED = (1 << 2),
+	PPG_STATE_RUNNING = (1 << 3),
+	PPG_STATE_SUSPEND = (1 << 4),
+	PPG_STATE_SUSPENDING = (1 << 5),
+	PPG_STATE_SUSPENDED = (1 << 6),
+	PPG_STATE_RESUME = (1 << 7),
+	PPG_STATE_RESUMING = (1 << 8),
+	PPG_STATE_RESUMED = (1 << 9),
+	PPG_STATE_STOP = (1 << 10),
+	PPG_STATE_STOPPING = (1 << 11),
+	PPG_STATE_STOPPED = (1 << 12),
+};
+
+struct ipu_psys_ppg {
+	struct ipu_psys_pg *kpg;
+	struct ipu_psys_fh *fh;
+	struct list_head list;
+	struct list_head sched_list;
+	u64 token;
+	void *manifest;
+	struct mutex mutex;     /* Protects kcmd and ppg state field */
+	struct list_head kcmds_new_list;
+	struct list_head kcmds_processing_list;
+	struct list_head kcmds_finished_list;
+	enum ipu_psys_ppg_state state;
+	u32 pri_base;
+	int pri_dynamic;
+};
+
+struct ipu_psys_buffer_set {
+	struct list_head list;
+	struct ipu_fw_psys_buffer_set *buf_set;
+	size_t size;
+	size_t buf_set_size;
+	dma_addr_t dma_addr;
+	void *kaddr;
+	struct ipu_psys_kcmd *kcmd;
+};
+
+int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd);
+void ipu_psys_kcmd_complete(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd,
+			    int error);
+int ipu_psys_fh_init(struct ipu_psys_fh *fh);
+int ipu_psys_fh_deinit(struct ipu_psys_fh *fh);
+
+#endif /* IPU_PLATFORM_PSYS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu-platform-regs.h b/drivers/media/pci/intel/ipu6/ipu-platform-regs.h
new file mode 100644
index 000000000000..5415b7e46fe7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu-platform-regs.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_REGS_H
+#define IPU_PLATFORM_REGS_H
+
+/*
+ * IPU6 uses a uniform address space within the IPU: all subsystem
+ * registers are located in one single space starting from 0, but in
+ * different sections at different addresses. The subsystem offsets are
+ * defined as 0 since the register definitions already carry the offset.
+ */
+#define IPU_UNIFIED_OFFSET			0
+
+#define IPU_ISYS_IOMMU0_OFFSET		0x2E0000
+#define IPU_ISYS_IOMMU1_OFFSET		0x2E0500
+#define IPU_ISYS_IOMMUI_OFFSET		0x2E0A00
+
+#define IPU_PSYS_IOMMU0_OFFSET		0x1B0000
+#define IPU_PSYS_IOMMU1_OFFSET		0x1B0700
+#define IPU_PSYS_IOMMU1R_OFFSET		0x1B0E00
+#define IPU_PSYS_IOMMUI_OFFSET		0x1B1500
+
+/* offsets from the IOMMU base register */
+#define IPU_MMU_L1_STREAM_ID_REG_OFFSET	0x0c
+#define IPU_MMU_L2_STREAM_ID_REG_OFFSET	0x4c
+#define IPU_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET	0x8c
+
+#define IPU_MMU_INFO_OFFSET		0x8
+
+#define IPU_ISYS_SPC_OFFSET		0x210000
+
+#define IPU6SE_PSYS_SPC_OFFSET		0x110000
+#define IPU6_PSYS_SPC_OFFSET		0x118000
+
+#define IPU_ISYS_DMEM_OFFSET		0x200000
+#define IPU_PSYS_DMEM_OFFSET		0x100000
+
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE		0x238200
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK		0x238204
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS		0x238208
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR		0x23820c
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE		0x238210
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE	0x238214
+
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE		0x238220
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK		0x238224
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS		0x238228
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR		0x23822c
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE		0x238230
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE	0x238234
+
+#define IPU_REG_ISYS_UNISPART_IRQ_EDGE			0x27c000
+#define IPU_REG_ISYS_UNISPART_IRQ_MASK			0x27c004
+#define IPU_REG_ISYS_UNISPART_IRQ_STATUS		0x27c008
+#define IPU_REG_ISYS_UNISPART_IRQ_CLEAR			0x27c00c
+#define IPU_REG_ISYS_UNISPART_IRQ_ENABLE			0x27c010
+#define IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE	0x27c014
+#define IPU_REG_ISYS_UNISPART_SW_IRQ_REG			0x27c414
+#define IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG		0x27c418
+#define IPU_ISYS_UNISPART_IRQ_CSI0			BIT(2)
+#define IPU_ISYS_UNISPART_IRQ_CSI1			BIT(3)
+#define IPU_ISYS_UNISPART_IRQ_SW			BIT(22)
+
+#define IPU_REG_ISYS_ISL_TOP_IRQ_EDGE			0x2b0200
+#define IPU_REG_ISYS_ISL_TOP_IRQ_MASK			0x2b0204
+#define IPU_REG_ISYS_ISL_TOP_IRQ_STATUS			0x2b0208
+#define IPU_REG_ISYS_ISL_TOP_IRQ_CLEAR			0x2b020c
+#define IPU_REG_ISYS_ISL_TOP_IRQ_ENABLE			0x2b0210
+#define IPU_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE	0x2b0214
+
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_EDGE			0x2d2100
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_MASK			0x2d2104
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_STATUS		0x2d2108
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_CLEAR			0x2d210c
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_ENABLE		0x2d2110
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE	0x2d2114
+
+/* CDC burst collector thresholds for ISYS - 3 FIFOs, i = 0..2 */
+#define IPU_REG_ISYS_CDC_THRESHOLD(i)		(0x27c400 + ((i) * 4))
+
+#define IPU_ISYS_CSI_PHY_NUM				2
+#define IPU_CSI_IRQ_NUM_PER_PIPE			4
+#define IPU6SE_ISYS_CSI_PORT_NUM			4
+#define IPU6_ISYS_CSI_PORT_NUM				8
+
+#define IPU_ISYS_CSI_PORT_IRQ(irq_num)		(1 << (irq_num))
+
+/* PSYS Info bits */
+#define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a)	(0x2C + ((a) * 12))
+#define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a)	(0x5C + ((a) * 12))
+
+/* PKG DIR OFFSET in IMR in secure mode */
+#define IPU_PKG_DIR_IMR_OFFSET			0x40
+
+#define IPU_ISYS_REG_SPC_STATUS_CTRL		0x0
+
+#define IPU_ISYS_SPC_STATUS_START			BIT(1)
+#define IPU_ISYS_SPC_STATUS_RUN				BIT(3)
+#define IPU_ISYS_SPC_STATUS_READY			BIT(5)
+#define IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE	BIT(12)
+#define IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH		BIT(13)
+
+#define IPU_PSYS_REG_SPC_STATUS_CTRL		0x0
+#define IPU_PSYS_REG_SPC_START_PC		0x4
+#define IPU_PSYS_REG_SPC_ICACHE_BASE		0x10
+#define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER	0x14
+
+#define IPU_PSYS_SPC_STATUS_START			BIT(1)
+#define IPU_PSYS_SPC_STATUS_RUN				BIT(3)
+#define IPU_PSYS_SPC_STATUS_READY			BIT(5)
+#define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE	BIT(12)
+#define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH		BIT(13)
+
+#define IPU_PSYS_REG_SPP0_STATUS_CTRL			0x20000
+
+#define IPU_INFO_ENABLE_SNOOP			BIT(0)
+#define IPU_INFO_DEC_FORCE_FLUSH		BIT(1)
+#define IPU_INFO_DEC_PASS_THRU			BIT(2)
+#define IPU_INFO_ZLW                            BIT(3)
+#define IPU_INFO_STREAM_ID_SET(id)		(((id) & 0x1F) << 4)
+#define IPU_INFO_REQUEST_DESTINATION_IOSF	BIT(9)
+#define IPU_INFO_IMR_BASE			BIT(10)
+#define IPU_INFO_IMR_DESTINED			BIT(11)
+
+#define IPU_INFO_REQUEST_DESTINATION_PRIMARY IPU_INFO_REQUEST_DESTINATION_IOSF
+
+/* Trace unit related register definitions */
+#define TRACE_REG_MAX_ISYS_OFFSET	0xfffff
+#define TRACE_REG_MAX_PSYS_OFFSET	0xfffff
+#define IPU_ISYS_OFFSET			IPU_ISYS_DMEM_OFFSET
+#define IPU_PSYS_OFFSET			IPU_PSYS_DMEM_OFFSET
+/* ISYS trace unit registers */
+/* Trace unit base offset */
+#define IPU_TRACE_REG_IS_TRACE_UNIT_BASE		0x27d000
+/* Trace monitors */
+#define IPU_TRACE_REG_IS_SP_EVQ_BASE		0x211000
+/* GPC blocks */
+#define IPU_TRACE_REG_IS_SP_GPC_BASE		0x210800
+#define IPU_TRACE_REG_IS_ISL_GPC_BASE		0x2b0a00
+#define IPU_TRACE_REG_IS_MMU_GPC_BASE		0x2e0f00
+/* each CSI2 port has a dedicated trace monitor, index 0..7 */
+#define IPU_TRACE_REG_CSI2_TM_BASE(port)	(0x220400 + 0x1000 * (port))
+
+/* Trace timers */
+#define IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N		0x27c410
+#define TRACE_REG_GPREG_TRACE_TIMER_RST_OFF		BIT(0)
+
+/* SIG2CIO */
+#define IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(port)		\
+			(0x220e00 + (port) * 0x1000)
+
+/* PSYS trace unit registers */
+/* Trace unit base offset */
+#define IPU_TRACE_REG_PS_TRACE_UNIT_BASE		0x1b4000
+/* Trace monitors */
+#define IPU_TRACE_REG_PS_SPC_EVQ_BASE			0x119000
+#define IPU_TRACE_REG_PS_SPP0_EVQ_BASE			0x139000
+
+/* GPC blocks */
+#define IPU_TRACE_REG_PS_SPC_GPC_BASE			0x118800
+#define IPU_TRACE_REG_PS_SPP0_GPC_BASE			0x138800
+#define IPU_TRACE_REG_PS_MMU_GPC_BASE			0x1b1b00
+
+/* Trace timers */
+#define IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N	0x1aa714
+
+/*
+ * s2m_pixel_soc_pixel_remapping enables the s2m pixel remapping ability.
+ * Remapping here means that s2m rearranges the order of the pixels within
+ * each group of 4 pixels. For example, with mirror remapping, if the
+ * input's first 4 pixels are 1 2 3 4, then the output's first 4 pixels
+ * are 4 3 2 1. The value 0xE4 comes from the s2m MAS document and means
+ * no remapping.
+ */
+#define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xE4
+/*
+ * csi_be_soc_pixel_remapping enables the CSI/MIPI BE pixel remapping
+ * feature. This remapping works exactly like the stream2mmio remapping.
+ */
+#define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING    0xE4
+
+#define IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR         0x1AE000
+#define IPU_REG_DMA_TOP_AB_GROUP2_BASE_ADDR         0x1AF000
+#define IPU_REG_DMA_TOP_AB_RING_MIN_OFFSET(n)       (0x4 + (n) * 0xC)
+#define IPU_REG_DMA_TOP_AB_RING_MAX_OFFSET(n)       (0x8 + (n) * 0xC)
+#define IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n)    (0xC + (n) * 0xC)
+
+enum ipu_device_ab_group1_target_id {
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R16_GP,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER,
+	IPU_DEVICE_AB_GROUP1_TARGET_ID_R18_AB,
+};
+
+enum nci_ab_access_mode {
+	NCI_AB_ACCESS_MODE_RW,	/* read & write */
+	NCI_AB_ACCESS_MODE_RO,	/* read only */
+	NCI_AB_ACCESS_MODE_WO,	/* write only */
+	NCI_AB_ACCESS_MODE_NA	/* No access at all */
+};
+
+/*
+ * 3:0 CSI_PORT.irq_out[3:0] CSI_PORT_CTRL0 IRQ outputs (4 bits)
+ * [0] CSI_PORT.IRQ_CTRL0_csi
+ * [1] CSI_PORT.IRQ_CTRL1_csi_sync
+ * [2] CSI_PORT.IRQ_CTRL2_s2m_sids0to7
+ * [3] CSI_PORT.IRQ_CTRL3_s2m_sids8to15
+ */
+#define IPU_ISYS_UNISPART_IRQ_CSI2(port)		\
+				   (0x3 << ((port) * IPU_CSI_IRQ_NUM_PER_PIPE))
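
The 0x3 mask covers the first two of a port's four IRQ lines; a sketch of testing one port against a top-level status word (which status register this mask is applied to is an assumption here):

	/* sketch: check whether CSI2 'port' has a pending IRQ */
	u32 status = readl(isys_base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS);

	if (status & IPU_ISYS_UNISPART_IRQ_CSI2(port))
		handle_csi2_irq(port);	/* hypothetical handler */
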
+
+/* IRQ-related registers in PSYS */
+#define IPU_REG_PSYS_GPDEV_IRQ_EDGE		0x1aa200
+#define IPU_REG_PSYS_GPDEV_IRQ_MASK		0x1aa204
+#define IPU_REG_PSYS_GPDEV_IRQ_STATUS		0x1aa208
+#define IPU_REG_PSYS_GPDEV_IRQ_CLEAR		0x1aa20c
+#define IPU_REG_PSYS_GPDEV_IRQ_ENABLE		0x1aa210
+#define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE	0x1aa214
+/* There are 8 FW interrupts, n = 0..7 */
+#define IPU_PSYS_GPDEV_FWIRQ0			5
+#define IPU_PSYS_GPDEV_FWIRQ1			6
+#define IPU_PSYS_GPDEV_FWIRQ2			7
+#define IPU_PSYS_GPDEV_FWIRQ3			8
+#define IPU_PSYS_GPDEV_FWIRQ4			9
+#define IPU_PSYS_GPDEV_FWIRQ5			10
+#define IPU_PSYS_GPDEV_FWIRQ6			11
+#define IPU_PSYS_GPDEV_FWIRQ7			12
+#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n)		(1 << (n))
+#define IPU_REG_PSYS_GPDEV_FWIRQ(n)		(4 * (n) + 0x1aa100)
+
+/*
+ * ISYS GPC (General Performance Counters) Registers
+ */
+#define IPU_ISYS_GPC_BASE			0x2E0000
+#define IPU_ISYS_GPREG_TRACE_TIMER_RST		0x27C410
+enum ipu_isf_cdc_mmu_gpc_registers {
+	IPU_ISF_CDC_MMU_GPC_SOFT_RESET  = 0x0F00,
+	IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE  = 0x0F04,
+	IPU_ISF_CDC_MMU_GPC_ENABLE0  = 0x0F20,
+	IPU_ISF_CDC_MMU_GPC_VALUE0  = 0x0F60,
+	IPU_ISF_CDC_MMU_GPC_CNT_SEL0 = 0x0FA0,
+};
+
+/*
+ * GPC (General Performance Counters) Registers
+ */
+#define IPU_GPC_BASE 0x1B0000
+#define IPU_GPREG_TRACE_TIMER_RST	0x1AA714
+enum ipu_cdc_mmu_gpc_registers {
+	IPU_CDC_MMU_GPC_SOFT_RESET  = 0x1B00,
+	IPU_CDC_MMU_GPC_OVERALL_ENABLE  = 0x1B04,
+	IPU_CDC_MMU_GPC_TRACE_HEADER  = 0x1B08,
+	IPU_CDC_MMU_GPC_TRACE_ADDR  = 0x1B0C,
+	IPU_CDC_MMU_GPC_TRACE_ENABLE_NPK  = 0x1B10,
+	IPU_CDC_MMU_GPC_TRACE_ENABLE_DDR  = 0x1B14,
+	IPU_CDC_MMU_GPC_LP_CLEAR  = 0x1B18,
+	IPU_CDC_MMU_GPC_LOST_PACKET  = 0x1B1C,
+	IPU_CDC_MMU_GPC_ENABLE0  = 0x1B20,
+	IPU_CDC_MMU_GPC_VALUE0  = 0x1B60,
+	IPU_CDC_MMU_GPC_CNT_SEL0 = 0x1BA0,
+	IPU_CDC_MMU_GPC_START_SEL0 = 0x1BE0,
+	IPU_CDC_MMU_GPC_STOP_SEL0 = 0x1C20,
+	IPU_CDC_MMU_GPC_MSG_SEL0 = 0x1C60,
+	IPU_CDC_MMU_GPC_PLOAD_SEL0 = 0x1CA0,
+	IPU_CDC_MMU_GPC_IRQ_TRIGGER_VALUE0 = 0x1CE0,
+	IPU_CDC_MMU_GPC_IRQ_TIMER_SEL0 = 0x1D20,
+	IPU_CDC_MMU_GPC_IRQ_ENABLE0 = 0x1D60,
+	IPU_CDC_MMU_GPC_END = 0x1D9C
+};
+
+#define IPU_GPC_SENSE_OFFSET		7
+#define IPU_GPC_ROUTE_OFFSET		5
+#define IPU_GPC_SOURCE_OFFSET		0
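
These offsets imply the counter-select value packs sense, route and source into one word; composing it could look like this sketch of the assumed encoding:

	/* sketch: compose a GPC counter-select value */
	static u32 ipu_gpc_cnt_sel(u32 sense, u32 route, u32 source)
	{
		return (sense << IPU_GPC_SENSE_OFFSET) |
		       (route << IPU_GPC_ROUTE_OFFSET) |
		       (source << IPU_GPC_SOURCE_OFFSET);
	}
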
+
+/*
+ * Signals monitored by GPC
+ */
+#define IPU_GPC_TRACE_TLB_MISS_MMU_LB_IDX		0
+#define IPU_GPC_TRACE_FULL_WRITE_LB_IDX			1
+#define IPU_GPC_TRACE_NOFULL_WRITE_LB_IDX		2
+#define IPU_GPC_TRACE_FULL_READ_LB_IDX			3
+#define IPU_GPC_TRACE_NOFULL_READ_LB_IDX		4
+#define IPU_GPC_TRACE_STALL_LB_IDX			5
+#define IPU_GPC_TRACE_ZLW_LB_IDX			6
+#define IPU_GPC_TRACE_TLB_MISS_MMU_HBTX_IDX		7
+#define IPU_GPC_TRACE_FULL_WRITE_HBTX_IDX		8
+#define IPU_GPC_TRACE_NOFULL_WRITE_HBTX_IDX		9
+#define IPU_GPC_TRACE_FULL_READ_HBTX_IDX		10
+#define IPU_GPC_TRACE_STALL_HBTX_IDX			11
+#define IPU_GPC_TRACE_ZLW_HBTX_IDX			12
+#define IPU_GPC_TRACE_TLB_MISS_MMU_HBFRX_IDX		13
+#define IPU_GPC_TRACE_FULL_READ_HBFRX_IDX		14
+#define IPU_GPC_TRACE_NOFULL_READ_HBFRX_IDX		15
+#define IPU_GPC_TRACE_STALL_HBFRX_IDX			16
+#define IPU_GPC_TRACE_ZLW_HBFRX_IDX			17
+#define IPU_GPC_TRACE_TLB_MISS_ICACHE_IDX		18
+#define IPU_GPC_TRACE_FULL_READ_ICACHE_IDX		19
+#define IPU_GPC_TRACE_STALL_ICACHE_IDX			20
+/*
+ * psys subdomains power request regs
+ */
+enum ipu_device_buttress_psys_domain_pos {
+	IPU_PSYS_SUBDOMAIN_ISA		= 0,
+	IPU_PSYS_SUBDOMAIN_PSA		= 1,
+	IPU_PSYS_SUBDOMAIN_BB		= 2,
+	IPU_PSYS_SUBDOMAIN_IDSP1	= 3, /* only in IPU6M */
+	IPU_PSYS_SUBDOMAIN_IDSP2	= 4, /* only in IPU6M */
+};
+
+#define IPU_PSYS_SUBDOMAINS_POWER_MASK  (BIT(IPU_PSYS_SUBDOMAIN_ISA) | \
+					 BIT(IPU_PSYS_SUBDOMAIN_PSA) | \
+					 BIT(IPU_PSYS_SUBDOMAIN_BB))
+
+#define IPU_PSYS_SUBDOMAINS_POWER_REQ                   0xa0
+#define IPU_PSYS_SUBDOMAINS_POWER_STATUS                0xa4
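
The request/status pair suggests a write-then-poll handshake; a heavily simplified sketch (real code would bound the loop with a timeout, and the exact ack semantics are an assumption):

	/* sketch: power up the default PSYS subdomains and wait */
	writel(IPU_PSYS_SUBDOMAINS_POWER_MASK,
	       base + IPU_PSYS_SUBDOMAINS_POWER_REQ);
	while ((readl(base + IPU_PSYS_SUBDOMAINS_POWER_STATUS) &
		IPU_PSYS_SUBDOMAINS_POWER_MASK) !=
	       IPU_PSYS_SUBDOMAINS_POWER_MASK)
		cpu_relax();
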
+
+#endif /* IPU_PLATFORM_REGS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu-platform-resources.h b/drivers/media/pci/intel/ipu6/ipu-platform-resources.h
new file mode 100644
index 000000000000..642c5c6ca6c1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu-platform-resources.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_RESOURCES_COMMON_H
+#define IPU_PLATFORM_RESOURCES_COMMON_H
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST                 0
+
+#define	IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT			0
+#define	IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT		2
+#define	IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT		2
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT			5
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT		6
+
+#define	IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT		3
+
+#define	IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT		3
+#define IPU_FW_PSYS_N_FRAME_PLANES					6
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT			4
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT		1
+
+#define IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES		4
+#define IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES		4
+
+#define IPU_FW_PSYS_PROCESS_MAX_CELLS			1
+#define IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS		4
+#define IPU_FW_PSYS_RBM_NOF_ELEMS			5
+#define IPU_FW_PSYS_KBM_NOF_ELEMS			4
+
+struct ipu_fw_psys_process {
+	s16 parent_offset;
+	u8 size;
+	u8 cell_dependencies_offset;
+	u8 terminal_dependencies_offset;
+	u8 process_extension_offset;
+	u8 ID;
+	u8 program_idx;
+	u8 state;
+	u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS];
+	u8 cell_dependency_count;
+	u8 terminal_dependency_count;
+};
+
+struct ipu_fw_psys_program_manifest {
+	u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+	s16 parent_offset;
+	u8  program_dependency_offset;
+	u8  terminal_dependency_offset;
+	u8  size;
+	u8  program_extension_offset;
+	u8 program_type;
+	u8 ID;
+	u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS];
+	u8 cell_type_id;
+	u8 program_dependency_count;
+	u8 terminal_dependency_count;
+};
+
+/* platform specific resource interface */
+struct ipu_psys_resource_pool;
+struct ipu_psys_resource_alloc;
+struct ipu_fw_psys_process_group;
+int ipu_psys_allocate_resources(const struct device *dev,
+				struct ipu_fw_psys_process_group *pg,
+				void *pg_manifest,
+				struct ipu_psys_resource_alloc *alloc,
+				struct ipu_psys_resource_pool *pool);
+int ipu_psys_move_resources(const struct device *dev,
+			    struct ipu_psys_resource_alloc *alloc,
+			    struct ipu_psys_resource_pool *source_pool,
+			    struct ipu_psys_resource_pool *target_pool);
+
+void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src,
+			    struct ipu_psys_resource_pool *dest);
+
+int ipu_psys_try_allocate_resources(struct device *dev,
+				    struct ipu_fw_psys_process_group *pg,
+				    void *pg_manifest,
+				    struct ipu_psys_resource_pool *pool);
+
+void ipu_psys_free_resources(struct ipu_psys_resource_alloc *alloc,
+			     struct ipu_psys_resource_pool *pool);
+
+int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+				    u16 id, u32 bitmap,
+				    u32 active_bitmap);
+
+int ipu_psys_allocate_cmd_queue_resource(struct ipu_psys_resource_pool *pool);
+void ipu_psys_free_cmd_queue_resource(struct ipu_psys_resource_pool *pool,
+				      u8 queue_id);
+
+extern const struct ipu_fw_resource_definitions *ipu6_res_defs;
+extern const struct ipu_fw_resource_definitions *ipu6se_res_defs;
+extern struct ipu6_psys_hw_res_variant hw_var;
+#endif /* IPU_PLATFORM_RESOURCES_COMMON_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu-platform.h b/drivers/media/pci/intel/ipu6/ipu-platform.h
new file mode 100644
index 000000000000..e98b9672b74b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu-platform.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_H
+#define IPU_PLATFORM_H
+
+#define IPU_NAME			"intel-ipu6"
+
+#define IPU6SE_FIRMWARE_NAME		"intel/ipu6se_fw.bin"
+#define IPU6_FIRMWARE_NAME		"intel/ipu6_fw.bin"
+
+/*
+ * The following definitions are encoded into the media_device's model
+ * field so that software components using the IPU driver can get the
+ * hw stepping information.
+ */
+#define IPU_MEDIA_DEV_MODEL_NAME		"ipu6"
+
+#define IPU6SE_ISYS_NUM_STREAMS          8       /* Max 8 */
+#define IPU6_ISYS_NUM_STREAMS            16      /* Max 16 */
+
+/* declarations; definitions are in ipu6.c */
+extern struct ipu_isys_internal_pdata isys_ipdata;
+extern struct ipu_psys_internal_pdata psys_ipdata;
+extern const struct ipu_buttress_ctrl isys_buttress_ctrl;
+extern const struct ipu_buttress_ctrl psys_buttress_ctrl;
+
+/* definitions in ipu6-isys.c */
+extern struct ipu_trace_block isys_trace_blocks[];
+/* definitions in ipu6-psys.c */
+extern struct ipu_trace_block psys_trace_blocks[];
+
+#endif
diff --git a/drivers/media/pci/intel/ipu6/ipu-resources.c b/drivers/media/pci/intel/ipu6/ipu-resources.c
new file mode 100644
index 000000000000..e5932287b7f3
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu-resources.c
@@ -0,0 +1,839 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2020 Intel Corporation
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include <uapi/linux/ipu-psys.h>
+
+#include "ipu-fw-psys.h"
+#include "ipu-psys.h"
+
+struct ipu6_psys_hw_res_variant hw_var;
+void ipu6_psys_hw_res_variant_init(void)
+{
+	if (ipu_ver == IPU_VER_6SE) {
+		hw_var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+		hw_var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID;
+		hw_var.set_proc_dev_chn = ipu6se_fw_psys_set_proc_dev_chn;
+		hw_var.set_proc_dfm_bitmap = ipu6se_fw_psys_set_proc_dfm_bitmap;
+		hw_var.set_proc_ext_mem = ipu6se_fw_psys_set_process_ext_mem;
+		hw_var.get_pgm_by_proc =
+			ipu6se_fw_psys_get_program_manifest_by_process;
+		return;
+	} else if (ipu_ver == IPU_VER_6) {
+		hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+		hw_var.cell_num = IPU6_FW_PSYS_N_CELL_ID;
+		hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn;
+		hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap;
+		hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem;
+		hw_var.get_pgm_by_proc =
+			ipu6_fw_psys_get_program_manifest_by_process;
+		return;
+	}
+
+	WARN(1, "ipu6 psys res var is not initialised correctly.");
+}
+
+static const struct ipu_fw_resource_definitions *get_res(void)
+{
+	if (ipu_ver == IPU_VER_6SE)
+		return ipu6se_res_defs;
+
+	return ipu6_res_defs;
+}
+
+static int ipu_resource_init(struct ipu_resource *res, u32 id, int elements)
+{
+	if (elements <= 0) {
+		res->bitmap = NULL;
+		return 0;
+	}
+
+	res->bitmap = bitmap_zalloc(elements, GFP_KERNEL);
+	if (!res->bitmap)
+		return -ENOMEM;
+	res->elements = elements;
+	res->id = id;
+	return 0;
+}
+
+static unsigned long
+ipu_resource_alloc_with_pos(struct ipu_resource *res, int n,
+			    int pos,
+			    struct ipu_resource_alloc *alloc,
+			    enum ipu_resource_type type)
+{
+	unsigned long p;
+
+	if (n <= 0) {
+		alloc->elements = 0;
+		return 0;
+	}
+
+	if (!res->bitmap || pos >= res->elements)
+		return (unsigned long)(-ENOSPC);
+
+	p = bitmap_find_next_zero_area(res->bitmap, res->elements, pos, n, 0);
+	alloc->resource = NULL;
+
+	if (p != pos)
+		return (unsigned long)(-ENOSPC);
+	bitmap_set(res->bitmap, p, n);
+	alloc->resource = res;
+	alloc->elements = n;
+	alloc->pos = p;
+	alloc->type = type;
+
+	return pos;
+}
+
+static unsigned long
+ipu_resource_alloc(struct ipu_resource *res, int n,
+		   struct ipu_resource_alloc *alloc,
+		   enum ipu_resource_type type)
+{
+	unsigned long p;
+
+	if (n <= 0) {
+		alloc->elements = 0;
+		return 0;
+	}
+
+	if (!res->bitmap)
+		return (unsigned long)(-ENOSPC);
+
+	p = bitmap_find_next_zero_area(res->bitmap, res->elements, 0, n, 0);
+	alloc->resource = NULL;
+
+	if (p >= res->elements)
+		return (unsigned long)(-ENOSPC);
+	bitmap_set(res->bitmap, p, n);
+	alloc->resource = res;
+	alloc->elements = n;
+	alloc->pos = p;
+	alloc->type = type;
+
+	return p;
+}
+
+static void ipu_resource_free(struct ipu_resource_alloc *alloc)
+{
+	if (alloc->elements <= 0)
+		return;
+
+	if (alloc->type == IPU_RESOURCE_DFM)
+		*alloc->resource->bitmap &= ~(unsigned long)(alloc->elements);
+	else
+		bitmap_clear(alloc->resource->bitmap, alloc->pos,
+			     alloc->elements);
+	alloc->resource = NULL;
+}
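
A round trip through this allocator pairs one alloc call with one free; for example, given an initialised struct ipu_resource res, a sketch:

	/* sketch: take two elements from a resource, then release them */
	struct ipu_resource_alloc a;
	unsigned long pos = ipu_resource_alloc(&res, 2, &a,
					       IPU_RESOURCE_DEV_CHN);

	if (!IS_ERR_VALUE(pos)) {
		/* elements pos .. pos + 1 are now reserved */
		ipu_resource_free(&a);
	}
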
+
+static void ipu_resource_cleanup(struct ipu_resource *res)
+{
+	bitmap_free(res->bitmap);
+	res->bitmap = NULL;
+}
+
+/********** IPU PSYS-specific resource handling **********/
+
+int ipu_psys_resource_pool_init(struct ipu_psys_resource_pool
+				*pool)
+{
+	int i, j, k, ret;
+	const struct ipu_fw_resource_definitions *res_defs;
+
+	res_defs = get_res();
+
+	pool->cells = 0;
+
+	for (i = 0; i < res_defs->num_dev_channels; i++) {
+		ret = ipu_resource_init(&pool->dev_channels[i], i,
+					res_defs->dev_channels[i]);
+		if (ret)
+			goto error;
+	}
+
+	for (j = 0; j < res_defs->num_ext_mem_ids; j++) {
+		ret = ipu_resource_init(&pool->ext_memory[j], j,
+					res_defs->ext_mem_ids[j]);
+		if (ret)
+			goto memory_error;
+	}
+
+	for (k = 0; k < res_defs->num_dfm_ids; k++) {
+		ret = ipu_resource_init(&pool->dfms[k], k, res_defs->dfms[k]);
+		if (ret)
+			goto dfm_error;
+	}
+
+	if (ipu_ver == IPU_VER_6SE)
+		bitmap_zero(pool->cmd_queues,
+			    IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID);
+	else
+		bitmap_zero(pool->cmd_queues,
+			    IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID);
+
+	return 0;
+
+dfm_error:
+	for (k--; k >= 0; k--)
+		ipu_resource_cleanup(&pool->dfms[k]);
+
+memory_error:
+	for (j--; j >= 0; j--)
+		ipu_resource_cleanup(&pool->ext_memory[j]);
+
+error:
+	for (i--; i >= 0; i--)
+		ipu_resource_cleanup(&pool->dev_channels[i]);
+	return ret;
+}
+
+void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src,
+			    struct ipu_psys_resource_pool *dest)
+{
+	int i;
+	const struct ipu_fw_resource_definitions *res_defs;
+
+	res_defs = get_res();
+
+	dest->cells = src->cells;
+	for (i = 0; i < res_defs->num_dev_channels; i++)
+		*dest->dev_channels[i].bitmap = *src->dev_channels[i].bitmap;
+
+	for (i = 0; i < res_defs->num_ext_mem_ids; i++)
+		*dest->ext_memory[i].bitmap = *src->ext_memory[i].bitmap;
+
+	for (i = 0; i < res_defs->num_dfm_ids; i++)
+		*dest->dfms[i].bitmap = *src->dfms[i].bitmap;
+}
+
+void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool
+				    *pool)
+{
+	u32 i;
+	const struct ipu_fw_resource_definitions *res_defs;
+
+	res_defs = get_res();
+	for (i = 0; i < res_defs->num_dev_channels; i++)
+		ipu_resource_cleanup(&pool->dev_channels[i]);
+
+	for (i = 0; i < res_defs->num_ext_mem_ids; i++)
+		ipu_resource_cleanup(&pool->ext_memory[i]);
+
+	for (i = 0; i < res_defs->num_dfm_ids; i++)
+		ipu_resource_cleanup(&pool->dfms[i]);
+}
+
+static int __alloc_one_resrc(const struct device *dev,
+			     struct ipu_fw_psys_process *process,
+			     struct ipu_resource *resource,
+			     struct ipu_fw_generic_program_manifest *pm,
+			     u32 resource_id,
+			     struct ipu_psys_resource_alloc *alloc)
+{
+	const u16 resource_req = pm->dev_chn_size[resource_id];
+	const u16 resource_offset_req = pm->dev_chn_offset[resource_id];
+	unsigned long retl;
+	const struct ipu_fw_resource_definitions *res_defs;
+
+	if (resource_req <= 0)
+		return 0;
+
+	if (alloc->resources >= IPU_MAX_RESOURCES) {
+		dev_err(dev, "out of resource handles\n");
+		return -ENOSPC;
+	}
+	res_defs = get_res();
+	if (resource_offset_req != (u16)(-1))
+		retl = ipu_resource_alloc_with_pos
+		    (resource,
+		     resource_req,
+		     resource_offset_req,
+		     &alloc->resource_alloc[alloc->resources],
+		     IPU_RESOURCE_DEV_CHN);
+	else
+		retl = ipu_resource_alloc
+		    (resource, resource_req,
+		     &alloc->resource_alloc[alloc->resources],
+		     IPU_RESOURCE_DEV_CHN);
+	if (IS_ERR_VALUE(retl)) {
+		dev_dbg(dev, "out of device channel resources\n");
+		return (int)retl;
+	}
+	alloc->resources++;
+
+	return 0;
+}
+
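+/* Kernighan's popcount; equivalent to hweight32() for 32-bit values. */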
+static inline unsigned int bit_count(unsigned int n)
+{
+	unsigned int counter = 0;
+
+	while (n) {
+		counter++;
+		n &= (n - 1);
+	}
+	return counter;
+}
+
+static int ipu_psys_allocate_one_dfm(const struct device *dev,
+				     struct ipu_fw_psys_process *process,
+				     struct ipu_resource *resource,
+				     struct ipu_fw_generic_program_manifest *pm,
+				     u32 resource_id,
+				     struct ipu_psys_resource_alloc *alloc)
+{
+	u32 dfm_bitmap_req = pm->dfm_port_bitmap[resource_id];
+	u32 active_dfm_bitmap_req = pm->dfm_active_port_bitmap[resource_id];
+	const u8 is_relocatable = pm->is_dfm_relocatable[resource_id];
+	struct ipu_resource_alloc *alloc_resource;
+	unsigned long p = 0;
+
+	if (dfm_bitmap_req == 0)
+		return 0;
+
+	if (alloc->resources >= IPU_MAX_RESOURCES) {
+		dev_err(dev, "out of resource handles\n");
+		return -ENOSPC;
+	}
+
+	if (!resource->bitmap)
+		return -ENOSPC;
+
+	if (!is_relocatable) {
+		if (*resource->bitmap & dfm_bitmap_req) {
+			dev_warn(dev,
+				 "out of dfm resources, req 0x%x, get 0x%lx\n",
+				 dfm_bitmap_req, *resource->bitmap);
+			return -ENOSPC;
+		}
+		*resource->bitmap |= dfm_bitmap_req;
+	} else {
+		unsigned int n = bit_count(dfm_bitmap_req);
+
+		p = bitmap_find_next_zero_area(resource->bitmap,
+					       resource->elements, 0, n, 0);
+
+		if (p >= resource->elements)
+			return -ENOSPC;
+
+		bitmap_set(resource->bitmap, p, n);
+		dfm_bitmap_req = dfm_bitmap_req << p;
+		active_dfm_bitmap_req = active_dfm_bitmap_req << p;
+	}
+
+	alloc_resource = &alloc->resource_alloc[alloc->resources];
+	alloc_resource->resource = resource;
+	/* 'elements' carries the allocated DFM port bitmap, not a count */
+	alloc_resource->elements = dfm_bitmap_req;
+	alloc_resource->pos = p;
+	alloc_resource->type = IPU_RESOURCE_DFM;
+
+	alloc->resources++;
+
+	return 0;
+}
+
+/*
+ * ext_mem_type_id is a generic type ID for memory (like DMEM, VMEM);
+ * ext_mem_bank_id is a detailed type ID for memory (like DMEM0, DMEM1, etc.)
+ */
+static int __alloc_mem_resrc(const struct device *dev,
+			     struct ipu_fw_psys_process *process,
+			     struct ipu_resource *resource,
+			     struct ipu_fw_generic_program_manifest *pm,
+			     u32 ext_mem_type_id, u32 ext_mem_bank_id,
+			     struct ipu_psys_resource_alloc *alloc)
+{
+	const u16 memory_resource_req = pm->ext_mem_size[ext_mem_type_id];
+	const u16 memory_offset_req = pm->ext_mem_offset[ext_mem_type_id];
+
+	unsigned long retl;
+
+	if (memory_resource_req <= 0)
+		return 0;
+
+	if (alloc->resources >= IPU_MAX_RESOURCES) {
+		dev_err(dev, "out of resource handles\n");
+		return -ENOSPC;
+	}
+	if (memory_offset_req != (u16)(-1))
+		retl = ipu_resource_alloc_with_pos
+		    (resource,
+		     memory_resource_req, memory_offset_req,
+		     &alloc->resource_alloc[alloc->resources],
+		     IPU_RESOURCE_EXT_MEM);
+	else
+		retl = ipu_resource_alloc
+		    (resource, memory_resource_req,
+		     &alloc->resource_alloc[alloc->resources],
+		     IPU_RESOURCE_EXT_MEM);
+	if (IS_ERR_VALUE(retl)) {
+		dev_dbg(dev, "out of memory resources\n");
+		return (int)retl;
+	}
+
+	alloc->resources++;
+
+	return 0;
+}
+
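+/* Returns the allocated cmd queue index, or -ENOSPC if none is free. */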
+int ipu_psys_allocate_cmd_queue_resource(struct ipu_psys_resource_pool *pool)
+{
+	unsigned long p;
+	int size, start;
+
+	size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+	start = IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID;
+
+	if (ipu_ver == IPU_VER_6SE) {
+		size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+		start = IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID;
+	}
+
+	/* find available cmd queue from ppg0_cmd_id */
+	p = bitmap_find_next_zero_area(pool->cmd_queues, size, start, 1, 0);
+
+	if (p >= size)
+		return -ENOSPC;
+
+	bitmap_set(pool->cmd_queues, p, 1);
+
+	return p;
+}
+
+void ipu_psys_free_cmd_queue_resource(struct ipu_psys_resource_pool *pool,
+				      u8 queue_id)
+{
+	bitmap_clear(pool->cmd_queues, queue_id, 1);
+}
+
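+/*
+ * Like ipu_psys_allocate_resources() below, but with purely local
+ * bookkeeping: no firmware process fields are written.
+ */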
+int ipu_psys_try_allocate_resources(struct device *dev,
+				    struct ipu_fw_psys_process_group *pg,
+				    void *pg_manifest,
+				    struct ipu_psys_resource_pool *pool)
+{
+	u32 id, idx;
+	u32 mem_type_id;
+	int ret, i;
+	u16 *process_offset_table;
+	u8 processes;
+	u32 cells = 0;
+	struct ipu_psys_resource_alloc *alloc;
+	const struct ipu_fw_resource_definitions *res_defs;
+
+	if (!pg)
+		return -EINVAL;
+	process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset);
+	processes = pg->process_count;
+
+	alloc = kzalloc(sizeof(*alloc), GFP_KERNEL);
+	if (!alloc)
+		return -ENOMEM;
+
+	res_defs = get_res();
+	for (i = 0; i < processes; i++) {
+		u32 cell;
+		struct ipu_fw_psys_process *process =
+			(struct ipu_fw_psys_process *)
+			((char *)pg + process_offset_table[i]);
+		struct ipu_fw_generic_program_manifest pm;
+
+		memset(&pm, 0, sizeof(pm));
+
+		if (!process) {
+			dev_err(dev, "can not get process\n");
+			ret = -ENOENT;
+			goto free_out;
+		}
+
+		ret = ipu_fw_psys_get_program_manifest_by_process
+			(&pm, pg_manifest, process);
+		if (ret < 0) {
+			dev_err(dev, "can not get manifest\n");
+			goto free_out;
+		}
+
+		if (pm.cell_id == res_defs->num_cells &&
+		    pm.cell_type_id == res_defs->num_cells_type) {
+			cell = res_defs->num_cells;
+		} else if ((pm.cell_id != res_defs->num_cells &&
+			    pm.cell_type_id == res_defs->num_cells_type)) {
+			cell = pm.cell_id;
+		} else {
+			/* Find a free cell of desired type */
+			u32 type = pm.cell_type_id;
+
+			for (cell = 0; cell < res_defs->num_cells; cell++)
+				if (res_defs->cells[cell] == type &&
+				    ((pool->cells | cells) & (1 << cell)) == 0)
+					break;
+			if (cell >= res_defs->num_cells) {
+				dev_dbg(dev, "no free cells of right type\n");
+				ret = -ENOSPC;
+				goto free_out;
+			}
+		}
+		if (cell < res_defs->num_cells)
+			cells |= 1 << cell;
+		if (pool->cells & cells) {
+			dev_dbg(dev, "out of cell resources\n");
+			ret = -ENOSPC;
+			goto free_out;
+		}
+
+		if (pm.dev_chn_size) {
+			for (id = 0; id < res_defs->num_dev_channels; id++) {
+				ret = __alloc_one_resrc(dev, process,
+							&pool->dev_channels[id],
+							&pm, id, alloc);
+				if (ret)
+					goto free_out;
+			}
+		}
+
+		if (pm.dfm_port_bitmap) {
+			for (id = 0; id < res_defs->num_dfm_ids; id++) {
+				ret = ipu_psys_allocate_one_dfm
+					(dev, process,
+					 &pool->dfms[id], &pm, id, alloc);
+				if (ret)
+					goto free_out;
+			}
+		}
+
+		if (pm.ext_mem_size) {
+			for (mem_type_id = 0;
+			     mem_type_id < res_defs->num_ext_mem_types;
+			     mem_type_id++) {
+				u32 bank = res_defs->num_ext_mem_ids;
+
+				if (cell != res_defs->num_cells) {
+					idx = res_defs->cell_mem_row * cell +
+						mem_type_id;
+					bank = res_defs->cell_mem[idx];
+				}
+
+				if (bank == res_defs->num_ext_mem_ids)
+					continue;
+
+				ret = __alloc_mem_resrc(dev, process,
+							&pool->ext_memory[bank],
+							&pm, mem_type_id, bank,
+							alloc);
+				if (ret)
+					goto free_out;
+			}
+		}
+	}
+	alloc->cells |= cells;
+	pool->cells |= cells;
+
+	kfree(alloc);
+	return 0;
+
+free_out:
+	dev_dbg(dev, "failed to try_allocate resource\n");
+	kfree(alloc);
+	return ret;
+}
+
+/*
+ * Allocate resources for pg from `pool'. Mark the allocated
+ * resources into `alloc'. Returns 0 on success, -ENOSPC if there
+ * are not enough resources, in which case no resources are
+ * allocated at all, or some other error on other conditions.
+ */
+int ipu_psys_allocate_resources(const struct device *dev,
+				struct ipu_fw_psys_process_group *pg,
+				void *pg_manifest,
+				struct ipu_psys_resource_alloc
+				*alloc, struct ipu_psys_resource_pool
+				*pool)
+{
+	u32 id;
+	u32 mem_type_id;
+	int ret, i;
+	u16 *process_offset_table;
+	u8 processes;
+	u32 cells = 0;
+	int p, idx;
+	u32 bmp, a_bmp;
+	const struct ipu_fw_resource_definitions *res_defs;
+
+	if (!pg)
+		return -EINVAL;
+
+	res_defs = get_res();
+	process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset);
+	processes = pg->process_count;
+
+	for (i = 0; i < processes; i++) {
+		u32 cell;
+		struct ipu_fw_psys_process *process =
+		    (struct ipu_fw_psys_process *)
+		    ((char *)pg + process_offset_table[i]);
+		struct ipu_fw_generic_program_manifest pm;
+
+		memset(&pm, 0, sizeof(pm));
+		if (!process) {
+			dev_err(dev, "can not get process\n");
+			ret = -ENOENT;
+			goto free_out;
+		}
+
+		ret = ipu_fw_psys_get_program_manifest_by_process
+		    (&pm, pg_manifest, process);
+		if (ret < 0) {
+			dev_err(dev, "can not get manifest\n");
+			goto free_out;
+		}
+
+		if (pm.cell_id == res_defs->num_cells &&
+		    pm.cell_type_id == res_defs->num_cells_type) {
+			cell = res_defs->num_cells;
+		} else if ((pm.cell_id != res_defs->num_cells &&
+			    pm.cell_type_id == res_defs->num_cells_type)) {
+			cell = pm.cell_id;
+		} else {
+			/* Find a free cell of desired type */
+			u32 type = pm.cell_type_id;
+
+			for (cell = 0; cell < res_defs->num_cells; cell++)
+				if (res_defs->cells[cell] == type &&
+				    ((pool->cells | cells) & (1 << cell)) == 0)
+					break;
+			if (cell >= res_defs->num_cells) {
+				dev_dbg(dev, "no free cells of right type\n");
+				ret = -ENOSPC;
+				goto free_out;
+			}
+			ret = ipu_fw_psys_set_process_cell_id(process, 0, cell);
+			if (ret)
+				goto free_out;
+		}
+		if (cell < res_defs->num_cells)
+			cells |= 1 << cell;
+		if (pool->cells & cells) {
+			dev_dbg(dev, "out of cell resources\n");
+			ret = -ENOSPC;
+			goto free_out;
+		}
+
+		if (pm.dev_chn_size) {
+			for (id = 0; id < res_defs->num_dev_channels; id++) {
+				ret = __alloc_one_resrc(dev, process,
+							&pool->dev_channels[id],
+							&pm, id, alloc);
+				if (ret)
+					goto free_out;
+
+				idx = alloc->resources - 1;
+				p = alloc->resource_alloc[idx].pos;
+				ret = ipu_fw_psys_set_proc_dev_chn(process, id,
+								   p);
+				if (ret)
+					goto free_out;
+			}
+		}
+
+		if (pm.dfm_port_bitmap) {
+			for (id = 0; id < res_defs->num_dfm_ids; id++) {
+				ret = ipu_psys_allocate_one_dfm(dev, process,
+								&pool->dfms[id],
+								&pm, id, alloc);
+				if (ret)
+					goto free_out;
+
+				idx = alloc->resources - 1;
+				p = alloc->resource_alloc[idx].pos;
+				bmp = pm.dfm_port_bitmap[id];
+				bmp = bmp << p;
+				a_bmp = pm.dfm_active_port_bitmap[id];
+				a_bmp = a_bmp << p;
+				ret = ipu_fw_psys_set_proc_dfm_bitmap(process,
+								      id, bmp,
+								      a_bmp);
+				if (ret)
+					goto free_out;
+			}
+		}
+
+		if (pm.ext_mem_size) {
+			for (mem_type_id = 0;
+			     mem_type_id < res_defs->num_ext_mem_types;
+			     mem_type_id++) {
+				u32 bank = res_defs->num_ext_mem_ids;
+
+				if (cell != res_defs->num_cells) {
+					idx = res_defs->cell_mem_row * cell +
+						mem_type_id;
+					bank = res_defs->cell_mem[idx];
+				}
+				if (bank == res_defs->num_ext_mem_ids)
+					continue;
+
+				ret = __alloc_mem_resrc(dev, process,
+							&pool->ext_memory[bank],
+							&pm, mem_type_id,
+							bank, alloc);
+				if (ret)
+					goto free_out;
+				/* No return value check here: the fw API
+				 * does its own checks and returns non-zero
+				 * for any mem_type_id other than 0, which
+				 * may be caused by the mem_bank_id
+				 * allocation flow above being improper.
+				 */
+				idx = alloc->resources - 1;
+				p = alloc->resource_alloc[idx].pos;
+				ipu_fw_psys_set_process_ext_mem(process,
+								mem_type_id,
+								bank, p);
+			}
+		}
+	}
+	alloc->cells |= cells;
+	pool->cells |= cells;
+	return 0;
+
+free_out:
+	for (; i >= 0; i--) {
+		struct ipu_fw_psys_process *process =
+		    (struct ipu_fw_psys_process *)
+		    ((char *)pg + process_offset_table[i]);
+		struct ipu_fw_generic_program_manifest pm;
+		int retval;
+
+		if (!process)
+			break;
+
+		retval = ipu_fw_psys_get_program_manifest_by_process
+		    (&pm, pg_manifest, process);
+		if (retval < 0) {
+			dev_err(dev, "can not get manifest\n");
+			break;
+		}
+		if ((pm.cell_id != res_defs->num_cells &&
+		     pm.cell_type_id == res_defs->num_cells_type))
+			continue;
+		/* No return value check here: if finding a free cell failed,
+		 * the process cell was never set, so clearing it returns
+		 * non-zero.
+		 */
+		ipu_fw_psys_clear_process_cell(process);
+	}
+	dev_err(dev, "failed to allocate resources, ret %d\n", ret);
+	ipu_psys_free_resources(alloc, pool);
+	return ret;
+}
+
+int ipu_psys_move_resources(const struct device *dev,
+			    struct ipu_psys_resource_alloc *alloc,
+			    struct ipu_psys_resource_pool
+			    *source_pool, struct ipu_psys_resource_pool
+			    *target_pool)
+{
+	int i;
+
+	if (target_pool->cells & alloc->cells) {
+		dev_dbg(dev, "out of cell resources\n");
+		return -ENOSPC;
+	}
+
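+	/* First pass: verify that every allocation fits into the target
+	 * pool without modifying any state.
+	 */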
+	for (i = 0; i < alloc->resources; i++) {
+		unsigned long bitmap = 0;
+		unsigned int id = alloc->resource_alloc[i].resource->id;
+		unsigned long fbit, end;
+
+		switch (alloc->resource_alloc[i].type) {
+		case IPU_RESOURCE_DEV_CHN:
+			bitmap_set(&bitmap, alloc->resource_alloc[i].pos,
+				   alloc->resource_alloc[i].elements);
+			if (*target_pool->dev_channels[id].bitmap & bitmap)
+				return -ENOSPC;
+			break;
+		case IPU_RESOURCE_EXT_MEM:
+			end = alloc->resource_alloc[i].elements +
+			    alloc->resource_alloc[i].pos;
+
+			fbit = find_next_bit(target_pool->ext_memory[id].bitmap,
+					     end, alloc->resource_alloc[i].pos);
+			/* find_next_bit() returning "end" means no set bit */
+			if (end != fbit)
+				return -ENOSPC;
+			break;
+		case IPU_RESOURCE_DFM:
+			bitmap = alloc->resource_alloc[i].elements;
+			if (*target_pool->dfms[id].bitmap & bitmap)
+				return -ENOSPC;
+			break;
+		default:
+			dev_err(dev, "Illegal resource type\n");
+			return -EINVAL;
+		}
+	}
+
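+	/* Second pass: commit - set the bits in the target pool and
+	 * release them from the source allocations.
+	 */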
+	for (i = 0; i < alloc->resources; i++) {
+		u32 id = alloc->resource_alloc[i].resource->id;
+
+		switch (alloc->resource_alloc[i].type) {
+		case IPU_RESOURCE_DEV_CHN:
+			bitmap_set(target_pool->dev_channels[id].bitmap,
+				   alloc->resource_alloc[i].pos,
+				   alloc->resource_alloc[i].elements);
+			ipu_resource_free(&alloc->resource_alloc[i]);
+			alloc->resource_alloc[i].resource =
+			    &target_pool->dev_channels[id];
+			break;
+		case IPU_RESOURCE_EXT_MEM:
+			bitmap_set(target_pool->ext_memory[id].bitmap,
+				   alloc->resource_alloc[i].pos,
+				   alloc->resource_alloc[i].elements);
+			ipu_resource_free(&alloc->resource_alloc[i]);
+			alloc->resource_alloc[i].resource =
+			    &target_pool->ext_memory[id];
+			break;
+		case IPU_RESOURCE_DFM:
+			*target_pool->dfms[id].bitmap |=
+			    alloc->resource_alloc[i].elements;
+			*alloc->resource_alloc[i].resource->bitmap &=
+			    ~(alloc->resource_alloc[i].elements);
+			alloc->resource_alloc[i].resource =
+			    &target_pool->dfms[id];
+			break;
+		default:
+			/*
+			 * Keep the compiler happy; this case already failed
+			 * in the loop above.
+			 */
+			break;
+		}
+	}
+
+	target_pool->cells |= alloc->cells;
+	source_pool->cells &= ~alloc->cells;
+
+	return 0;
+}
+
+/* Free resources marked in `alloc' from `resources' */
+void ipu_psys_free_resources(struct ipu_psys_resource_alloc
+			     *alloc, struct ipu_psys_resource_pool *pool)
+{
+	unsigned int i;
+
+	pool->cells &= ~alloc->cells;
+	alloc->cells = 0;
+	for (i = 0; i < alloc->resources; i++)
+		ipu_resource_free(&alloc->resource_alloc[i]);
+	alloc->resources = 0;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-resources.c b/drivers/media/pci/intel/ipu6/ipu6-fw-resources.c
new file mode 100644
index 000000000000..4ac195f2f95d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-fw-resources.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2020 Intel Corporation
+
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include "ipu-psys.h"
+#include "ipu-fw-psys.h"
+#include "ipu6-platform-resources.h"
+
+/* resources table */
+
+/*
+ * Cell types by cell IDs
+ */
+const u8 ipu6_fw_psys_cell_types[IPU6_FW_PSYS_N_CELL_ID] = {
+	IPU6_FW_PSYS_SP_CTRL_TYPE_ID,
+	IPU6_FW_PSYS_VP_TYPE_ID,
+	IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* ICA_MEDIUM */
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */
+	IPU6_FW_PSYS_GDC_TYPE_ID,
+	IPU6_FW_PSYS_TNR_TYPE_ID,
+};
+
+const u16 ipu6_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = {
+	IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE,
+	IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE,
+	IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE,
+	IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE,
+	IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE,
+};
+
+const u16 ipu6_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = {
+	IPU6_FW_PSYS_VMEM0_MAX_SIZE,
+	IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE,
+	IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE,
+	IPU6_FW_PSYS_LB_VMEM_MAX_SIZE,
+	IPU6_FW_PSYS_BAMEM0_MAX_SIZE,
+	IPU6_FW_PSYS_DMEM0_MAX_SIZE,
+	IPU6_FW_PSYS_DMEM1_MAX_SIZE,
+	IPU6_FW_PSYS_DMEM2_MAX_SIZE,
+	IPU6_FW_PSYS_DMEM3_MAX_SIZE,
+	IPU6_FW_PSYS_PMEM0_MAX_SIZE
+};
+
+const u16 ipu6_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = {
+	IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE,
+	IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE,
+	IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE,
+	IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE,
+	IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE,
+	IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE,
+};
+
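+/*
+ * Per-cell memory map: one row per cell, one column per memory type;
+ * IPU6_FW_PSYS_N_MEM_ID marks a memory the cell cannot use.
+ */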
+const u8
+ipu6_fw_psys_c_mem[IPU6_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = {
+	{
+		/* IPU6_FW_PSYS_SP0_ID */
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_DMEM0_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_SP1_ID */
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_DMEM1_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_VP0_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_DMEM3_ID,
+		IPU6_FW_PSYS_VMEM0_ID,
+		IPU6_FW_PSYS_BAMEM0_ID,
+		IPU6_FW_PSYS_PMEM0_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC1_ID BNLM */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC2_ID DM */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC3_ID ACM */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC5_ID OFS pin main */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC6_ID OFS pin display */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC7_ID OFS pin pp */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC9_ID GLTM */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ACC10_ID XNR */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_ICA_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_LSC_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_DPC_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_SIS_A_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_SIS_B_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_B2B_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_R2I_DS_B_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_AWB_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_AE_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_AF_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_DOL_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_X2B_MD_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_ISA_PAF_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_LB_VMEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_BB_ACC_GDC0_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	},
+	{
+		/* IPU6_FW_PSYS_BB_ACC_TNR_ID */
+		IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+		IPU6_FW_PSYS_N_MEM_ID,
+	}
+};
+
+static const struct ipu_fw_resource_definitions ipu6_defs = {
+	.cells = ipu6_fw_psys_cell_types,
+	.num_cells = IPU6_FW_PSYS_N_CELL_ID,
+	.num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID,
+
+	.dev_channels = ipu6_fw_num_dev_channels,
+	.num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID,
+
+	.num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID,
+	.num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID,
+	.ext_mem_ids = ipu6_fw_psys_mem_size,
+
+	.num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID,
+
+	.dfms = ipu6_fw_psys_dfms,
+
+	.cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID,
+	.cell_mem = &ipu6_fw_psys_c_mem[0][0],
+};
+
+const struct ipu_fw_resource_definitions *ipu6_res_defs = &ipu6_defs;
+
+/********** Generic resource handling **********/
+
+/*
+ * The extension library gives byte offsets to its internal structures;
+ * use those offsets to update fields. Without the extension library,
+ * access the structures directly.
+ */
+int ipu6_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index,
+				     u8 value)
+{
+	struct ipu_fw_psys_process_group *parent =
+		(struct ipu_fw_psys_process_group *)((char *)ptr +
+						      ptr->parent_offset);
+
+	ptr->cells[index] = value;
+	parent->resource_bitmap |= 1 << value;
+
+	return 0;
+}
+
+u8 ipu6_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index)
+{
+	return ptr->cells[index];
+}
+
+int ipu6_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr)
+{
+	struct ipu_fw_psys_process_group *parent;
+	u8 cell_id = ipu6_fw_psys_get_process_cell_id(ptr, 0);
+	int retval = -1;
+
+	parent = (struct ipu_fw_psys_process_group *)((char *)ptr +
+						       ptr->parent_offset);
+	if ((1 << cell_id) != 0 &&
+	    ((1 << cell_id) & parent->resource_bitmap)) {
+		ipu6_fw_psys_set_process_cell_id(ptr, 0,
+						 IPU6_FW_PSYS_N_CELL_ID);
+		parent->resource_bitmap &= ~(1 << cell_id);
+		retval = 0;
+	}
+
+	return retval;
+}
+
+int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+				  u16 value)
+{
+	struct ipu6_fw_psys_process_ext *pm_ext;
+	u8 ps_ext_offset;
+
+	ps_ext_offset = ptr->process_extension_offset;
+	if (!ps_ext_offset)
+		return -EINVAL;
+
+	pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset);
+
+	pm_ext->dev_chn_offset[offset] = value;
+
+	return 0;
+}
+
+int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+				     u16 id, u32 bitmap,
+				     u32 active_bitmap)
+{
+	struct ipu6_fw_psys_process_ext *pm_ext;
+	u8 ps_ext_offset;
+
+	ps_ext_offset = ptr->process_extension_offset;
+	if (!ps_ext_offset)
+		return -EINVAL;
+
+	pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset);
+
+	pm_ext->dfm_port_bitmap[id] = bitmap;
+	pm_ext->dfm_active_port_bitmap[id] = active_bitmap;
+
+	return 0;
+}
+
+int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+				     u16 type_id, u16 mem_id, u16 offset)
+{
+	struct ipu6_fw_psys_process_ext *pm_ext;
+	u8 ps_ext_offset;
+
+	ps_ext_offset = ptr->process_extension_offset;
+	if (!ps_ext_offset)
+		return -EINVAL;
+
+	pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset);
+
+	pm_ext->ext_mem_offset[type_id] = offset;
+	pm_ext->ext_mem_id[type_id] = mem_id;
+
+	return 0;
+}
+
+static struct ipu_fw_psys_program_manifest *
+get_program_manifest(const struct ipu_fw_psys_program_group_manifest *manifest,
+		     const unsigned int program_index)
+{
+	struct ipu_fw_psys_program_manifest *prg_manifest_base;
+	u8 *program_manifest = NULL;
+	u8 program_count;
+	unsigned int i;
+
+	program_count = manifest->program_count;
+
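+	/* Program manifests are variable-sized; walk them entry by entry
+	 * using each entry's own size field.
+	 */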
+	prg_manifest_base = (struct ipu_fw_psys_program_manifest *)
+		((char *)manifest + manifest->program_manifest_offset);
+	if (program_index < program_count) {
+		program_manifest = (u8 *)prg_manifest_base;
+		for (i = 0; i < program_index; i++)
+			program_manifest +=
+				((struct ipu_fw_psys_program_manifest *)
+				 program_manifest)->size;
+	}
+
+	return (struct ipu_fw_psys_program_manifest *)program_manifest;
+}
+
+int ipu6_fw_psys_get_program_manifest_by_process(
+	struct ipu_fw_generic_program_manifest *gen_pm,
+	const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+	struct ipu_fw_psys_process *process)
+{
+	u32 program_id = process->program_idx;
+	struct ipu_fw_psys_program_manifest *pm;
+	struct ipu6_fw_psys_program_manifest_ext *pm_ext;
+
+	pm = get_program_manifest(pg_manifest, program_id);
+
+	if (!pm)
+		return -ENOENT;
+
+	if (pm->program_extension_offset) {
+		pm_ext = (struct ipu6_fw_psys_program_manifest_ext *)
+			((u8 *)pm + pm->program_extension_offset);
+
+		gen_pm->dev_chn_size = pm_ext->dev_chn_size;
+		gen_pm->dev_chn_offset = pm_ext->dev_chn_offset;
+		gen_pm->ext_mem_size = pm_ext->ext_mem_size;
+		gen_pm->ext_mem_offset = (u16 *)pm_ext->ext_mem_offset;
+		gen_pm->is_dfm_relocatable = pm_ext->is_dfm_relocatable;
+		gen_pm->dfm_port_bitmap = pm_ext->dfm_port_bitmap;
+		gen_pm->dfm_active_port_bitmap =
+			pm_ext->dfm_active_port_bitmap;
+	}
+
+	memcpy(gen_pm->cells, pm->cells, sizeof(pm->cells));
+	gen_pm->cell_id = pm->cells[0];
+	gen_pm->cell_type_id = pm->cell_type_id;
+
+	return 0;
+}
+
+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys,
+			  struct ipu_psys_kcmd *kcmd, const char *note)
+{
+	struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg;
+	u32 pgid = pg->ID;
+	u8 processes = pg->process_count;
+	u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset);
+	unsigned int p, chn, mem, mem_id;
+	int cell;
+
+	dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes:\n",
+		__func__, note, pgid, processes);
+
+	for (p = 0; p < processes; p++) {
+		struct ipu_fw_psys_process *process =
+		    (struct ipu_fw_psys_process *)
+		    ((char *)pg + process_offset_table[p]);
+		struct ipu6_fw_psys_process_ext *pm_ext =
+		    (struct ipu6_fw_psys_process_ext *)((u8 *)process
+		    + process->process_extension_offset);
+		cell = process->cells[0];
+		dev_dbg(&psys->adev->dev, "\t process %i size=%u",
+			p, process->size);
+		if (!process->process_extension_offset)
+			continue;
+
+		for (mem = 0; mem < IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID;
+		    mem++) {
+			mem_id = pm_ext->ext_mem_id[mem];
+			if (mem_id != IPU6_FW_PSYS_N_MEM_ID)
+				dev_dbg(&psys->adev->dev,
+					"\t mem type %u id %d offset=0x%x",
+					mem, mem_id,
+					pm_ext->ext_mem_offset[mem]);
+		}
+		for (chn = 0; chn < IPU6_FW_PSYS_N_DEV_CHN_ID; chn++) {
+			if (pm_ext->dev_chn_offset[chn] != (u16)(-1))
+				dev_dbg(&psys->adev->dev,
+					"\t dev_chn[%u]=0x%x\n",
+					chn, pm_ext->dev_chn_offset[chn]);
+		}
+	}
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
new file mode 100644
index 000000000000..1f917721b70e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <media/ipu-isys.h>
+#include "ipu.h"
+#include "ipu-buttress.h"
+#include "ipu-isys.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-isys-csi2-reg.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-phy.h"
+#include "ipu-isys-csi2.h"
+
+struct ipu6_csi2_error {
+	const char *error_string;
+	bool is_info_only;
+};
+
+struct ipu6_csi_irq_info_map {
+	u32 irq_error_mask;
+	u32 irq_num;
+	unsigned int irq_base;
+	unsigned int irq_base_ctrl2;
+	struct ipu6_csi2_error *errors;
+};
+
+/*
+ * Strings describing the CSI-2 receiver errors below; the
+ * corresponding bit macros are defined in the header file.
+ */
+static struct ipu6_csi2_error dphy_rx_errors[] = {
+	{"Single packet header error corrected", true},
+	{"Multiple packet header errors detected", true},
+	{"Payload checksum (CRC) error", true},
+	{"Transfer FIFO overflow", false},
+	{"Reserved short packet data type detected", true},
+	{"Reserved long packet data type detected", true},
+	{"Incomplete long packet detected", false},
+	{"Frame sync error", false},
+	{"Line sync error", false},
+	{"DPHY recoverable synchronization error", true},
+	{"DPHY fatal error", false},
+	{"DPHY elastic FIFO overflow", false},
+	{"Inter-frame short packet discarded", true},
+	{"Inter-frame long packet discarded", true},
+	{"MIPI pktgen overflow", false},
+	{"MIPI pktgen data loss", false},
+	{"FIFO overflow", false},
+	{"Lane deskew", false},
+	{"SOT sync error", false},
+	{"HSIDLE detected", false}
+};
+
+static refcount_t phy_power_ref_count[IPU_ISYS_CSI_PHY_NUM];
+
+static int ipu6_csi2_phy_power_set(struct ipu_isys *isys,
+				   struct ipu_isys_csi2_config *cfg, bool on)
+{
+	int ret = 0;
+	unsigned int port, phy_id;
+	refcount_t *ref;
+	void __iomem *isys_base = isys->pdata->base;
+	unsigned int nr;
+
+	port = cfg->port;
+	phy_id = port / 4;
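+	/* four CSI ports share one PHY; power is refcounted per PHY */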
+	ref = &phy_power_ref_count[phy_id];
+	dev_dbg(&isys->adev->dev, "for phy %d port %d, lanes: %d\n",
+		phy_id, port, cfg->nlanes);
+
+	nr = (ipu_ver == IPU_VER_6) ? IPU6_ISYS_CSI_PORT_NUM :
+		IPU6SE_ISYS_CSI_PORT_NUM;
+
+	if (!isys_base || port >= nr) {
+		dev_warn(&isys->adev->dev, "invalid port ID %d\n", port);
+		return -EINVAL;
+	}
+
+	if (on) {
+		if (refcount_read(ref)) {
+			/* already up */
+			dev_warn(&isys->adev->dev, "phy %d is already UP\n",
+				 phy_id);
+			refcount_inc(ref);
+			return 0;
+		}
+
+		ret = ipu6_isys_phy_powerup_ack(isys, phy_id);
+		if (ret)
+			return ret;
+
+		dev_dbg(&isys->adev->dev, "phy reset assert\n");
+		ipu6_isys_phy_reset(isys, phy_id, 0);
+		dev_dbg(&isys->adev->dev, "phy common init\n");
+		ipu6_isys_phy_common_init(isys);
+
+		dev_dbg(&isys->adev->dev, "phy config\n");
+		ret = ipu6_isys_phy_config(isys);
+		if (ret)
+			return ret;
+
+		dev_dbg(&isys->adev->dev, "phy reset de-assert\n");
+		ipu6_isys_phy_reset(isys, phy_id, 1);
+		dev_dbg(&isys->adev->dev, "phy check ready\n");
+		ret = ipu6_isys_phy_ready(isys, phy_id);
+		if (ret)
+			return ret;
+
+		dev_dbg(&isys->adev->dev, "phy is ready now\n");
+		refcount_set(ref, 1);
+		return 0;
+	}
+
+	/* power off process */
+	if (refcount_dec_and_test(ref))
+		ret = ipu6_isys_phy_powerdown_ack(isys, phy_id);
+	if (ret)
+		dev_err(&isys->adev->dev, "phy poweroff failed!\n");
+
+	return ret;
+}
+
+static void ipu6_isys_register_errors(struct ipu_isys_csi2 *csi2)
+{
+	u32 mask = 0;
+	u32 irq = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+			CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+
+	mask = (ipu_ver == IPU_VER_6) ? IPU6_CSI_RX_ERROR_IRQ_MASK :
+		IPU6SE_CSI_RX_ERROR_IRQ_MASK;
+
+	writel(irq & mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+	csi2->receiver_errors |= irq & mask;
+}
+
+void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2)
+{
+	struct ipu6_csi2_error *errors;
+	u32 status;
+	unsigned int i;
+
+	/* Register errors once more in case error interrupts are disabled */
+	ipu6_isys_register_errors(csi2);
+	status = csi2->receiver_errors;
+	csi2->receiver_errors = 0;
+	errors = dphy_rx_errors;
+
+	for (i = 0; i < CSI_RX_NUM_ERRORS_IN_IRQ; i++) {
+		if (status & BIT(i))
+			dev_err_ratelimited(&csi2->isys->adev->dev,
+					    "csi2-%i error: %s\n",
+					    csi2->index,
+					    errors[i].error_string);
+	}
+}
+
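+/*
+ * Receiver port configurations; only the last column (the port cfg
+ * value) is programmed, by ipu_isys_csi2_set_port_cfg().
+ */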
+const unsigned int csi2_port_cfg[][3] = {
+	{0, 0, 0x1f}, /* no link */
+	{4, 0, 0x10}, /* x4 + x4 config */
+	{2, 0, 0x12}, /* x2 + x2 config */
+	{1, 0, 0x13}, /* x1 + x1 config */
+	{2, 1, 0x15}, /* x2x1 + x2x1 config */
+	{1, 1, 0x16}, /* x1x1 + x1x1 config */
+	{2, 2, 0x18}, /* x2x2 + x2x2 config */
+	{1, 2, 0x19}, /* x1x2 + x1x2 config */
+};
+
+const unsigned int phy_port_cfg[][4] = {
+	/* port, nlanes, bbindex, portcfg */
+	/* sip0 */
+	{0, 1, 0, 0x15},
+	{0, 2, 0, 0x15},
+	{0, 4, 0, 0x15},
+	{0, 4, 2, 0x22},
+	/* sip1 */
+	{2, 1, 4, 0x15},
+	{2, 2, 4, 0x15},
+	{2, 4, 4, 0x15},
+	{2, 4, 6, 0x22},
+};
+
+static int ipu_isys_csi2_phy_config_by_port(struct ipu_isys *isys,
+					    unsigned int port,
+					    unsigned int nlanes)
+{
+	void __iomem *base = isys->adev->isp->base;
+	u32 val, reg, i;
+	unsigned int bbnum;
+
+	dev_dbg(&isys->adev->dev, "%s port %u with %u lanes", __func__,
+		port, nlanes);
+
+	/* hard-coded for x2x2 + x2x2 at <1.5 Gbps */
+	for (i = 0; i < IPU6SE_ISYS_PHY_BB_NUM; i++) {
+		/* cphy_dll_ovrd.crcdc_fsm_dlane0 = 13 */
+		reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i);
+		val = readl(base + reg);
+		val |= 13 << 1;
+		/* val &= ~0x1; */
+		writel(val, base + reg);
+
+		/* cphy_rx_control1.en_crc1 = 1 */
+		reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_RX_CONTROL1(i);
+		val = readl(base + reg);
+		val |= 0x1 << 31;
+		writel(val, base + reg);
+
+		/* dphy_cfg.reserved = 1
+		 * dphy_cfg.lden_from_dll_ovrd_0 = 1
+		 */
+		reg = IPU6SE_ISYS_PHY_0_BASE + PHY_DPHY_CFG(i);
+		val = readl(base + reg);
+		val |= 0x1 << 25;
+		val |= 0x1 << 26;
+		writel(val, base + reg);
+
+		/* cphy_dll_ovrd.lden_crcdc_fsm_dlane0 = 1 */
+		reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i);
+		val = readl(base + reg);
+		val |= 1;
+		writel(val, base + reg);
+	}
+
+	/* bb afe config, use minimal channel loss */
+	for (i = 0; i < ARRAY_SIZE(phy_port_cfg); i++) {
+		if (phy_port_cfg[i][0] == port &&
+		    phy_port_cfg[i][1] == nlanes) {
+			bbnum = phy_port_cfg[i][2] / 2;
+			reg = IPU6SE_ISYS_PHY_0_BASE + PHY_BB_AFE_CONFIG(bbnum);
+			val = readl(base + reg);
+			val |= phy_port_cfg[i][3];
+			writel(val, base + reg);
+		}
+	}
+
+	return 0;
+}
+
+static void ipu_isys_csi2_rx_control(struct ipu_isys *isys)
+{
+	void __iomem *base = isys->adev->isp->base;
+	u32 val, reg;
+
+	/* lp11 release */
+	reg = CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL;
+	val = readl(base + reg);
+	val |= 0x1;
+	writel(0x1, base + CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL);
+
+	reg = CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL;
+	val = readl(base + reg);
+	val |= 0x1;
+	writel(0x1, base + CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL);
+
+	reg = CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL;
+	val = readl(base + reg);
+	val |= 0x1;
+	writel(0x1, base + CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL);
+
+	reg = CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL;
+	val = readl(base + reg);
+	val |= 0x1;
+	writel(0x1, base + CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL);
+}
+
+static int ipu_isys_csi2_set_port_cfg(struct v4l2_subdev *sd, unsigned int port,
+				      unsigned int nlanes)
+{
+	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+	struct ipu_isys *isys = csi2->isys;
+	unsigned int sip = port / 2;
+	unsigned int index;
+
+	switch (nlanes) {
+	case 1:
+		index = 5;
+		break;
+	case 2:
+		index = 6;
+		break;
+	case 4:
+		index = 1;
+		break;
+	default:
+		dev_err(&isys->adev->dev, "unsupported number of lanes %u\n",
+			nlanes);
+		return -EINVAL;
+	}
+
+	dev_dbg(&isys->adev->dev, "port config for port %u with %u lanes\n",
+		port, nlanes);
+	writel(csi2_port_cfg[index][2],
+	       isys->pdata->base + CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip));
+
+	return 0;
+}
+
+static void ipu_isys_csi2_set_timing(struct v4l2_subdev *sd,
+				     struct ipu_isys_csi2_timing timing,
+				     unsigned int port,
+				     unsigned int nlanes)
+{
+	u32 port_base;
+	void __iomem *reg;
+	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+	struct ipu_isys *isys = csi2->isys;
+	unsigned int i;
+
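+	/* each SIP pairs two receivers; odd ports use the second base */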
+	port_base = (port % 2) ? CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) :
+		CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port);
+
+	dev_dbg(&isys->adev->dev,
+		"set timing for port %u base 0x%x with %u lanes\n",
+		port, port_base, nlanes);
+
+	reg = isys->pdata->base + port_base;
+	reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE;
+
+	writel(timing.ctermen, reg);
+
+	reg = isys->pdata->base + port_base;
+	reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE;
+	writel(timing.csettle, reg);
+
+	for (i = 0; i < nlanes; i++) {
+		reg = isys->pdata->base + port_base;
+		reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(i);
+		writel(timing.dtermen, reg);
+
+		reg = isys->pdata->base + port_base;
+		reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(i);
+		writel(timing.dsettle, reg);
+	}
+}
+
+int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd,
+			     struct ipu_isys_csi2_timing timing,
+			     unsigned int nlanes, int enable)
+{
+	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+	struct ipu_isys *isys = csi2->isys;
+	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+						    struct ipu_isys_pipeline,
+						    pipe);
+	struct ipu_isys_csi2_config *cfg =
+		v4l2_get_subdev_hostdata(media_entity_to_v4l2_subdev
+					 (ip->external->entity));
+	unsigned int port;
+	int ret;
+	u32 mask = 0;
+
+	port = cfg->port;
+	dev_dbg(&isys->adev->dev, "for port %u\n", port);
+
+	mask = (ipu_ver == IPU_VER_6) ? IPU6_CSI_RX_ERROR_IRQ_MASK :
+		IPU6SE_CSI_RX_ERROR_IRQ_MASK;
+
+	if (!enable) {
+		ipu_isys_csi2_error(csi2);
+		writel(0, csi2->base + CSI_REG_CSI_FE_ENABLE);
+		writel(0, csi2->base + CSI_REG_PPI2CSI_ENABLE);
+
+		/* Disable interrupts */
+		writel(0,
+		       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+		       CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+		writel(mask,
+		       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+		       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+		writel(0,
+		       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+		       CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+		writel(0xffffffff,
+		       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+		       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+
+		/* Disable clock */
+		writel(0, isys->pdata->base +
+		       CSI_REG_HUB_FW_ACCESS_PORT(port));
+		writel(0, isys->pdata->base +
+		       CSI_REG_HUB_DRV_ACCESS_PORT(port));
+
+		if (ipu_ver == IPU_VER_6SE)
+			return 0;
+
+		/* power down */
+		return ipu6_csi2_phy_power_set(isys, cfg, false);
+	}
+
+	if (ipu_ver == IPU_VER_6) {
+		/* Enable DPHY power */
+		ret = ipu6_csi2_phy_power_set(isys, cfg, true);
+		if (ret) {
+			dev_err(&isys->adev->dev,
+				"CSI-%d PHY power up failed %d\n",
+				cfg->port, ret);
+			return ret;
+		}
+	}
+
+	/* reset the CSI port */
+	dev_dbg(&isys->adev->dev, "csi port reset assert\n");
+	writel(0x1, csi2->base + CSI_REG_PORT_GPREG_SRST);
+	usleep_range(100, 200);
+	dev_dbg(&isys->adev->dev, "csi port reset de-assert\n");
+	writel(0x0, csi2->base + CSI_REG_PORT_GPREG_SRST);
+
+	/* Enable port clock */
+	writel(1, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(port));
+	writel(1, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT(port));
+
+	if (ipu_ver == IPU_VER_6SE) {
+		ipu_isys_csi2_phy_config_by_port(isys, port, nlanes);
+
+		/* 9'b00010.1000 for 400 MHz isys frequency */
+		writel(0x28,
+		       isys->pdata->base + CSI2_HUB_GPREG_DPHY_TIMER_INCR);
+		/* set port cfg and rx timing */
+		ipu_isys_csi2_set_timing(sd, timing, port, nlanes);
+
+		ret = ipu_isys_csi2_set_port_cfg(sd, port, nlanes);
+		if (ret)
+			return ret;
+
+		ipu_isys_csi2_rx_control(isys);
+	}
+
+	/* enable all error related irq */
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_MASK_OFFSET);
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET);
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+
+	/* To save CPU wakeups, disable CSI SOF/EOF irq */
+	writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+	writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_MASK_OFFSET);
+	writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+	writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET);
+	writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+
+	/* Configure and enable FE/PPI2CSI */
+	writel(0, csi2->base + CSI_REG_CSI_FE_MODE);
+	writel(CSI_SENSOR_INPUT, csi2->base + CSI_REG_CSI_FE_MUX_CTRL);
+	writel(CSI_CNTR_SENSOR_LINE_ID | CSI_CNTR_SENSOR_FRAME_ID,
+	       csi2->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL);
+	writel(((nlanes - 1) <<
+		PPI_INTF_CONFIG_NOF_ENABLED_DATALANES_SHIFT) |
+	       (0 << PPI_INTF_CONFIG_RX_AUTO_CLKGATING_SHIFT),
+	       csi2->base + CSI_REG_PPI2CSI_CONFIG_PPI_INTF);
+	writel(0x06, csi2->base + CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE);
+	writel(1, csi2->base + CSI_REG_PPI2CSI_ENABLE);
+	writel(1, csi2->base + CSI_REG_CSI_FE_ENABLE);
+
+	if (ipu_ver == IPU_VER_6SE)
+		return 0;
+
+	ipu6_isys_phy_ppi_tinit_done(isys, cfg);
+
+	return 0;
+}
+
+void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2)
+{
+	u32 status;
+
+	ipu6_isys_register_errors(csi2);
+
+	status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+		       CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+
+	writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+
+	if (status & IPU_CSI_RX_IRQ_FS_VC)
+		ipu_isys_csi2_sof_event(csi2);
+	if (status & IPU_CSI_RX_IRQ_FE_VC)
+		ipu_isys_csi2_eof_event(csi2);
+}
+
+unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip,
+					     unsigned int *timestamp)
+{
+	struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip);
+	struct ipu_isys *isys = av->isys;
+	unsigned int field = V4L2_FIELD_TOP;
+
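+	/* The frame-start short packet carries the frame number in its
+	 * word count field; its parity selects the interlaced field.
+	 */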
+	struct ipu_isys_buffer *short_packet_ib =
+		list_last_entry(&ip->short_packet_active,
+				struct ipu_isys_buffer, head);
+	struct ipu_isys_private_buffer *pb =
+		ipu_isys_buffer_to_private_buffer(short_packet_ib);
+	struct ipu_isys_mipi_packet_header *ph =
+		(struct ipu_isys_mipi_packet_header *)
+		pb->buffer;
+
+	/* Check that the first short packet received is a frame start. */
+	if ((ph->dtype & IPU_ISYS_SHORT_PACKET_DTYPE_MASK) != 0)
+		dev_warn(&isys->adev->dev, "First short packet is not SOF.\n");
+	field = (ph->word_count % 2) ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+	dev_dbg(&isys->adev->dev,
+		"Interlaced field ready. frame_num = %d field = %d\n",
+		ph->word_count, field);
+
+	return field;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
new file mode 100644
index 000000000000..9db3ef6f8869
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU6_ISYS_CSI2_H
+#define IPU6_ISYS_CSI2_H
+
+struct ipu_isys_csi2_timing;
+struct ipu_isys_csi2;
+struct ipu_isys_pipeline;
+struct v4l2_subdev;
+
+#define IPU_ISYS_SHORT_PACKET_DTYPE_MASK	0x3f
+
+#endif /* IPU6_ISYS_CSI2_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-gpc.c b/drivers/media/pci/intel/ipu6/ipu6-isys-gpc.c
new file mode 100644
index 000000000000..227a5ba9ed03
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-gpc.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "ipu-isys.h"
+#include "ipu-platform-regs.h"
+
+#define IPU_ISYS_GPC_NUM		16
+
+#ifndef CONFIG_PM
+#define pm_runtime_get_sync(d)		0
+#define pm_runtime_put(d)		0
+#endif
+
+struct ipu_isys_gpc {
+	bool enable;
+	unsigned int route;
+	unsigned int source;
+	unsigned int sense;
+	unsigned int gpcindex;
+	void *prit;
+};
+
+struct ipu_isys_gpcs {
+	bool gpc_enable;
+	struct ipu_isys_gpc gpc[IPU_ISYS_GPC_NUM];
+	void *prit;
+};
+
+static int ipu6_isys_gpc_global_enable_get(void *data, u64 *val)
+{
+	struct ipu_isys_gpcs *isys_gpcs = data;
+	struct ipu_isys *isys = isys_gpcs->prit;
+
+	mutex_lock(&isys->mutex);
+
+	*val = isys_gpcs->gpc_enable;
+
+	mutex_unlock(&isys->mutex);
+	return 0;
+}
+
+static int ipu6_isys_gpc_global_enable_set(void *data, u64 val)
+{
+	struct ipu_isys_gpcs *isys_gpcs = data;
+	struct ipu_isys *isys = isys_gpcs->prit;
+	void __iomem *base;
+	int i, ret;
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	if (!isys || !isys->pdata || !isys->pdata->base)
+		return -EINVAL;
+
+	mutex_lock(&isys->mutex);
+
+	base = isys->pdata->base + IPU_ISYS_GPC_BASE;
+
+	ret = pm_runtime_get_sync(&isys->adev->dev);
+	if (ret < 0) {
+		pm_runtime_put(&isys->adev->dev);
+		mutex_unlock(&isys->mutex);
+		return ret;
+	}
+
+	if (!val) {
+		writel(0x0, base + IPU_ISYS_GPREG_TRACE_TIMER_RST);
+		writel(0x0, base + IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE);
+		writel(0xffff, base + IPU_ISF_CDC_MMU_GPC_SOFT_RESET);
+		isys_gpcs->gpc_enable = false;
+		for (i = 0; i < IPU_ISYS_GPC_NUM; i++) {
+			isys_gpcs->gpc[i].enable = 0;
+			isys_gpcs->gpc[i].sense = 0;
+			isys_gpcs->gpc[i].route = 0;
+			isys_gpcs->gpc[i].source = 0;
+		}
+		pm_runtime_mark_last_busy(&isys->adev->dev);
+		pm_runtime_put_autosuspend(&isys->adev->dev);
+	} else {
+		/*
+		 * Set the gpc registers and start all gpcs here.
+		 * Reset the free-running local timer.
+		 */
+		writel(0x0, base + IPU_ISYS_GPREG_TRACE_TIMER_RST);
+		writel(0x1, base + IPU_ISYS_GPREG_TRACE_TIMER_RST);
+
+		for (i = 0; i < IPU_ISYS_GPC_NUM; i++) {
+			/* Enable */
+			writel(isys_gpcs->gpc[i].enable,
+			       base + IPU_ISF_CDC_MMU_GPC_ENABLE0 + 4 * i);
+			/* Setting (route/source/sense) */
+			writel((isys_gpcs->gpc[i].sense
+					<< IPU_GPC_SENSE_OFFSET)
+				+ (isys_gpcs->gpc[i].route
+					<< IPU_GPC_ROUTE_OFFSET)
+				+ (isys_gpcs->gpc[i].source
+					<< IPU_GPC_SOURCE_OFFSET),
+				base + IPU_ISF_CDC_MMU_GPC_CNT_SEL0 + 4 * i);
+		}
+
+		/* Soft reset and Overall Enable. */
+		writel(0x0, base + IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE);
+		writel(0xffff, base + IPU_ISF_CDC_MMU_GPC_SOFT_RESET);
+		writel(0x1, base + IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE);
+
+		isys_gpcs->gpc_enable = true;
+	}
+
+	mutex_unlock(&isys->mutex);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(isys_gpc_globe_enable_fops,
+			ipu6_isys_gpc_global_enable_get,
+			ipu6_isys_gpc_global_enable_set, "%llu\n");
+
+static int ipu6_isys_gpc_count_get(void *data, u64 *val)
+{
+	struct ipu_isys_gpc *isys_gpc = data;
+	struct ipu_isys *isys = isys_gpc->prit;
+	void __iomem *base;
+
+	if (!isys || !isys->pdata || !isys->pdata->base)
+		return -EINVAL;
+
+	spin_lock(&isys->power_lock);
+	if (isys->power) {
+		base = isys->pdata->base + IPU_ISYS_GPC_BASE;
+		*val = readl(base + IPU_ISF_CDC_MMU_GPC_VALUE0
+				 + 4 * isys_gpc->gpcindex);
+	} else {
+		*val = 0;
+	}
+	spin_unlock(&isys->power_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(isys_gpc_count_fops, ipu6_isys_gpc_count_get,
+			NULL, "%llu\n");
+
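+/*
+ * debugfs layout under "gpcs": a global "enable" switch plus one gpcN
+ * directory per counter with enable/source/route/sense controls and a
+ * read-only count.
+ */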
+int ipu_isys_gpc_init_debugfs(struct ipu_isys *isys)
+{
+	struct dentry *gpcdir;
+	struct dentry *dir;
+	struct dentry *file;
+	int i;
+	char gpcname[10];
+	struct ipu_isys_gpcs *isys_gpcs;
+
+	isys_gpcs = devm_kzalloc(&isys->adev->dev, sizeof(*isys_gpcs),
+				 GFP_KERNEL);
+	if (!isys_gpcs)
+		return -ENOMEM;
+
+	gpcdir = debugfs_create_dir("gpcs", isys->debugfsdir);
+	if (IS_ERR(gpcdir))
+		return -ENOMEM;
+
+	isys_gpcs->prit = isys;
+	file = debugfs_create_file("enable", 0600, gpcdir, isys_gpcs,
+				   &isys_gpc_globe_enable_fops);
+	if (IS_ERR(file))
+		goto err;
+
+	for (i = 0; i < IPU_ISYS_GPC_NUM; i++) {
+		sprintf(gpcname, "gpc%d", i);
+		dir = debugfs_create_dir(gpcname, gpcdir);
+		if (IS_ERR(dir))
+			goto err;
+
+		file = debugfs_create_bool("enable", 0600, dir,
+					   &isys_gpcs->gpc[i].enable);
+		if (IS_ERR(file))
+			goto err;
+
+		file = debugfs_create_u32("source", 0600, dir,
+					  &isys_gpcs->gpc[i].source);
+		if (IS_ERR(file))
+			goto err;
+
+		file = debugfs_create_u32("route", 0600, dir,
+					  &isys_gpcs->gpc[i].route);
+		if (IS_ERR(file))
+			goto err;
+
+		file = debugfs_create_u32("sense", 0600, dir,
+					  &isys_gpcs->gpc[i].sense);
+		if (IS_ERR(file))
+			goto err;
+
+		isys_gpcs->gpc[i].gpcindex = i;
+		isys_gpcs->gpc[i].prit = isys;
+		file = debugfs_create_file("count", 0400, dir,
+					   &isys_gpcs->gpc[i],
+					   &isys_gpc_count_fops);
+		if (IS_ERR(file))
+			goto err;
+	}
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(gpcdir);
+	return -ENOMEM;
+}
+#endif
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-phy.c b/drivers/media/pci/intel/ipu6/ipu6-isys-phy.c
new file mode 100644
index 000000000000..6f4561ebb89f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-phy.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 - 2020 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <media/ipu-isys.h>
+#include <media/v4l2-device.h>
+#include "ipu.h"
+#include "ipu-buttress.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-isys-csi2-reg.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-phy.h"
+
+#define LOOP (2000)
+
+#define PHY_REG_INIT_CTL	     0x00000694
+#define PHY_REG_INIT_CTL_PORT_OFFSET 0x00000600
+
+struct phy_reg {
+	u32 reg;
+	u32 val;
+};
+
+struct phy_reg common_init_regs[] = {
+	/* for TGL-U, use 0x80000000 */
+	{0x00000040, 0x80000000},
+	{0x00000044, 0x00a80880},
+	{0x00000044, 0x00b80880},
+	{0x00000010, 0x0000078c},
+	{0x00000344, 0x2f4401e2},
+	{0x00000544, 0x924401e2},
+	{0x00000744, 0x594401e2},
+	{0x00000944, 0x624401e2},
+	{0x00000b44, 0xfc4401e2},
+	{0x00000d44, 0xc54401e2},
+	{0x00000f44, 0x034401e2},
+	{0x00001144, 0x8f4401e2},
+	{0x00001344, 0x754401e2},
+	{0x00001544, 0xe94401e2},
+	{0x00001744, 0xcb4401e2},
+	{0x00001944, 0xfa4401e2}
+};
+
+struct phy_reg x1_port0_config_regs[] = {
+	{0x00000694, 0xc80060fa},
+	{0x00000680, 0x3d4f78ea},
+	{0x00000690, 0x10a0140b},
+	{0x000006a8, 0xdf04010a},
+	{0x00000700, 0x57050060},
+	{0x00000710, 0x0030001c},
+	{0x00000738, 0x5f004444},
+	{0x0000073c, 0x78464204},
+	{0x00000748, 0x7821f940},
+	{0x0000074c, 0xb2000433},
+	{0x00000494, 0xfe6030fa},
+	{0x00000480, 0x29ef5ed0},
+	{0x00000490, 0x10a0540b},
+	{0x000004a8, 0x7a01010a},
+	{0x00000500, 0xef053460},
+	{0x00000510, 0xe030101c},
+	{0x00000538, 0xdf808444},
+	{0x0000053c, 0xc8422204},
+	{0x00000540, 0x0180088c},
+	{0x00000574, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x1_port1_config_regs[] = {
+	{0x00000c94, 0xc80060fa},
+	{0x00000c80, 0xcf47abea},
+	{0x00000c90, 0x10a0840b},
+	{0x00000ca8, 0xdf04010a},
+	{0x00000d00, 0x57050060},
+	{0x00000d10, 0x0030001c},
+	{0x00000d38, 0x5f004444},
+	{0x00000d3c, 0x78464204},
+	{0x00000d48, 0x7821f940},
+	{0x00000d4c, 0xb2000433},
+	{0x00000a94, 0xc91030fa},
+	{0x00000a80, 0x5a166ed0},
+	{0x00000a90, 0x10a0540b},
+	{0x00000aa8, 0x5d060100},
+	{0x00000b00, 0xef053460},
+	{0x00000b10, 0xa030101c},
+	{0x00000b38, 0xdf808444},
+	{0x00000b3c, 0xc8422204},
+	{0x00000b40, 0x0180088c},
+	{0x00000b74, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x1_port2_config_regs[] = {
+	{0x00001294, 0x28f000fa},
+	{0x00001280, 0x08130cea},
+	{0x00001290, 0x10a0140b},
+	{0x000012a8, 0xd704010a},
+	{0x00001300, 0x8d050060},
+	{0x00001310, 0x0030001c},
+	{0x00001338, 0xdf008444},
+	{0x0000133c, 0x78422204},
+	{0x00001348, 0x7821f940},
+	{0x0000134c, 0x5a000433},
+	{0x00001094, 0x2d20b0fa},
+	{0x00001080, 0xade75dd0},
+	{0x00001090, 0x10a0540b},
+	{0x000010a8, 0xb101010a},
+	{0x00001100, 0x33053460},
+	{0x00001110, 0x0030101c},
+	{0x00001138, 0xdf808444},
+	{0x0000113c, 0xc8422204},
+	{0x00001140, 0x8180088c},
+	{0x00001174, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x1_port3_config_regs[] = {
+	{0x00001894, 0xc80060fa},
+	{0x00001880, 0x0f90fd6a},
+	{0x00001890, 0x10a0840b},
+	{0x000018a8, 0xdf04010a},
+	{0x00001900, 0x57050060},
+	{0x00001910, 0x0030001c},
+	{0x00001938, 0x5f004444},
+	{0x0000193c, 0x78464204},
+	{0x00001948, 0x7821f940},
+	{0x0000194c, 0xb2000433},
+	{0x00001694, 0x3050d0fa},
+	{0x00001680, 0x0ef6d050},
+	{0x00001690, 0x10a0540b},
+	{0x000016a8, 0xe301010a},
+	{0x00001700, 0x69053460},
+	{0x00001710, 0xa030101c},
+	{0x00001738, 0xdf808444},
+	{0x0000173c, 0xc8422204},
+	{0x00001740, 0x0180088c},
+	{0x00001774, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x2_port0_config_regs[] = {
+	{0x00000694, 0xc80060fa},
+	{0x00000680, 0x3d4f78ea},
+	{0x00000690, 0x10a0140b},
+	{0x000006a8, 0xdf04010a},
+	{0x00000700, 0x57050060},
+	{0x00000710, 0x0030001c},
+	{0x00000738, 0x5f004444},
+	{0x0000073c, 0x78464204},
+	{0x00000748, 0x7821f940},
+	{0x0000074c, 0xb2000433},
+	{0x00000494, 0xc80060fa},
+	{0x00000480, 0x29ef5ed8},
+	{0x00000490, 0x10a0540b},
+	{0x000004a8, 0x7a01010a},
+	{0x00000500, 0xef053460},
+	{0x00000510, 0xe030101c},
+	{0x00000538, 0xdf808444},
+	{0x0000053c, 0xc8422204},
+	{0x00000540, 0x0180088c},
+	{0x00000574, 0x00000000},
+	{0x00000294, 0xc80060fa},
+	{0x00000280, 0xcb45b950},
+	{0x00000290, 0x10a0540b},
+	{0x000002a8, 0x8c01010a},
+	{0x00000300, 0xef053460},
+	{0x00000310, 0x8030101c},
+	{0x00000338, 0x41808444},
+	{0x0000033c, 0x32422204},
+	{0x00000340, 0x0180088c},
+	{0x00000374, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x2_port1_config_regs[] = {
+	{0x00000c94, 0xc80060fa},
+	{0x00000c80, 0xcf47abea},
+	{0x00000c90, 0x10a0840b},
+	{0x00000ca8, 0xdf04010a},
+	{0x00000d00, 0x57050060},
+	{0x00000d10, 0x0030001c},
+	{0x00000d38, 0x5f004444},
+	{0x00000d3c, 0x78464204},
+	{0x00000d48, 0x7821f940},
+	{0x00000d4c, 0xb2000433},
+	{0x00000a94, 0xc80060fa},
+	{0x00000a80, 0x5a166ed8},
+	{0x00000a90, 0x10a0540b},
+	{0x00000aa8, 0x7a01010a},
+	{0x00000b00, 0xef053460},
+	{0x00000b10, 0xa030101c},
+	{0x00000b38, 0xdf808444},
+	{0x00000b3c, 0xc8422204},
+	{0x00000b40, 0x0180088c},
+	{0x00000b74, 0x00000000},
+	{0x00000894, 0xc80060fa},
+	{0x00000880, 0x4d4f21d0},
+	{0x00000890, 0x10a0540b},
+	{0x000008a8, 0x5601010a},
+	{0x00000900, 0xef053460},
+	{0x00000910, 0x8030101c},
+	{0x00000938, 0xdf808444},
+	{0x0000093c, 0xc8422204},
+	{0x00000940, 0x0180088c},
+	{0x00000974, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x2_port2_config_regs[] = {
+	{0x00001294, 0xc80060fa},
+	{0x00001280, 0x08130cea},
+	{0x00001290, 0x10a0140b},
+	{0x000012a8, 0xd704010a},
+	{0x00001300, 0x8d050060},
+	{0x00001310, 0x0030001c},
+	{0x00001338, 0xdf008444},
+	{0x0000133c, 0x78422204},
+	{0x00001348, 0x7821f940},
+	{0x0000134c, 0x5a000433},
+	{0x00001094, 0xc80060fa},
+	{0x00001080, 0xade75dd8},
+	{0x00001090, 0x10a0540b},
+	{0x000010a8, 0xb101010a},
+	{0x00001100, 0x33053460},
+	{0x00001110, 0x0030101c},
+	{0x00001138, 0xdf808444},
+	{0x0000113c, 0xc8422204},
+	{0x00001140, 0x8180088c},
+	{0x00001174, 0x00000000},
+	{0x00000e94, 0xc80060fa},
+	{0x00000e80, 0x0fbf16d0},
+	{0x00000e90, 0x10a0540b},
+	{0x00000ea8, 0x7a01010a},
+	{0x00000f00, 0xf5053460},
+	{0x00000f10, 0xc030101c},
+	{0x00000f38, 0xdf808444},
+	{0x00000f3c, 0xc8422204},
+	{0x00000f40, 0x8180088c},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x2_port3_config_regs[] = {
+	{0x00001894, 0xc80060fa},
+	{0x00001880, 0x0f90fd6a},
+	{0x00001890, 0x10a0840b},
+	{0x000018a8, 0xdf04010a},
+	{0x00001900, 0x57050060},
+	{0x00001910, 0x0030001c},
+	{0x00001938, 0x5f004444},
+	{0x0000193c, 0x78464204},
+	{0x00001948, 0x7821f940},
+	{0x0000194c, 0xb2000433},
+	{0x00001694, 0xc80060fa},
+	{0x00001680, 0x0ef6d058},
+	{0x00001690, 0x10a0540b},
+	{0x000016a8, 0x7a01010a},
+	{0x00001700, 0x69053460},
+	{0x00001710, 0xa030101c},
+	{0x00001738, 0xdf808444},
+	{0x0000173c, 0xc8422204},
+	{0x00001740, 0x0180088c},
+	{0x00001774, 0x00000000},
+	{0x00001494, 0xc80060fa},
+	{0x00001480, 0xf9d34bd0},
+	{0x00001490, 0x10a0540b},
+	{0x000014a8, 0x7a01010a},
+	{0x00001500, 0x1b053460},
+	{0x00001510, 0x0030101c},
+	{0x00001538, 0xdf808444},
+	{0x0000153c, 0xc8422204},
+	{0x00001540, 0x8180088c},
+	{0x00001574, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x4_port0_config_regs[] = {
+	{0x00000694, 0xc80060fa},
+	{0x00000680, 0x3d4f78fa},
+	{0x00000690, 0x10a0140b},
+	{0x000006a8, 0xdf04010a},
+	{0x00000700, 0x57050060},
+	{0x00000710, 0x0030001c},
+	{0x00000738, 0x5f004444},
+	{0x0000073c, 0x78464204},
+	{0x00000748, 0x7821f940},
+	{0x0000074c, 0xb2000433},
+	{0x00000494, 0xfe6030fa},
+	{0x00000480, 0x29ef5ed8},
+	{0x00000490, 0x10a0540b},
+	{0x000004a8, 0x7a01010a},
+	{0x00000500, 0xef053460},
+	{0x00000510, 0xe030101c},
+	{0x00000538, 0xdf808444},
+	{0x0000053c, 0xc8422204},
+	{0x00000540, 0x0180088c},
+	{0x00000574, 0x00000004},
+	{0x00000294, 0x23e030fa},
+	{0x00000280, 0xcb45b950},
+	{0x00000290, 0x10a0540b},
+	{0x000002a8, 0x8c01010a},
+	{0x00000300, 0xef053460},
+	{0x00000310, 0x8030101c},
+	{0x00000338, 0x41808444},
+	{0x0000033c, 0x32422204},
+	{0x00000340, 0x0180088c},
+	{0x00000374, 0x00000004},
+	{0x00000894, 0x5620b0fa},
+	{0x00000880, 0x4d4f21dc},
+	{0x00000890, 0x10a0540b},
+	{0x000008a8, 0x5601010a},
+	{0x00000900, 0xef053460},
+	{0x00000910, 0x8030101c},
+	{0x00000938, 0xdf808444},
+	{0x0000093c, 0xc8422204},
+	{0x00000940, 0x0180088c},
+	{0x00000974, 0x00000004},
+	{0x00000a94, 0xc91030fa},
+	{0x00000a80, 0x5a166ecc},
+	{0x00000a90, 0x10a0540b},
+	{0x00000aa8, 0x5d01010a},
+	{0x00000b00, 0xef053460},
+	{0x00000b10, 0xa030101c},
+	{0x00000b38, 0xdf808444},
+	{0x00000b3c, 0xc8422204},
+	{0x00000b40, 0x0180088c},
+	{0x00000b74, 0x00000004},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x4_port1_config_regs[] = {
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x4_port2_config_regs[] = {
+	{0x00001294, 0x28f000fa},
+	{0x00001280, 0x08130cfa},
+	{0x00001290, 0x10c0140b},
+	{0x000012a8, 0xd704010a},
+	{0x00001300, 0x8d050060},
+	{0x00001310, 0x0030001c},
+	{0x00001338, 0xdf008444},
+	{0x0000133c, 0x78422204},
+	{0x00001348, 0x7821f940},
+	{0x0000134c, 0x5a000433},
+	{0x00001094, 0x2d20b0fa},
+	{0x00001080, 0xade75dd8},
+	{0x00001090, 0x10a0540b},
+	{0x000010a8, 0xb101010a},
+	{0x00001100, 0x33053460},
+	{0x00001110, 0x0030101c},
+	{0x00001138, 0xdf808444},
+	{0x0000113c, 0xc8422204},
+	{0x00001140, 0x8180088c},
+	{0x00001174, 0x00000004},
+	{0x00000e94, 0xd308d0fa},
+	{0x00000e80, 0x0fbf16d0},
+	{0x00000e90, 0x10a0540b},
+	{0x00000ea8, 0x2c01010a},
+	{0x00000f00, 0xf5053460},
+	{0x00000f10, 0xc030101c},
+	{0x00000f38, 0xdf808444},
+	{0x00000f3c, 0xc8422204},
+	{0x00000f40, 0x8180088c},
+	{0x00000f74, 0x00000004},
+	{0x00001494, 0x136850fa},
+	{0x00001480, 0xf9d34bdc},
+	{0x00001490, 0x10a0540b},
+	{0x000014a8, 0x5a01010a},
+	{0x00001500, 0x1b053460},
+	{0x00001510, 0x0030101c},
+	{0x00001538, 0xdf808444},
+	{0x0000153c, 0xc8422204},
+	{0x00001540, 0x8180088c},
+	{0x00001574, 0x00000004},
+	{0x00001694, 0x3050d0fa},
+	{0x00001680, 0x0ef6d04c},
+	{0x00001690, 0x10a0540b},
+	{0x000016a8, 0xe301010a},
+	{0x00001700, 0x69053460},
+	{0x00001710, 0xa030101c},
+	{0x00001738, 0xdf808444},
+	{0x0000173c, 0xc8422204},
+	{0x00001740, 0x0180088c},
+	{0x00001774, 0x00000004},
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg x4_port3_config_regs[] = {
+	{0x00000000, 0x00000000}
+};
+
+struct phy_reg *x1_config_regs[4] = {
+	x1_port0_config_regs,
+	x1_port1_config_regs,
+	x1_port2_config_regs,
+	x1_port3_config_regs
+};
+
+struct phy_reg *x2_config_regs[4] = {
+	x2_port0_config_regs,
+	x2_port1_config_regs,
+	x2_port2_config_regs,
+	x2_port3_config_regs
+};
+
+struct phy_reg *x4_config_regs[4] = {
+	x4_port0_config_regs,
+	x4_port1_config_regs,
+	x4_port2_config_regs,
+	x4_port3_config_regs
+};
+
+struct phy_reg **config_regs[3] = {
+	x1_config_regs,
+	x2_config_regs,
+	x4_config_regs,
+};
+
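+/* Enable PHY power and poll the power ACK bit for up to LOOP iterations */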
+int ipu6_isys_phy_powerup_ack(struct ipu_isys *isys, unsigned int phy_id)
+{
+	unsigned int i;
+	u32 val;
+	void __iomem *isys_base = isys->pdata->base;
+
+	val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+	val |= CSI_REG_HUB_GPREG_PHY_CONTROL_PWR_EN;
+	writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+
+	for (i = 0; i < LOOP; i++) {
+		if (readl(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(phy_id)) &
+		    CSI_REG_HUB_GPREG_PHY_STATUS_POWER_ACK)
+			return 0;
+		usleep_range(100, 200);
+	}
+
+	dev_warn(&isys->adev->dev, "PHY%d powerup ack timeout\n", phy_id);
+
+	return -ETIMEDOUT;
+}
+
+int ipu6_isys_phy_powerdown_ack(struct ipu_isys *isys, unsigned int phy_id)
+{
+	unsigned int i;
+	u32 val;
+	void __iomem *isys_base = isys->pdata->base;
+
+	writel(0, isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+	for (i = 0; i < LOOP; i++) {
+		usleep_range(10, 20);
+		val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(phy_id));
+		if (!(val & CSI_REG_HUB_GPREG_PHY_STATUS_POWER_ACK))
+			return 0;
+	}
+
+	dev_warn(&isys->adev->dev, "PHY%d powerdown ack timeout\n", phy_id);
+
+	return -ETIMEDOUT;
+}
+
+int ipu6_isys_phy_reset(struct ipu_isys *isys, unsigned int phy_id,
+			bool assert)
+{
+	void __iomem *isys_base = isys->pdata->base;
+	u32 val;
+
+	val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+	if (assert)
+		val |= CSI_REG_HUB_GPREG_PHY_CONTROL_RESET;
+	else
+		val &= ~(CSI_REG_HUB_GPREG_PHY_CONTROL_RESET);
+
+	writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+
+	return 0;
+}
+
+int ipu6_isys_phy_ready(struct ipu_isys *isys, unsigned int phy_id)
+{
+	unsigned int i;
+	u32 val;
+	void __iomem *isys_base = isys->pdata->base;
+
+	for (i = 0; i < LOOP; i++) {
+		val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(phy_id));
+		dev_dbg(&isys->adev->dev, "PHY%d ready status 0x%x\n",
+			phy_id, val);
+		if (val & CSI_REG_HUB_GPREG_PHY_STATUS_PHY_READY)
+			return 0;
+		usleep_range(10, 20);
+	}
+
+	dev_warn(&isys->adev->dev, "PHY%d ready timeout\n", phy_id);
+
+	return -ETIMEDOUT;
+}
+
+int ipu6_isys_phy_ppi_tinit_done(struct ipu_isys *isys,
+				 struct ipu_isys_csi2_config *cfg)
+{
+	unsigned int i, j;
+	unsigned int port, phy_id;
+	u32 val;
+	void __iomem *phy_base;
+	struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev);
+	struct ipu_device *isp = adev->isp;
+	void __iomem *isp_base = isp->base;
+
+	port = cfg->port;
+	phy_id = port / 4;
+	phy_base = isp_base + IPU6_ISYS_PHY_BASE(phy_id);
+	for (i = 0; i < 11; i++) {
+		/* ppi 5 is not used, skip it */
+		if (i == 5)
+			continue;
+		for (j = 0; j < LOOP; j++) {
+			val = readl(phy_base + IPU6_ISYS_PHY_DBBS_UDLN(i) +
+				    PHY_DBBUDLN_PPI_STATUS);
+			if (val & PHY_DBBUDLN_TINIT_DONE)
+				break;
+			usleep_range(10, 20);
+		}
+		if (j == LOOP) {
+			dev_warn(&isys->adev->dev,
+				 "phy %d ppi %d tinit NOT done\n",
+				 phy_id, i);
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+int ipu6_isys_phy_common_init(struct ipu_isys *isys)
+{
+	unsigned int phy_id;
+	void __iomem *phy_base;
+	struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev);
+	struct ipu_device *isp = adev->isp;
+	void __iomem *isp_base = isp->base;
+	struct v4l2_async_subdev *asd;
+	struct sensor_async_subdev *s_asd;
+	unsigned int i;
+
+	list_for_each_entry(asd, &isys->notifier.asd_list, asd_list) {
+		s_asd = container_of(asd, struct sensor_async_subdev, asd);
+		phy_id = s_asd->csi2.port / 4;
+		phy_base = isp_base + IPU6_ISYS_PHY_BASE(phy_id);
+
+		for (i = 0; i < ARRAY_SIZE(common_init_regs); i++)
+			writel(common_init_regs[i].val,
+			       phy_base + common_init_regs[i].reg);
+	}
+
+	return 0;
+}
+
+static int ipu6_isys_driver_port_to_phy_port(struct ipu_isys_csi2_config *cfg)
+{
+	int phy_port;
+	int ret;
+
+	if (!(cfg->nlanes == 4 || cfg->nlanes == 2 || cfg->nlanes == 1))
+		return -EINVAL;
+
+	/* B,F -> C0; A,E -> C1; C,G -> C2; D,H -> C3 */
+	/* normalize driver port number */
+	phy_port = cfg->port % 4;
+
+	/* swap port number only for A and B */
+	if (phy_port == 0)
+		phy_port = 1;
+	else if (phy_port == 1)
+		phy_port = 0;
+
+	ret = phy_port;
+
+	/* check validity per lane configuration */
+	if (cfg->nlanes == 4 &&
+	    !(phy_port == 0 || phy_port == 2))
+		ret = -EINVAL;
+	else if ((cfg->nlanes == 2 || cfg->nlanes == 1) &&
+		 !(phy_port >= 0 && phy_port <= 3))
+		ret = -EINVAL;
+
+	return ret;
+}
+
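+/*
+ * Program the PHY config table for every registered sensor: pick the
+ * x1/x2/x4 table by lane count, remap the driver port to the PHY port
+ * and write the selected table into the PHY MMIO space.
+ */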
+int ipu6_isys_phy_config(struct ipu_isys *isys)
+{
+	int phy_port;
+	unsigned int phy_id;
+	void __iomem *phy_base;
+	struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev);
+	struct ipu_device *isp = adev->isp;
+	void __iomem *isp_base = isp->base;
+	struct phy_reg **phy_config_regs;
+	struct v4l2_async_subdev *asd;
+	struct sensor_async_subdev *s_asd;
+	struct ipu_isys_csi2_config cfg;
+	int i;
+
+	list_for_each_entry(asd, &isys->notifier.asd_list, asd_list) {
+		s_asd = container_of(asd, struct sensor_async_subdev, asd);
+		cfg.port = s_asd->csi2.port;
+		cfg.nlanes = s_asd->csi2.nlanes;
+		phy_port = ipu6_isys_driver_port_to_phy_port(&cfg);
+		if (phy_port < 0) {
+			dev_err(&isys->adev->dev,
+				"invalid port %d with %d lanes\n",
+				cfg.port, cfg.nlanes);
+			return -ENXIO;
+		}
+
+		phy_id = cfg.port / 4;
+		phy_base = isp_base + IPU6_ISYS_PHY_BASE(phy_id);
+		dev_dbg(&isys->adev->dev, "port%d PHY%u lanes %u\n",
+			cfg.port, phy_id, cfg.nlanes);
+
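+		/* nlanes 1, 2 and 4 map to the x1, x2 and x4 tables */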
+		phy_config_regs = config_regs[cfg.nlanes / 2];
+		cfg.port = phy_port;
+		for (i = 0; phy_config_regs[cfg.port][i].reg; i++)
+			writel(phy_config_regs[cfg.port][i].val,
+			       phy_base + phy_config_regs[cfg.port][i].reg);
+	}
+
+	return 0;
+}
+
+void __maybe_unused ipu6_isys_phy_dump_status(struct ipu_isys *isys,
+					      struct ipu_isys_csi2_config *cfg)
+{
+	unsigned int port, phy_id;
+	struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev);
+	struct ipu_device *isp = adev->isp;
+	void __iomem *isp_base = isp->base;
+	void __iomem *cbbs1_base;
+
+	port = cfg->port;
+	phy_id = port / 4;
+	cbbs1_base = isp_base + IPU6_ISYS_PHY_BASE(phy_id) + PHY_CBBS1_BASE;
+
+	dev_dbg(&isys->adev->dev, "phy rcomp_status 0x%08x, cbb_status 0x%08x",
+		readl(cbbs1_base + PHY_CBBS1_RCOMP_STATUS_REG_1),
+		readl(cbbs1_base + PHY_CBBS1_CBB_STATUS_REG));
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-phy.h b/drivers/media/pci/intel/ipu6/ipu6-isys-phy.h
new file mode 100644
index 000000000000..e8c063fcf452
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-phy.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 - 2020 Intel Corporation
+ */
+
+#ifndef IPU6_ISYS_PHY_H
+#define IPU6_ISYS_PHY_H
+
+/*
+ * Bridge to the PHYs in the buttress register map; each PHY has 16 kbytes.
+ * TGL U/Y has only 2 PHYs.
+ */
+#define IPU6_ISYS_PHY_0_BASE			0x10000
+#define IPU6_ISYS_PHY_1_BASE			0x14000
+#define IPU6_ISYS_PHY_2_BASE			0x18000
+#define IPU6_ISYS_PHY_BASE(i)			(0x10000 + (i) * 0x4000)
+
+/* ppi mapping per phy:
+ *
+ * x4x4:
+ * port0 - PPI range {0, 1, 2, 3, 4}
+ * port2 - PPI range {6, 7, 8, 9, 10}
+ *
+ * x4x2:
+ * port0 - PPI range {0, 1, 2, 3, 4}
+ * port2 - PPI range {6, 7, 8}
+ *
+ * x2x4:
+ * port0 - PPI range {0, 1, 2}
+ * port2 - PPI range {6, 7, 8, 9, 10}
+ *
+ * x2x2:
+ * port0 - PPI range {0, 1, 2}
+ * port1 - PPI range {3, 4, 5}
+ * port2 - PPI range {6, 7, 8}
+ * port3 - PPI range {9, 10, 11}
+ */
+
+/* cbbs config regs */
+#define PHY_CBBS1_BASE				0x0
+/* register offset */
+#define PHY_CBBS1_DFX_VMISCCTL			0x0
+#define PHY_CBBS1_DFX_VBYTESEL0			0x4
+#define PHY_CBBS1_DFX_VBYTESEL1			0x8
+#define PHY_CBBS1_VISA2OBS_CTRL_REG		0xc
+#define PHY_CBBS1_PGL_CTRL_REG			0x10
+#define PHY_CBBS1_RCOMP_CTRL_REG_1		0x14
+#define PHY_CBBS1_RCOMP_CTRL_REG_2		0x18
+#define PHY_CBBS1_RCOMP_CTRL_REG_3		0x1c
+#define PHY_CBBS1_RCOMP_CTRL_REG_4		0x20
+#define PHY_CBBS1_RCOMP_CTRL_REG_5		0x24
+#define PHY_CBBS1_RCOMP_STATUS_REG_1		0x28
+#define PHY_CBBS1_RCOMP_STATUS_REG_2		0x2c
+#define PHY_CBBS1_CLOCK_CTRL_REG		0x30
+#define PHY_CBBS1_CBB_ISOLATION_REG		0x34
+#define PHY_CBBS1_CBB_PLL_CONTROL		0x38
+#define PHY_CBBS1_CBB_STATUS_REG		0x3c
+#define PHY_CBBS1_AFE_CONTROL_REG_1		0x40
+#define PHY_CBBS1_AFE_CONTROL_REG_2		0x44
+#define PHY_CBBS1_CBB_SPARE			0x48
+#define PHY_CBBS1_CRI_CLK_CONTROL		0x4c
+
+/* dbbs shared, i = [0..11] */
+#define PHY_DBBS_SHARED(ppi)			((ppi) * 0x200 + 0x200)
+/* register offset */
+#define PHY_DBBDFE_DFX_V1MISCCTL		0x0
+#define PHY_DBBDFE_DFX_V1BYTESEL0		0x4
+#define PHY_DBBDFE_DFX_V1BYTESEL1		0x8
+#define PHY_DBBDFE_DFX_V2MISCCTL		0xc
+#define PHY_DBBDFE_DFX_V2BYTESEL0		0x10
+#define PHY_DBBDFE_DFX_V2BYTESEL1		0x14
+#define PHY_DBBDFE_GBLCTL			0x18
+#define PHY_DBBDFE_GBL_STATUS			0x1c
+
+/* dbbs udln, i = [0..11] */
+#define IPU6_ISYS_PHY_DBBS_UDLN(ppi)		((ppi) * 0x200 + 0x280)
+/* register offset */
+#define PHY_DBBUDLN_CTL				0x0
+#define PHY_DBBUDLN_CLK_CTL			0x4
+#define PHY_DBBUDLN_SOFT_RST_CTL		0x8
+#define PHY_DBBUDLN_STRAP_VALUES		0xc
+#define PHY_DBBUDLN_TXRX_CTL			0x10
+#define PHY_DBBUDLN_MST_SLV_INIT_CTL		0x14
+#define PHY_DBBUDLN_TX_TIMING_CTL0		0x18
+#define PHY_DBBUDLN_TX_TIMING_CTL1		0x1c
+#define PHY_DBBUDLN_TX_TIMING_CTL2		0x20
+#define PHY_DBBUDLN_TX_TIMING_CTL3		0x24
+#define PHY_DBBUDLN_RX_TIMING_CTL		0x28
+#define PHY_DBBUDLN_PPI_STATUS_CTL		0x2c
+#define PHY_DBBUDLN_PPI_STATUS			0x30
+#define PHY_DBBUDLN_ERR_CTL			0x34
+#define PHY_DBBUDLN_ERR_STATUS			0x38
+#define PHY_DBBUDLN_DFX_LPBK_CTL		0x3c
+#define PHY_DBBUDLN_DFX_PPI_CTL			0x40
+#define PHY_DBBUDLN_DFX_TX_DPHY_CTL		0x44
+#define PHY_DBBUDLN_DFX_TXRX_PRBSPAT_CTL	0x48
+#define PHY_DBBUDLN_DFX_TXRX_PRBSPAT_SEED	0x4c
+#define PHY_DBBUDLN_DFX_PRBSPAT_MAX_WRD_CNT	0x50
+#define PHY_DBBUDLN_DFX_PRBSPAT_STATUS		0x54
+#define PHY_DBBUDLN_DFX_PRBSPAT_WRD_CNT0_STATUS	0x58
+#define PHY_DBBUDLN_DFX_PRBSPAT_WRD_CNT1_STATUS	0x5c
+#define PHY_DBBUDLN_DFX_PRBSPAT_FF_ERR_STATUS	0x60
+#define PHY_DBBUDLN_DFX_PRBSPAT_FF_REF_STATUS		0x64
+#define PHY_DBBUDLN_DFX_PRBSPAT_FF_WRD_CNT0_STATUS	0x68
+#define PHY_DBBUDLN_DFX_PRBSPAT_FF_WRD_CNT1_STATUS	0x6c
+#define PHY_DBBUDLN_RSVD_CTL				0x70
+#define PHY_DBBUDLN_TINIT_DONE				BIT(27)
+
+/* dbbs supar, i = [0..11] */
+#define IPU6_ISYS_PHY_DBBS_SUPAR(ppi)		((ppi) * 0x200 + 0x300)
+/* register offset */
+#define PHY_DBBSUPAR_TXRX_FUPAR_CTL		0x0
+#define PHY_DBBSUPAR_TXHS_AFE_CTL		0x4
+#define PHY_DBBSUPAR_TXHS_AFE_LEGDIS_CTL	0x8
+#define PHY_DBBSUPAR_TXHS_AFE_EQ_CTL		0xc
+#define PHY_DBBSUPAR_RXHS_AFE_CTL1		0x10
+#define PHY_DBBSUPAR_RXHS_AFE_PICTL1		0x14
+#define PHY_DBBSUPAR_TXRXLP_AFE_CTL		0x18
+#define PHY_DBBSUPAR_DFX_TXRX_STATUS		0x1c
+#define PHY_DBBSUPAR_DFX_TXRX_CTL		0x20
+#define PHY_DBBSUPAR_DFX_DIGMON_CTL		0x24
+#define PHY_DBBSUPAR_DFX_LOCMON_CTL		0x28
+#define PHY_DBBSUPAR_DFX_RCOMP_CTL1		0x2c
+#define PHY_DBBSUPAR_DFX_RCOMP_CTL2		0x30
+#define PHY_DBBSUPAR_CAL_TOP1			0x34
+#define PHY_DBBSUPAR_CAL_SHARED1		0x38
+#define PHY_DBBSUPAR_CAL_SHARED2		0x3c
+#define PHY_DBBSUPAR_CAL_CDR1			0x40
+#define PHY_DBBSUPAR_CAL_OCAL1			0x44
+#define PHY_DBBSUPAR_CAL_DCC_DLL1		0x48
+#define PHY_DBBSUPAR_CAL_DLL2			0x4c
+#define PHY_DBBSUPAR_CAL_DFX1			0x50
+#define PHY_DBBSUPAR_CAL_DFX2			0x54
+#define PHY_DBBSUPAR_CAL_DFX3			0x58
+#define PHY_DBBSUPAR_CAL_DFX4			0x5c
+#define PHY_DBBSUPAR_CAL_DFX5			0x60
+#define PHY_DBBSUPAR_CAL_DFX6			0x64
+#define PHY_DBBSUPAR_CAL_DFX7			0x68
+#define PHY_DBBSUPAR_DFX_AFE_SPARE_CTL1		0x6c
+#define PHY_DBBSUPAR_DFX_AFE_SPARE_CTL2		0x70
+#define PHY_DBBSUPAR_SPARE			0x74
+
+/* sai, i = [0..11] */
+#define IPU6_ISYS_PHY_SAI			0xf800
+/* register offset */
+#define PHY_SAI_CTRL_REG0                       0x40
+#define PHY_SAI_CTRL_REG0_1                     0x44
+#define PHY_SAI_WR_REG0                         0x48
+#define PHY_SAI_WR_REG0_1                       0x4c
+#define PHY_SAI_RD_REG0                         0x50
+#define PHY_SAI_RD_REG0_1                       0x54
+
+int ipu6_isys_phy_powerup_ack(struct ipu_isys *isys, unsigned int phy_id);
+int ipu6_isys_phy_powerdown_ack(struct ipu_isys *isys, unsigned int phy_id);
+int ipu6_isys_phy_reset(struct ipu_isys *isys, unsigned int phy_id,
+			bool assert);
+int ipu6_isys_phy_ready(struct ipu_isys *isys, unsigned int phy_id);
+int ipu6_isys_phy_common_init(struct ipu_isys *isys);
+int ipu6_isys_phy_ppi_tinit_done(struct ipu_isys *isys,
+				 struct ipu_isys_csi2_config *cfg);
+int ipu6_isys_phy_config(struct ipu_isys *isys);
+#endif
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
new file mode 100644
index 000000000000..e2891606a63d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/module.h>
+#include <media/v4l2-event.h>
+
+#include "ipu.h"
+#include "ipu-platform-regs.h"
+#include "ipu-trace.h"
+#include "ipu-isys.h"
+#include "ipu-isys-tpg.h"
+#include "ipu-platform-isys-csi2-reg.h"
+
+const struct ipu_isys_pixelformat ipu_isys_pfmts[] = {
+	{V4L2_PIX_FMT_SBGGR12, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGBRG12, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGRBG12, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SRGGB12, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SBGGR10, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGBRG10, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGRBG10, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SRGGB10, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8,
+	 IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+	{}
+};
+
+struct ipu_trace_block isys_trace_blocks[] = {
+	{
+		.offset = IPU_TRACE_REG_IS_TRACE_UNIT_BASE,
+		.type = IPU_TRACE_BLOCK_TUN,
+	},
+	{
+		.offset = IPU_TRACE_REG_IS_SP_EVQ_BASE,
+		.type = IPU_TRACE_BLOCK_TM,
+	},
+	{
+		.offset = IPU_TRACE_REG_IS_SP_GPC_BASE,
+		.type = IPU_TRACE_BLOCK_GPC,
+	},
+	{
+		.offset = IPU_TRACE_REG_IS_ISL_GPC_BASE,
+		.type = IPU_TRACE_BLOCK_GPC,
+	},
+	{
+		.offset = IPU_TRACE_REG_IS_MMU_GPC_BASE,
+		.type = IPU_TRACE_BLOCK_GPC,
+	},
+	{
+		/* Note! this covers all 8 blocks */
+		.offset = IPU_TRACE_REG_CSI2_TM_BASE(0),
+		.type = IPU_TRACE_CSI2,
+	},
+	{
+		/* Note! this covers all 11 blocks */
+		.offset = IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(0),
+		.type = IPU_TRACE_SIG2CIOS,
+	},
+	{
+		.offset = IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N,
+		.type = IPU_TRACE_TIMER_RST,
+	},
+	{
+		.type = IPU_TRACE_BLOCK_END,
+	}
+};
+
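+/*
+ * Configure and unmask the CSI-2 and UNISPART interrupts and program
+ * the CDC FIFO thresholds of the input system.
+ */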
+void isys_setup_hw(struct ipu_isys *isys)
+{
+	void __iomem *base = isys->pdata->base;
+	const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold;
+	u32 irqs = 0;
+	unsigned int i, nr;
+
+	nr = (ipu_ver == IPU_VER_6) ? IPU6_ISYS_CSI_PORT_NUM :
+		IPU6SE_ISYS_CSI_PORT_NUM;
+
+	/* Enable irqs for all MIPI ports */
+	for (i = 0; i < nr; i++)
+		irqs |= IPU_ISYS_UNISPART_IRQ_CSI2(i);
+
+	writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE);
+	writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE);
+	writel(0xffffffff, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR);
+	writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK);
+	writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE);
+
+	irqs = ISYS_UNISPART_IRQS;
+	writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_EDGE);
+	writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE);
+	writel(0xffffffff, base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR);
+	writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_MASK);
+	writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_ENABLE);
+
+	writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG);
+	writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG);
+
+	/* Write CDC FIFO threshold values for isys */
+	for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++)
+		writel(thd[i], base + IPU_REG_ISYS_CDC_THRESHOLD(i));
+}
+
+irqreturn_t isys_isr(struct ipu_bus_device *adev)
+{
+	struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+	void __iomem *base = isys->pdata->base;
+	u32 status_sw, status_csi;
+
+	spin_lock(&isys->power_lock);
+	if (!isys->power) {
+		spin_unlock(&isys->power_lock);
+		return IRQ_NONE;
+	}
+
+	status_csi = readl(base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS);
+	status_sw = readl(base + IPU_REG_ISYS_UNISPART_IRQ_STATUS);
+
+	writel(ISYS_UNISPART_IRQS & ~IPU_ISYS_UNISPART_IRQ_SW,
+	       base + IPU_REG_ISYS_UNISPART_IRQ_MASK);
+
+	do {
+		writel(status_csi,
+		       base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR);
+		writel(status_sw, base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR);
+
+		if (isys->isr_csi2_bits & status_csi) {
+			unsigned int i;
+
+			for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) {
+				/* irq from a port that is not enabled */
+				if (!isys->csi2[i].base)
+					continue;
+				if (status_csi & IPU_ISYS_UNISPART_IRQ_CSI2(i))
+					ipu_isys_csi2_isr(&isys->csi2[i]);
+			}
+		}
+
+		writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG);
+
+		if (!isys_isr_one(adev))
+			status_sw = IPU_ISYS_UNISPART_IRQ_SW;
+		else
+			status_sw = 0;
+
+		status_csi = readl(base +
+				   IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS);
+		status_sw |= readl(base +
+				   IPU_REG_ISYS_UNISPART_IRQ_STATUS);
+	} while (((status_csi & isys->isr_csi2_bits) ||
+		  (status_sw & IPU_ISYS_UNISPART_IRQ_SW)) &&
+		 !isys->adev->isp->flr_done);
+
+	writel(ISYS_UNISPART_IRQS, base + IPU_REG_ISYS_UNISPART_IRQ_MASK);
+
+	spin_unlock(&isys->power_lock);
+
+	return IRQ_HANDLED;
+}
+
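+/*
+ * Queue a V4L2_EVENT_FRAME_SYNC event for the pipeline driven by this
+ * TPG and advance its frame sequence counter.
+ */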
+void ipu_isys_tpg_sof_event(struct ipu_isys_tpg *tpg)
+{
+	struct ipu_isys_pipeline *ip = NULL;
+	struct v4l2_event ev = {
+		.type = V4L2_EVENT_FRAME_SYNC,
+	};
+	struct video_device *vdev = tpg->asd.sd.devnode;
+	unsigned long flags;
+	unsigned int i, nr;
+
+	nr = (ipu_ver == IPU_VER_6) ? IPU6_ISYS_CSI_PORT_NUM :
+		IPU6SE_ISYS_CSI_PORT_NUM;
+
+	spin_lock_irqsave(&tpg->isys->lock, flags);
+	for (i = 0; i < nr; i++) {
+		if (tpg->isys->pipes[i] && tpg->isys->pipes[i]->tpg == tpg) {
+			ip = tpg->isys->pipes[i];
+			break;
+		}
+	}
+
+	/* Pipe already vanished */
+	if (!ip) {
+		spin_unlock_irqrestore(&tpg->isys->lock, flags);
+		return;
+	}
+
+	ev.u.frame_sync.frame_sequence =
+		atomic_inc_return(&ip->sequence) - 1;
+	spin_unlock_irqrestore(&tpg->isys->lock, flags);
+
+	v4l2_event_queue(vdev, &ev);
+
+	dev_dbg(&tpg->isys->adev->dev,
+		"sof_event::tpg-%i sequence: %i\n",
+		tpg->index, ev.u.frame_sync.frame_sequence);
+}
+
+void ipu_isys_tpg_eof_event(struct ipu_isys_tpg *tpg)
+{
+	struct ipu_isys_pipeline *ip = NULL;
+	unsigned long flags;
+	unsigned int i, nr;
+	u32 frame_sequence;
+
+	nr = (ipu_ver == IPU_VER_6) ? IPU6_ISYS_CSI_PORT_NUM :
+		IPU6SE_ISYS_CSI_PORT_NUM;
+
+	spin_lock_irqsave(&tpg->isys->lock, flags);
+	for (i = 0; i < nr; i++) {
+		if (tpg->isys->pipes[i] && tpg->isys->pipes[i]->tpg == tpg) {
+			ip = tpg->isys->pipes[i];
+			break;
+		}
+	}
+
+	/* Pipe already vanished */
+	if (!ip) {
+		spin_unlock_irqrestore(&tpg->isys->lock, flags);
+		return;
+	}
+
+	frame_sequence = atomic_read(&ip->sequence);
+
+	spin_unlock_irqrestore(&tpg->isys->lock, flags);
+
+	dev_dbg(&tpg->isys->adev->dev,
+		"eof_event::tpg-%i sequence: %i\n",
+		tpg->index, frame_sequence);
+}
+
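+/*
+ * Start or stop the test pattern generator: select MIPI GEN (or the
+ * sensor) as CSI FE input and program data type, word count and frame
+ * geometry before enabling generation.
+ */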
+int tpg_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd);
+	__u32 code = tpg->asd.ffmt[TPG_PAD_SOURCE].code;
+	unsigned int bpp = ipu_isys_mbus_code_to_bpp(code);
+	struct ipu_isys_pipeline *ip =
+			to_ipu_isys_pipeline(sd->entity.pipe);
+
+	/*
+	 * The MIPI_GEN block is the CSI2 FB. The TPG selection register
+	 * needs to be written to enable or disable TPG streaming.
+	 */
+	if (tpg->sel)
+		writel(enable ? 1 : 0, tpg->sel);
+
+	if (!enable) {
+		ip->tpg = NULL;
+		writel(0, tpg->base +
+		       CSI_REG_CSI_FE_ENABLE -
+		       CSI_REG_PIXGEN_COM_BASE_OFFSET);
+		writel(CSI_SENSOR_INPUT, tpg->base +
+		       CSI_REG_CSI_FE_MUX_CTRL -
+		       CSI_REG_PIXGEN_COM_BASE_OFFSET);
+		writel(CSI_CNTR_SENSOR_LINE_ID |
+		       CSI_CNTR_SENSOR_FRAME_ID,
+		       tpg->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL -
+		       CSI_REG_PIXGEN_COM_BASE_OFFSET);
+		writel(0, tpg->base + MIPI_GEN_REG_COM_ENABLE);
+		return 0;
+	}
+
+	ip->has_sof = true;
+	ip->tpg = tpg;
+	/* Select MIPI GEN as input */
+	writel(0, tpg->base + CSI_REG_CSI_FE_MODE -
+	       CSI_REG_PIXGEN_COM_BASE_OFFSET);
+	writel(1, tpg->base + CSI_REG_CSI_FE_ENABLE -
+	       CSI_REG_PIXGEN_COM_BASE_OFFSET);
+	writel(CSI_MIPIGEN_INPUT, tpg->base +
+	       CSI_REG_CSI_FE_MUX_CTRL - CSI_REG_PIXGEN_COM_BASE_OFFSET);
+	writel(0, tpg->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL -
+	       CSI_REG_PIXGEN_COM_BASE_OFFSET);
+
+	writel(MIPI_GEN_COM_DTYPE_RAW(bpp),
+	       tpg->base + MIPI_GEN_REG_COM_DTYPE);
+	writel(ipu_isys_mbus_code_to_mipi(code),
+	       tpg->base + MIPI_GEN_REG_COM_VTYPE);
+	writel(0, tpg->base + MIPI_GEN_REG_COM_VCHAN);
+
+	writel(0, tpg->base + MIPI_GEN_REG_SYNG_NOF_FRAMES);
+
+	writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE].width *
+			    bpp, BITS_PER_BYTE),
+	       tpg->base + MIPI_GEN_REG_COM_WCOUNT);
+	writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE].width,
+			    MIPI_GEN_PPC),
+	       tpg->base + MIPI_GEN_REG_SYNG_NOF_PIXELS);
+	writel(tpg->asd.ffmt[TPG_PAD_SOURCE].height,
+	       tpg->base + MIPI_GEN_REG_SYNG_NOF_LINES);
+
+	writel(0, tpg->base + MIPI_GEN_REG_TPG_MODE);
+	writel(-1, tpg->base + MIPI_GEN_REG_TPG_HCNT_MASK);
+	writel(-1, tpg->base + MIPI_GEN_REG_TPG_VCNT_MASK);
+	writel(-1, tpg->base + MIPI_GEN_REG_TPG_XYCNT_MASK);
+	writel(0, tpg->base + MIPI_GEN_REG_TPG_HCNT_DELTA);
+	writel(0, tpg->base + MIPI_GEN_REG_TPG_VCNT_DELTA);
+
+	v4l2_ctrl_handler_setup(&tpg->asd.ctrl_handler);
+
+	writel(2, tpg->base + MIPI_GEN_REG_COM_ENABLE);
+	return 0;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-l-scheduler.c b/drivers/media/pci/intel/ipu6/ipu6-l-scheduler.c
new file mode 100644
index 000000000000..2c9cdc1c376b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-l-scheduler.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include "ipu-psys.h"
+#include "ipu6-ppg.h"
+
+extern bool enable_power_gating;
+
+struct sched_list {
+	struct list_head list;
+	/* to protect the list */
+	struct mutex lock;
+};
+
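+/* Global candidate lists of ppgs to be started and stopped/suspended */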
+static struct sched_list start_list = {
+	.list	= LIST_HEAD_INIT(start_list.list),
+	.lock	= __MUTEX_INITIALIZER(start_list.lock),
+};
+
+static struct sched_list stop_list = {
+	.list	= LIST_HEAD_INIT(stop_list.list),
+	.lock	= __MUTEX_INITIALIZER(stop_list.lock),
+};
+
+static struct sched_list *get_sc_list(enum SCHED_LIST type)
+{
+	/* for debug purposes */
+	WARN_ON(type != SCHED_START_LIST && type != SCHED_STOP_LIST);
+
+	if (type == SCHED_START_LIST)
+		return &start_list;
+	return &stop_list;
+}
+
+static bool is_kppg_in_list(struct ipu_psys_ppg *kppg, struct list_head *head)
+{
+	struct ipu_psys_ppg *tmp;
+
+	list_for_each_entry(tmp, head, sched_list) {
+		if (kppg == tmp)
+			return true;
+	}
+
+	return false;
+}
+
+void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg,
+				    enum SCHED_LIST type)
+{
+	struct sched_list *sc_list = get_sc_list(type);
+	struct ipu_psys_ppg *tmp0, *tmp1;
+	struct ipu_psys *psys = kppg->fh->psys;
+
+	mutex_lock(&sc_list->lock);
+	list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) {
+		if (tmp0 == kppg) {
+			dev_dbg(&psys->adev->dev,
+				"remove from %s list, kppg(%d 0x%p) state %d\n",
+				type == SCHED_START_LIST ? "start" : "stop",
+				kppg->kpg->pg->ID, kppg, kppg->state);
+			list_del_init(&kppg->sched_list);
+		}
+	}
+	mutex_unlock(&sc_list->lock);
+}
+
+void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg,
+				 enum SCHED_LIST type)
+{
+	int cur_pri = kppg->pri_base + kppg->pri_dynamic;
+	struct sched_list *sc_list = get_sc_list(type);
+	struct ipu_psys *psys = kppg->fh->psys;
+	struct ipu_psys_ppg *tmp0, *tmp1;
+
+	dev_dbg(&psys->adev->dev,
+		"add to %s list, kppg(%d 0x%p) state %d prio(%d %d) fh 0x%p\n",
+		type == SCHED_START_LIST ? "start" : "stop",
+		kppg->kpg->pg->ID, kppg, kppg->state,
+		kppg->pri_base, kppg->pri_dynamic, kppg->fh);
+
+	mutex_lock(&sc_list->lock);
+	if (list_empty(&sc_list->list)) {
+		list_add(&kppg->sched_list, &sc_list->list);
+		goto out;
+	}
+
+	if (is_kppg_in_list(kppg, &sc_list->list)) {
+		dev_dbg(&psys->adev->dev, "kppg already in list\n");
+		goto out;
+	}
+
+	list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) {
+		int tmp_pri = tmp0->pri_base + tmp0->pri_dynamic;
+
+		dev_dbg(&psys->adev->dev,
+			"found kppg(%d 0x%p), state %d pri(%d %d) fh 0x%p\n",
+			tmp0->kpg->pg->ID, tmp0, tmp0->state,
+			tmp0->pri_base, tmp0->pri_dynamic, tmp0->fh);
+
+		if (type == SCHED_START_LIST && tmp_pri > cur_pri) {
+			list_add(&kppg->sched_list, tmp0->sched_list.prev);
+			goto out;
+		} else if (type == SCHED_STOP_LIST && tmp_pri < cur_pri) {
+			list_add(&kppg->sched_list, tmp0->sched_list.prev);
+			goto out;
+		}
+	}
+
+	list_add_tail(&kppg->sched_list, &sc_list->list);
+out:
+	mutex_unlock(&sc_list->lock);
+}
+
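+/*
+ * Dry-run the resource allocation against a copy of the running
+ * resource pool so contention can be detected without touching the
+ * live state; -ENOSPC indicates contention.
+ */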
+static int ipu_psys_detect_resource_contention(struct ipu_psys_ppg *kppg)
+{
+	struct ipu_psys_resource_pool try_res_pool;
+	struct ipu_psys *psys = kppg->fh->psys;
+	int ret;
+	int state;
+
+	mutex_lock(&kppg->mutex);
+	state = kppg->state;
+	mutex_unlock(&kppg->mutex);
+	if (state == PPG_STATE_STARTED || state == PPG_STATE_RUNNING ||
+	    state == PPG_STATE_RESUMED)
+		return 0;
+
+	ret = ipu_psys_resource_pool_init(&try_res_pool);
+	if (ret < 0) {
+		dev_err(&psys->adev->dev, "unable to alloc pg resources\n");
+		WARN_ON(1);
+		return ret;
+	}
+
+	ipu_psys_resource_copy(&psys->resource_pool_running, &try_res_pool);
+	ret = ipu_psys_try_allocate_resources(&psys->adev->dev,
+					      kppg->kpg->pg,
+					      kppg->manifest,
+					      &try_res_pool);
+
+	ipu_psys_resource_pool_cleanup(&try_res_pool);
+
+	return ret;
+}
+
+static void ipu_psys_scheduler_ppg_sort(struct ipu_psys *psys, bool *stopping)
+{
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_fh *fh;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		mutex_lock(&fh->mutex);
+		sched = &fh->sched;
+
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			mutex_lock(&kppg->mutex);
+			if (kppg->state == PPG_STATE_START ||
+			    kppg->state == PPG_STATE_RESUME) {
+				ipu_psys_scheduler_add_kppg(kppg,
+							    SCHED_START_LIST);
+			} else if (kppg->state == PPG_STATE_RUNNING) {
+				ipu_psys_scheduler_add_kppg(kppg,
+							    SCHED_STOP_LIST);
+			} else if (kppg->state == PPG_STATE_SUSPENDING ||
+				   kppg->state == PPG_STATE_STOPPING) {
+				/* there are some suspending/stopping ppgs */
+				*stopping = true;
+			} else if (kppg->state == PPG_STATE_RESUMING ||
+				   kppg->state == PPG_STATE_STARTING) {
+				/* resuming/starting ppgs: nothing to do here */
+			}
+			mutex_unlock(&kppg->mutex);
+		}
+		mutex_unlock(&fh->mutex);
+	}
+}
+
+static void ipu_psys_scheduler_update_start_ppg_priority(void)
+{
+	struct sched_list *sc_list = get_sc_list(SCHED_START_LIST);
+	struct ipu_psys_ppg *kppg;
+
+	mutex_lock(&sc_list->lock);
+	if (!list_empty(&sc_list->list))
+		list_for_each_entry(kppg, &sc_list->list, sched_list)
+			kppg->pri_dynamic--;
+	mutex_unlock(&sc_list->lock);
+}
+
+static bool ipu_psys_scheduler_switch_ppg(struct ipu_psys *psys)
+{
+	struct sched_list *sc_list = get_sc_list(SCHED_STOP_LIST);
+	struct ipu_psys_ppg *kppg;
+	bool resched = false;
+
+	mutex_lock(&sc_list->lock);
+	if (list_empty(&sc_list->list)) {
+		/* some ppgs are RESUMING/STARTING */
+		dev_dbg(&psys->adev->dev, "no candidate stop ppg\n");
+		mutex_unlock(&sc_list->lock);
+		return false;
+	}
+	kppg = list_first_entry(&sc_list->list, struct ipu_psys_ppg,
+				sched_list);
+	mutex_unlock(&sc_list->lock);
+
+	mutex_lock(&kppg->mutex);
+	if (!(kppg->state & PPG_STATE_STOP)) {
+		dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+			__func__, kppg, kppg->state, PPG_STATE_SUSPEND);
+		kppg->state = PPG_STATE_SUSPEND;
+		resched = true;
+	}
+	mutex_unlock(&kppg->mutex);
+
+	return resched;
+}
+
+/*
+ * Search all kppgs and sort them into start_list and stop_list; always
+ * start the first (highest priority) kppg on start_list. On resource
+ * contention, switch kppgs on stop_list to the suspend state one by one.
+ */
+static bool ipu_psys_scheduler_ppg_start(struct ipu_psys *psys)
+{
+	struct sched_list *sc_list = get_sc_list(SCHED_START_LIST);
+	struct ipu_psys_ppg *kppg, *kppg0;
+	bool stopping_existed = false;
+	int ret;
+
+	ipu_psys_scheduler_ppg_sort(psys, &stopping_existed);
+
+	mutex_lock(&sc_list->lock);
+	if (list_empty(&sc_list->list)) {
+		dev_dbg(&psys->adev->dev, "no ppg to start\n");
+		mutex_unlock(&sc_list->lock);
+		return false;
+	}
+
+	list_for_each_entry_safe(kppg, kppg0,
+				 &sc_list->list, sched_list) {
+		mutex_unlock(&sc_list->lock);
+
+		ret = ipu_psys_detect_resource_contention(kppg);
+		if (ret < 0) {
+			dev_dbg(&psys->adev->dev,
+				"ppg %d resource detect failed(%d)\n",
+				kppg->kpg->pg->ID, ret);
+			/*
+			 * Switch out another ppg only when both hold:
+			 * 1. there is resource contention (-ENOSPC)
+			 * 2. no ppg is already suspending/stopping
+			 */
+			if (ret == -ENOSPC) {
+				if (!stopping_existed &&
+				    ipu_psys_scheduler_switch_ppg(psys)) {
+					return true;
+				}
+				dev_dbg(&psys->adev->dev,
+					"ppg is suspending/stopping\n");
+			} else {
+				dev_err(&psys->adev->dev,
+					"detect resource error %d\n", ret);
+			}
+		} else {
+			kppg->pri_dynamic = 0;
+
+			mutex_lock(&kppg->mutex);
+			if (kppg->state == PPG_STATE_START)
+				ipu_psys_ppg_start(kppg);
+			else
+				ipu_psys_ppg_resume(kppg);
+			mutex_unlock(&kppg->mutex);
+
+			ipu_psys_scheduler_remove_kppg(kppg,
+						       SCHED_START_LIST);
+			ipu_psys_scheduler_update_start_ppg_priority();
+		}
+		mutex_lock(&sc_list->lock);
+	}
+	mutex_unlock(&sc_list->lock);
+
+	return false;
+}
+
+static bool ipu_psys_scheduler_ppg_enqueue_bufset(struct ipu_psys *psys)
+{
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_fh *fh;
+	bool resched = false;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		mutex_lock(&fh->mutex);
+		sched = &fh->sched;
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			if (ipu_psys_ppg_enqueue_bufsets(kppg))
+				resched = true;
+		}
+		mutex_unlock(&fh->mutex);
+	}
+
+	return resched;
+}
+
+/*
+ * Check all kppgs within the fhs; when a kppg state is STOP or SUSPEND,
+ * the l-scheduler stops or suspends it via the ppg helpers and removes
+ * it from the stop list.
+ */
+static bool ipu_psys_scheduler_ppg_halt(struct ipu_psys *psys)
+{
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_fh *fh;
+	bool stopping_exit = false;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		mutex_lock(&fh->mutex);
+		sched = &fh->sched;
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			mutex_lock(&kppg->mutex);
+			if (kppg->state & PPG_STATE_STOP) {
+				ipu_psys_ppg_stop(kppg);
+				ipu_psys_scheduler_remove_kppg(kppg,
+							       SCHED_STOP_LIST);
+			} else if (kppg->state == PPG_STATE_SUSPEND) {
+				ipu_psys_ppg_suspend(kppg);
+				ipu_psys_scheduler_remove_kppg(kppg,
+							       SCHED_STOP_LIST);
+			} else if (kppg->state == PPG_STATE_SUSPENDING ||
+				   kppg->state == PPG_STATE_STOPPING) {
+				stopping_exit = true;
+			}
+			mutex_unlock(&kppg->mutex);
+		}
+		mutex_unlock(&fh->mutex);
+	}
+	return stopping_exit;
+}
+
+static void ipu_psys_update_ppg_state_by_kcmd(struct ipu_psys *psys,
+					      struct ipu_psys_ppg *kppg,
+					      struct ipu_psys_kcmd *kcmd)
+{
+	int old_ppg_state = kppg->state;
+
+	/*
+	 * Respond kcmd when ppg is in stable state:
+	 * STARTED/RESUMED/RUNNING/SUSPENDED/STOPPED
+	 */
+	if (kppg->state == PPG_STATE_STARTED ||
+	    kppg->state == PPG_STATE_RESUMED ||
+	    kppg->state == PPG_STATE_RUNNING) {
+		if (kcmd->state == KCMD_STATE_PPG_START) {
+			dev_dbg(&psys->adev->dev, "ppg %p started!\n", kppg);
+			list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);
+			ipu_psys_kcmd_complete(psys, kcmd, 0);
+		} else if (kcmd->state == KCMD_STATE_PPG_STOP) {
+			kppg->state = PPG_STATE_STOP;
+		}
+	} else if (kppg->state == PPG_STATE_SUSPENDED) {
+		if (kcmd->state == KCMD_STATE_PPG_START) {
+			dev_dbg(&psys->adev->dev, "ppg %p started!\n", kppg);
+			list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);
+			ipu_psys_kcmd_complete(psys, kcmd, 0);
+		} else if (kcmd->state == KCMD_STATE_PPG_STOP) {
+			/*
+			 * Keep the previous state recorded: the ppg must
+			 * be resumed first before it can be stopped.
+			 */
+			kppg->state |= PPG_STATE_STOP;
+		} else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) {
+			kppg->state = PPG_STATE_RESUME;
+		}
+	} else if (kppg->state == PPG_STATE_STOPPED) {
+		if (kcmd->state == KCMD_STATE_PPG_START) {
+			kppg->state = PPG_STATE_START;
+		} else if (kcmd->state == KCMD_STATE_PPG_STOP) {
+			dev_dbg(&psys->adev->dev, "ppg %p stopped!\n", kppg);
+			list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);
+			ipu_psys_kcmd_complete(psys, kcmd, 0);
+		} else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) {
+			dev_err(&psys->adev->dev, "ppg %p stopped!\n", kppg);
+			list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);
+			ipu_psys_kcmd_complete(psys, kcmd, -EIO);
+		}
+	}
+
+	if (old_ppg_state != kppg->state)
+		dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+			__func__, kppg, old_ppg_state, kppg->state);
+}
+
+static void ipu_psys_scheduler_kcmd_set(struct ipu_psys *psys)
+{
+	struct ipu_psys_kcmd *kcmd;
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_fh *fh;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		mutex_lock(&fh->mutex);
+		sched = &fh->sched;
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			mutex_lock(&kppg->mutex);
+			if (list_empty(&kppg->kcmds_new_list)) {
+				mutex_unlock(&kppg->mutex);
+				continue;
+			}
+
+			kcmd = list_first_entry(&kppg->kcmds_new_list,
+						struct ipu_psys_kcmd, list);
+			ipu_psys_update_ppg_state_by_kcmd(psys, kppg, kcmd);
+			mutex_unlock(&kppg->mutex);
+		}
+		mutex_unlock(&fh->mutex);
+	}
+}
+
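+/*
+ * Power gating may only be entered when every ppg is idle: no new or
+ * in-flight kcmds and each ppg in RUNNING, STOPPED or SUSPENDED state.
+ */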
+static bool is_ready_to_enter_power_gating(struct ipu_psys *psys)
+{
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_fh *fh;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		mutex_lock(&fh->mutex);
+		sched = &fh->sched;
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			mutex_lock(&kppg->mutex);
+			if (!list_empty(&kppg->kcmds_new_list) ||
+			    !list_empty(&kppg->kcmds_processing_list)) {
+				mutex_unlock(&kppg->mutex);
+				mutex_unlock(&fh->mutex);
+				return false;
+			}
+			if (!(kppg->state == PPG_STATE_RUNNING ||
+			      kppg->state == PPG_STATE_STOPPED ||
+			      kppg->state == PPG_STATE_SUSPENDED)) {
+				mutex_unlock(&kppg->mutex);
+				mutex_unlock(&fh->mutex);
+				return false;
+			}
+			mutex_unlock(&kppg->mutex);
+		}
+		mutex_unlock(&fh->mutex);
+	}
+
+	return true;
+}
+
+static bool has_pending_kcmd(struct ipu_psys *psys)
+{
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_fh *fh;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		mutex_lock(&fh->mutex);
+		sched = &fh->sched;
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			mutex_lock(&kppg->mutex);
+			if (!list_empty(&kppg->kcmds_new_list) ||
+			    !list_empty(&kppg->kcmds_processing_list)) {
+				mutex_unlock(&kppg->mutex);
+				mutex_unlock(&fh->mutex);
+				return true;
+			}
+			mutex_unlock(&kppg->mutex);
+		}
+		mutex_unlock(&fh->mutex);
+	}
+
+	return false;
+}
+
+static bool ipu_psys_scheduler_exit_power_gating(struct ipu_psys *psys)
+{
+	/* Assume power gating process can be aborted directly during START */
+	if (psys->power_gating == PSYS_POWER_GATED) {
+		dev_dbg(&psys->adev->dev, "powergating: exit ---\n");
+		ipu_psys_exit_power_gating(psys);
+	}
+	psys->power_gating = PSYS_POWER_NORMAL;
+	return false;
+}
+
+static bool ipu_psys_scheduler_enter_power_gating(struct ipu_psys *psys)
+{
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_fh *fh;
+
+	if (!enable_power_gating)
+		return false;
+
+	if (psys->power_gating == PSYS_POWER_NORMAL &&
+	    is_ready_to_enter_power_gating(psys)) {
+		/* Enter power gating */
+		dev_dbg(&psys->adev->dev, "powergating: enter +++\n");
+		psys->power_gating = PSYS_POWER_GATING;
+	}
+
+	if (psys->power_gating != PSYS_POWER_GATING)
+		return false;
+
+	/* Suspend ppgs one by one */
+	list_for_each_entry(fh, &psys->fhs, list) {
+		mutex_lock(&fh->mutex);
+		sched = &fh->sched;
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			mutex_lock(&kppg->mutex);
+			if (kppg->state == PPG_STATE_RUNNING) {
+				kppg->state = PPG_STATE_SUSPEND;
+				mutex_unlock(&kppg->mutex);
+				mutex_unlock(&fh->mutex);
+				return true;
+			}
+
+			if (kppg->state != PPG_STATE_SUSPENDED &&
+			    kppg->state != PPG_STATE_STOPPED) {
+				/* Can't enter power gating */
+				mutex_unlock(&kppg->mutex);
+				mutex_unlock(&fh->mutex);
+				/* Need re-run l-scheduler to suspend ppg? */
+				return (kppg->state & PPG_STATE_STOP ||
+					kppg->state == PPG_STATE_SUSPEND);
+			}
+			mutex_unlock(&kppg->mutex);
+		}
+		mutex_unlock(&fh->mutex);
+	}
+
+	psys->power_gating = PSYS_POWER_GATED;
+	ipu_psys_enter_power_gating(psys);
+
+	return false;
+}
+
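+/*
+ * Main scheduler entry point: respond to new kcmds, halt and start
+ * ppgs, enqueue buffer sets and drive the power gating state machine.
+ * The scheduler is woken up again whenever more work is pending.
+ */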
+void ipu_psys_run_next(struct ipu_psys *psys)
+{
+	/* Wake up scheduler due to unfinished work */
+	bool need_trigger = false;
+	/* Wait for FW callbacks while ppgs are stopping/suspending */
+	bool wait_fw_finish = false;
+	/*
+	 * Code below will crash if fhs is empty. Normally this
+	 * shouldn't happen.
+	 */
+	if (list_empty(&psys->fhs)) {
+		WARN_ON(1);
+		return;
+	}
+
+	/* Abort power gating process */
+	if (psys->power_gating != PSYS_POWER_NORMAL &&
+	    has_pending_kcmd(psys))
+		need_trigger = ipu_psys_scheduler_exit_power_gating(psys);
+
+	/* Handle kcmd and related ppg switch */
+	if (psys->power_gating == PSYS_POWER_NORMAL) {
+		ipu_psys_scheduler_kcmd_set(psys);
+		wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys);
+		need_trigger |= ipu_psys_scheduler_ppg_start(psys);
+		need_trigger |= ipu_psys_scheduler_ppg_enqueue_bufset(psys);
+	}
+	if (!(need_trigger || wait_fw_finish)) {
+		/* Nothing to do, enter power gating */
+		need_trigger = ipu_psys_scheduler_enter_power_gating(psys);
+		if (psys->power_gating == PSYS_POWER_GATING)
+			wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys);
+	}
+
+	if (need_trigger && !wait_fw_finish) {
+		dev_dbg(&psys->adev->dev, "scheduler: wake up\n");
+		atomic_set(&psys->wakeup_count, 1);
+		wake_up_interruptible(&psys->sched_cmd_wq);
+	}
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-platform-resources.h b/drivers/media/pci/intel/ipu6/ipu6-platform-resources.h
new file mode 100644
index 000000000000..b8725e4551cf
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-platform-resources.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - 2020 Intel Corporation */
+
+#ifndef IPU6_PLATFORM_RESOURCES_H
+#define IPU6_PLATFORM_RESOURCES_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include "ipu-platform-resources.h"
+
+#define IPU6_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT		0
+
+enum {
+	IPU6_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0,
+	IPU6_FW_PSYS_CMD_QUEUE_DEVICE_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG6_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG7_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG8_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG9_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG10_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG11_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG12_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG13_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG14_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG15_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG16_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG17_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG18_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG19_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG20_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG21_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG22_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG23_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG24_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG25_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG26_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG27_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG28_COMMAND_ID,
+	IPU6_FW_PSYS_CMD_QUEUE_PPG29_COMMAND_ID,
+	IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID
+};
+
+enum {
+	IPU6_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0,
+	IPU6_FW_PSYS_TRANSFER_VMEM1_TYPE_ID,
+	IPU6_FW_PSYS_LB_VMEM_TYPE_ID,
+	IPU6_FW_PSYS_DMEM_TYPE_ID,
+	IPU6_FW_PSYS_VMEM_TYPE_ID,
+	IPU6_FW_PSYS_BAMEM_TYPE_ID,
+	IPU6_FW_PSYS_PMEM_TYPE_ID,
+	IPU6_FW_PSYS_N_MEM_TYPE_ID
+};
+
+enum ipu6_mem_id {
+	IPU6_FW_PSYS_VMEM0_ID = 0,	/* ISP0 VMEM */
+	IPU6_FW_PSYS_TRANSFER_VMEM0_ID,	/* TRANSFER VMEM 0 */
+	IPU6_FW_PSYS_TRANSFER_VMEM1_ID,	/* TRANSFER VMEM 1 */
+	IPU6_FW_PSYS_LB_VMEM_ID,	/* LB VMEM */
+	IPU6_FW_PSYS_BAMEM0_ID,	/* ISP0 BAMEM */
+	IPU6_FW_PSYS_DMEM0_ID,	/* SPC0 Dmem */
+	IPU6_FW_PSYS_DMEM1_ID,	/* SPP0 Dmem */
+	IPU6_FW_PSYS_DMEM2_ID,	/* SPP1 Dmem */
+	IPU6_FW_PSYS_DMEM3_ID,	/* ISP0 Dmem */
+	IPU6_FW_PSYS_PMEM0_ID,	/* ISP0 PMEM */
+	IPU6_FW_PSYS_N_MEM_ID
+};
+
+enum {
+	IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0,
+	IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID,
+	IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID,
+	IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_ID,
+	IPU6_FW_PSYS_DEV_CHN_DMA_ISA_ID,
+	IPU6_FW_PSYS_N_DEV_CHN_ID
+};
+
+enum {
+	IPU6_FW_PSYS_SP_CTRL_TYPE_ID = 0,
+	IPU6_FW_PSYS_SP_SERVER_TYPE_ID,
+	IPU6_FW_PSYS_VP_TYPE_ID,
+	IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+	IPU6_FW_PSYS_GDC_TYPE_ID,
+	IPU6_FW_PSYS_TNR_TYPE_ID,
+	IPU6_FW_PSYS_N_CELL_TYPE_ID
+};
+
+enum {
+	IPU6_FW_PSYS_SP0_ID = 0,
+	IPU6_FW_PSYS_VP0_ID,
+	IPU6_FW_PSYS_PSA_ACC_BNLM_ID,
+	IPU6_FW_PSYS_PSA_ACC_DM_ID,
+	IPU6_FW_PSYS_PSA_ACC_ACM_ID,
+	IPU6_FW_PSYS_PSA_ACC_GTC_YUV1_ID,
+	IPU6_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID,
+	IPU6_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID,
+	IPU6_FW_PSYS_BB_ACC_OFS_PIN_PP_ID,
+	IPU6_FW_PSYS_PSA_ACC_GAMMASTAR_ID,
+	IPU6_FW_PSYS_PSA_ACC_GLTM_ID,
+	IPU6_FW_PSYS_PSA_ACC_XNR_ID,
+	IPU6_FW_PSYS_PSA_VCSC_ID,	/* VCSC */
+	IPU6_FW_PSYS_ISA_ICA_ID,
+	IPU6_FW_PSYS_ISA_LSC_ID,
+	IPU6_FW_PSYS_ISA_DPC_ID,
+	IPU6_FW_PSYS_ISA_SIS_A_ID,
+	IPU6_FW_PSYS_ISA_SIS_B_ID,
+	IPU6_FW_PSYS_ISA_B2B_ID,
+	IPU6_FW_PSYS_ISA_B2R_R2I_SIE_ID,
+	IPU6_FW_PSYS_ISA_R2I_DS_A_ID,
+	IPU6_FW_PSYS_ISA_R2I_DS_B_ID,
+	IPU6_FW_PSYS_ISA_AWB_ID,
+	IPU6_FW_PSYS_ISA_AE_ID,
+	IPU6_FW_PSYS_ISA_AF_ID,
+	IPU6_FW_PSYS_ISA_DOL_ID,
+	IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID,
+	IPU6_FW_PSYS_ISA_X2B_MD_ID,
+	IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID,
+	IPU6_FW_PSYS_ISA_PAF_ID,
+	IPU6_FW_PSYS_BB_ACC_GDC0_ID,
+	IPU6_FW_PSYS_BB_ACC_TNR_ID,
+	IPU6_FW_PSYS_N_CELL_ID
+};
+
+enum {
+	IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID = 0,
+	IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID,
+	IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID,
+	IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID,
+	IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID,
+	IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID,
+};
+
+/* Excluding PMEM */
+#define IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID	(IPU6_FW_PSYS_N_MEM_TYPE_ID - 1)
+#define IPU6_FW_PSYS_N_DEV_DFM_ID	\
+	(IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID + 1)
+
+#define IPU6_FW_PSYS_VMEM0_MAX_SIZE		0x0800
+/* Transfer VMEM0 words, ref HAS Transfer */
+#define IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE	0x0800
+/* Transfer VMEM1 words, ref HAS Transfer */
+#define IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE	0x0800
+#define IPU6_FW_PSYS_LB_VMEM_MAX_SIZE		0x0400	/* LB VMEM words */
+#define IPU6_FW_PSYS_BAMEM0_MAX_SIZE		0x0800
+#define IPU6_FW_PSYS_DMEM0_MAX_SIZE		0x4000
+#define IPU6_FW_PSYS_DMEM1_MAX_SIZE		0x1000
+#define IPU6_FW_PSYS_DMEM2_MAX_SIZE		0x1000
+#define IPU6_FW_PSYS_DMEM3_MAX_SIZE		0x1000
+#define IPU6_FW_PSYS_PMEM0_MAX_SIZE		0x0500
+
+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE		30
+#define IPU6_FW_PSYS_DEV_CHN_GDC_MAX_SIZE		0
+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE	30
+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE	37
+#define IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE	8
+#define IPU6_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE		0
+#define IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE		2
+
+#define IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE		32
+#define IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE		32
+#define IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE		32
+#define IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE		32
+#define IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE		32
+#define IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE		32
+
+struct ipu6_fw_psys_program_manifest_ext {
+	u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID];
+	u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID];
+	u16 ext_mem_size[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
+	u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
+	u16 dev_chn_size[IPU6_FW_PSYS_N_DEV_CHN_ID];
+	u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID];
+	u8 is_dfm_relocatable[IPU6_FW_PSYS_N_DEV_DFM_ID];
+	u8 dec_resources_input[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES];
+	u8 dec_resources_input_terminal[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES];
+	u8 dec_resources_output[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES];
+	u8 dec_resources_output_terminal[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES];
+	u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT];
+};
+
+struct ipu6_fw_psys_process_ext {
+	u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID];
+	u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID];
+	u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
+	u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID];
+	u8 ext_mem_id[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
+	u8 padding[IPU6_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT];
+};
+
+#endif /* IPU6_PLATFORM_RESOURCES_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-ppg.c b/drivers/media/pci/intel/ipu6/ipu6-ppg.c
new file mode 100644
index 000000000000..4b8db1826eed
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-ppg.c
@@ -0,0 +1,553 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/cacheflush.h>
+
+#include "ipu6-ppg.h"
+
+static bool enable_suspend_resume;
+module_param(enable_suspend_resume, bool, 0664);
+MODULE_PARM_DESC(enable_suspend_resume, "enable fw ppg suspend/resume api");
+
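+/*
+ * Return the first kcmd in the given state on the PPG's new-command list,
+ * or NULL if there is none.
+ */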
+static struct ipu_psys_kcmd *
+ipu_psys_ppg_get_kcmd(struct ipu_psys_ppg *kppg, enum ipu_psys_cmd_state state)
+{
+	struct ipu_psys_kcmd *kcmd;
+
+	if (list_empty(&kppg->kcmds_new_list))
+		return NULL;
+
+	list_for_each_entry(kcmd, &kppg->kcmds_new_list, list) {
+		if (kcmd->state == state)
+			return kcmd;
+	}
+
+	return NULL;
+}
+
+struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg)
+{
+	struct ipu_psys_kcmd *kcmd;
+
+	if (list_empty(&kppg->kcmds_processing_list))
+		return NULL;
+
+	list_for_each_entry(kcmd, &kppg->kcmds_processing_list, list) {
+		if (kcmd->state == KCMD_STATE_PPG_STOP)
+			return kcmd;
+	}
+
+	return NULL;
+}
+
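+/*
+ * Reuse an idle buffer set from the scheduler's pool if one is large
+ * enough; otherwise allocate a new DMA buffer and add it to the pool.
+ */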
+static struct ipu_psys_buffer_set *
+__get_buf_set(struct ipu_psys_fh *fh, size_t buf_set_size)
+{
+	struct ipu_psys_buffer_set *kbuf_set;
+	struct ipu_psys_scheduler *sched = &fh->sched;
+
+	mutex_lock(&sched->bs_mutex);
+	list_for_each_entry(kbuf_set, &sched->buf_sets, list) {
+		if (!kbuf_set->buf_set_size &&
+		    kbuf_set->size >= buf_set_size) {
+			kbuf_set->buf_set_size = buf_set_size;
+			mutex_unlock(&sched->bs_mutex);
+			return kbuf_set;
+		}
+	}
+
+	mutex_unlock(&sched->bs_mutex);
+	/* no suitable buffer available, allocate new one */
+	kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL);
+	if (!kbuf_set)
+		return NULL;
+
+	kbuf_set->kaddr = dma_alloc_attrs(&fh->psys->adev->dev,
+					  buf_set_size, &kbuf_set->dma_addr,
+					  GFP_KERNEL, 0);
+	if (!kbuf_set->kaddr) {
+		kfree(kbuf_set);
+		return NULL;
+	}
+
+	kbuf_set->buf_set_size = buf_set_size;
+	kbuf_set->size = buf_set_size;
+	mutex_lock(&sched->bs_mutex);
+	list_add(&kbuf_set->list, &sched->buf_sets);
+	mutex_unlock(&sched->bs_mutex);
+
+	return kbuf_set;
+}
+
+static struct ipu_psys_buffer_set *
+ipu_psys_create_buffer_set(struct ipu_psys_kcmd *kcmd,
+			   struct ipu_psys_ppg *kppg)
+{
+	struct ipu_psys_fh *fh = kcmd->fh;
+	struct ipu_psys *psys = fh->psys;
+	struct ipu_psys_buffer_set *kbuf_set;
+	size_t buf_set_size;
+	u32 *keb;
+
+	buf_set_size = ipu_fw_psys_ppg_get_buffer_set_size(kcmd);
+
+	kbuf_set = __get_buf_set(fh, buf_set_size);
+	if (!kbuf_set)
+		goto error;
+
+	kbuf_set->buf_set = ipu_fw_psys_ppg_create_buffer_set(kcmd,
+							      kbuf_set->kaddr,
+							      0);
+
+	ipu_fw_psys_ppg_buffer_set_vaddress(kbuf_set->buf_set,
+					    kbuf_set->dma_addr);
+	keb = kcmd->kernel_enable_bitmap;
+	ipu_fw_psys_ppg_buffer_set_set_kernel_enable_bitmap(kbuf_set->buf_set,
+							    keb);
+
+	return kbuf_set;
+error:
+	dev_err(&psys->adev->dev, "failed to create buffer set\n");
+	return NULL;
+}
+
+int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd,
+			    struct ipu_psys_ppg *kppg)
+{
+	struct ipu_psys *psys = kppg->fh->psys;
+	struct ipu_psys_buffer_set *kbuf_set;
+	unsigned int i;
+	int ret;
+
+	kbuf_set = ipu_psys_create_buffer_set(kcmd, kppg);
+	if (!kbuf_set) {
+		ret = -EINVAL;
+		goto error;
+	}
+	kcmd->kbuf_set = kbuf_set;
+	kbuf_set->kcmd = kcmd;
+
+	for (i = 0; i < kcmd->nbuffers; i++) {
+		struct ipu_fw_psys_terminal *terminal;
+		u32 buffer;
+
+		terminal = ipu_fw_psys_pg_get_terminal(kcmd, i);
+		if (!terminal)
+			continue;
+
+		buffer = (u32)kcmd->kbufs[i]->dma_addr +
+				    kcmd->buffers[i].data_offset;
+
+		ret = ipu_fw_psys_ppg_set_buffer_set(kcmd, terminal, i, buffer);
+		if (ret) {
+			dev_err(&psys->adev->dev, "Unable to set bufset\n");
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	dev_err(&psys->adev->dev, "failed to get buffer set\n");
+	return ret;
+}
+
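+/*
+ * Advance the PPG state machine on a firmware completion: STOPPING ->
+ * STOPPED (releasing resources and the command queue), SUSPENDING ->
+ * SUSPENDED (releasing resources), STARTED/RESUMED -> RUNNING.
+ */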
+void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg)
+{
+	u8 queue_id;
+	int old_ppg_state;
+
+	if (!psys || !kppg)
+		return;
+
+	mutex_lock(&kppg->mutex);
+	old_ppg_state = kppg->state;
+	if (kppg->state == PPG_STATE_STOPPING) {
+		unsigned long flags;
+		struct ipu_psys_kcmd tmp_kcmd = {
+			.kpg = kppg->kpg,
+		};
+
+		kppg->state = PPG_STATE_STOPPED;
+		ipu_psys_free_resources(&kppg->kpg->resource_alloc,
+					&psys->resource_pool_running);
+		queue_id = ipu_fw_psys_ppg_get_base_queue_id(&tmp_kcmd);
+		ipu_psys_free_cmd_queue_resource(&psys->resource_pool_running,
+						 queue_id);
+		spin_lock_irqsave(&psys->pgs_lock, flags);
+		kppg->kpg->pg_size = 0;
+		spin_unlock_irqrestore(&psys->pgs_lock, flags);
+		pm_runtime_put(&psys->adev->dev);
+	} else {
+		if (kppg->state == PPG_STATE_SUSPENDING) {
+			kppg->state = PPG_STATE_SUSPENDED;
+			ipu_psys_free_resources(&kppg->kpg->resource_alloc,
+						&psys->resource_pool_running);
+		} else if (kppg->state == PPG_STATE_STARTED ||
+			   kppg->state == PPG_STATE_RESUMED) {
+			kppg->state = PPG_STATE_RUNNING;
+		}
+
+		/*
+		 * Kick the local scheduler thread for the FW callback and to
+		 * check whether power gating needs to be entered.
+		 */
+		atomic_set(&psys->wakeup_count, 1);
+		wake_up_interruptible(&psys->sched_cmd_wq);
+	}
+	if (old_ppg_state != kppg->state)
+		dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+			__func__, kppg, old_ppg_state, kppg->state);
+
+	mutex_unlock(&kppg->mutex);
+}
+
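+/*
+ * Start a PPG: set up its terminals, submit the process group, allocate
+ * resources, power on psys and complete the start kcmd.
+ */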
+int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg)
+{
+	struct ipu_psys *psys = kppg->fh->psys;
+	struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg,
+						KCMD_STATE_PPG_START);
+	unsigned int i;
+	int ret;
+
+	if (!kcmd) {
+		dev_err(&psys->adev->dev, "failed to find start kcmd!\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(&psys->adev->dev, "start ppg id %d, addr 0x%p\n",
+		ipu_fw_psys_pg_get_id(kcmd), kppg);
+
+	kppg->state = PPG_STATE_STARTING;
+	for (i = 0; i < kcmd->nbuffers; i++) {
+		struct ipu_fw_psys_terminal *terminal;
+
+		terminal = ipu_fw_psys_pg_get_terminal(kcmd, i);
+		if (!terminal)
+			continue;
+
+		ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, 0,
+					       kcmd->buffers[i].len);
+		if (ret) {
+			dev_err(&psys->adev->dev, "Unable to set terminal\n");
+			goto error;
+		}
+	}
+
+	ret = ipu_fw_psys_pg_submit(kcmd);
+	if (ret) {
+		dev_err(&psys->adev->dev, "failed to submit kcmd!\n");
+		goto error;
+	}
+
+	ret = ipu_psys_allocate_resources(&psys->adev->dev,
+					  kcmd->kpg->pg,
+					  kcmd->pg_manifest,
+					  &kcmd->kpg->resource_alloc,
+					  &psys->resource_pool_running);
+	if (ret) {
+		dev_err(&psys->adev->dev, "alloc resources failed!\n");
+		return ret;
+	}
+
+	ret = pm_runtime_get_sync(&psys->adev->dev);
+	if (ret < 0) {
+		dev_err(&psys->adev->dev, "failed to power on psys\n");
+		pm_runtime_put_noidle(&psys->adev->dev);
+		return ret;
+	}
+
+	ret = ipu_psys_kcmd_start(psys, kcmd);
+	if (ret) {
+		list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);
+		ipu_psys_kcmd_complete(psys, kcmd, -EIO);
+		return ret;
+	}
+
+	dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+		__func__, kppg, kppg->state, PPG_STATE_STARTED);
+	kppg->state = PPG_STATE_STARTED;
+	list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);
+	ipu_psys_kcmd_complete(psys, kcmd, 0);
+
+	return 0;
+
+error:
+	dev_err(&psys->adev->dev, "failed to start ppg\n");
+	return ret;
+}
+
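+/*
+ * Resume a suspended PPG. With the firmware suspend/resume API the
+ * resources are reallocated and a resume command is issued; otherwise
+ * the process group is resubmitted and restarted from scratch.
+ */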
+int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg)
+{
+	struct ipu_psys *psys = kppg->fh->psys;
+	struct ipu_psys_kcmd tmp_kcmd = {
+		.kpg = kppg->kpg,
+		.fh = kppg->fh,
+	};
+	int ret;
+
+	dev_dbg(&psys->adev->dev, "resume ppg id %d, addr 0x%p\n",
+		ipu_fw_psys_pg_get_id(&tmp_kcmd), kppg);
+
+	kppg->state = PPG_STATE_RESUMING;
+	if (enable_suspend_resume) {
+		ret = ipu_psys_allocate_resources(&psys->adev->dev,
+						  kppg->kpg->pg,
+						  kppg->manifest,
+						  &kppg->kpg->resource_alloc,
+						  &psys->resource_pool_running);
+		if (ret) {
+			dev_err(&psys->adev->dev, "failed to allocate res\n");
+			return -EIO;
+		}
+
+		ret = ipu_fw_psys_ppg_resume(&tmp_kcmd);
+		if (ret) {
+			dev_err(&psys->adev->dev, "failed to resume ppg\n");
+			return -EIO;
+		}
+	} else {
+		kppg->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_READY;
+		ret = ipu_fw_psys_pg_submit(&tmp_kcmd);
+		if (ret) {
+			dev_err(&psys->adev->dev, "failed to submit kcmd!\n");
+			return ret;
+		}
+
+		ret = ipu_psys_allocate_resources(&psys->adev->dev,
+						  kppg->kpg->pg,
+						  kppg->manifest,
+						  &kppg->kpg->resource_alloc,
+						  &psys->resource_pool_running);
+		if (ret) {
+			dev_err(&psys->adev->dev, "failed to allocate res\n");
+			return ret;
+		}
+
+		ret = ipu_psys_kcmd_start(psys, &tmp_kcmd);
+		if (ret) {
+			dev_err(&psys->adev->dev, "failed to start kcmd!\n");
+			return ret;
+		}
+	}
+	dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+		__func__, kppg, kppg->state, PPG_STATE_RESUMED);
+	kppg->state = PPG_STATE_RESUMED;
+
+	return ret;
+}
+
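+/*
+ * Stop a PPG. A suspended PPG is first resumed (firmware suspend/resume
+ * API) or, for a user-queued stop kcmd, completed directly; otherwise
+ * the process group is aborted.
+ */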
+int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg)
+{
+	struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg,
+							   KCMD_STATE_PPG_STOP);
+	struct ipu_psys *psys = kppg->fh->psys;
+	struct ipu_psys_kcmd kcmd_temp;
+	int ppg_id, ret = 0;
+
+	if (kcmd) {
+		list_move_tail(&kcmd->list, &kppg->kcmds_processing_list);
+	} else {
+		dev_dbg(&psys->adev->dev, "Exceptional stop happened!\n");
+		kcmd_temp.kpg = kppg->kpg;
+		kcmd_temp.fh = kppg->fh;
+		kcmd = &kcmd_temp;
+		/* delete kppg in stop list to avoid this ppg resuming */
+		ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST);
+	}
+
+	ppg_id = ipu_fw_psys_pg_get_id(kcmd);
+	dev_dbg(&psys->adev->dev, "stop ppg(%d, addr 0x%p)\n", ppg_id, kppg);
+
+	if (kppg->state & PPG_STATE_SUSPENDED) {
+		if (enable_suspend_resume) {
+			dev_dbg(&psys->adev->dev, "need resume before stop!\n");
+			kcmd_temp.kpg = kppg->kpg;
+			kcmd_temp.fh = kppg->fh;
+			ret = ipu_fw_psys_ppg_resume(&kcmd_temp);
+			if (ret)
+				dev_err(&psys->adev->dev,
+					"ppg(%d) failed to resume\n", ppg_id);
+		} else if (kcmd != &kcmd_temp) {
+			unsigned long flags;
+
+			ipu_psys_free_cmd_queue_resource(
+				&psys->resource_pool_running,
+				ipu_fw_psys_ppg_get_base_queue_id(kcmd));
+			list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);
+			ipu_psys_kcmd_complete(psys, kcmd, 0);
+			spin_lock_irqsave(&psys->pgs_lock, flags);
+			kppg->kpg->pg_size = 0;
+			spin_unlock_irqrestore(&psys->pgs_lock, flags);
+			dev_dbg(&psys->adev->dev,
+				"s_change:%s %p %d -> %d\n", __func__,
+				kppg, kppg->state, PPG_STATE_STOPPED);
+			pm_runtime_put(&psys->adev->dev);
+			kppg->state = PPG_STATE_STOPPED;
+			return 0;
+		}
+	}
+	dev_dbg(&psys->adev->dev, "s_change:%s %p %d -> %d\n",
+		__func__, kppg, kppg->state, PPG_STATE_STOPPING);
+	kppg->state = PPG_STATE_STOPPING;
+	ret = ipu_fw_psys_pg_abort(kcmd);
+	if (ret)
+		dev_err(&psys->adev->dev, "ppg(%d) failed to abort\n", ppg_id);
+
+	return ret;
+}
+
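+/* Suspend a PPG, via the firmware suspend API or by aborting the PG. */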
+int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg)
+{
+	struct ipu_psys *psys = kppg->fh->psys;
+	struct ipu_psys_kcmd tmp_kcmd = {
+		.kpg = kppg->kpg,
+		.fh = kppg->fh,
+	};
+	int ppg_id = ipu_fw_psys_pg_get_id(&tmp_kcmd);
+	int ret = 0;
+
+	dev_dbg(&psys->adev->dev, "suspend ppg(%d, addr 0x%p)\n", ppg_id, kppg);
+
+	dev_dbg(&psys->adev->dev, "s_change:%s %p %d -> %d\n",
+		__func__, kppg, kppg->state, PPG_STATE_SUSPENDING);
+	kppg->state = PPG_STATE_SUSPENDING;
+	if (enable_suspend_resume)
+		ret = ipu_fw_psys_ppg_suspend(&tmp_kcmd);
+	else
+		ret = ipu_fw_psys_pg_abort(&tmp_kcmd);
+	if (ret)
+		dev_err(&psys->adev->dev, "failed to %s ppg(%d)\n",
+			enable_suspend_resume ? "suspend" : "stop", ppg_id);
+
+	return ret;
+}
+
+static bool ipu_psys_ppg_is_bufset_existing(struct ipu_psys_ppg *kppg)
+{
+	return !list_empty(&kppg->kcmds_new_list);
+}
+
+/*
+ * ipu_psys_ppg_enqueue_bufsets - enqueue buffer sets to firmware
+ * If the PPG is in the suspended state, this function returns true so that
+ * the caller reschedules and the resume command is issued before the buffer
+ * sets are enqueued.
+ */
+bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg)
+{
+	struct ipu_psys_kcmd *kcmd, *kcmd0;
+	struct ipu_psys *psys = kppg->fh->psys;
+	bool need_resume = false;
+
+	mutex_lock(&kppg->mutex);
+
+	if (kppg->state & (PPG_STATE_STARTED | PPG_STATE_RESUMED |
+			   PPG_STATE_RUNNING)) {
+		if (ipu_psys_ppg_is_bufset_existing(kppg)) {
+			list_for_each_entry_safe(kcmd, kcmd0,
+						 &kppg->kcmds_new_list, list) {
+				int ret;
+
+				if (kcmd->state != KCMD_STATE_PPG_ENQUEUE) {
+					need_resume = true;
+					break;
+				}
+
+				ret = ipu_fw_psys_ppg_enqueue_bufs(kcmd);
+				if (ret) {
+					dev_err(&psys->adev->dev,
+						"kppg 0x%p failed to queue bufset (%d)",
+						kppg, ret);
+					break;
+				}
+				list_move_tail(&kcmd->list,
+					       &kppg->kcmds_processing_list);
+				dev_dbg(&psys->adev->dev,
+					"kppg %d %p queue kcmd 0x%p fh 0x%p\n",
+					ipu_fw_psys_pg_get_id(kcmd),
+					kppg, kcmd, kcmd->fh);
+			}
+		}
+	}
+
+	mutex_unlock(&kppg->mutex);
+	return need_resume;
+}
+
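+/*
+ * Drop one runtime PM reference for each PPG that is not stopped so
+ * that the device may autosuspend into the power-gated state.
+ */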
+void ipu_psys_enter_power_gating(struct ipu_psys *psys)
+{
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_fh *fh;
+	int ret = 0;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		mutex_lock(&fh->mutex);
+		sched = &fh->sched;
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			mutex_lock(&kppg->mutex);
+			/* kppg has already been powered down */
+			if (kppg->state == PPG_STATE_STOPPED) {
+				mutex_unlock(&kppg->mutex);
+				continue;
+			}
+
+			ret = pm_runtime_put_autosuspend(&psys->adev->dev);
+			if (ret < 0) {
+				dev_err(&psys->adev->dev,
+					"failed to enter power gating\n");
+				pm_runtime_get_sync(&psys->adev->dev);
+			}
+			mutex_unlock(&kppg->mutex);
+		}
+		mutex_unlock(&fh->mutex);
+	}
+}
+
+void ipu_psys_exit_power_gating(struct ipu_psys *psys)
+{
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_fh *fh;
+	int ret = 0;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		mutex_lock(&fh->mutex);
+		sched = &fh->sched;
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			mutex_lock(&kppg->mutex);
+			/* kppg is not started yet, so it was not power gated */
+			if (kppg->state == PPG_STATE_START ||
+			    kppg->state == PPG_STATE_STARTING) {
+				mutex_unlock(&kppg->mutex);
+				continue;
+			}
+
+			ret = pm_runtime_get_sync(&psys->adev->dev);
+			if (ret < 0) {
+				dev_err(&psys->adev->dev,
+					"failed to power gating\n");
+				pm_runtime_put_noidle(&psys->adev->dev);
+			}
+			mutex_unlock(&kppg->mutex);
+		}
+		mutex_unlock(&fh->mutex);
+	}
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-ppg.h b/drivers/media/pci/intel/ipu6/ipu6-ppg.h
new file mode 100644
index 000000000000..9ec1baf78631
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-ppg.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#ifndef IPU6_PPG_H
+#define IPU6_PPG_H
+
+#include "ipu-psys.h"
+/* start from 2 so the values cannot be mistaken for true or false */
+enum SCHED_LIST {
+	SCHED_START_LIST = 2,
+	SCHED_STOP_LIST
+};
+
+enum ipu_psys_power_gating_state {
+	PSYS_POWER_NORMAL = 0,
+	PSYS_POWER_GATING,
+	PSYS_POWER_GATED
+};
+
+int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd,
+			    struct ipu_psys_ppg *kppg);
+struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg);
+void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg,
+				    enum SCHED_LIST type);
+void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg,
+				 enum SCHED_LIST type);
+int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg);
+int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg);
+int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg);
+int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg);
+void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg);
+bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg);
+void ipu_psys_enter_power_gating(struct ipu_psys *psys);
+void ipu_psys_exit_power_gating(struct ipu_psys *psys);
+
+#endif /* IPU6_PPG_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-psys-gpc.c b/drivers/media/pci/intel/ipu6/ipu6-psys-gpc.c
new file mode 100644
index 000000000000..4d8ef61d8449
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-psys-gpc.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "ipu-psys.h"
+#include "ipu-platform-regs.h"
+
+/*
+ * GPC (General Performance Counters)
+ */
+#define IPU_PSYS_GPC_NUM 16
+
+#ifndef CONFIG_PM
+#define pm_runtime_get_sync(d)			0
+#define pm_runtime_put(d)			0
+#endif
+
+struct ipu_psys_gpc {
+	bool enable;
+	unsigned int route;
+	unsigned int source;
+	unsigned int sense;
+	unsigned int gpcindex;
+	void *prit;
+};
+
+struct ipu_psys_gpcs {
+	bool gpc_enable;
+	struct ipu_psys_gpc gpc[IPU_PSYS_GPC_NUM];
+	void *prit;
+};
+
+static int ipu6_psys_gpc_global_enable_get(void *data, u64 *val)
+{
+	struct ipu_psys_gpcs *psys_gpcs = data;
+	struct ipu_psys *psys = psys_gpcs->prit;
+
+	mutex_lock(&psys->mutex);
+
+	*val = psys_gpcs->gpc_enable;
+
+	mutex_unlock(&psys->mutex);
+	return 0;
+}
+
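+/*
+ * Global GPC enable: writing 0 stops and resets all counters, writing 1
+ * programs the per-counter route/source/sense selections and starts
+ * counting.
+ */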
+static int ipu6_psys_gpc_global_enable_set(void *data, u64 val)
+{
+	struct ipu_psys_gpcs *psys_gpcs = data;
+	struct ipu_psys *psys = psys_gpcs->prit;
+	void __iomem *base;
+	int idx, res;
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	if (!psys || !psys->pdata || !psys->pdata->base)
+		return -EINVAL;
+
+	mutex_lock(&psys->mutex);
+
+	base = psys->pdata->base + IPU_GPC_BASE;
+
+	res = pm_runtime_get_sync(&psys->adev->dev);
+	if (res < 0) {
+		pm_runtime_put(&psys->adev->dev);
+		mutex_unlock(&psys->mutex);
+		return res;
+	}
+
+	if (val == 0) {
+		writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST);
+		writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE);
+		writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET);
+		psys_gpcs->gpc_enable = false;
+		for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) {
+			psys_gpcs->gpc[idx].enable = 0;
+			psys_gpcs->gpc[idx].sense = 0;
+			psys_gpcs->gpc[idx].route = 0;
+			psys_gpcs->gpc[idx].source = 0;
+		}
+		pm_runtime_mark_last_busy(&psys->adev->dev);
+		pm_runtime_put_autosuspend(&psys->adev->dev);
+	} else {
+		/*
+		 * Set the GPC registers and start all GPCs here.
+		 * Reset the free-running local timer.
+		 */
+		writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST);
+		writel(0x1, base + IPU_GPREG_TRACE_TIMER_RST);
+
+		for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) {
+			/* Enable */
+			writel(psys_gpcs->gpc[idx].enable,
+			       base + IPU_CDC_MMU_GPC_ENABLE0 + 4 * idx);
+			/* Setting (route/source/sense) */
+			writel((psys_gpcs->gpc[idx].sense
+					<< IPU_GPC_SENSE_OFFSET)
+				+ (psys_gpcs->gpc[idx].route
+					<< IPU_GPC_ROUTE_OFFSET)
+				+ (psys_gpcs->gpc[idx].source
+					<< IPU_GPC_SOURCE_OFFSET),
+				base + IPU_CDC_MMU_GPC_CNT_SEL0 + 4 * idx);
+		}
+
+		/* Soft reset and Overall Enable. */
+		writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE);
+		writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET);
+		writel(0x1, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE);
+
+		psys_gpcs->gpc_enable = true;
+	}
+
+	mutex_unlock(&psys->mutex);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_globe_enable_fops,
+			ipu6_psys_gpc_global_enable_get,
+			ipu6_psys_gpc_global_enable_set, "%llu\n");
+
+static int ipu6_psys_gpc_count_get(void *data, u64 *val)
+{
+	struct ipu_psys_gpc *psys_gpc = data;
+	struct ipu_psys *psys = psys_gpc->prit;
+	void __iomem *base;
+	int res;
+
+	if (!psys || !psys->pdata || !psys->pdata->base)
+		return -EINVAL;
+
+	mutex_lock(&psys->mutex);
+
+	base = psys->pdata->base + IPU_GPC_BASE;
+
+	res = pm_runtime_get_sync(&psys->adev->dev);
+	if (res < 0) {
+		pm_runtime_put(&psys->adev->dev);
+		mutex_unlock(&psys->mutex);
+		return res;
+	}
+
+	*val = readl(base + IPU_CDC_MMU_GPC_VALUE0 + 4 * psys_gpc->gpcindex);
+
+	mutex_unlock(&psys->mutex);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_count_fops,
+			ipu6_psys_gpc_count_get,
+			NULL, "%llu\n");
+
+int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys)
+{
+	struct dentry *gpcdir;
+	struct dentry *dir;
+	struct dentry *file;
+	int idx;
+	char gpcname[10];
+	struct ipu_psys_gpcs *psys_gpcs;
+
+	psys_gpcs = devm_kzalloc(&psys->dev, sizeof(*psys_gpcs), GFP_KERNEL);
+	if (!psys_gpcs)
+		return -ENOMEM;
+
+	gpcdir = debugfs_create_dir("gpc", psys->debugfsdir);
+	if (IS_ERR(gpcdir))
+		return -ENOMEM;
+
+	psys_gpcs->prit = psys;
+	file = debugfs_create_file("enable", 0600, gpcdir, psys_gpcs,
+				   &psys_gpc_globe_enable_fops);
+	if (IS_ERR(file))
+		goto err;
+
+	for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) {
+		sprintf(gpcname, "gpc%d", idx);
+		dir = debugfs_create_dir(gpcname, gpcdir);
+		if (IS_ERR(dir))
+			goto err;
+
+		file = debugfs_create_bool("enable", 0600, dir,
+					   &psys_gpcs->gpc[idx].enable);
+		if (IS_ERR(file))
+			goto err;
+
+		file = debugfs_create_u32("source", 0600, dir,
+					  &psys_gpcs->gpc[idx].source);
+		if (IS_ERR(file))
+			goto err;
+
+		file = debugfs_create_u32("route", 0600, dir,
+					  &psys_gpcs->gpc[idx].route);
+		if (IS_ERR(file))
+			goto err;
+
+		file = debugfs_create_u32("sense", 0600, dir,
+					  &psys_gpcs->gpc[idx].sense);
+		if (IS_ERR(file))
+			goto err;
+
+		psys_gpcs->gpc[idx].gpcindex = idx;
+		psys_gpcs->gpc[idx].prit = psys;
+		file = debugfs_create_file("count", 0400, dir,
+					   &psys_gpcs->gpc[idx],
+					   &psys_gpc_count_fops);
+		if (IS_ERR(file))
+			goto err;
+	}
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(gpcdir);
+	return -ENOMEM;
+}
+#endif
diff --git a/drivers/media/pci/intel/ipu6/ipu6-psys.c b/drivers/media/pci/intel/ipu6/ipu6-psys.c
new file mode 100644
index 000000000000..baf208ac1a3f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-psys.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
+#include <linux/init_task.h>
+#include <linux/version.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+
+#include "ipu.h"
+#include "ipu-psys.h"
+#include "ipu6-ppg.h"
+#include "ipu-platform-regs.h"
+#include "ipu-trace.h"
+
+#define is_ppg_kcmd(kcmd) (ipu_fw_psys_pg_get_protocol(	\
+			(struct ipu_psys_kcmd *)kcmd)	\
+			== IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG)
+
+static bool early_pg_transfer;
+module_param(early_pg_transfer, bool, 0664);
+MODULE_PARM_DESC(early_pg_transfer,
+		 "Copy PGs back to user after resource allocation");
+
+bool enable_power_gating = true;
+module_param(enable_power_gating, bool, 0664);
+MODULE_PARM_DESC(enable_power_gating, "enable power gating");
+
+struct ipu_trace_block psys_trace_blocks[] = {
+	{
+		.offset = IPU_TRACE_REG_PS_TRACE_UNIT_BASE,
+		.type = IPU_TRACE_BLOCK_TUN,
+	},
+	{
+		.offset = IPU_TRACE_REG_PS_SPC_EVQ_BASE,
+		.type = IPU_TRACE_BLOCK_TM,
+	},
+	{
+		.offset = IPU_TRACE_REG_PS_SPP0_EVQ_BASE,
+		.type = IPU_TRACE_BLOCK_TM,
+	},
+	{
+		.offset = IPU_TRACE_REG_PS_SPC_GPC_BASE,
+		.type = IPU_TRACE_BLOCK_GPC,
+	},
+	{
+		.offset = IPU_TRACE_REG_PS_SPP0_GPC_BASE,
+		.type = IPU_TRACE_BLOCK_GPC,
+	},
+	{
+		.offset = IPU_TRACE_REG_PS_MMU_GPC_BASE,
+		.type = IPU_TRACE_BLOCK_GPC,
+	},
+	{
+		.offset = IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N,
+		.type = IPU_TRACE_TIMER_RST,
+	},
+	{
+		.type = IPU_TRACE_BLOCK_END,
+	}
+};
+
+static void ipu6_set_sp_info_bits(void *base)
+{
+	int i;
+
+	writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
+	       base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
+
+	for (i = 0; i < 4; i++)
+		writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
+		       base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i));
+	for (i = 0; i < 4; i++)
+		writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
+		       base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i));
+}
+
+#define PSYS_SUBDOMAINS_STATUS_WAIT_COUNT        1000
+void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on)
+{
+	unsigned int i;
+	u32 val;
+
+	/* power domain req */
+	dev_dbg(&psys->adev->dev, "power %s psys sub-domains",
+		on ? "UP" : "DOWN");
+	if (on)
+		writel(IPU_PSYS_SUBDOMAINS_POWER_MASK,
+		       psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ);
+	else
+		writel(0x0,
+		       psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ);
+
+	i = 0;
+	do {
+		usleep_range(10, 20);
+		val = readl(psys->adev->isp->base +
+			    IPU_PSYS_SUBDOMAINS_POWER_STATUS);
+		if (!(val & BIT(31))) {
+			dev_dbg(&psys->adev->dev,
+				"PS sub-domains req done with status 0x%x",
+				val);
+			break;
+		}
+		i++;
+	} while (i < PSYS_SUBDOMAINS_STATUS_WAIT_COUNT);
+
+	if (i == PSYS_SUBDOMAINS_STATUS_WAIT_COUNT)
+		dev_warn(&psys->adev->dev, "Psys sub-domains %s req timeout!",
+			 on ? "UP" : "DOWN");
+}
+
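+/*
+ * One-time psys hardware setup: configure the access blockers (in
+ * non-secure mode), the IOMMU and SP info bits, and FW interrupt #0.
+ */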
+void ipu_psys_setup_hw(struct ipu_psys *psys)
+{
+	void __iomem *base = psys->pdata->base;
+	void __iomem *spc_regs_base =
+	    base + psys->pdata->ipdata->hw_variant.spc_offset;
+	void *psys_iommu0_ctrl;
+	u32 irqs;
+	const u8 r3 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG;
+	const u8 r4 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR;
+	const u8 r5 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL;
+
+	if (!psys->adev->isp->secure_mode) {
+		/* configure access blocker for non-secure mode */
+		writel(NCI_AB_ACCESS_MODE_RW,
+		       base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR +
+		       IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r3));
+		writel(NCI_AB_ACCESS_MODE_RW,
+		       base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR +
+		       IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r4));
+		writel(NCI_AB_ACCESS_MODE_RW,
+		       base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR +
+		       IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r5));
+	}
+	psys_iommu0_ctrl = base +
+		psys->pdata->ipdata->hw_variant.mmu_hw[0].offset +
+		IPU_MMU_INFO_OFFSET;
+	writel(IPU_INFO_REQUEST_DESTINATION_IOSF, psys_iommu0_ctrl);
+
+	ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+	ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL);
+
+	/* Enable FW interrupt #0 */
+	writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0));
+	irqs = IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0);
+	writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE);
+	writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE);
+	writel(0xffffffff, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR);
+	writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK);
+	writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE);
+}
+
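+/* Look up the PPG whose token matches the process group token of the kcmd. */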
+static struct ipu_psys_ppg *ipu_psys_identify_kppg(struct ipu_psys_kcmd *kcmd)
+{
+	struct ipu_psys_scheduler *sched = &kcmd->fh->sched;
+	struct ipu_psys_ppg *kppg;
+
+	mutex_lock(&kcmd->fh->mutex);
+	if (list_empty(&sched->ppgs))
+		goto not_found;
+
+	list_for_each_entry(kppg, &sched->ppgs, list) {
+		if (ipu_fw_psys_pg_get_token(kcmd)
+		    != kppg->token)
+			continue;
+		mutex_unlock(&kcmd->fh->mutex);
+		return kppg;
+	}
+
+not_found:
+	mutex_unlock(&kcmd->fh->mutex);
+	return NULL;
+}
+
+/*
+ * Called to free up all resources associated with a kcmd.
+ * After this the kcmd no longer exists in the driver.
+ */
+void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd)
+{
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_scheduler *sched;
+
+	if (!kcmd)
+		return;
+
+	kppg = ipu_psys_identify_kppg(kcmd);
+	sched = &kcmd->fh->sched;
+
+	if (kcmd->kbuf_set) {
+		mutex_lock(&sched->bs_mutex);
+		kcmd->kbuf_set->buf_set_size = 0;
+		mutex_unlock(&sched->bs_mutex);
+		kcmd->kbuf_set = NULL;
+	}
+
+	if (kppg) {
+		mutex_lock(&kppg->mutex);
+		if (!list_empty(&kcmd->list))
+			list_del(&kcmd->list);
+		mutex_unlock(&kppg->mutex);
+	}
+
+	kfree(kcmd->pg_manifest);
+	kfree(kcmd->kbufs);
+	kfree(kcmd->buffers);
+	kfree(kcmd);
+}
+
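+/*
+ * Copy a user-space command into a new kcmd: map the process group
+ * buffer, copy the manifest and the buffer descriptors, flush buffers
+ * for DMA and classify the kcmd as PPG start, stop or enqueue.
+ */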
+static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd,
+					       struct ipu_psys_fh *fh)
+{
+	struct ipu_psys *psys = fh->psys;
+	struct ipu_psys_kcmd *kcmd;
+	struct ipu_psys_kbuffer *kpgbuf;
+	unsigned int i;
+	int ret, prevfd, fd;
+
+	fd = prevfd = -1;
+
+	if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS)
+		return NULL;
+
+	if (!cmd->pg_manifest_size)
+		return NULL;
+
+	kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL);
+	if (!kcmd)
+		return NULL;
+
+	kcmd->state = KCMD_STATE_PPG_NEW;
+	kcmd->fh = fh;
+	INIT_LIST_HEAD(&kcmd->list);
+
+	mutex_lock(&fh->mutex);
+	fd = cmd->pg;
+	kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
+	if (!kpgbuf || !kpgbuf->sgt) {
+		dev_err(&psys->adev->dev, "%s kbuf %p with fd %d not found.\n",
+			__func__, kpgbuf, fd);
+		mutex_unlock(&fh->mutex);
+		goto error;
+	}
+
+	/* check and remap if possible */
+	ret = ipu_psys_mapbuf_with_lock(fd, fh, kpgbuf);
+	if (ret) {
+		dev_err(&psys->adev->dev, "%s remap failed\n", __func__);
+		mutex_unlock(&fh->mutex);
+		goto error;
+	}
+
+	kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
+	if (!kpgbuf || !kpgbuf->sgt) {
+		WARN(1, "kbuf not found or unmapped.\n");
+		mutex_unlock(&fh->mutex);
+		goto error;
+	}
+	mutex_unlock(&fh->mutex);
+
+	kcmd->pg_user = kpgbuf->kaddr;
+	kcmd->kpg = __get_pg_buf(psys, kpgbuf->len);
+	if (!kcmd->kpg)
+		goto error;
+
+	memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size);
+
+	kcmd->pg_manifest = kzalloc(cmd->pg_manifest_size, GFP_KERNEL);
+	if (!kcmd->pg_manifest)
+		goto error;
+
+	ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest,
+			     cmd->pg_manifest_size);
+	if (ret)
+		goto error;
+
+	kcmd->pg_manifest_size = cmd->pg_manifest_size;
+
+	kcmd->user_token = cmd->user_token;
+	kcmd->issue_id = cmd->issue_id;
+	kcmd->priority = cmd->priority;
+	if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM)
+		goto error;
+
+	/* Only the kernel enable bitmap is used. */
+	memcpy(kcmd->kernel_enable_bitmap, cmd->kernel_enable_bitmap,
+	       sizeof(cmd->kernel_enable_bitmap));
+
+	kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd);
+	kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers),
+				GFP_KERNEL);
+	if (!kcmd->buffers)
+		goto error;
+
+	kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]),
+			      GFP_KERNEL);
+	if (!kcmd->kbufs)
+		goto error;
+
+	/* should be stop cmd for ppg */
+	if (!cmd->buffers) {
+		kcmd->state = KCMD_STATE_PPG_STOP;
+		return kcmd;
+	}
+
+	if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount)
+		goto error;
+
+	ret = copy_from_user(kcmd->buffers, cmd->buffers,
+			     kcmd->nbuffers * sizeof(*kcmd->buffers));
+	if (ret)
+		goto error;
+
+	for (i = 0; i < kcmd->nbuffers; i++) {
+		struct ipu_fw_psys_terminal *terminal;
+
+		terminal = ipu_fw_psys_pg_get_terminal(kcmd, i);
+		if (!terminal)
+			continue;
+
+		if (!(kcmd->buffers[i].flags & IPU_BUFFER_FLAG_DMA_HANDLE)) {
+			kcmd->state = KCMD_STATE_PPG_START;
+			continue;
+		}
+		if (kcmd->state == KCMD_STATE_PPG_START) {
+			dev_err(&psys->adev->dev,
+				"err: all buffer.flags&DMA_HANDLE must 0\n");
+			goto error;
+		}
+
+		mutex_lock(&fh->mutex);
+		fd = kcmd->buffers[i].base.fd;
+		kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
+		if (!kpgbuf || !kpgbuf->sgt) {
+			dev_err(&psys->adev->dev,
+				"%s kcmd->buffers[%d] %p fd %d not found.\n",
+				__func__, i, kpgbuf, fd);
+			mutex_unlock(&fh->mutex);
+			goto error;
+		}
+
+		ret = ipu_psys_mapbuf_with_lock(fd, fh, kpgbuf);
+		if (ret) {
+			dev_err(&psys->adev->dev, "%s remap failed\n",
+				__func__);
+			mutex_unlock(&fh->mutex);
+			goto error;
+		}
+
+		kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
+		if (!kpgbuf || !kpgbuf->sgt) {
+			WARN(1, "kbuf not found or unmapped.\n");
+			mutex_unlock(&fh->mutex);
+			goto error;
+		}
+		mutex_unlock(&fh->mutex);
+		kcmd->kbufs[i] = kpgbuf;
+		if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt ||
+		    kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used)
+			goto error;
+		if ((kcmd->kbufs[i]->flags &
+		     IPU_BUFFER_FLAG_NO_FLUSH) ||
+		    (kcmd->buffers[i].flags &
+		     IPU_BUFFER_FLAG_NO_FLUSH) ||
+		    prevfd == kcmd->buffers[i].base.fd)
+			continue;
+
+		prevfd = kcmd->buffers[i].base.fd;
+		dma_sync_sg_for_device(&psys->adev->dev,
+				       kcmd->kbufs[i]->sgt->sgl,
+				       kcmd->kbufs[i]->sgt->orig_nents,
+				       DMA_BIDIRECTIONAL);
+	}
+
+	if (kcmd->state != KCMD_STATE_PPG_START)
+		kcmd->state = KCMD_STATE_PPG_ENQUEUE;
+
+	return kcmd;
+error:
+	ipu_psys_kcmd_free(kcmd);
+
+	dev_dbg(&psys->adev->dev, "failed to copy cmd\n");
+
+	return NULL;
+}
+
+static struct ipu_psys_buffer_set *
+ipu_psys_lookup_kbuffer_set(struct ipu_psys *psys, u32 addr)
+{
+	struct ipu_psys_fh *fh;
+	struct ipu_psys_buffer_set *kbuf_set;
+	struct ipu_psys_scheduler *sched;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		sched = &fh->sched;
+		mutex_lock(&sched->bs_mutex);
+		list_for_each_entry(kbuf_set, &sched->buf_sets, list) {
+			if (kbuf_set->buf_set &&
+			    kbuf_set->buf_set->ipu_virtual_address == addr) {
+				mutex_unlock(&sched->bs_mutex);
+				return kbuf_set;
+			}
+		}
+		mutex_unlock(&sched->bs_mutex);
+	}
+
+	return NULL;
+}
+
+static struct ipu_psys_ppg *ipu_psys_lookup_ppg(struct ipu_psys *psys,
+						dma_addr_t pg_addr)
+{
+	struct ipu_psys_scheduler *sched;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_fh *fh;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		sched = &fh->sched;
+		mutex_lock(&fh->mutex);
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			if (pg_addr != kppg->kpg->pg_dma_addr)
+				continue;
+			mutex_unlock(&fh->mutex);
+			return kppg;
+		}
+		mutex_unlock(&fh->mutex);
+	}
+
+	return NULL;
+}
+
+/*
+ * Move the kcmd into the completed state (after it finished running or
+ * failed), fill in the event struct and notify waiters.
+ */
+void ipu_psys_kcmd_complete(struct ipu_psys *psys,
+			    struct ipu_psys_kcmd *kcmd, int error)
+{
+	struct ipu_psys_fh *fh = kcmd->fh;
+
+	kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE;
+	kcmd->ev.user_token = kcmd->user_token;
+	kcmd->ev.issue_id = kcmd->issue_id;
+	kcmd->ev.error = error;
+
+	if (kcmd->constraint.min_freq)
+		ipu_buttress_remove_psys_constraint(psys->adev->isp,
+						    &kcmd->constraint);
+
+	if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) {
+		struct ipu_psys_kbuffer *kbuf;
+
+		kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh,
+							kcmd->pg_user);
+		if (kbuf && kbuf->valid)
+			memcpy(kcmd->pg_user,
+			       kcmd->kpg->pg, kcmd->kpg->pg_size);
+		else
+			dev_dbg(&psys->adev->dev, "Skipping unmapped buffer\n");
+	}
+
+	kcmd->state = KCMD_STATE_PPG_COMPLETE;
+	wake_up_interruptible(&fh->wait);
+}
+
+/*
+ * Start a kcmd on the firmware: transfer the process group, start it and
+ * hand ownership to the firmware. On failure the caller completes the
+ * kcmd with an error.
+ */
+int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd)
+{
+	int ret;
+
+	if (psys->adev->isp->flr_done)
+		return -EIO;
+
+	if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg)
+		memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size);
+
+	ret = ipu_fw_psys_pg_start(kcmd);
+	if (ret) {
+		dev_err(&psys->adev->dev, "failed to start kcmd!\n");
+		goto error;
+	}
+
+	ipu_fw_psys_pg_dump(psys, kcmd, "run");
+
+	ret = ipu_fw_psys_pg_disown(kcmd);
+	if (ret) {
+		dev_err(&psys->adev->dev, "failed to start kcmd!\n");
+		goto error;
+	}
+
+	return 0;
+
+error:
+	dev_err(&psys->adev->dev, "failed to start process group\n");
+	return ret;
+}
+
+void ipu_psys_watchdog_work(struct work_struct *work)
+{
+	struct ipu_psys *psys = container_of(work,
+					     struct ipu_psys, watchdog_work);
+
+	dev_dbg(&psys->adev->dev, "watchdog for ppg not implemented yet!\n");
+}
+
+static void ipu_psys_watchdog(struct timer_list *t)
+{
+	struct ipu_psys_kcmd *kcmd = from_timer(kcmd, t, watchdog);
+	struct ipu_psys *psys = kcmd->fh->psys;
+
+	queue_work(IPU_PSYS_WORK_QUEUE, &psys->watchdog_work);
+}
+
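+/*
+ * Route a kcmd to its PPG: a START kcmd creates and registers a new PPG,
+ * a STOP kcmd either completes immediately (PPG already stopped) or is
+ * queued, an ENQUEUE kcmd gets a buffer set and is queued; the scheduler
+ * thread is then kicked.
+ */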
+static int ipu_psys_kcmd_send_to_ppg(struct ipu_psys_kcmd *kcmd)
+{
+	struct ipu_psys_fh *fh = kcmd->fh;
+	struct ipu_psys_scheduler *sched = &fh->sched;
+	struct ipu_psys *psys = fh->psys;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_resource_pool *rpr;
+	unsigned long flags;
+	u8 id;
+	bool resche = true;
+
+	rpr = &psys->resource_pool_running;
+	if (kcmd->state == KCMD_STATE_PPG_START) {
+		int queue_id;
+		int ret;
+
+		mutex_lock(&psys->mutex);
+		queue_id = ipu_psys_allocate_cmd_queue_resource(rpr);
+		if (queue_id == -ENOSPC) {
+			dev_err(&psys->adev->dev, "no available queue\n");
+			mutex_unlock(&psys->mutex);
+			return -ENOMEM;
+		}
+		mutex_unlock(&psys->mutex);
+
+		kppg = kzalloc(sizeof(*kppg), GFP_KERNEL);
+		if (!kppg)
+			return -ENOMEM;
+
+		kppg->fh = fh;
+		kppg->kpg = kcmd->kpg;
+		kppg->state = PPG_STATE_START;
+		kppg->pri_base = kcmd->priority;
+		kppg->pri_dynamic = 0;
+		INIT_LIST_HEAD(&kppg->list);
+
+		mutex_init(&kppg->mutex);
+		INIT_LIST_HEAD(&kppg->kcmds_new_list);
+		INIT_LIST_HEAD(&kppg->kcmds_processing_list);
+		INIT_LIST_HEAD(&kppg->kcmds_finished_list);
+		INIT_LIST_HEAD(&kppg->sched_list);
+
+		kppg->manifest = kzalloc(kcmd->pg_manifest_size, GFP_KERNEL);
+		if (!kppg->manifest) {
+			kfree(kppg);
+			return -ENOMEM;
+		}
+		memcpy(kppg->manifest, kcmd->pg_manifest,
+		       kcmd->pg_manifest_size);
+
+		/*
+		 * Set the token: the start cmd is immediately followed by an
+		 * enqueue cmd, and the token is what allows the kppg to be
+		 * retrieved then.
+		 */
+		kppg->token = (u64)kcmd->kpg;
+		ipu_fw_psys_pg_set_token(kcmd, kppg->token);
+		ipu_fw_psys_ppg_set_base_queue_id(kcmd, queue_id);
+		ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd,
+						      kcmd->kpg->pg_dma_addr);
+		if (ret) {
+			kfree(kppg->manifest);
+			kfree(kppg);
+			return -EIO;
+		}
+		memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size);
+
+		mutex_lock(&fh->mutex);
+		list_add_tail(&kppg->list, &sched->ppgs);
+		mutex_unlock(&fh->mutex);
+
+		mutex_lock(&kppg->mutex);
+		list_add(&kcmd->list, &kppg->kcmds_new_list);
+		mutex_unlock(&kppg->mutex);
+
+		dev_dbg(&psys->adev->dev,
+			"START ppg(%d, 0x%p) kcmd 0x%p, queue %d\n",
+			ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd, queue_id);
+
+		/* Kick l-scheduler thread */
+		atomic_set(&psys->wakeup_count, 1);
+		wake_up_interruptible(&psys->sched_cmd_wq);
+
+		return 0;
+	}
+
+	kppg = ipu_psys_identify_kppg(kcmd);
+	spin_lock_irqsave(&psys->pgs_lock, flags);
+	kcmd->kpg->pg_size = 0;
+	spin_unlock_irqrestore(&psys->pgs_lock, flags);
+	if (!kppg) {
+		dev_err(&psys->adev->dev, "token not match\n");
+		return -EINVAL;
+	}
+
+	kcmd->kpg = kppg->kpg;
+
+	dev_dbg(&psys->adev->dev, "%s ppg(%d, 0x%p) kcmd %p\n",
+		(kcmd->state == KCMD_STATE_PPG_STOP) ?
+		"STOP" : "ENQUEUE",
+		ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd);
+
+	if (kcmd->state == KCMD_STATE_PPG_STOP) {
+		mutex_lock(&kppg->mutex);
+		if (kppg->state == PPG_STATE_STOPPED) {
+			dev_dbg(&psys->adev->dev,
+				"kppg 0x%p  stopped!\n", kppg);
+			id = ipu_fw_psys_ppg_get_base_queue_id(kcmd);
+			ipu_psys_free_cmd_queue_resource(rpr, id);
+			list_add(&kcmd->list, &kppg->kcmds_finished_list);
+			ipu_psys_kcmd_complete(psys, kcmd, 0);
+			spin_lock_irqsave(&psys->pgs_lock, flags);
+			kppg->kpg->pg_size = 0;
+			spin_unlock_irqrestore(&psys->pgs_lock, flags);
+			pm_runtime_put(&psys->adev->dev);
+			resche = false;
+		} else {
+			list_add(&kcmd->list, &kppg->kcmds_new_list);
+		}
+		mutex_unlock(&kppg->mutex);
+	} else {
+		int ret;
+
+		ret = ipu_psys_ppg_get_bufset(kcmd, kppg);
+		if (ret)
+			return ret;
+
+		mutex_lock(&kppg->mutex);
+		list_add_tail(&kcmd->list, &kppg->kcmds_new_list);
+		mutex_unlock(&kppg->mutex);
+	}
+
+	if (resche) {
+		/* Kick l-scheduler thread */
+		atomic_set(&psys->wakeup_count, 1);
+		wake_up_interruptible(&psys->sched_cmd_wq);
+	}
+	return 0;
+}
+
+int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh)
+{
+	struct ipu_psys *psys = fh->psys;
+	struct ipu_psys_kcmd *kcmd;
+	size_t pg_size;
+	int ret;
+
+	if (psys->adev->isp->flr_done)
+		return -EIO;
+
+	kcmd = ipu_psys_copy_cmd(cmd, fh);
+	if (!kcmd)
+		return -EINVAL;
+
+	pg_size = ipu_fw_psys_pg_get_size(kcmd);
+	if (pg_size > kcmd->kpg->pg_size) {
+		dev_dbg(&psys->adev->dev, "pg size mismatch %lu %lu\n",
+			pg_size, kcmd->kpg->pg_size);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!is_ppg_kcmd(kcmd)) {
+		dev_err(&psys->adev->dev, "No support legacy pg now\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	timer_setup(&kcmd->watchdog, ipu_psys_watchdog, 0);
+
+	if (cmd->min_psys_freq) {
+		kcmd->constraint.min_freq = cmd->min_psys_freq;
+		ipu_buttress_add_psys_constraint(psys->adev->isp,
+						 &kcmd->constraint);
+	}
+
+	ret = ipu_psys_kcmd_send_to_ppg(kcmd);
+	if (ret)
+		goto error;
+
+	dev_dbg(&psys->adev->dev,
+		"IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n",
+		cmd->user_token, cmd->issue_id, cmd->priority);
+
+	return 0;
+
+error:
+	ipu_psys_kcmd_free(kcmd);
+
+	return ret;
+}
+
+static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys,
+				   struct ipu_psys_kcmd *kcmd)
+{
+	struct ipu_psys_fh *fh;
+	struct ipu_psys_kcmd *kcmd0;
+	struct ipu_psys_ppg *kppg;
+	struct ipu_psys_scheduler *sched;
+
+	list_for_each_entry(fh, &psys->fhs, list) {
+		sched = &fh->sched;
+		mutex_lock(&fh->mutex);
+		if (list_empty(&sched->ppgs)) {
+			mutex_unlock(&fh->mutex);
+			continue;
+		}
+		list_for_each_entry(kppg, &sched->ppgs, list) {
+			mutex_lock(&kppg->mutex);
+			list_for_each_entry(kcmd0,
+					    &kppg->kcmds_processing_list,
+					    list) {
+				if (kcmd0 == kcmd) {
+					mutex_unlock(&kppg->mutex);
+					mutex_unlock(&fh->mutex);
+					return true;
+				}
+			}
+			mutex_unlock(&kppg->mutex);
+		}
+		mutex_unlock(&fh->mutex);
+	}
+
+	return false;
+}
+
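+/*
+ * Drain firmware events: look up the PPG and kcmd that a RUN, STOP,
+ * SUSPEND or RESUME event refers to, advance the PPG state and complete
+ * the kcmd.
+ */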
+void ipu_psys_handle_events(struct ipu_psys *psys)
+{
+	struct ipu_psys_kcmd *kcmd;
+	struct ipu_fw_psys_event event;
+	struct ipu_psys_ppg *kppg;
+	bool error;
+	u32 hdl;
+	u16 cmd, status;
+	int res, state;
+
+	do {
+		memset(&event, 0, sizeof(event));
+		if (!ipu_fw_psys_rcv_event(psys, &event))
+			break;
+
+		if (!event.context_handle)
+			break;
+
+		dev_dbg(&psys->adev->dev, "ppg event: 0x%x, %d, status %d\n",
+			event.context_handle, event.command, event.status);
+
+		error = false;
+		/*
+		 * event.command == CMD_RUN indicates the FW finished
+		 * processing a frame in PPG mode; event.context_handle is
+		 * then a pointer to a buffer set, which we use to look up
+		 * the kbuffer_set and the kcmd.
+		 */
+		hdl = event.context_handle;
+		cmd = event.command;
+		status = event.status;
+
+		kppg = NULL;
+		kcmd = NULL;
+		if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN) {
+			struct ipu_psys_buffer_set *kbuf_set;
+			/*
+			 * The PPG state needs to change when the first run
+			 * completes (after the PPG was started/resumed).
+			 */
+			kbuf_set = ipu_psys_lookup_kbuffer_set(psys, hdl);
+			if (kbuf_set)
+				kcmd = kbuf_set->kcmd;
+			if (!kbuf_set || !kcmd)
+				error = true;
+			else
+				kppg = ipu_psys_identify_kppg(kcmd);
+		} else if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP ||
+			   cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND ||
+			   cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME) {
+			/*
+			 * STOP/SUSPEND/RESUME cmd events take this branch;
+			 * only a stop cmd queued by the user has a stop_kcmd
+			 * and needs to notify the user to dequeue.
+			 */
+			kppg = ipu_psys_lookup_ppg(psys, hdl);
+			if (kppg) {
+				mutex_lock(&kppg->mutex);
+				state = kppg->state;
+				if (state == PPG_STATE_STOPPING) {
+					kcmd = ipu_psys_ppg_get_stop_kcmd(kppg);
+					if (!kcmd)
+						error = true;
+				}
+				mutex_unlock(&kppg->mutex);
+			} else {
+				error = true;
+			}
+		} else {
+			dev_err(&psys->adev->dev, "invalid event\n");
+			continue;
+		}
+
+		if (error) {
+			dev_err(&psys->adev->dev, "event error, command %d\n",
+				cmd);
+			break;
+		}
+
+		dev_dbg(&psys->adev->dev, "event to kppg 0x%p, kcmd 0x%p\n",
+			kppg, kcmd);
+
+		if (kppg)
+			ipu_psys_ppg_complete(psys, kppg);
+
+		if (kcmd && ipu_psys_kcmd_is_valid(psys, kcmd)) {
+			res = (status == IPU_PSYS_EVENT_CMD_COMPLETE ||
+			       status == IPU_PSYS_EVENT_FRAGMENT_COMPLETE) ?
+				0 : -EIO;
+			mutex_lock(&kppg->mutex);
+			list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);
+			mutex_unlock(&kppg->mutex);
+			ipu_psys_kcmd_complete(psys, kcmd, res);
+		}
+	} while (1);
+}
+
+int ipu_psys_fh_init(struct ipu_psys_fh *fh)
+{
+	struct ipu_psys *psys = fh->psys;
+	struct ipu_psys_buffer_set *kbuf_set, *kbuf_set_tmp;
+	struct ipu_psys_scheduler *sched = &fh->sched;
+	int i;
+
+	mutex_init(&sched->bs_mutex);
+	INIT_LIST_HEAD(&sched->buf_sets);
+	INIT_LIST_HEAD(&sched->ppgs);
+	pm_runtime_dont_use_autosuspend(&psys->adev->dev);
+	/* allocate and map memory for buf_sets */
+	for (i = 0; i < IPU_PSYS_BUF_SET_POOL_SIZE; i++) {
+		kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL);
+		if (!kbuf_set)
+			goto out_free_buf_sets;
+		kbuf_set->kaddr = dma_alloc_attrs(&psys->adev->dev,
+						  IPU_PSYS_BUF_SET_MAX_SIZE,
+						  &kbuf_set->dma_addr,
+						  GFP_KERNEL,
+						  0);
+		if (!kbuf_set->kaddr) {
+			kfree(kbuf_set);
+			goto out_free_buf_sets;
+		}
+		kbuf_set->size = IPU_PSYS_BUF_SET_MAX_SIZE;
+		list_add(&kbuf_set->list, &sched->buf_sets);
+	}
+
+	return 0;
+
+out_free_buf_sets:
+	list_for_each_entry_safe(kbuf_set, kbuf_set_tmp,
+				 &sched->buf_sets, list) {
+		dma_free_attrs(&psys->adev->dev,
+			       kbuf_set->size, kbuf_set->kaddr,
+			       kbuf_set->dma_addr, 0);
+		list_del(&kbuf_set->list);
+		kfree(kbuf_set);
+	}
+	mutex_destroy(&sched->bs_mutex);
+
+	return -ENOMEM;
+}
+
+int ipu_psys_fh_deinit(struct ipu_psys_fh *fh)
+{
+	struct ipu_psys *psys = fh->psys;
+	struct ipu_psys_ppg *kppg, *kppg0;
+	struct ipu_psys_kcmd *kcmd, *kcmd0;
+	struct ipu_psys_buffer_set *kbuf_set, *kbuf_set0;
+	struct ipu_psys_scheduler *sched = &fh->sched;
+	struct ipu_psys_resource_pool *rpr;
+	struct ipu_psys_resource_alloc *alloc;
+	u8 id;
+
+	mutex_lock(&fh->mutex);
+	if (!list_empty(&sched->ppgs)) {
+		list_for_each_entry_safe(kppg, kppg0, &sched->ppgs, list) {
+			mutex_unlock(&fh->mutex);
+			mutex_lock(&kppg->mutex);
+			if (!(kppg->state &
+			      (PPG_STATE_STOPPED |
+			       PPG_STATE_STOPPING))) {
+				unsigned long flags;
+				struct ipu_psys_kcmd tmp = {
+					.kpg = kppg->kpg,
+				};
+
+				rpr = &psys->resource_pool_running;
+				alloc = &kppg->kpg->resource_alloc;
+				id = ipu_fw_psys_ppg_get_base_queue_id(&tmp);
+				ipu_psys_ppg_stop(kppg);
+				ipu_psys_free_resources(alloc, rpr);
+				ipu_psys_free_cmd_queue_resource(rpr, id);
+				dev_dbg(&psys->adev->dev,
+				    "s_change:%s %p %d -> %d\n", __func__,
+				    kppg, kppg->state, PPG_STATE_STOPPED);
+				kppg->state = PPG_STATE_STOPPED;
+				spin_lock_irqsave(&psys->pgs_lock, flags);
+				kppg->kpg->pg_size = 0;
+				spin_unlock_irqrestore(&psys->pgs_lock, flags);
+				if (psys->power_gating != PSYS_POWER_GATED)
+					pm_runtime_put(&psys->adev->dev);
+			}
+			mutex_unlock(&kppg->mutex);
+
+			list_for_each_entry_safe(kcmd, kcmd0,
+						 &kppg->kcmds_new_list, list) {
+				kcmd->pg_user = NULL;
+				ipu_psys_kcmd_free(kcmd);
+			}
+
+			list_for_each_entry_safe(kcmd, kcmd0,
+						 &kppg->kcmds_processing_list,
+						 list) {
+				kcmd->pg_user = NULL;
+				ipu_psys_kcmd_free(kcmd);
+			}
+
+			list_for_each_entry_safe(kcmd, kcmd0,
+						 &kppg->kcmds_finished_list,
+						 list) {
+				kcmd->pg_user = NULL;
+				ipu_psys_kcmd_free(kcmd);
+			}
+
+			mutex_lock(&kppg->mutex);
+			list_del(&kppg->list);
+			mutex_unlock(&kppg->mutex);
+
+			mutex_destroy(&kppg->mutex);
+			kfree(kppg->manifest);
+			kfree(kppg);
+			mutex_lock(&fh->mutex);
+		}
+	}
+	mutex_unlock(&fh->mutex);
+
+	mutex_lock(&sched->bs_mutex);
+	list_for_each_entry_safe(kbuf_set, kbuf_set0, &sched->buf_sets, list) {
+		dma_free_attrs(&psys->adev->dev,
+			       kbuf_set->size, kbuf_set->kaddr,
+			       kbuf_set->dma_addr, 0);
+		list_del(&kbuf_set->list);
+		kfree(kbuf_set);
+	}
+	mutex_unlock(&sched->bs_mutex);
+	mutex_destroy(&sched->bs_mutex);
+
+	return 0;
+}
+
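+/*
+ * Return the first kcmd on any of this file handle's PPG finished lists,
+ * or NULL if no kcmd has completed.
+ */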
+struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh)
+{
+	struct ipu_psys_scheduler *sched = &fh->sched;
+	struct ipu_psys_kcmd *kcmd;
+	struct ipu_psys_ppg *kppg;
+
+	mutex_lock(&fh->mutex);
+	if (list_empty(&sched->ppgs)) {
+		mutex_unlock(&fh->mutex);
+		return NULL;
+	}
+
+	list_for_each_entry(kppg, &sched->ppgs, list) {
+		mutex_lock(&kppg->mutex);
+		if (list_empty(&kppg->kcmds_finished_list)) {
+			mutex_unlock(&kppg->mutex);
+			continue;
+		}
+
+		kcmd = list_first_entry(&kppg->kcmds_finished_list,
+					struct ipu_psys_kcmd, list);
+		mutex_unlock(&fh->mutex);
+		mutex_unlock(&kppg->mutex);
+		dev_dbg(&fh->psys->adev->dev,
+			"get completed kcmd 0x%p\n", kcmd);
+		return kcmd;
+	}
+	mutex_unlock(&fh->mutex);
+
+	return NULL;
+}
+
+long ipu_ioctl_dqevent(struct ipu_psys_event *event,
+		       struct ipu_psys_fh *fh, unsigned int f_flags)
+{
+	struct ipu_psys *psys = fh->psys;
+	struct ipu_psys_kcmd *kcmd = NULL;
+	int rval;
+
+	dev_dbg(&psys->adev->dev, "IOC_DQEVENT\n");
+
+	if (!(f_flags & O_NONBLOCK)) {
+		rval = wait_event_interruptible(fh->wait,
+						(kcmd =
+						 ipu_get_completed_kcmd(fh)));
+		if (rval == -ERESTARTSYS)
+			return rval;
+	}
+
+	if (!kcmd) {
+		kcmd = ipu_get_completed_kcmd(fh);
+		if (!kcmd)
+			return -ENODATA;
+	}
+
+	*event = kcmd->ev;
+	ipu_psys_kcmd_free(kcmd);
+
+	return 0;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
new file mode 100644
index 000000000000..79f4f3db0374
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include "ipu.h"
+#include "ipu-cpd.h"
+#include "ipu-isys.h"
+#include "ipu-psys.h"
+#include "ipu-platform.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-platform-isys-csi2-reg.h"
+
+struct ipu_cell_program_t {
+	unsigned int magic_number;
+
+	unsigned int blob_offset;
+	unsigned int blob_size;
+
+	unsigned int start[3];
+
+	unsigned int icache_source;
+	unsigned int icache_target;
+	unsigned int icache_size;
+
+	unsigned int pmem_source;
+	unsigned int pmem_target;
+	unsigned int pmem_size;
+
+	unsigned int data_source;
+	unsigned int data_target;
+	unsigned int data_size;
+
+	unsigned int bss_target;
+	unsigned int bss_size;
+
+	unsigned int cell_id;
+	unsigned int regs_addr;
+
+	unsigned int cell_pmem_data_bus_address;
+	unsigned int cell_dmem_data_bus_address;
+	unsigned int cell_pmem_control_bus_address;
+	unsigned int cell_dmem_control_bus_address;
+
+	unsigned int next;
+	unsigned int dummy[2];
+};
+
+static unsigned int ipu6se_csi_offsets[] = {
+	IPU_CSI_PORT_A_ADDR_OFFSET,
+	IPU_CSI_PORT_B_ADDR_OFFSET,
+	IPU_CSI_PORT_C_ADDR_OFFSET,
+	IPU_CSI_PORT_D_ADDR_OFFSET,
+};
+
+static unsigned int ipu6se_tpg_offsets[] = {
+	IPU_CSI_PORT_A_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_B_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_C_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_D_PIXGEN_ADDR_OFFSET,
+};
+
+static unsigned int ipu6_csi_offsets[] = {
+	IPU_CSI_PORT_A_ADDR_OFFSET,
+	IPU_CSI_PORT_B_ADDR_OFFSET,
+	IPU_CSI_PORT_C_ADDR_OFFSET,
+	IPU_CSI_PORT_D_ADDR_OFFSET,
+	IPU_CSI_PORT_E_ADDR_OFFSET,
+	IPU_CSI_PORT_F_ADDR_OFFSET,
+	IPU_CSI_PORT_G_ADDR_OFFSET,
+	IPU_CSI_PORT_H_ADDR_OFFSET
+};
+
+static unsigned int ipu6_tpg_offsets[] = {
+	IPU_CSI_PORT_A_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_B_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_C_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_D_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_E_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_F_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_G_PIXGEN_ADDR_OFFSET,
+	IPU_CSI_PORT_H_PIXGEN_ADDR_OFFSET
+};
+
+struct ipu_isys_internal_pdata isys_ipdata = {
+	.hw_variant = {
+		       .offset = IPU_UNIFIED_OFFSET,
+		       .nr_mmus = 3,
+		       .mmu_hw = {
+				{
+				   .offset = IPU_ISYS_IOMMU0_OFFSET,
+				   .info_bits =
+				   IPU_INFO_REQUEST_DESTINATION_IOSF,
+				   .nr_l1streams = 16,
+				   .l1_block_sz = {
+						   3, 8, 2, 2, 2, 2, 2, 2, 1, 1,
+						   1, 1, 1, 1, 1, 1
+				   },
+				   .nr_l2streams = 16,
+				   .l2_block_sz = {
+						   2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 2, 2, 2
+				   },
+				   .insert_read_before_invalidate = false,
+				   .zlw_invalidate = false,
+				   .l1_stream_id_reg_offset =
+				   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+				   .l2_stream_id_reg_offset =
+				   IPU_MMU_L2_STREAM_ID_REG_OFFSET,
+				},
+				{
+				   .offset = IPU_ISYS_IOMMU1_OFFSET,
+				   .info_bits = IPU_INFO_STREAM_ID_SET(0),
+				   .nr_l1streams = 16,
+				   .l1_block_sz = {
+						   2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 1, 1, 4
+				   },
+				   .nr_l2streams = 16,
+				   .l2_block_sz = {
+						   2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 2, 2, 2
+				   },
+				   .insert_read_before_invalidate = false,
+				   .zlw_invalidate = false,
+				   .l1_stream_id_reg_offset =
+				   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+				   .l2_stream_id_reg_offset =
+				   IPU_MMU_L2_STREAM_ID_REG_OFFSET,
+				},
+				{
+				   .offset = IPU_ISYS_IOMMUI_OFFSET,
+				   .info_bits = IPU_INFO_STREAM_ID_SET(0),
+				   .nr_l1streams = 0,
+				   .nr_l2streams = 0,
+				   .insert_read_before_invalidate = false,
+				},
+			},
+		       .cdc_fifos = 3,
+		       .cdc_fifo_threshold = {6, 8, 2},
+		       .dmem_offset = IPU_ISYS_DMEM_OFFSET,
+		       .spc_offset = IPU_ISYS_SPC_OFFSET,
+	},
+	.isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN,
+};
+
+struct ipu_psys_internal_pdata psys_ipdata = {
+	.hw_variant = {
+		       .offset = IPU_UNIFIED_OFFSET,
+		       .nr_mmus = 4,
+		       .mmu_hw = {
+				{
+				   .offset = IPU_PSYS_IOMMU0_OFFSET,
+				   .info_bits =
+				   IPU_INFO_REQUEST_DESTINATION_IOSF,
+				   .nr_l1streams = 16,
+				   .l1_block_sz = {
+						   2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 2, 2, 2
+				   },
+				   .nr_l2streams = 16,
+				   .l2_block_sz = {
+						   2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 2, 2, 2
+				   },
+				   .insert_read_before_invalidate = false,
+				   .zlw_invalidate = false,
+				   .l1_stream_id_reg_offset =
+				   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+				   .l2_stream_id_reg_offset =
+				   IPU_MMU_L2_STREAM_ID_REG_OFFSET,
+				},
+				{
+				   .offset = IPU_PSYS_IOMMU1_OFFSET,
+				   .info_bits = IPU_INFO_STREAM_ID_SET(0),
+				   .nr_l1streams = 32,
+				   .l1_block_sz = {
+						   1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 2, 2, 10,
+						   5, 4, 14, 6, 4, 14, 6, 4, 8,
+						   4, 2, 1, 1, 1, 1, 14
+				   },
+				   .nr_l2streams = 32,
+				   .l2_block_sz = {
+						   2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 2, 2, 2
+				   },
+				   .insert_read_before_invalidate = false,
+				   .zlw_invalidate = false,
+				   .l1_stream_id_reg_offset =
+				   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+				   .l2_stream_id_reg_offset =
+				   IPU_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET,
+				},
+				{
+				   .offset = IPU_PSYS_IOMMU1R_OFFSET,
+				   .info_bits = IPU_INFO_STREAM_ID_SET(0),
+				   .nr_l1streams = 16,
+				   .l1_block_sz = {
+						   1, 4, 4, 4, 4, 16, 8, 4, 32,
+						   16, 16, 2, 2, 2, 1, 12
+				   },
+				   .nr_l2streams = 16,
+				   .l2_block_sz = {
+						   2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+						   2, 2, 2, 2, 2, 2
+				   },
+				   .insert_read_before_invalidate = false,
+				   .zlw_invalidate = false,
+				   .l1_stream_id_reg_offset =
+				   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+				   .l2_stream_id_reg_offset =
+				   IPU_MMU_L2_STREAM_ID_REG_OFFSET,
+				},
+				{
+				   .offset = IPU_PSYS_IOMMUI_OFFSET,
+				   .info_bits = IPU_INFO_STREAM_ID_SET(0),
+				   .nr_l1streams = 0,
+				   .nr_l2streams = 0,
+				   .insert_read_before_invalidate = false,
+				},
+		},
+	       .dmem_offset = IPU_PSYS_DMEM_OFFSET,
+	},
+};
+
+const struct ipu_buttress_ctrl isys_buttress_ctrl = {
+	.ratio = IPU_IS_FREQ_CTL_DEFAULT_RATIO,
+	.qos_floor = IPU_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
+	.freq_ctl = IPU_BUTTRESS_REG_IS_FREQ_CTL,
+	.pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
+	.pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK,
+	.pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE,
+	.pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE,
+};
+
+const struct ipu_buttress_ctrl psys_buttress_ctrl = {
+	.ratio = IPU_PS_FREQ_CTL_DEFAULT_RATIO,
+	.qos_floor = IPU_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
+	.freq_ctl = IPU_BUTTRESS_REG_PS_FREQ_CTL,
+	.pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
+	.pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK,
+	.pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE,
+	.pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE,
+};
+
+static void ipu6_pkg_dir_configure_spc(struct ipu_device *isp,
+				       const struct ipu_hw_variants *hw_variant,
+				       int pkg_dir_idx, void __iomem *base,
+				       u64 *pkg_dir,
+				       dma_addr_t pkg_dir_vied_address)
+{
+	struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys);
+	struct ipu_isys *isys = ipu_bus_get_drvdata(isp->isys);
+	unsigned int server_fw_virtaddr;
+	struct ipu_cell_program_t *prog;
+	void __iomem *spc_base;
+	dma_addr_t dma_addr;
+
+	if (!pkg_dir || !isp->cpd_fw) {
+		dev_err(&isp->pdev->dev, "invalid pkg_dir or missing CPD firmware\n");
+		return;
+	}
+
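+	/*
+	 * PKG_DIR entries are two 64-bit words each; the first entry is
+	 * the directory header, so entry N's address word sits at
+	 * index (N + 1) * 2.
+	 */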
+	server_fw_virtaddr = *(pkg_dir + (pkg_dir_idx + 1) * 2);
+	if (pkg_dir_idx == IPU_CPD_PKG_DIR_ISYS_SERVER_IDX)
+		dma_addr = sg_dma_address(isys->fw_sgt.sgl);
+	else
+		dma_addr = sg_dma_address(psys->fw_sgt.sgl);
+	prog = (struct ipu_cell_program_t *)((u64)isp->cpd_fw->data +
+					     (server_fw_virtaddr - dma_addr));
+
+	spc_base = base + prog->regs_addr;
+	if (spc_base != (base + hw_variant->spc_offset))
+		dev_warn(&isp->pdev->dev,
+			 "SPC reg addr 0x%p not matching value from CPD 0x%p\n",
+			 base + hw_variant->spc_offset, spc_base);
+	writel(server_fw_virtaddr + prog->blob_offset +
+	       prog->icache_source, spc_base + IPU_PSYS_REG_SPC_ICACHE_BASE);
+	writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
+	       spc_base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
+	writel(prog->start[1], spc_base + IPU_PSYS_REG_SPC_START_PC);
+	writel(pkg_dir_vied_address, base + hw_variant->dmem_offset);
+}
+
+void ipu_configure_spc(struct ipu_device *isp,
+		       const struct ipu_hw_variants *hw_variant,
+		       int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+		       dma_addr_t pkg_dir_dma_addr)
+{
+	u32 val;
+	void __iomem *dmem_base = base + hw_variant->dmem_offset;
+	void __iomem *spc_regs_base = base + hw_variant->spc_offset;
+
+	val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+	val |= IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+	writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+
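+	/* In secure mode the firmware reads PKG_DIR from a fixed IMR offset */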
+	if (isp->secure_mode)
+		writel(IPU_PKG_DIR_IMR_OFFSET, dmem_base);
+	else
+		ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base,
+					   pkg_dir, pkg_dir_dma_addr);
+}
+EXPORT_SYMBOL(ipu_configure_spc);
+
+int ipu_buttress_psys_freq_get(void *data, u64 *val)
+{
+	struct ipu_device *isp = data;
+	u32 reg_val;
+	int rval;
+
+	rval = pm_runtime_get_sync(&isp->psys->dev);
+	if (rval < 0) {
+		pm_runtime_put(&isp->psys->dev);
+		dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval);
+		return rval;
+	}
+
+	reg_val = readl(isp->base + BUTTRESS_REG_PS_FREQ_CTL);
+
+	pm_runtime_put(&isp->psys->dev);
+
+	*val = IPU_PS_FREQ_RATIO_BASE *
+	    (reg_val & IPU_BUTTRESS_PS_FREQ_CTL_DIVISOR_MASK);
+
+	return 0;
+}
+
+int ipu_buttress_isys_freq_get(void *data, u64 *val)
+{
+	struct ipu_device *isp = data;
+	u32 reg_val;
+	int rval;
+
+	rval = pm_runtime_get_sync(&isp->isys->dev);
+	if (rval < 0) {
+		pm_runtime_put(&isp->isys->dev);
+		dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval);
+		return rval;
+	}
+
+	reg_val = readl(isp->base + BUTTRESS_REG_IS_FREQ_CTL);
+
+	pm_runtime_put(&isp->isys->dev);
+
+	*val = IPU_IS_FREQ_RATIO_BASE *
+	    (reg_val & IPU_BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK);
+
+	return 0;
+}
+
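+/* Select version-specific ISYS/PSYS platform data once ipu_ver is known */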
+void ipu_internal_pdata_init(void)
+{
+	if (ipu_ver == IPU_VER_6) {
+		isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6_csi_offsets);
+		isys_ipdata.csi2.offsets = ipu6_csi_offsets;
+		isys_ipdata.tpg.ntpgs = ARRAY_SIZE(ipu6_tpg_offsets);
+		isys_ipdata.tpg.offsets = ipu6_tpg_offsets;
+		isys_ipdata.tpg.sels = NULL;
+		isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS;
+		psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET;
+
+	} else if (ipu_ver == IPU_VER_6SE) {
+		isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6se_csi_offsets);
+		isys_ipdata.csi2.offsets = ipu6se_csi_offsets;
+		isys_ipdata.tpg.ntpgs = ARRAY_SIZE(ipu6se_tpg_offsets);
+		isys_ipdata.tpg.offsets = ipu6se_tpg_offsets;
+		isys_ipdata.tpg.sels = NULL;
+		isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS;
+		psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET;
+	}
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6se-fw-resources.c b/drivers/media/pci/intel/ipu6/ipu6se-fw-resources.c
new file mode 100644
index 000000000000..e94519a34f6b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6se-fw-resources.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2019 Intel Corporation
+
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include "ipu-psys.h"
+#include "ipu-fw-psys.h"
+#include "ipu6se-platform-resources.h"
+
+/* resources table */
+
+/*
+ * Cell types by cell IDs
+ */
+const u8 ipu6se_fw_psys_cell_types[IPU6SE_FW_PSYS_N_CELL_ID] = {
+	IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID,
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_ICA_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_LSC_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DPC_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_B2B_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_BNLM_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DM_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AWB_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AE_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AF_ID */
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID  /* IPU6SE_FW_PSYS_ISA_PAF_ID */
+};
+
+const u16 ipu6se_fw_num_dev_channels[IPU6SE_FW_PSYS_N_DEV_CHN_ID] = {
+	IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE,
+	IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE,
+	IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE,
+	IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE,
+};
+
+const u16 ipu6se_fw_psys_mem_size[IPU6SE_FW_PSYS_N_MEM_ID] = {
+	IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE,
+	IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE,
+	IPU6SE_FW_PSYS_DMEM0_MAX_SIZE,
+	IPU6SE_FW_PSYS_DMEM1_MAX_SIZE
+};
+
+const u16 ipu6se_fw_psys_dfms[IPU6SE_FW_PSYS_N_DEV_DFM_ID] = {
+	IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE,
+	IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE
+};
+
+const u8
+ipu6se_fw_psys_c_mem[IPU6SE_FW_PSYS_N_CELL_ID][IPU6SE_FW_PSYS_N_MEM_TYPE_ID] = {
+	{ /* IPU6SE_FW_PSYS_SP0_ID */
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_DMEM0_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_ICA_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_LSC_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_DPC_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_B2B_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+
+	{ /* IPU6SE_FW_PSYS_ISA_BNLM_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_DM_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_AWB_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_AE_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_AF_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	},
+	{ /* IPU6SE_FW_PSYS_ISA_PAF_ID */
+		IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+		IPU6SE_FW_PSYS_LB_VMEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+		IPU6SE_FW_PSYS_N_MEM_ID,
+	}
+};
+
+static const struct ipu_fw_resource_definitions ipu6se_defs = {
+	.cells = ipu6se_fw_psys_cell_types,
+	.num_cells = IPU6SE_FW_PSYS_N_CELL_ID,
+	.num_cells_type = IPU6SE_FW_PSYS_N_CELL_TYPE_ID,
+
+	.dev_channels = ipu6se_fw_num_dev_channels,
+	.num_dev_channels = IPU6SE_FW_PSYS_N_DEV_CHN_ID,
+
+	.num_ext_mem_types = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID,
+	.num_ext_mem_ids = IPU6SE_FW_PSYS_N_MEM_ID,
+	.ext_mem_ids = ipu6se_fw_psys_mem_size,
+
+	.num_dfm_ids = IPU6SE_FW_PSYS_N_DEV_DFM_ID,
+
+	.dfms = ipu6se_fw_psys_dfms,
+
+	.cell_mem_row = IPU6SE_FW_PSYS_N_MEM_TYPE_ID,
+	.cell_mem = &ipu6se_fw_psys_c_mem[0][0],
+};
+
+const struct ipu_fw_resource_definitions *ipu6se_res_defs = &ipu6se_defs;
+
+int ipu6se_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+				    u16 value)
+{
+	struct ipu6se_fw_psys_process_ext *pm_ext;
+	u8 ps_ext_offset;
+
+	ps_ext_offset = ptr->process_extension_offset;
+	if (!ps_ext_offset)
+		return -EINVAL;
+
+	pm_ext = (struct ipu6se_fw_psys_process_ext *)((u8 *)ptr +
+						       ps_ext_offset);
+
+	pm_ext->dev_chn_offset[offset] = value;
+
+	return 0;
+}
+
+int ipu6se_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+				       u16 id, u32 bitmap,
+				       u32 active_bitmap)
+{
+	struct ipu6se_fw_psys_process_ext *pm_ext;
+	u8 ps_ext_offset;
+
+	ps_ext_offset = ptr->process_extension_offset;
+	if (!ps_ext_offset)
+		return -EINVAL;
+
+	pm_ext = (struct ipu6se_fw_psys_process_ext *)((u8 *)ptr +
+						       ps_ext_offset);
+
+	pm_ext->dfm_port_bitmap[id] = bitmap;
+	pm_ext->dfm_active_port_bitmap[id] = active_bitmap;
+
+	return 0;
+}
+
+int ipu6se_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+				       u16 type_id, u16 mem_id, u16 offset)
+{
+	struct ipu6se_fw_psys_process_ext *pm_ext;
+	u8 ps_ext_offset;
+
+	ps_ext_offset = ptr->process_extension_offset;
+	if (!ps_ext_offset)
+		return -EINVAL;
+
+	pm_ext = (struct ipu6se_fw_psys_process_ext *)((u8 *)ptr +
+						       ps_ext_offset);
+
+	pm_ext->ext_mem_offset[type_id] = offset;
+	pm_ext->ext_mem_id[type_id] = mem_id;
+
+	return 0;
+}
+
+static struct ipu_fw_psys_program_manifest *
+get_program_manifest(const struct ipu_fw_psys_program_group_manifest *manifest,
+		     const unsigned int program_index)
+{
+	struct ipu_fw_psys_program_manifest *prg_manifest_base;
+	u8 *program_manifest = NULL;
+	u8 program_count;
+	unsigned int i;
+
+	program_count = manifest->program_count;
+
+	prg_manifest_base = (struct ipu_fw_psys_program_manifest *)
+		((char *)manifest + manifest->program_manifest_offset);
+	if (program_index < program_count) {
+		program_manifest = (u8 *)prg_manifest_base;
+		for (i = 0; i < program_index; i++)
+			program_manifest +=
+				((struct ipu_fw_psys_program_manifest *)
+				 program_manifest)->size;
+	}
+
+	return (struct ipu_fw_psys_program_manifest *)program_manifest;
+}
+
+int ipu6se_fw_psys_get_program_manifest_by_process(
+	struct ipu_fw_generic_program_manifest *gen_pm,
+	const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+	struct ipu_fw_psys_process *process)
+{
+	u32 program_id = process->program_idx;
+	struct ipu_fw_psys_program_manifest *pm;
+	struct ipu6se_fw_psys_program_manifest_ext *pm_ext;
+
+	pm = get_program_manifest(pg_manifest, program_id);
+
+	if (!pm)
+		return -ENOENT;
+
+	if (pm->program_extension_offset) {
+		pm_ext = (struct ipu6se_fw_psys_program_manifest_ext *)
+			((u8 *)pm + pm->program_extension_offset);
+
+		gen_pm->dev_chn_size = pm_ext->dev_chn_size;
+		gen_pm->dev_chn_offset = pm_ext->dev_chn_offset;
+		gen_pm->ext_mem_size = pm_ext->ext_mem_size;
+		gen_pm->ext_mem_offset = (u16 *)pm_ext->ext_mem_offset;
+		gen_pm->is_dfm_relocatable = pm_ext->is_dfm_relocatable;
+		gen_pm->dfm_port_bitmap = pm_ext->dfm_port_bitmap;
+		gen_pm->dfm_active_port_bitmap =
+			pm_ext->dfm_active_port_bitmap;
+	}
+
+	memcpy(gen_pm->cells, pm->cells, sizeof(pm->cells));
+	gen_pm->cell_id = pm->cells[0];
+	gen_pm->cell_type_id = pm->cell_type_id;
+
+	return 0;
+}
+
+void ipu6se_fw_psys_pg_dump(struct ipu_psys *psys,
+			    struct ipu_psys_kcmd *kcmd, const char *note)
+{
+	struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg;
+	u32 pgid = pg->ID;
+	u8 processes = pg->process_count;
+	u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset);
+	unsigned int p, chn, mem, mem_id;
+	int cell;
+
+	dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes:\n",
+		__func__, note, pgid, processes);
+
+	for (p = 0; p < processes; p++) {
+		struct ipu_fw_psys_process *process =
+		    (struct ipu_fw_psys_process *)
+		    ((char *)pg + process_offset_table[p]);
+		struct ipu6se_fw_psys_process_ext *pm_ext =
+		    (struct ipu6se_fw_psys_process_ext *)((u8 *)process
+		    + process->process_extension_offset);
+		cell = process->cells[0];
+		dev_dbg(&psys->adev->dev, "\t process %i size=%u",
+			p, process->size);
+		if (!process->process_extension_offset)
+			continue;
+
+		for (mem = 0; mem < IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID;
+		    mem++) {
+			mem_id = pm_ext->ext_mem_id[mem];
+			if (mem_id != IPU6SE_FW_PSYS_N_MEM_ID)
+				dev_dbg(&psys->adev->dev,
+					"\t mem type %u id %d offset=0x%x",
+					mem, mem_id,
+					pm_ext->ext_mem_offset[mem]);
+		}
+		for (chn = 0; chn < IPU6SE_FW_PSYS_N_DEV_CHN_ID; chn++) {
+			if (pm_ext->dev_chn_offset[chn] != (u16)(-1))
+				dev_dbg(&psys->adev->dev,
+					"\t dev_chn[%u]=0x%x\n",
+					chn, pm_ext->dev_chn_offset[chn]);
+		}
+	}
+}
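
The flattened cell/memory table above is consumed through struct ipu_fw_resource_definitions: .cell_mem points at ipu6se_fw_psys_c_mem[0][0] and .cell_mem_row carries the row stride, so common resource code can index the table without knowing the IPU6SE dimensions. A minimal sketch of that lookup, using an illustrative helper name that is not part of this patch:

    static u8 cell_mem_lookup(const struct ipu_fw_resource_definitions *defs,
			      u8 cell_id, u8 mem_type_id)
    {
	/* A slot holding IPU6SE_FW_PSYS_N_MEM_ID means "no such memory" */
	return defs->cell_mem[cell_id * defs->cell_mem_row + mem_type_id];
    }
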
diff --git a/drivers/media/pci/intel/ipu6/ipu6se-platform-resources.h b/drivers/media/pci/intel/ipu6/ipu6se-platform-resources.h
new file mode 100644
index 000000000000..5a28a975a8b8
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6se-platform-resources.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - 2020 Intel Corporation */
+
+#ifndef IPU6SE_PLATFORM_RESOURCES_H
+#define IPU6SE_PLATFORM_RESOURCES_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include "ipu-platform-resources.h"
+
+#define	IPU6SE_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT		1
+
+enum {
+	IPU6SE_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0,
+	IPU6SE_FW_PSYS_CMD_QUEUE_DEVICE_ID,
+	IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID,
+	IPU6SE_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID,
+	IPU6SE_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID,
+	IPU6SE_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID,
+	IPU6SE_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID,
+	IPU6SE_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID,
+	IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID
+};
+
+enum {
+	IPU6SE_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0,
+	IPU6SE_FW_PSYS_LB_VMEM_TYPE_ID,
+	IPU6SE_FW_PSYS_DMEM_TYPE_ID,
+	IPU6SE_FW_PSYS_VMEM_TYPE_ID,
+	IPU6SE_FW_PSYS_BAMEM_TYPE_ID,
+	IPU6SE_FW_PSYS_PMEM_TYPE_ID,
+	IPU6SE_FW_PSYS_N_MEM_TYPE_ID
+};
+
+enum ipu6se_mem_id {
+	IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID = 0,	/* TRANSFER VMEM 0 */
+	IPU6SE_FW_PSYS_LB_VMEM_ID,	/* LB VMEM */
+	IPU6SE_FW_PSYS_DMEM0_ID,	/* SPC0 Dmem */
+	IPU6SE_FW_PSYS_DMEM1_ID,	/* SPP0 Dmem */
+	IPU6SE_FW_PSYS_N_MEM_ID
+};
+
+enum {
+	IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0,
+	IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID,
+	IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID,
+	IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_ID,
+	IPU6SE_FW_PSYS_N_DEV_CHN_ID
+};
+
+enum {
+	IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID = 0,
+	IPU6SE_FW_PSYS_SP_SERVER_TYPE_ID,
+	IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID,
+	IPU6SE_FW_PSYS_N_CELL_TYPE_ID
+};
+
+enum {
+	IPU6SE_FW_PSYS_SP0_ID = 0,
+	IPU6SE_FW_PSYS_ISA_ICA_ID,
+	IPU6SE_FW_PSYS_ISA_LSC_ID,
+	IPU6SE_FW_PSYS_ISA_DPC_ID,
+	IPU6SE_FW_PSYS_ISA_B2B_ID,
+	IPU6SE_FW_PSYS_ISA_BNLM_ID,
+	IPU6SE_FW_PSYS_ISA_DM_ID,
+	IPU6SE_FW_PSYS_ISA_R2I_SIE_ID,
+	IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID,
+	IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID,
+	IPU6SE_FW_PSYS_ISA_AWB_ID,
+	IPU6SE_FW_PSYS_ISA_AE_ID,
+	IPU6SE_FW_PSYS_ISA_AF_ID,
+	IPU6SE_FW_PSYS_ISA_PAF_ID,
+	IPU6SE_FW_PSYS_N_CELL_ID
+};
+
+enum {
+	IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID = 0,
+	IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID,
+};
+
+/* Excluding PMEM */
+#define IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6SE_FW_PSYS_N_MEM_TYPE_ID - 1)
+#define IPU6SE_FW_PSYS_N_DEV_DFM_ID	\
+	(IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID + 1)
+#define IPU6SE_FW_PSYS_VMEM0_MAX_SIZE		0x0800
+/* Transfer VMEM0 words, ref HAS Transfer*/
+#define IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE	0x0800
+#define IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE		0x0400	/* LB VMEM words */
+#define IPU6SE_FW_PSYS_DMEM0_MAX_SIZE		0x4000
+#define IPU6SE_FW_PSYS_DMEM1_MAX_SIZE		0x1000
+
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE		22
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE	22
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE	22
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE		0
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE		2
+
+#define IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE		32
+#define IPU6SE_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE		32
+#define IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE		32
+#define IPU6SE_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE		32
+
+struct ipu6se_fw_psys_program_manifest_ext {
+	u32 dfm_port_bitmap[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
+	u32 dfm_active_port_bitmap[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
+	u16 ext_mem_size[IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID];
+	u16 ext_mem_offset[IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID];
+	u16 dev_chn_size[IPU6SE_FW_PSYS_N_DEV_CHN_ID];
+	u16 dev_chn_offset[IPU6SE_FW_PSYS_N_DEV_CHN_ID];
+	u8 is_dfm_relocatable[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
+	u8 dec_resources_input[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES];
+	u8 dec_resources_input_terminal[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES];
+	u8 dec_resources_output[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES];
+	u8 dec_resources_output_terminal[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES];
+	u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT];
+};
+
+struct ipu6se_fw_psys_process_ext {
+	u32 dfm_port_bitmap[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
+	u32 dfm_active_port_bitmap[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
+	u16 ext_mem_offset[IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID];
+	u16 dev_chn_offset[IPU6SE_FW_PSYS_N_DEV_CHN_ID];
+	u8 ext_mem_id[IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID];
+	u8 padding[IPU6SE_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT];
+};
+
+#endif /* IPU6SE_PLATFORM_RESOURCES_H */
diff --git a/include/media/ipu-isys.h b/include/media/ipu-isys.h
new file mode 100644
index 000000000000..ed202c7c6134
--- /dev/null
+++ b/include/media/ipu-isys.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 - 2020 Intel Corporation */
+
+#ifndef MEDIA_IPU_H
+#define MEDIA_IPU_H
+
+#include <linux/i2c.h>
+#include <linux/clkdev.h>
+#include <media/v4l2-async.h>
+
+#define IPU_ISYS_MAX_CSI2_LANES		4
+
+struct ipu_isys_csi2_config {
+	unsigned int nlanes;
+	unsigned int port;
+};
+
+struct ipu_isys_subdev_i2c_info {
+	struct i2c_board_info board_info;
+	int i2c_adapter_id;
+	char i2c_adapter_bdf[32];
+};
+
+struct ipu_isys_subdev_info {
+	struct ipu_isys_csi2_config *csi2;
+	struct ipu_isys_subdev_i2c_info i2c;
+};
+
+struct ipu_isys_clk_mapping {
+	struct clk_lookup clkdev_data;
+	char *platform_clock_name;
+};
+
+struct ipu_isys_subdev_pdata {
+	struct ipu_isys_subdev_info **subdevs;
+	struct ipu_isys_clk_mapping *clk_map;
+};
+
+struct sensor_async_subdev {
+	struct v4l2_async_subdev asd;
+	struct ipu_isys_csi2_config csi2;
+};
+
+#endif /* MEDIA_IPU_H */
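
These platform-data types are what the ACPI/DMI glue code fills in to describe attached sensors. A hypothetical sketch for a single 2-lane sensor follows; the I2C address, adapter id and CSI-2 port are placeholders, not values taken from this patch:

    static struct ipu_isys_csi2_config ov01a1s_csi2_cfg = {
	.nlanes = 2,	/* placeholder lane count */
	.port = 1,	/* placeholder CSI-2 port */
    };

    static struct ipu_isys_subdev_info ov01a1s_sd = {
	.csi2 = &ov01a1s_csi2_cfg,
	.i2c = {
		.board_info = { I2C_BOARD_INFO("ov01a1s", 0x10) },
		.i2c_adapter_id = 2,	/* placeholder */
	},
    };

    static struct ipu_isys_subdev_info *example_subdevs[] = {
	&ov01a1s_sd,
	NULL,
    };

    static struct ipu_isys_subdev_pdata example_pdata = {
	.subdevs = example_subdevs,
    };
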
diff --git a/include/uapi/linux/ipu-isys.h b/include/uapi/linux/ipu-isys.h
new file mode 100644
index 000000000000..a0e657704087
--- /dev/null
+++ b/include/uapi/linux/ipu-isys.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2016 - 2020 Intel Corporation */
+
+#ifndef UAPI_LINUX_IPU_ISYS_H
+#define UAPI_LINUX_IPU_ISYS_H
+
+#define V4L2_CID_IPU_BASE	(V4L2_CID_USER_BASE + 0x1080)
+
+#define V4L2_CID_IPU_STORE_CSI2_HEADER	(V4L2_CID_IPU_BASE + 2)
+
+#define VIDIOC_IPU_GET_DRIVER_VERSION \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 3, uint32_t)
+
+#endif /* UAPI_LINUX_IPU_ISYS_H */
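
VIDIOC_IPU_GET_DRIVER_VERSION is a private ioctl on the ISYS video nodes. A minimal userspace sketch, assuming /dev/video0 is an IPU node (BASE_VIDIOC_PRIVATE comes from linux/videodev2.h):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>
    #include <linux/ipu-isys.h>

    int main(void)
    {
	uint32_t version = 0;
	int fd = open("/dev/video0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_IPU_GET_DRIVER_VERSION, &version) == 0)
		printf("IPU ISYS driver version: %u\n", version);
	close(fd);
	return 0;
    }
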
diff --git a/include/uapi/linux/ipu-psys.h b/include/uapi/linux/ipu-psys.h
new file mode 100644
index 000000000000..30538e706062
--- /dev/null
+++ b/include/uapi/linux/ipu-psys.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef _UAPI_IPU_PSYS_H
+#define _UAPI_IPU_PSYS_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+struct ipu_psys_capability {
+	uint32_t version;
+	uint8_t driver[20];
+	uint32_t pg_count;
+	uint8_t dev_model[32];
+	uint32_t reserved[17];
+} __attribute__ ((packed));
+
+struct ipu_psys_event {
+	uint32_t type;		/* IPU_PSYS_EVENT_TYPE_ */
+	uint64_t user_token;
+	uint64_t issue_id;
+	uint32_t buffer_idx;
+	uint32_t error;
+	int32_t reserved[2];
+} __attribute__ ((packed));
+
+#define IPU_PSYS_EVENT_TYPE_CMD_COMPLETE	1
+#define IPU_PSYS_EVENT_TYPE_BUFFER_COMPLETE	2
+
+/**
+ * struct ipu_psys_buffer - for input/output terminals
+ * @len:	total allocated size at base address
+ * @userptr:	user pointer
+ * @fd:		DMA-BUF handle
+ * @data_offset:	offset to valid data
+ * @bytes_used:	amount of valid data including offset
+ * @flags:	flags
+ */
+struct ipu_psys_buffer {
+	uint64_t len;
+	union {
+		int fd;
+		void __user *userptr;
+		uint64_t reserved;
+	} base;
+	uint32_t data_offset;
+	uint32_t bytes_used;
+	uint32_t flags;
+	uint32_t reserved[2];
+} __attribute__ ((packed));
+
+#define IPU_BUFFER_FLAG_INPUT	(1 << 0)
+#define IPU_BUFFER_FLAG_OUTPUT	(1 << 1)
+#define IPU_BUFFER_FLAG_MAPPED	(1 << 2)
+#define IPU_BUFFER_FLAG_NO_FLUSH	(1 << 3)
+#define IPU_BUFFER_FLAG_DMA_HANDLE	(1 << 4)
+#define IPU_BUFFER_FLAG_USERPTR	(1 << 5)
+
+#define	IPU_PSYS_CMD_PRIORITY_HIGH	0
+#define	IPU_PSYS_CMD_PRIORITY_MED	1
+#define	IPU_PSYS_CMD_PRIORITY_LOW	2
+#define	IPU_PSYS_CMD_PRIORITY_NUM	3
+
+/**
+ * struct ipu_psys_command - processing command
+ * @issue_id:		unique id for the command set by user
+ * @user_token:		token of the command
+ * @priority:		priority of the command
+ * @pg_manifest:	userspace pointer to program group manifest
+ * @buffers:		userspace pointers to array of psys dma buf structs
+ * @pg:			process group DMA-BUF handle
+ * @pg_manifest_size:	size of program group manifest
+ * @bufcount:		number of buffers in buffers array
+ * @min_psys_freq:	minimum psys frequency in MHz used for this cmd
+ * @frame_counter:      counter of current frame synced between isys and psys
+ * @kernel_enable_bitmap:       enable bits for each individual kernel
+ * @terminal_enable_bitmap:     enable bits for each individual terminal
+ * @routing_enable_bitmap:      enable bits for each individual route
+ * @rbm:                        enable bits for routing
+ *
+ * Specifies a processing command with input and output buffers.
+ */
+struct ipu_psys_command {
+	uint64_t issue_id;
+	uint64_t user_token;
+	uint32_t priority;
+	void __user *pg_manifest;
+	struct ipu_psys_buffer __user *buffers;
+	int pg;
+	uint32_t pg_manifest_size;
+	uint32_t bufcount;
+	uint32_t min_psys_freq;
+	uint32_t frame_counter;
+	uint32_t kernel_enable_bitmap[4];
+	uint32_t terminal_enable_bitmap[4];
+	uint32_t routing_enable_bitmap[4];
+	uint32_t rbm[5];
+	uint32_t reserved[2];
+} __attribute__ ((packed));
+
+struct ipu_psys_manifest {
+	uint32_t index;
+	uint32_t size;
+	void __user *manifest;
+	uint32_t reserved[5];
+} __attribute__ ((packed));
+
+#define IPU_IOC_QUERYCAP _IOR('A', 1, struct ipu_psys_capability)
+#define IPU_IOC_MAPBUF _IOWR('A', 2, int)
+#define IPU_IOC_UNMAPBUF _IOWR('A', 3, int)
+#define IPU_IOC_GETBUF _IOWR('A', 4, struct ipu_psys_buffer)
+#define IPU_IOC_PUTBUF _IOWR('A', 5, struct ipu_psys_buffer)
+#define IPU_IOC_QCMD _IOWR('A', 6, struct ipu_psys_command)
+#define IPU_IOC_DQEVENT _IOWR('A', 7, struct ipu_psys_event)
+#define IPU_IOC_CMD_CANCEL _IOWR('A', 8, struct ipu_psys_command)
+#define IPU_IOC_GET_MANIFEST _IOWR('A', 9, struct ipu_psys_manifest)
+
+#endif /* _UAPI_IPU_PSYS_H */
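
For reference, a minimal userspace sketch against this ioctl interface, assuming the driver exposes a /dev/ipu-psys0 character node:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ipu-psys.h>

    int main(void)
    {
	struct ipu_psys_capability caps;
	int fd = open("/dev/ipu-psys0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	memset(&caps, 0, sizeof(caps));
	if (ioctl(fd, IPU_IOC_QUERYCAP, &caps) == 0)
		printf("psys driver %.20s: %u program groups\n",
		       caps.driver, caps.pg_count);
	close(fd);
	return 0;
    }

Commands are queued with IPU_IOC_QCMD and completions collected with IPU_IOC_DQEVENT; the user_token and issue_id fields of struct ipu_psys_event echo the values set in the corresponding struct ipu_psys_command.
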
-- 
2.31.1