[PATCH 18/29][SRU][OEM-5.14] UBUNTU: SAUCE: intel visual sensing controller (VSC) driver first release
You-Sheng Yang
vicamo.yang at canonical.com
Tue Dec 28 06:19:44 UTC 2021
From: Wentong Wu <wentong.wu at intel.com>
BugLink: https://bugs.launchpad.net/bugs/1955383
(backported from
https://github.com/intel/ivsc-driver/commit/badabfda2bcd7fa7e06178c880c7ad16f02414f2)
Signed-off-by: You-Sheng Yang <vicamo.yang at canonical.com>
---
drivers/gpio/Kconfig | 11 +
drivers/gpio/Makefile | 1 +
drivers/gpio/gpio-ljca.c | 468 +++++++++
drivers/i2c/busses/Kconfig | 10 +
drivers/i2c/busses/Makefile | 1 +
drivers/i2c/busses/i2c-ljca.c | 381 +++++++
drivers/mfd/Kconfig | 10 +
drivers/mfd/Makefile | 2 +
drivers/mfd/ljca.c | 1136 ++++++++++++++++++++
drivers/misc/Kconfig | 1 +
drivers/misc/Makefile | 1 +
drivers/misc/ivsc/Kconfig | 40 +
drivers/misc/ivsc/Makefile | 9 +
drivers/misc/ivsc/intel_vsc.c | 247 +++++
drivers/misc/ivsc/intel_vsc.h | 177 ++++
drivers/misc/ivsc/mei_ace.c | 589 +++++++++++
drivers/misc/ivsc/mei_ace_debug.c | 696 +++++++++++++
drivers/misc/ivsc/mei_csi.c | 456 ++++++++
drivers/misc/ivsc/mei_pse.c | 944 +++++++++++++++++
drivers/misc/mei/Kconfig | 7 +
drivers/misc/mei/Makefile | 4 +
drivers/misc/mei/hw-vsc.c | 1624 +++++++++++++++++++++++++++++
drivers/misc/mei/hw-vsc.h | 377 +++++++
drivers/misc/mei/spi-vsc.c | 217 ++++
drivers/spi/Kconfig | 10 +
drivers/spi/Makefile | 1 +
drivers/spi/spi-ljca.c | 328 ++++++
include/linux/mfd/ljca.h | 47 +
include/linux/vsc.h | 81 ++
29 files changed, 7876 insertions(+)
create mode 100644 drivers/gpio/gpio-ljca.c
create mode 100644 drivers/i2c/busses/i2c-ljca.c
create mode 100644 drivers/mfd/ljca.c
create mode 100644 drivers/misc/ivsc/Kconfig
create mode 100644 drivers/misc/ivsc/Makefile
create mode 100644 drivers/misc/ivsc/intel_vsc.c
create mode 100644 drivers/misc/ivsc/intel_vsc.h
create mode 100644 drivers/misc/ivsc/mei_ace.c
create mode 100644 drivers/misc/ivsc/mei_ace_debug.c
create mode 100644 drivers/misc/ivsc/mei_csi.c
create mode 100644 drivers/misc/ivsc/mei_pse.c
create mode 100644 drivers/misc/mei/hw-vsc.c
create mode 100644 drivers/misc/mei/hw-vsc.h
create mode 100644 drivers/misc/mei/spi-vsc.c
create mode 100644 drivers/spi/spi-ljca.c
create mode 100644 include/linux/mfd/ljca.h
create mode 100644 include/linux/vsc.h
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8471066b0b57..992d5e5f749d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -353,6 +353,17 @@ config GPIO_IXP4XX
IXP4xx series of chips.
If unsure, say N.
+
+config GPIO_LJCA
+ tristate "INTEL La Jolla Cove Adapter GPIO support"
+ depends on MFD_LJCA
+ help
+ Select this option to enable GPIO driver for the INTEL
+ La Jolla Cove Adapter (LJCA) board.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-ljca.
+
config GPIO_LOGICVC
tristate "Xylon LogiCVC GPIO support"
depends on MFD_SYSCON && OF
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index cf381f2de10e..5059b9b5c807 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
+obj-$(CONFIG_GPIO_LJCA) += gpio-ljca.o
obj-$(CONFIG_GPIO_LOGICVC) += gpio-logicvc.o
obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
new file mode 100644
index 000000000000..efaa690215bf
--- /dev/null
+++ b/drivers/gpio/gpio-ljca.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB-GPIO driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mfd/ljca.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define GPIO_PAYLOAD_LEN(pin_num) \
+ (sizeof(struct gpio_packet) + (pin_num) * sizeof(struct gpio_op))
+
+/* GPIO commands */
+#define GPIO_CONFIG 1
+#define GPIO_READ 2
+#define GPIO_WRITE 3
+#define GPIO_INT_EVENT 4
+#define GPIO_INT_MASK 5
+#define GPIO_INT_UNMASK 6
+
+#define GPIO_CONF_DISABLE BIT(0)
+#define GPIO_CONF_INPUT BIT(1)
+#define GPIO_CONF_OUTPUT BIT(2)
+#define GPIO_CONF_PULLUP BIT(3)
+#define GPIO_CONF_PULLDOWN BIT(4)
+#define GPIO_CONF_DEFAULT BIT(5)
+#define GPIO_CONF_INTERRUPT BIT(6)
+#define GPIO_INT_TYPE BIT(7)
+
+#define GPIO_CONF_EDGE (1 << 7)
+#define GPIO_CONF_LEVEL (0 << 7)
+
+/* Intentional overlap with PULLUP / PULLDOWN */
+#define GPIO_CONF_SET BIT(3)
+#define GPIO_CONF_CLR BIT(4)
+
+struct gpio_op {
+ u8 index;
+ u8 value;
+} __packed;
+
+struct gpio_packet {
+ u8 num;
+ struct gpio_op item[];
+} __packed;
+
+struct ljca_gpio_dev {
+ struct platform_device *pdev;
+ struct gpio_chip gc;
+ struct ljca_gpio_info *ctr_info;
+ DECLARE_BITMAP(unmasked_irqs, MAX_GPIO_NUM);
+ DECLARE_BITMAP(enabled_irqs, MAX_GPIO_NUM);
+ DECLARE_BITMAP(reenable_irqs, MAX_GPIO_NUM);
+ u8 *connect_mode;
+ struct mutex irq_lock;
+ struct work_struct work;
+ struct mutex trans_lock;
+
+ u8 obuf[256];
+ u8 ibuf[256];
+};
+
+/* Check that gpio_id is in range and marked usable in the firmware pin map. */
+static bool ljca_gpio_valid(struct ljca_gpio_dev *ljca_gpio, int gpio_id)
+{
+ if (gpio_id >= ljca_gpio->ctr_info->num ||
+     !test_bit(gpio_id, ljca_gpio->ctr_info->valid_pin_map)) {
+ dev_err(&ljca_gpio->pdev->dev, "invalid gpio id:%d\n", gpio_id);
+ return false;
+ }
+
+ return true;
+}
+
+static int gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, u8 config)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf; /* request built in shared obuf, guarded by trans_lock */
+ int ret;
+
+ if (!ljca_gpio_valid(ljca_gpio, gpio_id))
+ return -EINVAL;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->item[0].index = gpio_id;
+ packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id]; /* merge cached pull/interrupt mode for this pin */
+ packet->num = 1;
+
+ ret = ljca_transfer(ljca_gpio->pdev, GPIO_CONFIG, packet,
+ GPIO_PAYLOAD_LEN(packet->num), NULL, NULL);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return ret;
+}
+
+static int ljca_gpio_read(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ struct gpio_packet *ack_packet;
+ int ret;
+ int ibuf_len; /* NOTE(review): assumes ljca_transfer() always writes this — confirm */
+
+ if (!ljca_gpio_valid(ljca_gpio, gpio_id))
+ return -EINVAL;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
+ packet->item[0].index = gpio_id;
+ ret = ljca_transfer(ljca_gpio->pdev, GPIO_READ, packet,
+ GPIO_PAYLOAD_LEN(packet->num), ljca_gpio->ibuf,
+ &ibuf_len);
+
+ ack_packet = (struct gpio_packet *)ljca_gpio->ibuf; /* response lands in shared ibuf */
+ if (ret || !ibuf_len || ack_packet->num != packet->num) {
+ dev_err(&ljca_gpio->pdev->dev, "%s failed gpio_id:%d ret %d %d",
+ __func__, gpio_id, ret, ack_packet->num);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return -EIO;
+ }
+
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return (ack_packet->item[0].value > 0) ? 1 : 0; /* normalize level to 0/1 */
+}
+
+/* Set a pin level; validates gpio_id like the read/config paths do. */
+static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
+    int value)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ int ret;
+
+ if (!ljca_gpio_valid(ljca_gpio, gpio_id))
+ return -EINVAL;
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
+ packet->item[0].index = gpio_id;
+ packet->item[0].value = (value & 1); /* only the LSB is meaningful */
+ ret = ljca_transfer(ljca_gpio->pdev, GPIO_WRITE, packet,
+     GPIO_PAYLOAD_LEN(packet->num), NULL, NULL);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return ret;
+}
+
+static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ljca_gpio_dev *dev = gpiochip_get_data(chip);
+
+ return ljca_gpio_read(dev, offset); /* query current pin level over USB */
+}
+
+static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
+    int val)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+ int ret;
+
+ ret = ljca_gpio_write(ljca_gpio, offset, val); /* .set has no return; failures are only logged */
+ if (ret)
+ dev_err(chip->parent,
+ "%s offset:%d val:%d set value failed %d\n", __func__,
+ offset, val, ret);
+}
+
+static int ljca_gpio_direction_input(struct gpio_chip *chip,
+     unsigned int offset)
+{
+ struct ljca_gpio_dev *dev = gpiochip_get_data(chip);
+
+ /* switch the pin to input; CLR drops any output latch */
+ return gpio_config(dev, offset, GPIO_CONF_INPUT | GPIO_CONF_CLR);
+}
+
+static int ljca_gpio_direction_output(struct gpio_chip *chip,
+      unsigned int offset, int val)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+ u8 config = GPIO_CONF_OUTPUT | GPIO_CONF_CLR;
+ int ret;
+
+ ret = gpio_config(ljca_gpio, offset, config);
+ if (ret)
+ return ret;
+
+ ljca_gpio_set_value(chip, offset, val); /* drive the requested initial level */
+ return 0;
+}
+
+static int ljca_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+    unsigned long config)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+
+ if (!ljca_gpio_valid(ljca_gpio, offset))
+ return -EINVAL;
+
+ ljca_gpio->connect_mode[offset] = 0; /* cache only; applied on next gpio_config() */
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ljca_gpio->connect_mode[offset] |= GPIO_CONF_PULLUP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ljca_gpio->connect_mode[offset] |= GPIO_CONF_PULLDOWN;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ case PIN_CONFIG_PERSIST_STATE:
+ break; /* accepted but no extra mode bits needed */
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+/* Mask or unmask a pin interrupt in the adapter firmware. */
+static int ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id,
+    bool enable)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ int ret;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
+ packet->item[0].index = gpio_id;
+ packet->item[0].value = 0;
+
+ dev_dbg(ljca_gpio->gc.parent, "%s %d", __func__, gpio_id);
+
+ ret = ljca_transfer(ljca_gpio->pdev,
+     enable ? GPIO_INT_UNMASK : GPIO_INT_MASK,
+     packet, GPIO_PAYLOAD_LEN(packet->num), NULL, NULL);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return ret;
+}
+
+static void ljca_gpio_async(struct work_struct *work)
+{
+ struct ljca_gpio_dev *ljca_gpio =
+ container_of(work, struct ljca_gpio_dev, work);
+ int gpio_id;
+ int unmasked;
+
+ for_each_set_bit (gpio_id, ljca_gpio->reenable_irqs,
+   ljca_gpio->gc.ngpio) { /* re-arm irqs the event handler flagged */
+ clear_bit(gpio_id, ljca_gpio->reenable_irqs);
+ unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs);
+ if (unmasked)
+ ljca_enable_irq(ljca_gpio, gpio_id, true); /* firmware auto-masks after each event */
+ }
+}
+
+/* Event callback from the LJCA core; dispatches pin interrupts. Only
+ * referenced via ljca_register_event_cb() in probe, so keep it static. */
+static void ljca_gpio_event_cb(struct platform_device *pdev, u8 cmd,
+ const void *evt_data, int len)
+{
+ const struct gpio_packet *packet = evt_data;
+ struct ljca_gpio_dev *ljca_gpio = platform_get_drvdata(pdev);
+ int i;
+ int irq;
+
+ if (cmd != GPIO_INT_EVENT)
+ return;
+
+ for (i = 0; i < packet->num; i++) {
+ irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
+        packet->item[i].index);
+ if (!irq) {
+ dev_err(ljca_gpio->gc.parent,
+ "gpio_id %d not mapped to IRQ\n",
+ packet->item[i].index);
+ return;
+ }
+
+ generic_handle_irq(irq);
+
+ set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
+ dev_dbg(ljca_gpio->gc.parent, "%s got one interrupt %d %d %d\n",
+ __func__, i, packet->item[i].index,
+ packet->item[i].value);
+ }
+
+ schedule_work(&ljca_gpio->work); /* unmasking needs USB I/O; defer to workqueue */
+}
+
+static void ljca_irq_unmask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+
+ dev_dbg(ljca_gpio->gc.parent, "%s %d", __func__, gpio_id);
+ set_bit(gpio_id, ljca_gpio->unmasked_irqs); /* bitmap only; device i/o happens in bus_sync_unlock */
+}
+
+static void ljca_irq_mask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+
+ dev_dbg(ljca_gpio->gc.parent, "%s %d", __func__, gpio_id);
+ clear_bit(gpio_id, ljca_gpio->unmasked_irqs); /* bitmap only; device i/o happens in bus_sync_unlock */
+}
+
+static int ljca_irq_set_type(struct irq_data *irqd, unsigned type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+
+ ljca_gpio->connect_mode[gpio_id] = GPIO_CONF_INTERRUPT; /* cached; sent on next gpio_config() */
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ ljca_gpio->connect_mode[gpio_id] |=
+ GPIO_CONF_LEVEL | GPIO_CONF_PULLUP;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ ljca_gpio->connect_mode[gpio_id] |=
+ GPIO_CONF_LEVEL | GPIO_CONF_PULLDOWN;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ break; /* firmware default covers both edges; no extra bits */
+ case IRQ_TYPE_EDGE_RISING:
+ ljca_gpio->connect_mode[gpio_id] |=
+ GPIO_CONF_EDGE | GPIO_CONF_PULLUP;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ ljca_gpio->connect_mode[gpio_id] |=
+ GPIO_CONF_EDGE | GPIO_CONF_PULLDOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ljca_gpio->gc.parent, "%s %d %x\n", __func__, gpio_id,
+ ljca_gpio->connect_mode[gpio_id]);
+ return 0;
+}
+
+static void ljca_irq_bus_lock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+
+ mutex_lock(&ljca_gpio->irq_lock); /* released in ljca_irq_bus_unlock() */
+}
+
+static void ljca_irq_bus_unlock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+ int enabled;
+ int unmasked;
+
+ enabled = test_bit(gpio_id, ljca_gpio->enabled_irqs);
+ unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs);
+ dev_dbg(ljca_gpio->gc.parent, "%s %d %d %d\n", __func__, gpio_id,
+ enabled, unmasked);
+
+ if (enabled != unmasked) { /* push the cached mask state to the device, outside atomic context */
+ if (unmasked) {
+ gpio_config(ljca_gpio, gpio_id, 0); /* apply trigger mode cached by set_type */
+ ljca_enable_irq(ljca_gpio, gpio_id, true);
+ set_bit(gpio_id, ljca_gpio->enabled_irqs);
+ } else {
+ ljca_enable_irq(ljca_gpio, gpio_id, false);
+ clear_bit(gpio_id, ljca_gpio->enabled_irqs);
+ }
+ }
+
+ mutex_unlock(&ljca_gpio->irq_lock);
+}
+
+static unsigned int ljca_irq_startup(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+
+ return gpiochip_lock_as_irq(gc, irqd_to_hwirq(irqd));
+}
+
+static void ljca_irq_shutdown(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+
+ gpiochip_unlock_as_irq(gc, irqd_to_hwirq(irqd));
+}
+
+static struct irq_chip ljca_gpio_irqchip = { /* sleeping bus ops; mask state synced in bus_sync_unlock */
+ .name = "ljca-irq",
+ .irq_mask = ljca_irq_mask,
+ .irq_unmask = ljca_irq_unmask,
+ .irq_set_type = ljca_irq_set_type,
+ .irq_bus_lock = ljca_irq_bus_lock,
+ .irq_bus_sync_unlock = ljca_irq_bus_unlock,
+ .irq_startup = ljca_irq_startup,
+ .irq_shutdown = ljca_irq_shutdown,
+};
+
+static int ljca_gpio_probe(struct platform_device *pdev)
+{
+ struct ljca_gpio_dev *ljca_gpio;
+ struct ljca_platform_data *pdata = dev_get_platdata(&pdev->dev); /* NOTE(review): assumes parent MFD always supplies platdata — confirm */
+ struct gpio_irq_chip *girq;
+
+ ljca_gpio = devm_kzalloc(&pdev->dev, sizeof(*ljca_gpio), GFP_KERNEL);
+ if (!ljca_gpio)
+ return -ENOMEM;
+
+ ljca_gpio->ctr_info = &pdata->gpio_info;
+ ljca_gpio->connect_mode =
+ devm_kcalloc(&pdev->dev, ljca_gpio->ctr_info->num,
+      sizeof(*ljca_gpio->connect_mode), GFP_KERNEL);
+ if (!ljca_gpio->connect_mode)
+ return -ENOMEM;
+
+ mutex_init(&ljca_gpio->irq_lock);
+ mutex_init(&ljca_gpio->trans_lock);
+ ljca_gpio->pdev = pdev;
+ ljca_gpio->gc.direction_input = ljca_gpio_direction_input;
+ ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
+ ljca_gpio->gc.get = ljca_gpio_get_value;
+ ljca_gpio->gc.set = ljca_gpio_set_value;
+ ljca_gpio->gc.set_config = ljca_gpio_set_config;
+ ljca_gpio->gc.can_sleep = true; /* every op does USB I/O */
+ ljca_gpio->gc.parent = &pdev->dev;
+
+ ljca_gpio->gc.base = -1; /* dynamic GPIO number allocation */
+ ljca_gpio->gc.ngpio = ljca_gpio->ctr_info->num;
+ ljca_gpio->gc.label = "ljca-gpio";
+ ljca_gpio->gc.owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, ljca_gpio);
+ ljca_register_event_cb(pdev, ljca_gpio_event_cb); /* NOTE(review): events may arrive before gpiochip_add below — confirm core holds them off */
+
+ girq = &ljca_gpio->gc.irq;
+ girq->chip = &ljca_gpio_irqchip;
+ girq->parent_handler = NULL; /* no parent irq; events come via USB callback */
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+
+ INIT_WORK(&ljca_gpio->work, ljca_gpio_async);
+ return devm_gpiochip_add_data(&pdev->dev, &ljca_gpio->gc, ljca_gpio);
+}
+
+static int ljca_gpio_remove(struct platform_device *pdev)
+{
+ return 0; /* everything is devm-managed; nothing to tear down by hand */
+}
+
+static struct platform_driver ljca_gpio_driver = {
+ .driver.name = "ljca-gpio",
+ .probe = ljca_gpio_probe,
+ .remove = ljca_gpio_remove,
+};
+
+module_platform_driver(ljca_gpio_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye at intel.com>");
+MODULE_AUTHOR("Zhang Lixu <lixu.zhang at intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-GPIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ljca-gpio");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 10acece9d7b9..b8a357ecffa2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1402,4 +1402,14 @@ config I2C_FSI
This driver can also be built as a module. If so, the module will be
called as i2c-fsi.
+config I2C_LJCA
+ tristate "I2C functionality of INTEL La Jolla Cove Adapter"
+ depends on MFD_LJCA
+ help
+ If you say yes to this option, I2C functionality support of INTEL
+ La Jolla Cove Adapter (LJCA) will be included.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-ljca.
+
endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 69e9963615f6..63ae989d6e56 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -146,5 +146,6 @@ obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
obj-$(CONFIG_I2C_XGENE_SLIMPRO) += i2c-xgene-slimpro.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
obj-$(CONFIG_I2C_FSI) += i2c-fsi.o
+obj-$(CONFIG_I2C_LJCA) += i2c-ljca.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/busses/i2c-ljca.c b/drivers/i2c/busses/i2c-ljca.c
new file mode 100644
index 000000000000..de66a41f61ae
--- /dev/null
+++ b/drivers/i2c/busses/i2c-ljca.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB-I2C driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/mfd/ljca.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/* I2C commands */
+enum i2c_cmd {
+ I2C_INIT = 1,
+ I2C_XFER,
+ I2C_START,
+ I2C_STOP,
+ I2C_READ,
+ I2C_WRITE,
+};
+
+enum i2c_address_mode {
+ I2C_ADDRESS_MODE_7BIT,
+ I2C_ADDRESS_MODE_10BIT,
+};
+
+enum xfer_type {
+ READ_XFER_TYPE,
+ WRITE_XFER_TYPE,
+};
+
+#define DEFAULT_I2C_CONTROLLER_ID 1
+#define DEFAULT_I2C_CAPACITY 0
+#define DEFAULT_I2C_INTR_PIN 0
+
+/* I2C r/w Flags */
+#define I2C_SLAVE_TRANSFER_WRITE (0)
+#define I2C_SLAVE_TRANSFER_READ (1)
+
+/* i2c init flags */
+#define I2C_INIT_FLAG_MODE_MASK (0x1 << 0)
+#define I2C_INIT_FLAG_MODE_POLLING (0x0 << 0)
+#define I2C_INIT_FLAG_MODE_INTERRUPT (0x1 << 0)
+
+#define I2C_FLAG_ADDR_16BIT (0x1 << 0)
+
+#define I2C_INIT_FLAG_FREQ_MASK (0x3 << 1)
+#define I2C_FLAG_FREQ_100K (0x0 << 1)
+#define I2C_FLAG_FREQ_400K (0x1 << 1)
+#define I2C_FLAG_FREQ_1M (0x2 << 1)
+
+/* I2C Transfer */
+struct i2c_xfer {
+ u8 id;
+ u8 slave;
+ u16 flag; /* speed, 8/16bit addr, addr increase, etc */
+ u16 addr;
+ u16 len;
+ u8 data[];
+} __packed;
+
+/* I2C raw commands: Init/Start/Read/Write/Stop */
+struct i2c_rw_packet {
+ u8 id;
+ __le16 len;
+ u8 data[];
+} __packed;
+
+#define LJCA_I2C_MAX_XFER_SIZE 256
+#define LJCA_I2C_BUF_SIZE \
+ (LJCA_I2C_MAX_XFER_SIZE + sizeof(struct i2c_rw_packet))
+
+struct ljca_i2c_dev {
+ struct platform_device *pdev;
+ struct ljca_i2c_info *ctr_info;
+ struct i2c_adapter adap;
+
+ u8 obuf[LJCA_I2C_BUF_SIZE];
+ u8 ibuf[LJCA_I2C_BUF_SIZE];
+};
+
+/* Shift a 7-bit address into wire format; 10-bit mode is unsupported. */
+static u8 ljca_i2c_format_slave_addr(u8 slave_addr, enum i2c_address_mode mode)
+{
+ if (mode != I2C_ADDRESS_MODE_7BIT)
+ return 0xFF;
+ return slave_addr << 1;
+}
+
+static int ljca_i2c_init(struct ljca_i2c_dev *ljca_i2c, u8 id)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = id;
+ w_packet->len = cpu_to_le16(1);
+ w_packet->data[0] = I2C_FLAG_FREQ_400K; /* bus fixed at 400 kHz */
+
+ return ljca_transfer(ljca_i2c->pdev, I2C_INIT, w_packet,
+      sizeof(*w_packet) + 1, NULL, NULL); /* header + 1 data byte */
+}
+
+static int ljca_i2c_start(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr,
+   enum xfer_type type)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+ struct i2c_rw_packet *r_packet = (struct i2c_rw_packet *)ljca_i2c->ibuf;
+ int ret;
+ int ibuf_len;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = ljca_i2c->ctr_info->id;
+ w_packet->len = cpu_to_le16(1);
+ w_packet->data[0] =
+ ljca_i2c_format_slave_addr(slave_addr, I2C_ADDRESS_MODE_7BIT);
+ w_packet->data[0] |= (type == READ_XFER_TYPE) ?
+      I2C_SLAVE_TRANSFER_READ :
+      I2C_SLAVE_TRANSFER_WRITE; /* R/W bit in addr LSB */
+
+ ret = ljca_transfer(ljca_i2c->pdev, I2C_START, w_packet,
+     sizeof(*w_packet) + 1, r_packet, &ibuf_len);
+
+ if (ret || ibuf_len < sizeof(*r_packet))
+ return -EIO;
+
+ if ((s16)le16_to_cpu(r_packet->len) < 0 ||
+     r_packet->id != w_packet->id) { /* negative len = firmware error code */
+ dev_err(&ljca_i2c->adap.dev,
+ "i2c start failed len:%d id:%d %d\n",
+ (s16)le16_to_cpu(r_packet->len), r_packet->id,
+ w_packet->id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ljca_i2c_stop(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+ struct i2c_rw_packet *r_packet = (struct i2c_rw_packet *)ljca_i2c->ibuf;
+ int ret;
+ int ibuf_len;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = ljca_i2c->ctr_info->id;
+ w_packet->len = cpu_to_le16(1);
+ w_packet->data[0] = 0; /* slave_addr unused by the STOP command */
+
+ ret = ljca_transfer(ljca_i2c->pdev, I2C_STOP, w_packet,
+     sizeof(*w_packet) + 1, r_packet, &ibuf_len);
+
+ if (ret || ibuf_len < sizeof(*r_packet))
+ return -EIO;
+
+ if ((s16)le16_to_cpu(r_packet->len) < 0 ||
+     r_packet->id != w_packet->id) { /* negative len = firmware error code */
+ dev_err(&ljca_i2c->adap.dev,
+ "i2c stop failed len:%d id:%d %d\n",
+ (s16)le16_to_cpu(r_packet->len), r_packet->id,
+ w_packet->id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ljca_i2c_pure_read(struct ljca_i2c_dev *ljca_i2c, u8 *data, int len)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+ struct i2c_rw_packet *r_packet = (struct i2c_rw_packet *)ljca_i2c->ibuf;
+ int ibuf_len;
+ int ret;
+
+ if (len > LJCA_I2C_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = ljca_i2c->ctr_info->id;
+ w_packet->len = cpu_to_le16(len); /* request len bytes from the active slave */
+ ret = ljca_transfer(ljca_i2c->pdev, I2C_READ, w_packet,
+     sizeof(*w_packet) + 1, r_packet, &ibuf_len);
+ if (ret) {
+ dev_err(&ljca_i2c->adap.dev, "I2C_READ failed ret:%d\n", ret);
+ return ret;
+ }
+
+ if (ibuf_len < sizeof(*r_packet))
+ return -EIO;
+
+ if ((s16)le16_to_cpu(r_packet->len) != len ||
+     r_packet->id != w_packet->id) { /* short read or mismatched controller */
+ dev_err(&ljca_i2c->adap.dev,
+ "i2c raw read failed len:%d id:%d %d\n",
+ (s16)le16_to_cpu(r_packet->len), r_packet->id,
+ w_packet->id);
+ return -EIO;
+ }
+
+ memcpy(data, r_packet->data, len);
+
+ return 0;
+}
+
+static int ljca_i2c_read(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr, u8 *data,
+  u8 len)
+{
+ int ret;
+
+ ret = ljca_i2c_start(ljca_i2c, slave_addr, READ_XFER_TYPE);
+ if (ret)
+ return ret;
+
+ ret = ljca_i2c_pure_read(ljca_i2c, data, len);
+ if (ret) {
+ dev_err(&ljca_i2c->adap.dev, "i2c raw read failed ret:%d\n",
+ ret);
+
+ return ret; /* NOTE(review): returns without STOP — confirm firmware recovers the bus */
+ }
+
+ return ljca_i2c_stop(ljca_i2c, slave_addr);
+}
+
+/* Send len payload bytes to the currently-addressed slave. */
+static int ljca_i2c_pure_write(struct ljca_i2c_dev *ljca_i2c, u8 *data, u8 len)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+ struct i2c_rw_packet *r_packet = (struct i2c_rw_packet *)ljca_i2c->ibuf;
+ int ret;
+ int ibuf_len;
+
+ if (len > LJCA_I2C_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = ljca_i2c->ctr_info->id;
+ w_packet->len = cpu_to_le16(len);
+ memcpy(w_packet->data, data, len);
+
+ /* use the native-endian len here: w_packet->len is __le16 and would be
+  * wrong in host arithmetic on big-endian machines */
+ ret = ljca_transfer(ljca_i2c->pdev, I2C_WRITE, w_packet,
+     sizeof(*w_packet) + len, r_packet, &ibuf_len);
+ if (ret || ibuf_len < sizeof(*r_packet))
+ return -EIO;
+
+ if ((s16)le16_to_cpu(r_packet->len) != len ||
+     r_packet->id != w_packet->id) { /* short write or mismatched controller */
+ dev_err(&ljca_i2c->adap.dev,
+ "i2c write failed len:%d id:%d/%d\n",
+ (s16)le16_to_cpu(r_packet->len), r_packet->id,
+ w_packet->id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ljca_i2c_write(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr,
+   u8 *data, u8 len)
+{
+ int ret;
+
+ if (!data)
+ return -EINVAL;
+
+ ret = ljca_i2c_start(ljca_i2c, slave_addr, WRITE_XFER_TYPE);
+ if (ret)
+ return ret;
+
+ ret = ljca_i2c_pure_write(ljca_i2c, data, len);
+ if (ret)
+ return ret; /* NOTE(review): returns without STOP — confirm firmware recovers the bus */
+
+ return ljca_i2c_stop(ljca_i2c, slave_addr);
+}
+
+/* i2c_algorithm.master_xfer: run each message as START/data/STOP. */
+static int ljca_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msg,
+   int num)
+{
+ struct ljca_i2c_dev *ljca_i2c;
+ struct i2c_msg *cur_msg;
+ int i, ret;
+
+ ljca_i2c = i2c_get_adapdata(adapter);
+ if (!ljca_i2c)
+ return -EINVAL;
+
+ /* the old "!ret &&" condition read ret uninitialized (UB); the body
+  * already returns on any error, so a plain bound check suffices */
+ for (i = 0; i < num; i++) {
+ cur_msg = &msg[i];
+ dev_dbg(&adapter->dev, "i:%d msg:(%d %d)\n", i, cur_msg->flags,
+ cur_msg->len);
+ if (cur_msg->flags & I2C_M_RD)
+ ret = ljca_i2c_read(ljca_i2c, cur_msg->addr,
+     cur_msg->buf, cur_msg->len);
+ else
+ ret = ljca_i2c_write(ljca_i2c, cur_msg->addr,
+      cur_msg->buf, cur_msg->len);
+ if (ret)
+ return ret;
+ }
+
+ return num; /* master_xfer returns the number of messages processed */
+}
+
+static u32 ljca_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; /* plain I2C plus emulated SMBus */
+}
+
+static const struct i2c_adapter_quirks ljca_i2c_quirks = {
+ .max_read_len = LJCA_I2C_MAX_XFER_SIZE,
+ .max_write_len = LJCA_I2C_MAX_XFER_SIZE,
+};
+
+static const struct i2c_algorithm ljca_i2c_algo = {
+ .master_xfer = ljca_i2c_xfer,
+ .functionality = ljca_i2c_func,
+};
+
+static int ljca_i2c_probe(struct platform_device *pdev)
+{
+ struct ljca_i2c_dev *ljca_i2c;
+ struct ljca_platform_data *pdata = dev_get_platdata(&pdev->dev); /* NOTE(review): assumes parent MFD always supplies platdata — confirm */
+ int ret;
+
+ ljca_i2c = devm_kzalloc(&pdev->dev, sizeof(*ljca_i2c), GFP_KERNEL);
+ if (!ljca_i2c)
+ return -ENOMEM;
+
+ ljca_i2c->pdev = pdev;
+ ljca_i2c->ctr_info = &pdata->i2c_info;
+
+ ljca_i2c->adap.owner = THIS_MODULE;
+ ljca_i2c->adap.class = I2C_CLASS_HWMON;
+ ljca_i2c->adap.algo = &ljca_i2c_algo;
+ ljca_i2c->adap.dev.parent = &pdev->dev;
+ ACPI_COMPANION_SET(&ljca_i2c->adap.dev, ACPI_COMPANION(&pdev->dev)); /* let ACPI enumerate slaves on this bus */
+ ljca_i2c->adap.dev.of_node = pdev->dev.of_node;
+ i2c_set_adapdata(&ljca_i2c->adap, ljca_i2c);
+ snprintf(ljca_i2c->adap.name, sizeof(ljca_i2c->adap.name), "%s-%s-%d",
+ "ljca-i2c", dev_name(pdev->dev.parent),
+ ljca_i2c->ctr_info->id);
+
+ platform_set_drvdata(pdev, ljca_i2c);
+
+ ret = ljca_i2c_init(ljca_i2c, ljca_i2c->ctr_info->id); /* program bus speed before exposing the adapter */
+ if (ret) {
+ dev_err(&pdev->dev, "i2c init failed id:%d\n",
+ ljca_i2c->ctr_info->id);
+ return -EIO;
+ }
+
+ return i2c_add_adapter(&ljca_i2c->adap);
+}
+
+static int ljca_i2c_remove(struct platform_device *pdev)
+{
+ struct ljca_i2c_dev *ljca_i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&ljca_i2c->adap); /* adapter was added manually, so delete it manually */
+
+ return 0;
+}
+
+static struct platform_driver ljca_i2c_driver = {
+ .driver.name = "ljca-i2c",
+ .probe = ljca_i2c_probe,
+ .remove = ljca_i2c_remove,
+};
+
+module_platform_driver(ljca_i2c_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye at intel.com>");
+MODULE_AUTHOR("Zhang Lixu <lixu.zhang at intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-I2C driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ljca-i2c");
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ba4569158179..2df6a6c64b28 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2195,5 +2195,15 @@ config MFD_INTEL_M10_BMC
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_LJCA
+ tristate "Intel La Jolla Cove Adapter support"
+ select MFD_CORE
+ depends on USB
+ help
+ This adds support for Intel La Jolla Cove USB-I2C/SPI/GPIO
+ Adapter (LJCA). Additional drivers such as I2C_LJCA,
+ GPIO_LJCA, etc. must be enabled in order to use the
+ functionality of the device.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index c5cefc9191bc..b6b44a6b3b12 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -273,3 +273,5 @@ obj-$(CONFIG_MFD_INTEL_M10_BMC) += intel-m10-bmc.o
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o
obj-$(CONFIG_MFD_AAEON) += mfd-aaeon.o
+
+obj-$(CONFIG_MFD_LJCA) += ljca.o
diff --git a/drivers/mfd/ljca.c b/drivers/mfd/ljca.c
new file mode 100644
index 000000000000..95baf04a4457
--- /dev/null
+++ b/drivers/mfd/ljca.c
@@ -0,0 +1,1136 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ljca.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+
+enum ljca_acpi_match_adr {
+ LJCA_ACPI_MATCH_GPIO,
+ LJCA_ACPI_MATCH_I2C1,
+ LJCA_ACPI_MATCH_I2C2,
+ LJCA_ACPI_MATCH_SPI1,
+};
+
+static struct mfd_cell_acpi_match ljca_acpi_match_gpio = {
+ .pnpid = "INTC1074",
+};
+
+static struct mfd_cell_acpi_match ljca_acpi_match_i2cs[] = {
+ {
+ .pnpid = "INTC1075",
+ },
+ {
+ .pnpid = "INTC1076",
+ },
+};
+
+static struct mfd_cell_acpi_match ljca_acpi_match_spis[] = {
+ {
+ .pnpid = "INTC1091",
+ },
+};
+
+struct ljca_msg {
+ u8 type;
+ u8 cmd;
+ u8 flags;
+ u8 len;
+ u8 data[];
+} __packed;
+
+struct fw_version {
+ u8 major;
+ u8 minor;
+ u16 patch;
+ u16 build;
+} __packed;
+
+/* stub types */
+enum stub_type {
+ MNG_STUB = 1,
+ DIAG_STUB,
+ GPIO_STUB,
+ I2C_STUB,
+ SPI_STUB,
+};
+
+/* command Flags */
+#define ACK_FLAG BIT(0)
+#define RESP_FLAG BIT(1)
+#define CMPL_FLAG BIT(2)
+
+/* MNG stub commands */
+enum ljca_mng_cmd {
+ MNG_GET_VERSION = 1,
+ MNG_RESET_NOTIFY,
+ MNG_RESET,
+ MNG_ENUM_GPIO,
+ MNG_ENUM_I2C,
+ MNG_POWER_STATE_CHANGE,
+ MNG_SET_DFU_MODE,
+ MNG_ENUM_SPI,
+};
+
+/* DIAG commands */
+enum diag_cmd {
+ DIAG_GET_STATE = 1,
+ DIAG_GET_STATISTIC,
+ DIAG_SET_TRACE_LEVEL,
+ DIAG_SET_ECHO_MODE,
+ DIAG_GET_FW_LOG,
+ DIAG_GET_FW_COREDUMP,
+ DIAG_TRIGGER_WDT,
+ DIAG_TRIGGER_FAULT,
+ DIAG_FEED_WDT,
+ DIAG_GET_SECURE_STATE,
+};
+
+struct ljca_i2c_ctr_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+} __packed;
+
+struct ljca_i2c_descriptor {
+ u8 num;
+ struct ljca_i2c_ctr_info info[];
+} __packed;
+
+struct ljca_spi_ctr_info {
+ u8 id;
+ u8 capacity;
+} __packed;
+
+struct ljca_spi_descriptor {
+ u8 num;
+ struct ljca_spi_ctr_info info[];
+} __packed;
+
+struct ljca_bank_descriptor {
+ u8 bank_id;
+ u8 pin_num;
+
+ /* 1 bit for each gpio, 1 means valid */
+ u32 valid_pins;
+} __packed;
+
+struct ljca_gpio_descriptor {
+ u8 pins_per_bank;
+ u8 bank_num;
+ struct ljca_bank_descriptor bank_desc[];
+} __packed;
+
+#define MAX_PACKET_SIZE 64
+#define MAX_PAYLOAD_SIZE (MAX_PACKET_SIZE - sizeof(struct ljca_msg))
+#define USB_WRITE_TIMEOUT 200
+#define USB_WRITE_ACK_TIMEOUT 500
+
+struct ljca_event_cb_entry {
+ struct platform_device *pdev;
+ ljca_event_cb_t notify;
+};
+
+struct ljca_stub_packet {
+ u8 *ibuf;
+ u32 ibuf_len;
+};
+
+struct ljca_stub {
+ struct list_head list;
+ u8 type;
+ struct usb_interface *intf;
+ struct mutex mutex;
+ spinlock_t event_cb_lock;
+
+ struct ljca_stub_packet ipacket;
+
+ /* for identify ack */
+ bool acked;
+ int cur_cmd;
+
+ struct ljca_event_cb_entry event_entry;
+};
+
+static inline void *ljca_priv(const struct ljca_stub *stub)
+{
+ return (char *)stub + sizeof(struct ljca_stub);
+}
+
+enum ljca_state {
+ LJCA_STOPPED,
+ LJCA_INITED,
+ LJCA_RESET_HANDSHAKE,
+ LJCA_RESET_SYNCED,
+ LJCA_ENUM_GPIO_COMPLETE,
+ LJCA_ENUM_I2C_COMPLETE,
+ LJCA_ENUM_SPI_COMPLETE,
+ LJCA_SUSPEND,
+ LJCA_STARTED,
+ LJCA_FAILED,
+};
+
+struct ljca_dev {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ u8 in_ep; /* the address of the bulk in endpoint */
+ u8 out_ep; /* the address of the bulk out endpoint */
+
+ /* the urb/buffer for read */
+ struct urb *in_urb;
+ unsigned char *ibuf;
+ size_t ibuf_len;
+
+ int state;
+
+ atomic_t active_transfers;
+ wait_queue_head_t disconnect_wq;
+
+ struct list_head stubs_list;
+
+ /* to wait for an ongoing write ack */
+ wait_queue_head_t ack_wq;
+
+ struct mfd_cell *cells;
+ int cell_count;
+};
+
+/* A frame is well-formed when the header-declared payload length plus the
+ * header size exactly matches the number of bytes actually received.
+ */
+static bool ljca_validate(void *data, u32 data_len)
+{
+	const struct ljca_msg *hdr = data;
+
+	return data_len == sizeof(*hdr) + hdr->len;
+}
+
+
+/*
+ * Hex-dump a message payload to the dynamic debug log.
+ *
+ * Fixed: the helper had external linkage although it is used only inside
+ * this file; make it static to keep the kernel-wide namespace clean (and
+ * silence -Wmissing-prototypes).
+ */
+static void ljca_dump(struct ljca_dev *ljca, void *buf, int len)
+{
+	int i;
+	u8 tmp[256] = { 0 };
+	int n = 0;
+
+	if (!len)
+		return;
+
+	for (i = 0; i < len; i++)
+		n += scnprintf(tmp + n, sizeof(tmp) - n - 1, "%02x ",
+			       ((u8 *)buf)[i]);
+
+	dev_dbg(&ljca->intf->dev, "%s\n", tmp);
+}
+
+/* Allocate a stub with @priv_size bytes of trailing private storage and
+ * link it onto the device's stub list.  Returns ERR_PTR(-ENOMEM) on
+ * allocation failure.
+ */
+static struct ljca_stub *ljca_stub_alloc(struct ljca_dev *ljca, int priv_size)
+{
+	struct ljca_stub *new_stub;
+
+	new_stub = kzalloc(sizeof(*new_stub) + priv_size, GFP_KERNEL);
+	if (!new_stub)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&new_stub->mutex);
+	spin_lock_init(&new_stub->event_cb_lock);
+	INIT_LIST_HEAD(&new_stub->list);
+	list_add_tail(&new_stub->list, &ljca->stubs_list);
+	dev_dbg(&ljca->intf->dev, "enuming a stub success\n");
+	return new_stub;
+}
+
+/* Look up the stub registered for @type; ERR_PTR(-ENODEV) when absent. */
+static struct ljca_stub *ljca_stub_find(struct ljca_dev *ljca, u8 type)
+{
+	struct ljca_stub *cur;
+
+	list_for_each_entry (cur, &ljca->stubs_list, list)
+		if (cur->type == type)
+			return cur;
+
+	dev_err(&ljca->intf->dev, "usb stub not find, type: %d", type);
+	return ERR_PTR(-ENODEV);
+}
+
+/* Deliver an unsolicited firmware event to the registered callback, if
+ * any.  The spinlock keeps the (notify, pdev) pair coherent against
+ * concurrent (un)registration; callers may be in atomic context.
+ */
+static void ljca_stub_notify(struct ljca_stub *stub, u8 cmd,
+			     const void *evt_data, int len)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&stub->event_cb_lock, flags);
+	if (stub->event_entry.pdev && stub->event_entry.notify)
+		stub->event_entry.notify(stub->event_entry.pdev, cmd,
+					 evt_data, len);
+	spin_unlock_irqrestore(&stub->event_cb_lock, flags);
+}
+
+/*
+ * Dispatch one received message to its stub.
+ *
+ * Messages without ACK_FLAG are unsolicited events and are forwarded to
+ * the stub's registered callback.  Messages with ACK_FLAG complete the
+ * command currently pending in ljca_stub_write(): the payload is copied
+ * into the waiter's buffer (when one was supplied) and the waiter is
+ * woken via ack_wq.
+ */
+static int ljca_parse(struct ljca_dev *ljca, struct ljca_msg *header)
+{
+	struct ljca_stub *stub;
+
+	stub = ljca_stub_find(ljca, header->type);
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	/* no ACK flag: this is an event, not a command completion */
+	if (!(header->flags & ACK_FLAG)) {
+		ljca_stub_notify(stub, header->cmd, header->data, header->len);
+		return 0;
+	}
+
+	/* an ack must match the command the writer is blocked on */
+	if (stub->cur_cmd != header->cmd) {
+		dev_err(&ljca->intf->dev, "header->cmd:%x != stub->cur_cmd:%x",
+			header->cmd, stub->cur_cmd);
+		return -EINVAL;
+	}
+
+	stub->ipacket.ibuf_len = header->len;
+	if (stub->ipacket.ibuf)
+		memcpy(stub->ipacket.ibuf, header->data, header->len);
+
+	stub->acked = true;
+	wake_up(&ljca->ack_wq);
+
+	return 0;
+}
+
+/*
+ * Send one command on @stub and optionally wait for the firmware ack.
+ * @obuf/@obuf_len: command payload (<= MAX_PAYLOAD_SIZE).
+ * @ibuf/@ibuf_len: optional buffer for the ack payload.
+ * @wait_ack: request and block (up to USB_WRITE_ACK_TIMEOUT ms) for an ack.
+ *
+ * Fixes vs. original:
+ *  - the active_transfers counter is now balanced on every early-exit
+ *    path; the unbalanced atomic_inc() could make ljca_stop() wait
+ *    forever on disconnect_wq;
+ *  - the usb_autopm reference is released even when usb_bulk_msg()
+ *    fails (was leaked on that path);
+ *  - a short bulk write now returns -EIO instead of success;
+ *  - ipacket.ibuf is cleared on the error path too, so a late ack can
+ *    no longer write through a pointer to a caller's (possibly stack)
+ *    buffer.
+ */
+static int ljca_stub_write(struct ljca_stub *stub, u8 cmd, const void *obuf,
+			   int obuf_len, void *ibuf, int *ibuf_len, bool wait_ack)
+{
+	struct ljca_msg *header;
+	struct ljca_dev *ljca = usb_get_intfdata(stub->intf);
+	int ret;
+	u8 flags = CMPL_FLAG;
+	int actual;
+
+	atomic_inc(&ljca->active_transfers);
+
+	if (ljca->state == LJCA_STOPPED) {
+		ret = -ENODEV;
+		goto out_active;
+	}
+
+	if (obuf_len > MAX_PAYLOAD_SIZE) {
+		ret = -EINVAL;
+		goto out_active;
+	}
+
+	if (wait_ack)
+		flags |= ACK_FLAG;
+
+	stub->ipacket.ibuf_len = 0;
+	header = kmalloc(sizeof(*header) + obuf_len, GFP_KERNEL);
+	if (!header) {
+		ret = -ENOMEM;
+		goto out_active;
+	}
+
+	header->type = stub->type;
+	header->cmd = cmd;
+	header->flags = flags;
+	header->len = obuf_len;
+
+	memcpy(header->data, obuf, obuf_len);
+	dev_dbg(&ljca->intf->dev, "send: type:%d cmd:%d flags:%d len:%d\n",
+		header->type, header->cmd, header->flags, header->len);
+	ljca_dump(ljca, header->data, header->len);
+
+	mutex_lock(&stub->mutex);
+	stub->cur_cmd = cmd;
+	stub->ipacket.ibuf = ibuf;
+	stub->acked = false;
+	usb_autopm_get_interface(ljca->intf);
+	ret = usb_bulk_msg(ljca->udev,
+			   usb_sndbulkpipe(ljca->udev, ljca->out_ep), header,
+			   sizeof(struct ljca_msg) + obuf_len, &actual,
+			   USB_WRITE_TIMEOUT);
+	kfree(header);
+	usb_autopm_put_interface(ljca->intf);
+	if (ret || actual != sizeof(struct ljca_msg) + obuf_len) {
+		dev_err(&ljca->intf->dev,
+			"bridge write failed ret:%d total_len:%d\n ", ret,
+			actual);
+		if (!ret)
+			ret = -EIO;	/* short write */
+		goto error;
+	}
+
+	if (wait_ack) {
+		ret = wait_event_interruptible_timeout(
+			ljca->ack_wq, stub->acked,
+			msecs_to_jiffies(USB_WRITE_ACK_TIMEOUT));
+		if (!ret || !stub->acked) {
+			dev_err(&ljca->intf->dev,
+				"acked sem wait timed out ret:%d timeout:%d ack:%d\n",
+				ret, USB_WRITE_ACK_TIMEOUT, stub->acked);
+			ret = -ETIMEDOUT;
+			goto error;
+		}
+	}
+
+	if (ibuf_len)
+		*ibuf_len = stub->ipacket.ibuf_len;
+
+	ret = 0;
+error:
+	/* never leave a pointer to a caller-owned buffer behind */
+	stub->ipacket.ibuf = NULL;
+	stub->ipacket.ibuf_len = 0;
+	mutex_unlock(&stub->mutex);
+out_active:
+	atomic_dec(&ljca->active_transfers);
+	if (ljca->state == LJCA_STOPPED)
+		wake_up(&ljca->disconnect_wq);
+
+	return ret;
+}
+
+/*
+ * Common path for ljca_transfer()/ljca_transfer_noack(): resolve the
+ * stub from the MFD cell's platform data and forward to
+ * ljca_stub_write().
+ *
+ * NOTE(review): pdev->dev.parent is assumed to be the USB interface
+ * device whose drvdata is the struct ljca_dev — confirm against the
+ * mfd_add_hotplug_devices() call in ljca_probe().
+ */
+static int ljca_transfer_internal(struct platform_device *pdev, u8 cmd, const void *obuf,
+				  int obuf_len, void *ibuf, int *ibuf_len, bool wait_ack)
+{
+	struct ljca_platform_data *ljca_pdata;
+	struct ljca_dev *ljca;
+	struct ljca_stub *stub;
+
+	if (!pdev)
+		return -EINVAL;
+
+	ljca = dev_get_drvdata(pdev->dev.parent);
+	ljca_pdata = dev_get_platdata(&pdev->dev);
+	stub = ljca_stub_find(ljca, ljca_pdata->type);
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	return ljca_stub_write(stub, cmd, obuf, obuf_len, ibuf, ibuf_len, wait_ack);
+}
+
+/* Send @cmd and wait for the firmware ack (and any response payload). */
+int ljca_transfer(struct platform_device *pdev, u8 cmd, const void *obuf,
+		  int obuf_len, void *ibuf, int *ibuf_len)
+{
+	return ljca_transfer_internal(pdev, cmd, obuf, obuf_len,
+				      ibuf, ibuf_len, true);
+}
+EXPORT_SYMBOL_GPL(ljca_transfer);
+
+/* Fire-and-forget variant: no ack is requested and none is awaited. */
+int ljca_transfer_noack(struct platform_device *pdev, u8 cmd, const void *obuf,
+			int obuf_len)
+{
+	return ljca_transfer_internal(pdev, cmd, obuf, obuf_len,
+				      NULL, NULL, false);
+}
+EXPORT_SYMBOL_GPL(ljca_transfer_noack);
+
+/*
+ * ljca_register_event_cb - attach an event callback for a cell's stub
+ * @pdev: the MFD child platform device (identifies the stub via pdata)
+ * @event_cb: callback invoked for unsolicited firmware events
+ *
+ * The callback is invoked under event_cb_lock from URB completion
+ * context (see ljca_stub_notify()), so it must not sleep.
+ * Returns 0 on success or a negative errno.
+ */
+int ljca_register_event_cb(struct platform_device *pdev,
+			   ljca_event_cb_t event_cb)
+{
+	struct ljca_platform_data *ljca_pdata;
+	struct ljca_dev *ljca;
+	struct ljca_stub *stub;
+	unsigned long flags;
+
+	if (!pdev)
+		return -EINVAL;
+
+	ljca = dev_get_drvdata(pdev->dev.parent);
+	ljca_pdata = dev_get_platdata(&pdev->dev);
+	stub = ljca_stub_find(ljca, ljca_pdata->type);
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	spin_lock_irqsave(&stub->event_cb_lock, flags);
+	stub->event_entry.notify = event_cb;
+	stub->event_entry.pdev = pdev;
+	spin_unlock_irqrestore(&stub->event_cb_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ljca_register_event_cb);
+
+/*
+ * ljca_unregister_event_cb - detach the callback installed by
+ * ljca_register_event_cb().
+ *
+ * Fixed: reject a NULL @pdev up front — the register side validates it
+ * but this side dereferenced it unconditionally.
+ */
+void ljca_unregister_event_cb(struct platform_device *pdev)
+{
+	struct ljca_platform_data *ljca_pdata;
+	struct ljca_dev *ljca;
+	struct ljca_stub *stub;
+	unsigned long flags;
+
+	if (!pdev)
+		return;
+
+	ljca = dev_get_drvdata(pdev->dev.parent);
+	ljca_pdata = dev_get_platdata(&pdev->dev);
+	stub = ljca_stub_find(ljca, ljca_pdata->type);
+	if (IS_ERR(stub))
+		return;
+
+	spin_lock_irqsave(&stub->event_cb_lock, flags);
+	stub->event_entry.notify = NULL;
+	stub->event_entry.pdev = NULL;
+	spin_unlock_irqrestore(&stub->event_cb_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ljca_unregister_event_cb);
+
+/* Unlink and free every stub; called once no more I/O can be in flight. */
+static void ljca_stub_cleanup(struct ljca_dev *ljca)
+{
+	struct ljca_stub *stub, *tmp;
+
+	list_for_each_entry_safe (stub, tmp, &ljca->stubs_list, list) {
+		list_del_init(&stub->list);
+		mutex_destroy(&stub->mutex);
+		kfree(stub);
+	}
+}
+
+/*
+ * Completion handler for the single bulk-in read URB.  Validates and
+ * dispatches the received frame, then resubmits the URB so reads are
+ * continuous for the lifetime of the interface.
+ */
+static void ljca_read_complete(struct urb *urb)
+{
+	struct ljca_dev *ljca = urb->context;
+	struct ljca_msg *header = urb->transfer_buffer;
+	int len = urb->actual_length;
+	int ret;
+
+	dev_dbg(&ljca->intf->dev,
+		"bulk read urb got message from fw, status:%d data_len:%d\n",
+		urb->status, urb->actual_length);
+
+	if (urb->status) {
+		/* sync/async unlink faults aren't errors */
+		if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+		    urb->status == -ESHUTDOWN)
+			return;
+
+		dev_err(&ljca->intf->dev, "read bulk urb transfer failed: %d\n",
+			urb->status);
+		goto resubmit;
+	}
+
+	dev_dbg(&ljca->intf->dev, "receive: type:%d cmd:%d flags:%d len:%d\n",
+		header->type, header->cmd, header->flags, header->len);
+	ljca_dump(ljca, header->data, header->len);
+
+	if (!ljca_validate(header, len)) {
+		dev_err(&ljca->intf->dev,
+			"data not correct header->len:%d payload_len:%d\n ",
+			header->len, len);
+		goto resubmit;
+	}
+
+	ret = ljca_parse(ljca, header);
+	if (ret)
+		dev_err(&ljca->intf->dev,
+			"failed to parse data: ret:%d type:%d len: %d", ret,
+			header->type, header->len);
+
+resubmit:
+	/*
+	 * Fixed: URB completion handlers run in atomic (interrupt)
+	 * context, so the resubmission must not sleep — GFP_ATOMIC, not
+	 * GFP_KERNEL.
+	 */
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
+	if (ret)
+		dev_err(&ljca->intf->dev,
+			"failed submitting read urb, error %d\n", ret);
+}
+
+/* Prime the bulk-in endpoint: (re)fill and submit the read URB. */
+static int ljca_start(struct ljca_dev *ljca)
+{
+	int ret;
+
+	usb_fill_bulk_urb(ljca->in_urb, ljca->udev,
+			  usb_rcvbulkpipe(ljca->udev, ljca->in_ep),
+			  ljca->ibuf, ljca->ibuf_len, ljca_read_complete,
+			  ljca);
+
+	ret = usb_submit_urb(ljca->in_urb, GFP_KERNEL);
+	if (ret)
+		dev_err(&ljca->intf->dev,
+			"failed submitting read urb, error %d\n", ret);
+
+	return ret;
+}
+
+struct ljca_mng_priv {
+ long reset_id;
+};
+
+/*
+ * Reset handshake: send a monotonically increasing cookie with
+ * MNG_RESET_NOTIFY and require the firmware to echo it back, proving
+ * both sides are in sync after a (re)start.
+ *
+ * Both cookie values are compared in their little-endian wire form, so
+ * the equality test is endian-safe without conversion.
+ */
+static int ljca_mng_reset_handshake(struct ljca_stub *stub)
+{
+	int ret;
+	struct ljca_mng_priv *priv;
+	__le32 reset_id;
+	__le32 reset_id_ret = 0;
+	int ilen;
+
+	priv = ljca_priv(stub);
+	reset_id = cpu_to_le32(priv->reset_id++);
+	ret = ljca_stub_write(stub, MNG_RESET_NOTIFY, &reset_id,
+			      sizeof(reset_id), &reset_id_ret, &ilen, true);
+	if (ret || ilen != sizeof(reset_id_ret) || reset_id_ret != reset_id) {
+		dev_err(&stub->intf->dev,
+			"MNG_RESET_NOTIFY failed reset_id:%d/%d ret:%d\n",
+			le32_to_cpu(reset_id_ret), le32_to_cpu(reset_id), ret);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Ask the firmware to reset itself (acked command, no payload). */
+static inline int ljca_mng_reset(struct ljca_stub *stub)
+{
+	return ljca_stub_write(stub, MNG_RESET, NULL, 0, NULL, NULL, true);
+}
+
+/*
+ * Append a copy of @cell to the device's cell array, growing it by one.
+ * On reallocation failure the old array remains valid and owned by
+ * ljca->cells; everything is freed in ljca_delete().
+ */
+static int ljca_add_mfd_cell(struct ljca_dev *ljca, struct mfd_cell *cell)
+{
+	struct mfd_cell *new_cells;
+	new_cells = krealloc_array(ljca->cells, (ljca->cell_count + 1),
+				   sizeof(struct mfd_cell), GFP_KERNEL);
+	if (!new_cells)
+		return -ENOMEM;
+
+	memcpy(&new_cells[ljca->cell_count], cell, sizeof(*cell));
+	ljca->cells = new_cells;
+	ljca->cell_count++;
+
+	return 0;
+}
+
+/*
+ * Create the GPIO stub and a single "ljca-gpio" MFD cell from the
+ * firmware-provided bank descriptor.
+ *
+ * Fixed: both pins_per_bank and bank_num come from (untrusted) firmware.
+ * Only the total pin count was bounded before, so a descriptor with a
+ * small pins_per_bank and a large bank_num could overflow valid_pin[]
+ * on the stack; bound bank_num explicitly as well.
+ */
+static int ljca_gpio_stub_init(struct ljca_dev *ljca,
+			       struct ljca_gpio_descriptor *desc)
+{
+	struct ljca_stub *stub;
+	struct mfd_cell cell = { 0 };
+	struct ljca_platform_data *pdata;
+	int gpio_num = desc->pins_per_bank * desc->bank_num;
+	int i;
+	u32 valid_pin[MAX_GPIO_NUM / (sizeof(u32) * BITS_PER_BYTE)];
+
+	if (gpio_num > MAX_GPIO_NUM ||
+	    desc->bank_num > ARRAY_SIZE(valid_pin))
+		return -EINVAL;
+
+	stub = ljca_stub_alloc(ljca, sizeof(*pdata));
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	stub->type = GPIO_STUB;
+	stub->intf = ljca->intf;
+
+	pdata = ljca_priv(stub);
+	pdata->type = stub->type;
+	pdata->gpio_info.num = gpio_num;
+
+	for (i = 0; i < desc->bank_num; i++)
+		valid_pin[i] = desc->bank_desc[i].valid_pins;
+
+	bitmap_from_arr32(pdata->gpio_info.valid_pin_map, valid_pin, gpio_num);
+
+	cell.name = "ljca-gpio";
+	cell.platform_data = pdata;
+	cell.pdata_size = sizeof(*pdata);
+	cell.acpi_match = &ljca_acpi_match_gpio;
+
+	return ljca_add_mfd_cell(ljca, &cell);
+}
+
+/*
+ * Enumerate the GPIO banks exposed by the firmware and register the
+ * resulting stub/cell.
+ *
+ * Fixed: a failed or short enumeration used to be logged and the
+ * (garbage) descriptor passed to ljca_gpio_stub_init() anyway; bail out
+ * with -EIO instead, matching the I2C/SPI enumeration paths.  Also
+ * initialize @len, which was printed uninitialized on failure.
+ */
+static int ljca_mng_enum_gpio(struct ljca_stub *stub)
+{
+	struct ljca_dev *ljca = usb_get_intfdata(stub->intf);
+	struct ljca_gpio_descriptor *desc;
+	int ret;
+	int len = 0;
+
+	desc = kzalloc(MAX_PAYLOAD_SIZE, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	ret = ljca_stub_write(stub, MNG_ENUM_GPIO, NULL, 0, desc, &len, true);
+	if (ret || len != sizeof(*desc) + desc->bank_num *
+		   sizeof(desc->bank_desc[0])) {
+		dev_err(&stub->intf->dev,
+			"enum gpio failed ret:%d len:%d bank_num:%d\n", ret,
+			len, desc->bank_num);
+		kfree(desc);
+		return -EIO;
+	}
+
+	ret = ljca_gpio_stub_init(ljca, desc);
+	kfree(desc);
+	return ret;
+}
+
+/*
+ * Create the shared I2C stub and one "ljca-i2c" MFD cell per controller
+ * reported in @desc.  Each cell's platform data carries the stub type
+ * and the controller's id/capacity/interrupt pin.
+ *
+ * Only the first ARRAY_SIZE(ljca_acpi_match_i2cs) controllers get an
+ * ACPI match; any extra controllers are still registered, unmatched.
+ */
+static int ljca_i2c_stub_init(struct ljca_dev *ljca,
+			      struct ljca_i2c_descriptor *desc)
+{
+	struct ljca_stub *stub;
+	struct ljca_platform_data *pdata;
+	int i;
+	int ret;
+
+	stub = ljca_stub_alloc(ljca, desc->num * sizeof(*pdata));
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	stub->type = I2C_STUB;
+	stub->intf = ljca->intf;
+	pdata = ljca_priv(stub);
+
+	for (i = 0; i < desc->num; i++) {
+		struct mfd_cell cell = { 0 };
+		pdata[i].type = stub->type;
+
+		pdata[i].i2c_info.id = desc->info[i].id;
+		pdata[i].i2c_info.capacity = desc->info[i].capacity;
+		pdata[i].i2c_info.intr_pin = desc->info[i].intr_pin;
+
+		cell.name = "ljca-i2c";
+		cell.platform_data = &pdata[i];
+		cell.pdata_size = sizeof(pdata[i]);
+		if (i < ARRAY_SIZE(ljca_acpi_match_i2cs))
+			cell.acpi_match = &ljca_acpi_match_i2cs[i];
+
+		ret = ljca_add_mfd_cell(ljca, &cell);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Enumerate I2C controllers and register the resulting stub/cells.
+ *
+ * Fixed: initialize @len — ljca_stub_write() only writes it on success,
+ * so the failure log previously printed an uninitialized value.
+ */
+static int ljca_mng_enum_i2c(struct ljca_stub *stub)
+{
+	struct ljca_dev *ljca = usb_get_intfdata(stub->intf);
+	struct ljca_i2c_descriptor *desc;
+	int ret;
+	int len = 0;
+
+	desc = kzalloc(MAX_PAYLOAD_SIZE, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	ret = ljca_stub_write(stub, MNG_ENUM_I2C, NULL, 0, desc, &len, true);
+	if (ret) {
+		dev_err(&stub->intf->dev,
+			"MNG_ENUM_I2C failed ret:%d len:%d num:%d\n", ret, len,
+			desc->num);
+		kfree(desc);
+		return -EIO;
+	}
+
+	ret = ljca_i2c_stub_init(ljca, desc);
+	kfree(desc);
+	return ret;
+}
+
+/*
+ * Create the shared SPI stub and one "ljca-spi" MFD cell per controller
+ * reported in @desc; mirrors ljca_i2c_stub_init().
+ */
+static int ljca_spi_stub_init(struct ljca_dev *ljca,
+			      struct ljca_spi_descriptor *desc)
+{
+	struct ljca_stub *stub;
+	struct ljca_platform_data *pdata;
+	int i;
+	int ret;
+
+	stub = ljca_stub_alloc(ljca, desc->num * sizeof(*pdata));
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	stub->type = SPI_STUB;
+	stub->intf = ljca->intf;
+	pdata = ljca_priv(stub);
+
+	for (i = 0; i < desc->num; i++) {
+		struct mfd_cell cell = { 0 };
+		pdata[i].type = stub->type;
+
+		pdata[i].spi_info.id = desc->info[i].id;
+		pdata[i].spi_info.capacity = desc->info[i].capacity;
+
+		cell.name = "ljca-spi";
+		cell.platform_data = &pdata[i];
+		cell.pdata_size = sizeof(pdata[i]);
+		if (i < ARRAY_SIZE(ljca_acpi_match_spis))
+			cell.acpi_match = &ljca_acpi_match_spis[i];
+
+		ret = ljca_add_mfd_cell(ljca, &cell);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Enumerate SPI controllers and register the resulting stub/cells.
+ *
+ * Fixed: the failure message said "MNG_ENUM_I2C" (copy-paste from the
+ * I2C path); it now names the SPI command.  Also initialize @len, which
+ * was printed uninitialized on failure.
+ */
+static int ljca_mng_enum_spi(struct ljca_stub *stub)
+{
+	struct ljca_dev *ljca = usb_get_intfdata(stub->intf);
+	struct ljca_spi_descriptor *desc;
+	int ret;
+	int len = 0;
+
+	desc = kzalloc(MAX_PAYLOAD_SIZE, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	ret = ljca_stub_write(stub, MNG_ENUM_SPI, NULL, 0, desc, &len, true);
+	if (ret) {
+		dev_err(&stub->intf->dev,
+			"MNG_ENUM_SPI failed ret:%d len:%d num:%d\n", ret, len,
+			desc->num);
+		kfree(desc);
+		return -EIO;
+	}
+
+	ret = ljca_spi_stub_init(ljca, desc);
+	kfree(desc);
+	return ret;
+}
+
+/*
+ * Query the firmware version and format it into @buf (a sysfs page).
+ * Returns the number of bytes written, or a negative errno.
+ *
+ * Fixed: initialize @len — ljca_stub_write() only writes it on success,
+ * so both the length check and the failure log previously consumed an
+ * uninitialized value.
+ */
+static int ljca_mng_get_version(struct ljca_stub *stub, char *buf)
+{
+	struct fw_version version = {0};
+	int ret;
+	int len = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	ret = ljca_stub_write(stub, MNG_GET_VERSION, NULL, 0, &version, &len, true);
+	if (ret || len < sizeof(struct fw_version)) {
+		dev_err(&stub->intf->dev,
+			"MNG_GET_VERSION failed ret:%d len:%d\n", ret, len);
+		return ret;
+	}
+
+	/* patch/build arrive little-endian from the wire */
+	return sysfs_emit(buf, "%d.%d.%d.%d\n", version.major, version.minor,
+			  le16_to_cpu(version.patch), le16_to_cpu(version.build));
+}
+
+/* Switch the firmware into DFU (firmware update) mode. */
+static inline int ljca_mng_set_dfu_mode(struct ljca_stub *stub)
+{
+	return ljca_stub_write(stub, MNG_SET_DFU_MODE, NULL, 0, NULL, NULL, true);
+}
+
+/*
+ * Run the full bring-up sequence on the management stub: reset
+ * handshake, then GPIO, I2C and SPI enumeration.  ljca->state tracks the
+ * last completed step so failures can be diagnosed from the state value.
+ */
+static int ljca_mng_link(struct ljca_dev *ljca, struct ljca_stub *stub)
+{
+	int ret;
+
+	ret = ljca_mng_reset_handshake(stub);
+	if (ret)
+		return ret;
+
+	ljca->state = LJCA_RESET_SYNCED;
+
+	ret = ljca_mng_enum_gpio(stub);
+	if (ret)
+		return ret;
+
+	ljca->state = LJCA_ENUM_GPIO_COMPLETE;
+
+	ret = ljca_mng_enum_i2c(stub);
+	if (ret)
+		return ret;
+
+	ljca->state = LJCA_ENUM_I2C_COMPLETE;
+
+	ret = ljca_mng_enum_spi(stub);
+	if (ret)
+		return ret;
+
+	ljca->state = LJCA_ENUM_SPI_COMPLETE;
+	return ret;
+}
+
+/*
+ * Create the management stub and run the enumeration sequence.
+ *
+ * Fixed: dropped the impossible NULL check on ljca_priv() (it returns
+ * stub + sizeof(*stub) and cannot be NULL for a valid stub), and
+ * corrected the error log text, which previously read like a success
+ * message ("link done") on the failure path.
+ */
+static int ljca_mng_init(struct ljca_dev *ljca)
+{
+	struct ljca_stub *stub;
+	struct ljca_mng_priv *priv;
+	int ret;
+
+	stub = ljca_stub_alloc(ljca, sizeof(*priv));
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	priv = ljca_priv(stub);
+	priv->reset_id = 0;
+	stub->type = MNG_STUB;
+	stub->intf = ljca->intf;
+
+	ret = ljca_mng_link(ljca, stub);
+	if (ret)
+		dev_err(&ljca->intf->dev,
+			"mng stub link failed ret:%d state:%d\n", ret,
+			ljca->state);
+
+	return ret;
+}
+
+/*
+ * Fetch the firmware log into @buf; returns the number of bytes
+ * received or a negative errno.  @buf must be able to hold up to
+ * MAX_PAYLOAD_SIZE bytes (callers pass a sysfs PAGE_SIZE buffer).
+ */
+static inline int ljca_diag_get_fw_log(struct ljca_stub *stub, void *buf)
+{
+	int ret;
+	int len;
+
+	if (!buf)
+		return -EINVAL;
+
+	ret = ljca_stub_write(stub, DIAG_GET_FW_LOG, NULL, 0, buf, &len, true);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+/*
+ * Fetch the firmware coredump into @buf; same contract as
+ * ljca_diag_get_fw_log().
+ */
+static inline int ljca_diag_get_coredump(struct ljca_stub *stub, void *buf)
+{
+	int ret;
+	int len;
+
+	if (!buf)
+		return -EINVAL;
+
+	ret = ljca_stub_write(stub, DIAG_GET_FW_COREDUMP, NULL, 0, buf, &len, true);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+/* Set the firmware trace verbosity (acked command). */
+static inline int ljca_diag_set_trace_level(struct ljca_stub *stub, u8 level)
+{
+	return ljca_stub_write(stub, DIAG_SET_TRACE_LEVEL, &level,
+			       sizeof(level), NULL, NULL, true);
+}
+
+/* Register the diagnostics stub; it carries no private data. */
+static int ljca_diag_init(struct ljca_dev *ljca)
+{
+	struct ljca_stub *stub;
+
+	stub = ljca_stub_alloc(ljca, 0);
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	stub->type = DIAG_STUB;
+	stub->intf = ljca->intf;
+	return 0;
+}
+
+/*
+ * Release every resource owned by the device structure.
+ *
+ * NOTE(review): this frees in_urb without killing it; callers must make
+ * sure the URB is no longer in flight (ljca_stop() does that) — the
+ * error path in ljca_probe() may reach here with the URB still
+ * submitted; confirm.
+ */
+static void ljca_delete(struct ljca_dev *ljca)
+{
+	usb_free_urb(ljca->in_urb);
+	usb_put_intf(ljca->intf);
+	usb_put_dev(ljca->udev);
+	kfree(ljca->ibuf);
+	kfree(ljca->cells);
+	kfree(ljca);
+}
+
+/* One-time initialization of wait queues, stub list and state. */
+static int ljca_init(struct ljca_dev *ljca)
+{
+	init_waitqueue_head(&ljca->ack_wq);
+	init_waitqueue_head(&ljca->disconnect_wq);
+	INIT_LIST_HEAD(&ljca->stubs_list);
+
+	ljca->state = LJCA_INITED;
+
+	return 0;
+}
+
+/*
+ * Stop all traffic: mark the device stopped, wait for in-flight
+ * ljca_stub_write() calls to drain, then kill the read URB.
+ *
+ * NOTE(review): wait_event_interruptible() may return early on a
+ * signal, in which case transfers could still be active when the URB is
+ * killed — an uninterruptible wait looks safer here; confirm.
+ */
+static void ljca_stop(struct ljca_dev *ljca)
+{
+	ljca->state = LJCA_STOPPED;
+	wait_event_interruptible(ljca->disconnect_wq,
+				 !atomic_read(&ljca->active_transfers));
+	usb_kill_urb(ljca->in_urb);
+}
+
+/*
+ * sysfs "cmd" store: accepts "dfu", "reset" or "debug".
+ *
+ * Fixed: ljca_stub_find() returns ERR_PTR() on failure; the stubs were
+ * previously dereferenced unconditionally, which would oops if
+ * enumeration had failed.
+ */
+static ssize_t cmd_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct ljca_dev *ljca = usb_get_intfdata(intf);
+	struct ljca_stub *mng_stub = ljca_stub_find(ljca, MNG_STUB);
+	struct ljca_stub *diag_stub = ljca_stub_find(ljca, DIAG_STUB);
+
+	if (sysfs_streq(buf, "dfu")) {
+		if (!IS_ERR(mng_stub))
+			ljca_mng_set_dfu_mode(mng_stub);
+	} else if (sysfs_streq(buf, "reset")) {
+		if (!IS_ERR(mng_stub))
+			ljca_mng_reset(mng_stub);
+	} else if (sysfs_streq(buf, "debug")) {
+		if (!IS_ERR(diag_stub))
+			ljca_diag_set_trace_level(diag_stub, 3);
+	}
+
+	return count;
+}
+
+/* sysfs "cmd" show: list the accepted command strings. */
+static ssize_t cmd_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	return sysfs_emit(buf, "%s\n", "supported cmd: [dfu, reset, debug]");
+}
+static DEVICE_ATTR_RW(cmd);
+
+/*
+ * sysfs "version" show: report the firmware version string.
+ *
+ * Fixed (here and in log_show()/coredump_show() below): ljca_stub_find()
+ * returns ERR_PTR() on failure; the stub was previously passed on and
+ * dereferenced unconditionally, which would oops if enumeration had
+ * failed.
+ */
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct ljca_dev *ljca = usb_get_intfdata(intf);
+	struct ljca_stub *stub = ljca_stub_find(ljca, MNG_STUB);
+
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	return ljca_mng_get_version(stub, buf);
+}
+static DEVICE_ATTR_RO(version);
+
+/* sysfs "log" show: dump the firmware log. */
+static ssize_t log_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct ljca_dev *ljca = usb_get_intfdata(intf);
+	struct ljca_stub *diag_stub = ljca_stub_find(ljca, DIAG_STUB);
+
+	if (IS_ERR(diag_stub))
+		return PTR_ERR(diag_stub);
+
+	return ljca_diag_get_fw_log(diag_stub, buf);
+}
+static DEVICE_ATTR_RO(log);
+
+/* sysfs "coredump" show: dump the firmware coredump. */
+static ssize_t coredump_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct ljca_dev *ljca = usb_get_intfdata(intf);
+	struct ljca_stub *diag_stub = ljca_stub_find(ljca, DIAG_STUB);
+
+	if (IS_ERR(diag_stub))
+		return PTR_ERR(diag_stub);
+
+	return ljca_diag_get_coredump(diag_stub, buf);
+}
+static DEVICE_ATTR_RO(coredump);
+
+static struct attribute *ljca_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_cmd.attr,
+ &dev_attr_log.attr,
+ &dev_attr_coredump.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ljca);
+
+/*
+ * ljca_probe - bind to the LJCA USB bridge and enumerate its functions.
+ *
+ * Sets up the bulk endpoints, starts the bulk-in read URB, runs the
+ * management handshake/enumeration (which populates ljca->cells), and
+ * registers the discovered GPIO/I2C/SPI cells as MFD children.
+ *
+ * NOTE(review): on the error path after ljca_start() succeeded, the
+ * read URB may still be in flight when ljca_delete() frees it; a
+ * usb_kill_urb() before cleanup looks necessary — confirm.
+ */
+static int ljca_probe(struct usb_interface *intf,
+		      const struct usb_device_id *id)
+{
+	struct ljca_dev *ljca;
+	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+	int ret;
+
+	/* allocate memory for our device state and initialize it */
+	ljca = kzalloc(sizeof(*ljca), GFP_KERNEL);
+	if (!ljca)
+		return -ENOMEM;
+
+	ljca_init(ljca);
+	ljca->udev = usb_get_dev(interface_to_usbdev(intf));
+	ljca->intf = usb_get_intf(intf);
+
+	/* set up the endpoint information use only the first bulk-in and bulk-out endpoints */
+	ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in,
+					&bulk_out, NULL, NULL);
+	if (ret) {
+		dev_err(&intf->dev,
+			"Could not find both bulk-in and bulk-out endpoints\n");
+		goto error;
+	}
+
+	/* the read buffer is sized to the endpoint's max packet */
+	ljca->ibuf_len = usb_endpoint_maxp(bulk_in);
+	ljca->in_ep = bulk_in->bEndpointAddress;
+	ljca->ibuf = kzalloc(ljca->ibuf_len, GFP_KERNEL);
+	if (!ljca->ibuf) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ljca->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!ljca->in_urb) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ljca->out_ep = bulk_out->bEndpointAddress;
+	dev_dbg(&intf->dev, "bulk_in size:%zu addr:%d bulk_out addr:%d\n",
+		ljca->ibuf_len, ljca->in_ep, ljca->out_ep);
+
+	/* save our data pointer in this intf device */
+	usb_set_intfdata(intf, ljca);
+	ret = ljca_start(ljca);
+	if (ret) {
+		dev_err(&intf->dev, "bridge read start failed ret %d\n", ret);
+		goto error;
+	}
+
+	/* handshake + GPIO/I2C/SPI enumeration; fills ljca->cells */
+	ret = ljca_mng_init(ljca);
+	if (ret) {
+		dev_err(&intf->dev, "register mng stub failed ret %d\n", ret);
+		goto error;
+	}
+
+	ret = ljca_diag_init(ljca);
+	if (ret) {
+		dev_err(&intf->dev, "register diag stub failed ret %d\n", ret);
+		goto error;
+	}
+
+	/* expose the enumerated functions as MFD child devices */
+	ret = mfd_add_hotplug_devices(&intf->dev, ljca->cells,
+				      ljca->cell_count);
+	if (ret) {
+		dev_err(&intf->dev, "failed to add mfd devices to core %d\n",
+			ljca->cell_count);
+		goto error;
+	}
+
+	usb_enable_autosuspend(ljca->udev);
+	ljca->state = LJCA_STARTED;
+	dev_info(&intf->dev, "LJCA USB device init success\n");
+	return 0;
+
+error:
+	dev_err(&intf->dev, "LJCA USB device init failed\n");
+	/* this frees allocated memory */
+	ljca_stub_cleanup(ljca);
+	ljca_delete(ljca);
+	return ret;
+}
+
+/*
+ * USB disconnect: drain traffic, remove the MFD children, then free all
+ * stubs and the device state.  ljca_stop() must run before
+ * mfd_remove_devices() so child drivers cannot start new transfers on a
+ * dead device.
+ */
+static void ljca_disconnect(struct usb_interface *intf)
+{
+	struct ljca_dev *ljca;
+
+	ljca = usb_get_intfdata(intf);
+
+	ljca_stop(ljca);
+	mfd_remove_devices(&intf->dev);
+	ljca_stub_cleanup(ljca);
+	usb_set_intfdata(intf, NULL);
+	ljca_delete(ljca);
+	dev_dbg(&intf->dev, "LJCA disconnected\n");
+}
+
+/* Suspend: quiesce all traffic and kill the read URB. */
+static int ljca_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct ljca_dev *ljca = usb_get_intfdata(intf);
+
+	ljca_stop(ljca);
+	ljca->state = LJCA_SUSPEND;
+
+	dev_dbg(&intf->dev, "LJCA suspend\n");
+	return 0;
+}
+
+/* Resume: mark the device live again and resubmit the read URB. */
+static int ljca_resume(struct usb_interface *intf)
+{
+	struct ljca_dev *ljca = usb_get_intfdata(intf);
+
+	ljca->state = LJCA_STARTED;
+	dev_dbg(&intf->dev, "LJCA resume\n");
+	return ljca_start(ljca);
+}
+
+static const struct usb_device_id ljca_table[] = {
+ {USB_DEVICE(0x8086, 0x0b63)},
+ {}
+};
+MODULE_DEVICE_TABLE(usb, ljca_table);
+
+static struct usb_driver ljca_driver = {
+ .name = "ljca",
+ .probe = ljca_probe,
+ .disconnect = ljca_disconnect,
+ .suspend = ljca_suspend,
+ .resume = ljca_resume,
+ .id_table = ljca_table,
+ .dev_groups = ljca_groups,
+ .supports_autosuspend = 1,
+};
+
+module_usb_driver(ljca_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye at intel.com>");
+MODULE_AUTHOR("Zhang Lixu <lixu.zhang at intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f4fb5c52b863..7b307803e791 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -464,4 +464,5 @@ source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/habanalabs/Kconfig"
source "drivers/misc/uacce/Kconfig"
source "drivers/misc/pvpanic/Kconfig"
+source "drivers/misc/ivsc/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index e92a56d4442f..074594167291 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,3 +57,4 @@ obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o
+obj-$(CONFIG_INTEL_VSC) += ivsc/
diff --git a/drivers/misc/ivsc/Kconfig b/drivers/misc/ivsc/Kconfig
new file mode 100644
index 000000000000..b46b72c7a0d3
--- /dev/null
+++ b/drivers/misc/ivsc/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2021, Intel Corporation. All rights reserved.
+
+config INTEL_VSC
+ tristate "Intel VSC"
+ select INTEL_MEI_VSC
+ help
+ Add support of Intel Visual Sensing Controller (IVSC).
+
+config INTEL_VSC_CSI
+ tristate "Intel VSC CSI client"
+ depends on INTEL_VSC
+ select INTEL_MEI
+ help
+ Add CSI support for Intel Visual Sensing Controller (IVSC).
+
+config INTEL_VSC_ACE
+ tristate "Intel VSC ACE client"
+ depends on INTEL_VSC
+ select INTEL_MEI
+ help
+ Add ACE support for Intel Visual Sensing Controller (IVSC).
+
+config INTEL_VSC_PSE
+ tristate "Intel VSC PSE client"
+ depends on DEBUG_FS
+ select INTEL_MEI
+ select INTEL_MEI_VSC
+ help
+ Add PSE support for Intel Visual Sensing Controller (IVSC) to
+ expose debugging information in files under /sys/kernel/debug/.
+
+config INTEL_VSC_ACE_DEBUG
+ tristate "Intel VSC ACE debug client"
+ depends on DEBUG_FS
+ select INTEL_MEI
+ select INTEL_MEI_VSC
+ help
+ Add ACE debug support for Intel Visual Sensing Controller (IVSC)
+ to expose debugging information in files under /sys/kernel/debug/.
diff --git a/drivers/misc/ivsc/Makefile b/drivers/misc/ivsc/Makefile
new file mode 100644
index 000000000000..809707404cf9
--- /dev/null
+++ b/drivers/misc/ivsc/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2021, Intel Corporation. All rights reserved.
+
+obj-$(CONFIG_INTEL_VSC) += intel_vsc.o
+obj-$(CONFIG_INTEL_VSC_CSI) += mei_csi.o
+obj-$(CONFIG_INTEL_VSC_ACE) += mei_ace.o
+obj-$(CONFIG_INTEL_VSC_PSE) += mei_pse.o
+obj-$(CONFIG_INTEL_VSC_ACE_DEBUG) += mei_ace_debug.o
diff --git a/drivers/misc/ivsc/intel_vsc.c b/drivers/misc/ivsc/intel_vsc.c
new file mode 100644
index 000000000000..98bd701531b9
--- /dev/null
+++ b/drivers/misc/ivsc/intel_vsc.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/vsc.h>
+#include <linux/wait.h>
+
+#include "intel_vsc.h"
+
+#define ACE_PRIVACY_ON 2
+
+struct intel_vsc {
+ struct mutex mutex;
+
+ void *csi;
+ struct vsc_csi_ops *csi_ops;
+ uint16_t csi_registerred;
+ wait_queue_head_t csi_waitq;
+
+ void *ace;
+ struct vsc_ace_ops *ace_ops;
+ uint16_t ace_registerred;
+ wait_queue_head_t ace_waitq;
+};
+
+static struct intel_vsc vsc;
+
+/*
+ * Block (interruptibly) until both the ACE and the CSI clients have
+ * registered their ops with this module.  Returns 0 once both are
+ * present, or -ERESTARTSYS if the caller is interrupted by a signal.
+ */
+static int wait_component_ready(void)
+{
+	int ret;
+
+	ret = wait_event_interruptible(vsc.ace_waitq,
+				       vsc.ace_registerred);
+	if (ret < 0) {
+		pr_err("wait ace register failed\n");
+		return ret;
+	}
+
+	ret = wait_event_interruptible(vsc.csi_waitq,
+				       vsc.csi_registerred);
+	if (ret < 0) {
+		pr_err("wait csi register failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Translate the firmware camera_status into the public vsc_camera_status.
+ * Either pointer may be NULL, in which case nothing is copied.
+ */
+static void update_camera_status(struct vsc_camera_status *status,
+				 struct camera_status *s)
+{
+	if (!status || !s)
+		return;
+
+	status->owner = s->camera_owner;
+	status->exposure_level = s->exposure_level;
+	status->status = (s->privacy_stat == ACE_PRIVACY_ON) ?
+			 VSC_PRIVACY_ON : VSC_PRIVACY_OFF;
+}
+
+/**
+ * vsc_register_ace - register the ACE client with the VSC core
+ * @ace: opaque ACE client handle passed back into @ops
+ * @ops: callbacks; ipu_own_camera and ace_own_camera are mandatory
+ *
+ * Fixed (here and in vsc_register_csi()): failures now return -EINVAL
+ * instead of the bare -1, which reads as -EPERM to errno-aware callers.
+ */
+int vsc_register_ace(void *ace, struct vsc_ace_ops *ops)
+{
+	if (!ace || !ops || !ops->ipu_own_camera || !ops->ace_own_camera) {
+		pr_err("register ace failed\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vsc.mutex);
+	vsc.ace = ace;
+	vsc.ace_ops = ops;
+	vsc.ace_registerred = true;
+	mutex_unlock(&vsc.mutex);
+
+	/* release anyone blocked in wait_component_ready() */
+	wake_up_interruptible_all(&vsc.ace_waitq);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vsc_register_ace);
+
+/* Drop the ACE registration; in-flight users still hold vsc.mutex. */
+void vsc_unregister_ace(void)
+{
+	mutex_lock(&vsc.mutex);
+	vsc.ace_registerred = false;
+	mutex_unlock(&vsc.mutex);
+}
+EXPORT_SYMBOL_GPL(vsc_unregister_ace);
+
+/**
+ * vsc_register_csi - register the CSI client with the VSC core
+ * @csi: opaque CSI client handle passed back into @ops
+ * @ops: callbacks; set_privacy_callback, set_owner and set_mipi_conf
+ *       are mandatory
+ */
+int vsc_register_csi(void *csi, struct vsc_csi_ops *ops)
+{
+	if (!csi || !ops || !ops->set_privacy_callback ||
+	    !ops->set_owner || !ops->set_mipi_conf) {
+		pr_err("register csi failed\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vsc.mutex);
+	vsc.csi = csi;
+	vsc.csi_ops = ops;
+	vsc.csi_registerred = true;
+	mutex_unlock(&vsc.mutex);
+
+	/* release anyone blocked in wait_component_ready() */
+	wake_up_interruptible_all(&vsc.csi_waitq);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vsc_register_csi);
+
+/* Drop the CSI registration; in-flight users still hold vsc.mutex. */
+void vsc_unregister_csi(void)
+{
+	mutex_lock(&vsc.mutex);
+	vsc.csi_registerred = false;
+	mutex_unlock(&vsc.mutex);
+}
+EXPORT_SYMBOL_GPL(vsc_unregister_csi);
+
+/*
+ * Hand camera ownership to the IPU: install the privacy callback, make
+ * ACE give the camera to the IPU, point the CSI at the IPU and program
+ * the MIPI configuration.  @status (optional) receives the resulting
+ * camera state.
+ *
+ * NOTE(review): the "-1" returned when a client unregistered between
+ * wait_component_ready() and taking the mutex reads as -EPERM to
+ * errno-aware callers; -EAGAIN would be clearer — confirm callers.
+ */
+int vsc_acquire_camera_sensor(struct vsc_mipi_config *config,
+			      vsc_privacy_callback_t callback,
+			      void *handle,
+			      struct vsc_camera_status *status)
+{
+	int ret;
+	struct camera_status s;
+	struct mipi_conf conf = { 0 };
+
+	struct vsc_csi_ops *csi_ops;
+	struct vsc_ace_ops *ace_ops;
+
+	if (!config)
+		return -EINVAL;
+
+	/* both clients must have registered before we can proceed */
+	ret = wait_component_ready();
+	if (ret)
+		return ret;
+
+	mutex_lock(&vsc.mutex);
+	/* re-check under the lock: a client may have unregistered */
+	if (!vsc.csi_registerred || !vsc.ace_registerred) {
+		ret = -1;
+		goto err;
+	}
+
+	csi_ops = vsc.csi_ops;
+	ace_ops = vsc.ace_ops;
+
+	csi_ops->set_privacy_callback(vsc.csi, callback, handle);
+
+	ret = ace_ops->ipu_own_camera(vsc.ace, &s);
+	if (ret) {
+		pr_err("ipu own camera failed\n");
+		goto err;
+	}
+	update_camera_status(status, &s);
+
+	ret = csi_ops->set_owner(vsc.csi, CSI_IPU);
+	if (ret) {
+		pr_err("ipu own csi failed\n");
+		goto err;
+	}
+
+	conf.lane_num = config->lane_num;
+	conf.freq = config->freq;
+	ret = csi_ops->set_mipi_conf(vsc.csi, &conf);
+	if (ret) {
+		pr_err("config mipi failed\n");
+		goto err;
+	}
+
+err:
+	mutex_unlock(&vsc.mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vsc_acquire_camera_sensor);
+
+/*
+ * Return camera ownership to the VSC firmware: clear the privacy
+ * callback, point the CSI back at the firmware, then let ACE reclaim
+ * the camera.  @status (optional) receives the resulting camera state.
+ * Mirror image of vsc_acquire_camera_sensor().
+ */
+int vsc_release_camera_sensor(struct vsc_camera_status *status)
+{
+	int ret;
+	struct camera_status s;
+
+	struct vsc_csi_ops *csi_ops;
+	struct vsc_ace_ops *ace_ops;
+
+	ret = wait_component_ready();
+	if (ret)
+		return ret;
+
+	mutex_lock(&vsc.mutex);
+	/* re-check under the lock: a client may have unregistered */
+	if (!vsc.csi_registerred || !vsc.ace_registerred) {
+		ret = -1;
+		goto err;
+	}
+
+	csi_ops = vsc.csi_ops;
+	ace_ops = vsc.ace_ops;
+
+	csi_ops->set_privacy_callback(vsc.csi, NULL, NULL);
+
+	ret = csi_ops->set_owner(vsc.csi, CSI_FW);
+	if (ret) {
+		pr_err("vsc own csi failed\n");
+		goto err;
+	}
+
+	ret = ace_ops->ace_own_camera(vsc.ace, &s);
+	if (ret) {
+		pr_err("vsc own camera failed\n");
+		goto err;
+	}
+	update_camera_status(status, &s);
+
+err:
+	mutex_unlock(&vsc.mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vsc_release_camera_sensor);
+
+/* Module init: zero the singleton and set up its lock and wait queues
+ * before any client can register.
+ */
+static int __init intel_vsc_init(void)
+{
+	memset(&vsc, 0, sizeof(vsc));
+
+	mutex_init(&vsc.mutex);
+	vsc.csi_registerred = false;
+	vsc.ace_registerred = false;
+	init_waitqueue_head(&vsc.ace_waitq);
+	init_waitqueue_head(&vsc.csi_waitq);
+
+	return 0;
+}
+
+/*
+ * Module exit: wake any sleepers still blocked in
+ * wait_component_ready() so they do not sleep on a queue that is about
+ * to disappear.
+ *
+ * NOTE(review): a woken waiter re-checks its condition and may simply
+ * sleep again if the client never registered, and it can still touch
+ * vsc after this returns — confirm module refcounting prevents unload
+ * while callers are inside the exported entry points.
+ */
+static void __exit intel_vsc_exit(void)
+{
+	if (wq_has_sleeper(&vsc.ace_waitq))
+		wake_up_all(&vsc.ace_waitq);
+
+	if (wq_has_sleeper(&vsc.csi_waitq))
+		wake_up_all(&vsc.csi_waitq);
+}
+
+module_init(intel_vsc_init);
+module_exit(intel_vsc_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device driver for Intel VSC");
diff --git a/drivers/misc/ivsc/intel_vsc.h b/drivers/misc/ivsc/intel_vsc.h
new file mode 100644
index 000000000000..6c17b95e4066
--- /dev/null
+++ b/drivers/misc/ivsc/intel_vsc.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _INTEL_VSC_H_
+#define _INTEL_VSC_H_
+
+#include <linux/types.h>
+
+/* csi power state definition */
+enum csi_power_state {
+	POWER_OFF = 0,
+	POWER_ON,
+};
+
+/* csi ownership definition */
+enum csi_owner {
+	CSI_FW = 0,
+	CSI_IPU,
+};
+
+/* mipi configuration structure (part of the firmware wire format,
+ * hence __packed)
+ */
+struct mipi_conf {
+	uint32_t lane_num;
+	uint32_t freq;
+
+	/* for future use */
+	uint32_t rsvd[2];
+} __packed;
+
+/* camera status structure (part of the firmware wire format,
+ * hence __packed)
+ */
+struct camera_status {
+	uint8_t camera_owner : 2;
+	uint8_t privacy_stat : 2;
+
+	/* for future use */
+	uint8_t rsvd : 4;
+
+	uint32_t exposure_level;
+} __packed;
+
+struct vsc_ace_ops {
+ /**
+ * @brief ace own camera ownership
+ *
+ * @param ace The pointer of ace client device
+ * @param status The pointer of camera status
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*ace_own_camera)(void *ace, struct camera_status *status);
+
+ /**
+ * @brief ipu own camera ownership
+ *
+ * @param ace The pointer of ace client device
+ * @param status The pointer of camera status
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*ipu_own_camera)(void *ace, struct camera_status *status);
+
+ /**
+ * @brief get current camera status
+ *
+ * @param ace The pointer of ace client device
+ * @param status The pointer of camera status
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*get_camera_status)(void *ace, struct camera_status *status);
+};
+
+struct vsc_csi_ops {
+	/**
+	 * @brief set csi ownership
+	 *
+	 * @param csi The pointer of csi client device
+	 * @param owner The csi ownership going to set
+	 *
+	 * @return 0 on success, negative on failure
+	 */
+	int (*set_owner)(void *csi, enum csi_owner owner);
+
+	/**
+	 * @brief get current csi ownership
+	 *
+	 * @param csi The pointer of csi client device
+	 * @param owner The pointer of csi ownership
+	 *
+	 * @return 0 on success, negative on failure
+	 */
+	int (*get_owner)(void *csi, enum csi_owner *owner);
+
+	/**
+	 * @brief configure csi with provided parameter
+	 *
+	 * @param csi The pointer of csi client device
+	 * @param conf The pointer of csi configuration
+	 * parameter going to set
+	 *
+	 * @return 0 on success, negative on failure
+	 */
+	int (*set_mipi_conf)(void *csi, struct mipi_conf *conf);
+
+	/**
+	 * @brief get the current csi configuration
+	 *
+	 * @param csi The pointer of csi client device
+	 * @param conf The pointer of csi configuration parameter
+	 * holding the returned result
+	 *
+	 * @return 0 on success, negative on failure
+	 */
+	int (*get_mipi_conf)(void *csi, struct mipi_conf *conf);
+
+	/**
+	 * @brief set csi power state
+	 *
+	 * @param csi The pointer of csi client device
+	 * @param state csi power status going to set
+	 *
+	 * @return 0 on success, negative on failure
+	 */
+	int (*set_power_state)(void *csi, enum csi_power_state state);
+
+	/**
+	 * @brief get csi power state
+	 *
+	 * @param csi The pointer of csi client device
+	 * @param state The pointer of variable holding csi power status
+	 *
+	 * @return 0 on success, negative on failure
+	 */
+	int (*get_power_state)(void *csi, enum csi_power_state *state);
+
+	/**
+	 * @brief set csi privacy callback
+	 *
+	 * @param csi The pointer of csi client device
+	 * @param callback The pointer of privacy callback function
+	 * @param handle Privacy callback runtime context
+	 */
+	void (*set_privacy_callback)(void *csi,
+				     vsc_privacy_callback_t callback,
+				     void *handle);
+};
+
+/**
+ * @brief register ace client
+ *
+ * @param ace The pointer of ace client device
+ * @param ops The pointer of ace ops
+ *
+ * @return 0 on success, negative on failure
+ */
+int vsc_register_ace(void *ace, struct vsc_ace_ops *ops);
+
+/**
+ * @brief unregister ace client
+ */
+void vsc_unregister_ace(void);
+
+/**
+ * @brief register csi client
+ *
+ * @param csi The pointer of csi client device
+ * @param ops The pointer of csi ops
+ *
+ * @return 0 on success, negative on failure
+ */
+int vsc_register_csi(void *csi, struct vsc_csi_ops *ops);
+
+/**
+ * @brief unregister csi client
+ */
+void vsc_unregister_csi(void);
+
+#endif
diff --git a/drivers/misc/ivsc/mei_ace.c b/drivers/misc/ivsc/mei_ace.c
new file mode 100644
index 000000000000..9fe65791cc51
--- /dev/null
+++ b/drivers/misc/ivsc/mei_ace.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/vsc.h>
+
+#include "intel_vsc.h"
+
+#define ACE_TIMEOUT (5 * HZ)
+#define MEI_ACE_DRIVER_NAME "vsc_ace"
+
+#define UUID_GET_FW_ID UUID_LE(0x6167DCFB, 0x72F1, 0x4584, \
+ 0xBF, 0xE3, 0x84, 0x17, 0x71, 0xAA, 0x79, 0x0B)
+
+enum notif_rsp {
+ NOTIF = 0,
+ REPLY = 1,
+};
+
+enum notify_type {
+ FW_READY = 8,
+ EXCEPTION = 10,
+ WATCHDOG_TIMEOUT = 15,
+ MANAGEMENT_NOTIF = 16,
+ NOTIFICATION = 27,
+};
+
+enum message_source {
+ FW_MSG = 0,
+ DRV_MSG = 1,
+};
+
+enum notify_event_type {
+ STATE_NOTIF = 0x1,
+ CTRL_NOTIF = 0x2,
+ DPHY_NOTIF = 0x3,
+};
+
+enum ace_cmd_type {
+ ACE_CMD_GET = 3,
+ ACE_CMD_SET = 4,
+};
+
+enum ace_cmd_id {
+ IPU_OWN_CAMERA = 0x13,
+ ACE_OWN_CAMERA = 0x14,
+ GET_CAMERA_STATUS = 0x15,
+ GET_FW_ID = 0x1A,
+};
+
+struct ace_cmd_hdr {
+ uint32_t module_id : 16;
+ uint32_t instance_id : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t param_size : 20;
+ uint32_t cmd_id : 8;
+ uint32_t final_block : 1;
+ uint32_t init_block : 1;
+ uint32_t _hw_rsvd_2 : 2;
+} __packed;
+
+union ace_cmd_param {
+ uuid_le uuid;
+ uint32_t param;
+};
+
+struct ace_cmd {
+ struct ace_cmd_hdr hdr;
+ union ace_cmd_param param;
+} __packed;
+
+union ace_notif_hdr {
+ struct _response {
+ uint32_t status : 24;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t param_size : 20;
+ uint32_t cmd_id : 8;
+ uint32_t final_block : 1;
+ uint32_t init_block : 1;
+
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed response;
+
+ struct _notify {
+ uint32_t rsvd2 : 16;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t rsvd1 : 30;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed notify;
+
+ struct _management {
+ uint32_t event_id : 16;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t event_data_size : 16;
+ uint32_t request_target : 1;
+ uint32_t request_type : 5;
+ uint32_t request_id : 8;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed management;
+};
+
+union ace_notif_cont {
+ uint16_t module_id;
+ uint8_t state_notif;
+ struct camera_status stat;
+};
+
+struct ace_notif {
+ union ace_notif_hdr hdr;
+ union ace_notif_cont cont;
+} __packed;
+
+struct mei_ace {
+ struct mei_cl_device *cldev;
+
+ struct mutex cmd_mutex;
+ struct ace_notif *cmd_resp;
+ struct completion response;
+
+ struct completion reply;
+ struct ace_notif *reply_notif;
+
+ uint16_t module_id;
+ uint16_t init_wait_q_woken;
+ wait_queue_head_t init_wait_q;
+};
+
+/* Zero the header, then fill in the attributes common to all commands. */
+static inline void init_cmd_hdr(struct ace_cmd_hdr *hdr)
+{
+	*hdr = (struct ace_cmd_hdr) {
+		.type = ACE_CMD_SET,
+		.msg_tgt = DRV_MSG,
+		.init_block = 1,
+		.final_block = 1,
+	};
+}
+
+/*
+ * Block until the firmware id has arrived (init_wait_q_woken is set by
+ * mei_ace_rx() when the GET_FW_ID reply is received), then return it.
+ * If the wait is interrupted by a signal, ace->module_id may still hold
+ * its initial value (0) and the command built from it would carry a
+ * wrong id -- hence the warning.
+ */
+static uint16_t get_fw_id(struct mei_ace *ace)
+{
+	int ret;
+
+	ret = wait_event_interruptible(ace->init_wait_q,
+				       ace->init_wait_q_woken);
+	if (ret < 0)
+		dev_warn(&ace->cldev->dev,
+			 "incorrect fw id sent to fw\n");
+
+	return ace->module_id;
+}
+
+/*
+ * Build @cmd for @cmd_id.  GET_FW_ID carries the lookup uuid; the
+ * camera commands carry the firmware module id (note: get_fw_id() may
+ * block until that id has been received from firmware).
+ *
+ * Return: total command length in bytes (header + parameters).
+ */
+static int construct_command(struct mei_ace *ace, struct ace_cmd *cmd,
+			     enum ace_cmd_id cmd_id)
+{
+	struct ace_cmd_hdr *hdr = &cmd->hdr;
+	union ace_cmd_param *param = &cmd->param;
+
+	init_cmd_hdr(hdr);
+
+	hdr->cmd_id = cmd_id;
+	switch (cmd_id) {
+	case GET_FW_ID:
+		param->uuid = UUID_GET_FW_ID;
+		hdr->param_size = sizeof(param->uuid);
+		break;
+	case ACE_OWN_CAMERA:
+		param->param = 0;
+		hdr->module_id = get_fw_id(ace);
+		hdr->param_size = sizeof(param->param);
+		break;
+	case IPU_OWN_CAMERA:
+	case GET_CAMERA_STATUS:
+		/* no payload: param_size stays 0 from init_cmd_hdr() */
+		hdr->module_id = get_fw_id(ace);
+		break;
+	default:
+		dev_err(&ace->cldev->dev,
+			"sending not supported command");
+		break;
+	}
+
+	return hdr->param_size + sizeof(cmd->hdr);
+}
+
+/*
+ * Send a command and synchronously wait for both the notify-reply and
+ * the management response, each bounded by ACE_TIMEOUT.  Caller must
+ * hold ace->cmd_mutex so cmd_resp/reply_notif are not overwritten
+ * concurrently.
+ *
+ * Return: 0 on success, negative errno on failure (protocol errors now
+ * report -EIO instead of the previous bare -1).
+ */
+static int send_command_sync(struct mei_ace *ace,
+			     struct ace_cmd *cmd, size_t len)
+{
+	int ret;
+	struct ace_cmd_hdr *cmd_hdr = &cmd->hdr;
+	union ace_notif_hdr *resp_hdr = &ace->cmd_resp->hdr;
+	union ace_notif_hdr *reply_hdr = &ace->reply_notif->hdr;
+
+	reinit_completion(&ace->response);
+	reinit_completion(&ace->reply);
+
+	ret = mei_cldev_send(ace->cldev, (uint8_t *)cmd, len);
+	if (ret < 0) {
+		dev_err(&ace->cldev->dev,
+			"send command fail %d\n", ret);
+		return ret;
+	}
+
+	ret = wait_for_completion_killable_timeout(&ace->reply, ACE_TIMEOUT);
+	if (ret < 0) {
+		dev_err(&ace->cldev->dev,
+			"command %d notify reply error\n", cmd_hdr->cmd_id);
+		return ret;
+	} else if (ret == 0) {
+		dev_err(&ace->cldev->dev,
+			"command %d notify reply timeout\n", cmd_hdr->cmd_id);
+		return -ETIMEDOUT;
+	}
+
+	if (reply_hdr->response.cmd_id != cmd_hdr->cmd_id) {
+		dev_err(&ace->cldev->dev,
+			"reply notify mismatch, sent %d but got %d\n",
+			cmd_hdr->cmd_id, reply_hdr->response.cmd_id);
+		return -EIO;
+	}
+
+	ret = reply_hdr->response.status;
+	if (ret) {
+		dev_err(&ace->cldev->dev,
+			"command %d reply wrong status = %d\n",
+			cmd_hdr->cmd_id, ret);
+		return -EIO;
+	}
+
+	ret = wait_for_completion_killable_timeout(&ace->response, ACE_TIMEOUT);
+	if (ret < 0) {
+		dev_err(&ace->cldev->dev,
+			"command %d response error\n", cmd_hdr->cmd_id);
+		return ret;
+	} else if (ret == 0) {
+		dev_err(&ace->cldev->dev,
+			"command %d response timeout\n", cmd_hdr->cmd_id);
+		return -ETIMEDOUT;
+	}
+
+	if (resp_hdr->management.request_id != cmd_hdr->cmd_id) {
+		dev_err(&ace->cldev->dev,
+			"command response mismatch, sent %d but got %d\n",
+			cmd_hdr->cmd_id, resp_hdr->management.request_id);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Fire-and-forget GET_FW_ID request; the id arrives asynchronously in
+ * mei_ace_rx().
+ *
+ * Return: 0 on success, negative errno on failure.  (mei_cldev_send()
+ * returns the byte count on success; normalize it to 0 so callers can
+ * use the usual 0/-errno convention.)
+ */
+static int trigger_get_fw_id(struct mei_ace *ace)
+{
+	int ret;
+	struct ace_cmd cmd;
+	size_t cmd_len;
+
+	cmd_len = construct_command(ace, &cmd, GET_FW_ID);
+
+	ret = mei_cldev_send(ace->cldev, (uint8_t *)&cmd, cmd_len);
+	if (ret < 0) {
+		dev_err(&ace->cldev->dev,
+			"send get fw id command fail %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Send an ownership command (ACE_OWN_CAMERA or IPU_OWN_CAMERA) to
+ * firmware and copy the camera status from the response into @status.
+ * cmd_mutex serializes access to the shared response buffers.  Note
+ * that construct_command() may block in get_fw_id() until the firmware
+ * id has been received.
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int set_camera_ownership(struct mei_ace *ace,
+				enum ace_cmd_id cmd_id,
+				struct camera_status *status)
+{
+	struct ace_cmd cmd;
+	size_t cmd_len;
+	union ace_notif_cont *cont;
+	int ret;
+
+	cmd_len = construct_command(ace, &cmd, cmd_id);
+
+	mutex_lock(&ace->cmd_mutex);
+
+	ret = send_command_sync(ace, &cmd, cmd_len);
+	if (!ret) {
+		cont = &ace->cmd_resp->cont;
+		memcpy(status, &cont->stat, sizeof(*status));
+	}
+
+	mutex_unlock(&ace->cmd_mutex);
+
+	return ret;
+}
+
+/*
+ * vsc_ace_ops hook: hand camera ownership to the IPU.  Made static --
+ * it is only reachable through ace_ops, and the generic name would
+ * otherwise pollute the kernel's global symbol namespace.
+ */
+static int ipu_own_camera(void *ace, struct camera_status *status)
+{
+	struct mei_ace *p_ace = (struct mei_ace *)ace;
+
+	return set_camera_ownership(p_ace, IPU_OWN_CAMERA, status);
+}
+
+/*
+ * vsc_ace_ops hook: hand camera ownership back to the ACE firmware.
+ * Made static -- only reachable through ace_ops (see ipu_own_camera).
+ */
+static int ace_own_camera(void *ace, struct camera_status *status)
+{
+	struct mei_ace *p_ace = (struct mei_ace *)ace;
+
+	return set_camera_ownership(p_ace, ACE_OWN_CAMERA, status);
+}
+
+/*
+ * vsc_ace_ops hook: query the current camera status.  Made static
+ * (only reachable through ace_ops).  GET_CAMERA_STATUS follows exactly
+ * the same send/copy-response pattern as the ownership commands, so
+ * reuse set_camera_ownership() instead of duplicating its body.
+ */
+static int get_camera_status(void *ace, struct camera_status *status)
+{
+	struct mei_ace *p_ace = (struct mei_ace *)ace;
+
+	return set_camera_ownership(p_ace, GET_CAMERA_STATUS, status);
+}
+
+static struct vsc_ace_ops ace_ops = {
+ .ace_own_camera = ace_own_camera,
+ .ipu_own_camera = ipu_own_camera,
+ .get_camera_status = get_camera_status,
+};
+
+static void handle_notify(struct mei_ace *ace, struct ace_notif *resp, int len)
+{
+ union ace_notif_hdr *hdr = &resp->hdr;
+ struct mei_cl_device *cldev = ace->cldev;
+
+ if (hdr->notify.msg_tgt != FW_MSG ||
+ hdr->notify.type != NOTIFICATION) {
+ dev_err(&cldev->dev, "recv incorrect notification\n");
+ return;
+ }
+
+ switch (hdr->notify.notif_type) {
+ /* firmware ready notification sent to driver
+ * after HECI client connected with firmware.
+ */
+ case FW_READY:
+ dev_info(&cldev->dev, "firmware ready\n");
+
+ trigger_get_fw_id(ace);
+ break;
+
+ case MANAGEMENT_NOTIF:
+ if (hdr->management.event_id == CTRL_NOTIF) {
+ switch (hdr->management.request_id) {
+ case GET_FW_ID:
+ dev_warn(&cldev->dev,
+ "shouldn't reach here\n");
+ break;
+
+ case ACE_OWN_CAMERA:
+ case IPU_OWN_CAMERA:
+ case GET_CAMERA_STATUS:
+ memcpy(ace->cmd_resp, resp, len);
+
+ if (!completion_done(&ace->response))
+ complete(&ace->response);
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "incorrect command id notif\n");
+ break;
+ }
+ }
+ break;
+
+ case EXCEPTION:
+ dev_err(&cldev->dev, "firmware exception\n");
+ break;
+
+ case WATCHDOG_TIMEOUT:
+ dev_err(&cldev->dev, "firmware watchdog timeout\n");
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "recv unknown notification(%d)\n",
+ hdr->notify.notif_type);
+ break;
+ }
+}
+
+ /* callback for command response receive */
+static void mei_ace_rx(struct mei_cl_device *cldev)
+{
+ struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
+ int ret;
+ struct ace_notif resp;
+ union ace_notif_hdr *hdr = &resp.hdr;
+
+ ret = mei_cldev_recv(cldev, (uint8_t *)&resp, sizeof(resp));
+ if (ret < 0) {
+ dev_err(&cldev->dev, "failure in recv %d\n", ret);
+ return;
+ } else if (ret < sizeof(union ace_notif_hdr)) {
+ dev_err(&cldev->dev, "recv small data %d\n", ret);
+ return;
+ }
+
+ switch (hdr->notify.rsp) {
+ case REPLY:
+ if (hdr->response.cmd_id == GET_FW_ID) {
+ ace->module_id = resp.cont.module_id;
+
+ ace->init_wait_q_woken = true;
+ wake_up_all(&ace->init_wait_q);
+
+ dev_info(&cldev->dev, "recv firmware id\n");
+ } else {
+ memcpy(ace->reply_notif, &resp, ret);
+
+ if (!completion_done(&ace->reply))
+ complete(&ace->reply);
+ }
+ break;
+
+ case NOTIF:
+ handle_notify(ace, &resp, ret);
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "recv unknown response(%d)\n", hdr->notify.rsp);
+ break;
+ }
+}
+
+static int mei_ace_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct mei_ace *ace;
+ int ret;
+ uint8_t *addr;
+ size_t ace_size = sizeof(struct mei_ace);
+ size_t reply_size = sizeof(struct ace_notif);
+ size_t response_size = sizeof(struct ace_notif);
+
+ ace = kzalloc(ace_size + response_size + reply_size, GFP_KERNEL);
+ if (!ace)
+ return -ENOMEM;
+
+ addr = (uint8_t *)ace;
+ ace->cmd_resp = (struct ace_notif *)(addr + ace_size);
+
+ addr = (uint8_t *)ace->cmd_resp;
+ ace->reply_notif = (struct ace_notif *)(addr + response_size);
+
+ ace->cldev = cldev;
+
+ ace->init_wait_q_woken = false;
+ init_waitqueue_head(&ace->init_wait_q);
+
+ mutex_init(&ace->cmd_mutex);
+ init_completion(&ace->response);
+ init_completion(&ace->reply);
+
+ mei_cldev_set_drvdata(cldev, ace);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev,
+ "couldn't enable ace client ret=%d\n", ret);
+ goto err_out;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_ace_rx);
+ if (ret) {
+ dev_err(&cldev->dev,
+ "couldn't register rx cb ret=%d\n", ret);
+ goto err_disable;
+ }
+
+ trigger_get_fw_id(ace);
+
+ vsc_register_ace(ace, &ace_ops);
+ return 0;
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+err_out:
+ kfree(ace);
+
+ return ret;
+}
+
+/* Tear down the ACE client: unregister the ops so no new callers can
+ * enter, unblock anybody waiting on a firmware answer, disable the MEI
+ * client, then drain in-flight command users before freeing.
+ */
+static void mei_ace_remove(struct mei_cl_device *cldev)
+{
+	struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
+
+	vsc_unregister_ace();
+
+	/* release waiters stuck in send_command_sync() */
+	if (!completion_done(&ace->response))
+		complete(&ace->response);
+
+	if (!completion_done(&ace->reply))
+		complete(&ace->reply);
+
+	/* release waiters stuck in get_fw_id() */
+	if (wq_has_sleeper(&ace->init_wait_q))
+		wake_up_all(&ace->init_wait_q);
+
+	mei_cldev_disable(cldev);
+
+	/* wait until no buffer access */
+	mutex_lock(&ace->cmd_mutex);
+	mutex_unlock(&ace->cmd_mutex);
+
+	kfree(ace);
+}
+
+#define MEI_UUID_ACE UUID_LE(0x5DB76CF6, 0x0A68, 0x4ED6, \
+ 0x9B, 0x78, 0x03, 0x61, 0x63, 0x5E, 0x24, 0x47)
+
+static const struct mei_cl_device_id mei_ace_tbl[] = {
+ { MEI_ACE_DRIVER_NAME, MEI_UUID_ACE, MEI_CL_VERSION_ANY },
+
+ /* required last entry */
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_ace_tbl);
+
+static struct mei_cl_driver mei_ace_driver = {
+ .id_table = mei_ace_tbl,
+ .name = MEI_ACE_DRIVER_NAME,
+
+ .probe = mei_ace_probe,
+ .remove = mei_ace_remove,
+};
+
+/* Register the MEI client driver for the VSC ACE client. */
+static int __init mei_ace_init(void)
+{
+	int ret = mei_cldev_driver_register(&mei_ace_driver);
+
+	if (ret)
+		pr_err("mei ace driver registration failed: %d\n", ret);
+
+	return ret;
+}
+
+static void __exit mei_ace_exit(void)
+{
+ mei_cldev_driver_unregister(&mei_ace_driver);
+}
+
+module_init(mei_ace_init);
+module_exit(mei_ace_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device driver for Intel VSC ACE client");
diff --git a/drivers/misc/ivsc/mei_ace_debug.c b/drivers/misc/ivsc/mei_ace_debug.c
new file mode 100644
index 000000000000..4ae850658ecb
--- /dev/null
+++ b/drivers/misc/ivsc/mei_ace_debug.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+#include <linux/uuid.h>
+
+#define MAX_RECV_SIZE 8192
+#define MAX_LOG_SIZE 0x40000000
+#define LOG_CONFIG_PARAM_COUNT 7
+#define COMMAND_TIMEOUT (5 * HZ)
+#define ACE_LOG_FILE "/var/log/vsc_ace.log"
+#define MEI_ACE_DEBUG_DRIVER_NAME "vsc_ace_debug"
+
+enum notif_rsp {
+ NOTIF = 0,
+ REPLY = 1,
+};
+
+enum message_source {
+ FW_MSG = 0,
+ DRV_MSG = 1,
+};
+
+enum notify_type {
+ LOG_BUFFER_STATUS = 6,
+ FW_READY = 8,
+ MANAGEMENT_NOTIF = 16,
+ NOTIFICATION = 27,
+};
+
+enum notify_event_type {
+ STATE_NOTIF = 1,
+ CTRL_NOTIF = 2,
+};
+
+enum ace_cmd_id {
+ GET_FW_VER = 0,
+ LOG_CONFIG = 6,
+ SET_SYS_TIME = 20,
+ GET_FW_ID = 26,
+};
+
+enum ace_cmd_type {
+ ACE_CMD_GET = 3,
+ ACE_CMD_SET = 4,
+};
+
+struct firmware_version {
+ uint32_t type;
+ uint32_t len;
+
+ uint16_t major;
+ uint16_t minor;
+ uint16_t hotfix;
+ uint16_t build;
+} __packed;
+
+union tracing_config {
+ struct _uart_config {
+ uint32_t instance;
+ uint32_t baudrate;
+ } __packed uart;
+
+ struct _i2c_config {
+ uint32_t instance;
+ uint32_t speed;
+ uint32_t address;
+ uint32_t reg;
+ } __packed i2c;
+};
+
+struct log_config {
+ uint32_t aging_period;
+ uint32_t fifo_period;
+ uint32_t enable;
+ uint32_t priority_mask[16];
+ uint32_t tracing_method;
+ uint32_t tracing_format;
+ union tracing_config config;
+} __packed;
+
+struct ace_cmd_hdr {
+ uint32_t module_id : 16;
+ uint32_t instance_id : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t param_size : 20;
+ uint32_t cmd_id : 8;
+ uint32_t final_block : 1;
+ uint32_t init_block : 1;
+ uint32_t _hw_rsvd_2 : 2;
+} __packed;
+
+union ace_cmd_param {
+ uuid_le uuid;
+ uint64_t time;
+ struct log_config config;
+};
+
+struct ace_cmd {
+ struct ace_cmd_hdr hdr;
+ union ace_cmd_param param;
+} __packed;
+
+union ace_notif_hdr {
+ struct _response {
+ uint32_t status : 24;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t param_size : 20;
+ uint32_t cmd_id : 8;
+ uint32_t final_block : 1;
+ uint32_t init_block : 1;
+
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed response;
+
+ struct _notify {
+ uint32_t rsvd2 : 16;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t rsvd1 : 30;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed notify;
+
+ struct _log_notify {
+ uint32_t rsvd0 : 12;
+ uint32_t source_core : 4;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t rsvd1 : 30;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed log_notify;
+
+ struct _management {
+ uint32_t event_id : 16;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t event_data_size : 16;
+ uint32_t request_target : 1;
+ uint32_t request_type : 5;
+ uint32_t request_id : 8;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed management;
+};
+
+union ace_notif_cont {
+ uint16_t module_id;
+ struct firmware_version version;
+};
+
+struct ace_notif {
+ union ace_notif_hdr hdr;
+ union ace_notif_cont cont;
+} __packed;
+
+struct mei_ace_debug {
+ struct mei_cl_device *cldev;
+
+ struct mutex cmd_mutex;
+ struct ace_notif cmd_resp;
+ struct completion response;
+
+ struct completion reply;
+ struct ace_notif reply_notif;
+
+ loff_t pos;
+ struct file *ace_file;
+
+ uint8_t *recv_buf;
+
+ struct dentry *dfs_dir;
+};
+
+static inline void init_cmd_hdr(struct ace_cmd_hdr *hdr)
+{
+ memset(hdr, 0, sizeof(struct ace_cmd_hdr));
+
+ hdr->type = ACE_CMD_SET;
+ hdr->msg_tgt = DRV_MSG;
+ hdr->init_block = 1;
+ hdr->final_block = 1;
+}
+
+/*
+ * Build @cmd for @cmd_id; @user_data supplies the payload for
+ * GET_FW_ID (a uuid) and LOG_CONFIG (a struct log_config) and is
+ * ignored otherwise.
+ *
+ * Return: total command length in bytes (header + parameters).
+ */
+static int construct_command(struct mei_ace_debug *ad,
+			     struct ace_cmd *cmd,
+			     enum ace_cmd_id cmd_id,
+			     void *user_data)
+{
+	struct ace_cmd_hdr *hdr = &cmd->hdr;
+	union ace_cmd_param *param = &cmd->param;
+
+	init_cmd_hdr(hdr);
+
+	hdr->cmd_id = cmd_id;
+	switch (cmd_id) {
+	case GET_FW_VER:
+		hdr->type = ACE_CMD_GET;
+		break;
+	case SET_SYS_TIME:
+		param->time = ktime_get_ns();
+		hdr->param_size = sizeof(param->time);
+		break;
+	case GET_FW_ID:
+		/* fix: "&param" had been corrupted to "¶m" (mis-encoding) */
+		memcpy(&param->uuid, user_data, sizeof(param->uuid));
+		hdr->param_size = sizeof(param->uuid);
+		break;
+	case LOG_CONFIG:
+		memcpy(&param->config, user_data, sizeof(param->config));
+		hdr->param_size = sizeof(param->config);
+		break;
+	default:
+		dev_err(&ad->cldev->dev,
+			"sending not supported command");
+		break;
+	}
+
+	return hdr->param_size + sizeof(cmd->hdr);
+}
+
+/*
+ * Send a command and synchronously wait for the notify-reply, bounded
+ * by COMMAND_TIMEOUT.  Caller must hold ad->cmd_mutex so reply_notif
+ * is not overwritten concurrently.
+ *
+ * Return: 0 on success, negative errno on failure (protocol errors now
+ * report -EIO instead of the previous bare -1).
+ */
+static int send_command_sync(struct mei_ace_debug *ad,
+			     struct ace_cmd *cmd, size_t len)
+{
+	int ret;
+	struct ace_cmd_hdr *cmd_hdr = &cmd->hdr;
+	union ace_notif_hdr *reply_hdr = &ad->reply_notif.hdr;
+
+	reinit_completion(&ad->reply);
+
+	ret = mei_cldev_send(ad->cldev, (uint8_t *)cmd, len);
+	if (ret < 0) {
+		dev_err(&ad->cldev->dev,
+			"send command fail %d\n", ret);
+		return ret;
+	}
+
+	ret = wait_for_completion_killable_timeout(&ad->reply, COMMAND_TIMEOUT);
+	if (ret < 0) {
+		dev_err(&ad->cldev->dev,
+			"command %d notify reply error\n", cmd_hdr->cmd_id);
+		return ret;
+	} else if (ret == 0) {
+		dev_err(&ad->cldev->dev,
+			"command %d notify reply timeout\n", cmd_hdr->cmd_id);
+		return -ETIMEDOUT;
+	}
+
+	if (reply_hdr->response.cmd_id != cmd_hdr->cmd_id) {
+		dev_err(&ad->cldev->dev,
+			"reply notify mismatch, sent %d but got %d\n",
+			cmd_hdr->cmd_id, reply_hdr->response.cmd_id);
+		return -EIO;
+	}
+
+	ret = reply_hdr->response.status;
+	if (ret) {
+		dev_err(&ad->cldev->dev,
+			"command %d reply wrong status = %d\n",
+			cmd_hdr->cmd_id, ret);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Push the current kernel time down to the firmware. */
+static int set_system_time(struct mei_ace_debug *ad)
+{
+	int ret;
+	struct ace_cmd cmd;
+	size_t len = construct_command(ad, &cmd, SET_SYS_TIME, NULL);
+
+	mutex_lock(&ad->cmd_mutex);
+	ret = send_command_sync(ad, &cmd, len);
+	mutex_unlock(&ad->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs write handler: any write triggers a firmware time sync;
+ * the written data itself is ignored.
+ */
+static ssize_t ad_time_write(struct file *file,
+			     const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct mei_ace_debug *ad = file->private_data;
+	int ret;
+
+	ret = set_system_time(ad);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* Download a log configuration to the firmware. */
+static int config_log(struct mei_ace_debug *ad, struct log_config *config)
+{
+	int ret;
+	struct ace_cmd cmd;
+	size_t len = construct_command(ad, &cmd, LOG_CONFIG, config);
+
+	mutex_lock(&ad->cmd_mutex);
+	ret = send_command_sync(ad, &cmd, len);
+	mutex_unlock(&ad->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs write handler: parse seven space-separated unsigned ints
+ * from user space and push them to firmware as a log configuration.
+ */
+static ssize_t ad_log_config_write(struct file *file,
+				   const char __user *ubuf,
+				   size_t count, loff_t *ppos)
+{
+	struct mei_ace_debug *ad = file->private_data;
+	int ret;
+	uint8_t *buf;
+	struct log_config config = {0};
+
+	buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1)));
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	ret = sscanf(buf, "%u %u %u %u %u %u %u",
+		     &config.aging_period,
+		     &config.fifo_period,
+		     &config.enable,
+		     &config.priority_mask[0],
+		     &config.priority_mask[1],
+		     &config.tracing_format,
+		     &config.tracing_method);
+
+	/* fix: the memdup_user_nul() buffer was leaked on every path */
+	kfree(buf);
+
+	if (ret != LOG_CONFIG_PARAM_COUNT) {
+		dev_err(&ad->cldev->dev,
+			"please input all the required parameters\n");
+		return -EINVAL;
+	}
+
+	ret = config_log(ad, &config);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static int get_firmware_version(struct mei_ace_debug *ad,
+ struct firmware_version *version)
+{
+ struct ace_cmd cmd;
+ size_t cmd_len;
+ union ace_notif_cont *cont;
+ int ret;
+
+ cmd_len = construct_command(ad, &cmd, GET_FW_VER, NULL);
+
+ mutex_lock(&ad->cmd_mutex);
+ ret = send_command_sync(ad, &cmd, cmd_len);
+ if (!ret) {
+ cont = &ad->reply_notif.cont;
+ memcpy(version, &cont->version, sizeof(*version));
+ }
+ mutex_unlock(&ad->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t ad_firmware_version_read(struct file *file,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_ace_debug *ad = file->private_data;
+ int ret, pos;
+ struct firmware_version version;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+
+ if (!addr)
+ return -ENOMEM;
+
+ ret = get_firmware_version(ad, &version);
+ if (ret)
+ goto out;
+
+ pos = snprintf((char *)addr, PAGE_SIZE,
+ "firmware version: %u.%u.%u.%u\n",
+ version.major, version.minor,
+ version.hotfix, version.build);
+
+ ret = simple_read_from_buffer(buf, count, ppos, (char *)addr, pos);
+
+out:
+ free_page(addr);
+ return ret;
+}
+
+#define AD_DFS_ADD_FILE(name) \
+ debugfs_create_file(#name, 0644, ad->dfs_dir, ad, \
+ &ad_dfs_##name##_fops)
+
+#define AD_DFS_FILE_OPS(name) \
+static const struct file_operations ad_dfs_##name##_fops = { \
+ .read = ad_##name##_read, \
+ .write = ad_##name##_write, \
+ .open = simple_open, \
+}
+
+#define AD_DFS_FILE_READ_OPS(name) \
+static const struct file_operations ad_dfs_##name##_fops = { \
+ .read = ad_##name##_read, \
+ .open = simple_open, \
+}
+
+#define AD_DFS_FILE_WRITE_OPS(name) \
+static const struct file_operations ad_dfs_##name##_fops = { \
+ .write = ad_##name##_write, \
+ .open = simple_open, \
+}
+
+AD_DFS_FILE_WRITE_OPS(time);
+AD_DFS_FILE_WRITE_OPS(log_config);
+
+AD_DFS_FILE_READ_OPS(firmware_version);
+
+static void handle_notify(struct mei_ace_debug *ad,
+ struct ace_notif *notif, int len)
+{
+ int ret;
+ struct file *file;
+ loff_t *pos;
+ union ace_notif_hdr *hdr = ¬if->hdr;
+ struct mei_cl_device *cldev = ad->cldev;
+
+ if (hdr->notify.msg_tgt != FW_MSG ||
+ hdr->notify.type != NOTIFICATION) {
+ dev_err(&cldev->dev, "recv wrong notification\n");
+ return;
+ }
+
+ switch (hdr->notify.notif_type) {
+ case FW_READY:
+ /* firmware ready notification sent to driver
+ * after HECI client connected with firmware.
+ */
+ dev_info(&cldev->dev, "firmware ready\n");
+ break;
+
+ case LOG_BUFFER_STATUS:
+ if (ad->pos < MAX_LOG_SIZE) {
+ pos = &ad->pos;
+ file = ad->ace_file;
+
+ ret = kernel_write(file,
+ (uint8_t *)notif + sizeof(*hdr),
+ len - sizeof(*hdr),
+ pos);
+ if (ret < 0)
+ dev_err(&cldev->dev,
+ "error in writing log %d\n", ret);
+ else
+ *pos += ret;
+ } else
+ dev_warn(&cldev->dev,
+ "already exceed max log size\n");
+ break;
+
+ case MANAGEMENT_NOTIF:
+ if (hdr->management.event_id == CTRL_NOTIF) {
+ switch (hdr->management.request_id) {
+ case GET_FW_VER:
+ case LOG_CONFIG:
+ case SET_SYS_TIME:
+ case GET_FW_ID:
+ memcpy(&ad->cmd_resp, notif, len);
+
+ if (!completion_done(&ad->response))
+ complete(&ad->response);
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "wrong command id(%d) notif\n",
+ hdr->management.request_id);
+ break;
+ }
+ }
+ break;
+
+ default:
+ dev_info(&cldev->dev,
+ "unexpected notify(%d)\n", hdr->notify.notif_type);
+ break;
+ }
+}
+
+/* callback for command response receive */
+static void mei_ace_debug_rx(struct mei_cl_device *cldev)
+{
+ struct mei_ace_debug *ad = mei_cldev_get_drvdata(cldev);
+ int ret;
+ struct ace_notif *notif;
+ union ace_notif_hdr *hdr;
+
+ ret = mei_cldev_recv(cldev, ad->recv_buf, MAX_RECV_SIZE);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "failure in recv %d\n", ret);
+ return;
+ } else if (ret < sizeof(union ace_notif_hdr)) {
+ dev_err(&cldev->dev, "recv small data %d\n", ret);
+ return;
+ }
+ notif = (struct ace_notif *)ad->recv_buf;
+ hdr = ¬if->hdr;
+
+ switch (hdr->notify.rsp) {
+ case REPLY:
+ memcpy(&ad->reply_notif, notif, sizeof(struct ace_notif));
+
+ if (!completion_done(&ad->reply))
+ complete(&ad->reply);
+ break;
+
+ case NOTIF:
+ handle_notify(ad, notif, ret);
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "unexpected response(%d)\n", hdr->notify.rsp);
+ break;
+ }
+}
+
+static int mei_ace_debug_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct mei_ace_debug *ad;
+ int ret;
+ uint32_t order = get_order(MAX_RECV_SIZE);
+
+ ad = kzalloc(sizeof(struct mei_ace_debug), GFP_KERNEL);
+ if (!ad)
+ return -ENOMEM;
+
+ ad->recv_buf = (uint8_t *)__get_free_pages(GFP_KERNEL, order);
+ if (!ad->recv_buf) {
+ kfree(ad);
+ return -ENOMEM;
+ }
+
+ ad->cldev = cldev;
+
+ mutex_init(&ad->cmd_mutex);
+ init_completion(&ad->response);
+ init_completion(&ad->reply);
+
+ mei_cldev_set_drvdata(cldev, ad);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev,
+ "couldn't enable ace debug client ret=%d\n", ret);
+ goto err_out;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_ace_debug_rx);
+ if (ret) {
+ dev_err(&cldev->dev,
+ "couldn't register ace debug rx cb ret=%d\n", ret);
+ goto err_disable;
+ }
+
+ ad->ace_file = filp_open(ACE_LOG_FILE,
+ O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC,
+ 0600);
+ if (IS_ERR(ad->ace_file)) {
+ dev_err(&cldev->dev,
+ "filp_open(%s) failed\n", ACE_LOG_FILE);
+ ret = PTR_ERR(ad->ace_file);
+ goto err_disable;
+ }
+ ad->pos = 0;
+
+ ad->dfs_dir = debugfs_create_dir("vsc_ace", NULL);
+ if (ad->dfs_dir) {
+ AD_DFS_ADD_FILE(log_config);
+ AD_DFS_ADD_FILE(time);
+ AD_DFS_ADD_FILE(firmware_version);
+ }
+
+ return 0;
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+err_out:
+ free_pages((unsigned long)ad->recv_buf, order);
+
+ kfree(ad);
+
+ return ret;
+}
+
+static void mei_ace_debug_remove(struct mei_cl_device *cldev)
+{
+ uint32_t order = get_order(MAX_RECV_SIZE);
+ struct mei_ace_debug *ad = mei_cldev_get_drvdata(cldev);
+
+ if (!completion_done(&ad->response))
+ complete(&ad->response);
+
+ if (!completion_done(&ad->reply))
+ complete(&ad->reply);
+
+ mei_cldev_disable(cldev);
+
+ debugfs_remove_recursive(ad->dfs_dir);
+
+ filp_close(ad->ace_file, NULL);
+
+ /* wait until no buffer access */
+ mutex_lock(&ad->cmd_mutex);
+ mutex_unlock(&ad->cmd_mutex);
+
+ free_pages((unsigned long)ad->recv_buf, order);
+
+ kfree(ad);
+}
+
+#define MEI_UUID_ACE_DEBUG UUID_LE(0xFB285857, 0xFC24, 0x4BF3, 0xBD, \
+ 0x80, 0x2A, 0xBC, 0x44, 0xE3, 0xC2, 0x0B)
+
+static const struct mei_cl_device_id mei_ace_debug_tbl[] = {
+ { MEI_ACE_DEBUG_DRIVER_NAME, MEI_UUID_ACE_DEBUG, MEI_CL_VERSION_ANY },
+
+ /* required last entry */
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_ace_debug_tbl);
+
+static struct mei_cl_driver mei_ace_debug_driver = {
+ .id_table = mei_ace_debug_tbl,
+ .name = MEI_ACE_DEBUG_DRIVER_NAME,
+
+ .probe = mei_ace_debug_probe,
+ .remove = mei_ace_debug_remove,
+};
+
+/* Register the MEI client driver for the VSC ACE debug client. */
+static int __init mei_ace_debug_init(void)
+{
+	int ret = mei_cldev_driver_register(&mei_ace_debug_driver);
+
+	if (ret)
+		pr_err("mei ace debug driver registration failed: %d\n", ret);
+
+	return ret;
+}
+
+static void __exit mei_ace_debug_exit(void)
+{
+ mei_cldev_driver_unregister(&mei_ace_debug_driver);
+}
+
+module_init(mei_ace_debug_init);
+module_exit(mei_ace_debug_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device driver for Intel VSC ACE debug client");
diff --git a/drivers/misc/ivsc/mei_csi.c b/drivers/misc/ivsc/mei_csi.c
new file mode 100644
index 000000000000..ebac873c36eb
--- /dev/null
+++ b/drivers/misc/ivsc/mei_csi.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uuid.h>
+#include <linux/vsc.h>
+
+#include "intel_vsc.h"
+
+#define CSI_TIMEOUT (5 * HZ)
+#define MEI_CSI_DRIVER_NAME "vsc_csi"
+
+/*
+ * Command ids that can be downloaded to firmware, plus the
+ * privacy notify id used when processing privacy actions.
+ *
+ * This enumeration is local to the mei csi.
+ */
+enum csi_cmd_id {
+	/* used to set csi ownership */
+	CSI_SET_OWNER,
+
+	/* used to get csi ownership */
+	CSI_GET_OWNER,
+
+	/* used to configure MIPI */
+	CSI_SET_MIPI_CONF,
+
+	/* used to get current MIPI configuration */
+	CSI_GET_MIPI_CONF,
+
+	/* used to set csi power state */
+	CSI_SET_POWER_STATE,
+
+	/* used to get csi power state */
+	CSI_GET_POWER_STATE,
+
+	/* privacy notification id used when privacy state changes */
+	CSI_PRIVACY_NOTIF,
+};
+
+enum privacy_state {
+	PRIVACY_OFF = 0,
+	PRIVACY_ON,
+};
+
+/*
+ * CSI command structure, sent on the wire: only the bytes that are
+ * meaningful for a given command id are transmitted (see cmd_len use).
+ */
+struct csi_cmd {
+	uint32_t cmd_id;
+	union _cmd_param {
+		uint32_t param;
+		struct mipi_conf conf;
+	} param;
+} __packed;
+
+/*
+ * CSI command response structure, received from firmware.
+ */
+struct csi_notif {
+	uint32_t cmd_id;
+	int status;
+	union _resp_cont {
+		uint32_t cont;
+		struct mipi_conf conf;
+	} cont;
+} __packed;
+
+struct mei_csi {
+	struct mei_cl_device *cldev;
+
+	/* cmd_mutex serializes command send + response consumption */
+	struct mutex cmd_mutex;
+	struct csi_notif *notif;
+	struct completion response;
+
+	/* privacy_lock protects handle/callback against concurrent update */
+	spinlock_t privacy_lock;
+	void *handle;
+	vsc_privacy_callback_t callback;
+};
+
+/*
+ * Send a command to the CSI firmware client and wait for its response.
+ * Caller must hold csi->cmd_mutex; on success the response is available
+ * in csi->notif. Returns 0 on success or a negative errno.
+ */
+static int mei_csi_send(struct mei_csi *csi, uint8_t *buf, size_t len)
+{
+	struct csi_cmd *cmd = (struct csi_cmd *)buf;
+	int ret;
+
+	reinit_completion(&csi->response);
+
+	ret = mei_cldev_send(csi->cldev, buf, len);
+	if (ret < 0) {
+		dev_err(&csi->cldev->dev,
+			"send command fail %d\n", ret);
+		return ret;
+	}
+
+	ret = wait_for_completion_killable_timeout(&csi->response, CSI_TIMEOUT);
+	if (ret < 0) {
+		dev_err(&csi->cldev->dev,
+			"command %d response error\n", cmd->cmd_id);
+		return ret;
+	} else if (ret == 0) {
+		dev_err(&csi->cldev->dev,
+			"command %d response timeout\n", cmd->cmd_id);
+		ret = -ETIMEDOUT;
+		return ret;
+	}
+
+	/* firmware status -1 means privacy is on and the command was absorbed */
+	ret = csi->notif->status;
+	if (ret == -1) {
+		dev_info(&csi->cldev->dev,
+			 "privacy on, command id = %d\n", cmd->cmd_id);
+		ret = 0;
+	} else if (ret) {
+		dev_err(&csi->cldev->dev,
+			"command %d response fail %d\n", cmd->cmd_id, ret);
+		return ret;
+	}
+
+	if (csi->notif->cmd_id != cmd->cmd_id) {
+		dev_err(&csi->cldev->dev,
+			"command response id mismatch, sent %d but got %d\n",
+			cmd->cmd_id, csi->notif->cmd_id);
+		/* was a bare -1 (-EPERM); return a meaningful errno instead */
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* hand CSI bus ownership to the given owner (host or firmware) */
+static int csi_set_owner(void *csi, enum csi_owner owner)
+{
+	struct mei_csi *p_csi = csi;
+	struct csi_cmd cmd = {
+		.cmd_id = CSI_SET_OWNER,
+		.param.param = owner,
+	};
+	size_t cmd_len = sizeof(cmd.cmd_id) + sizeof(cmd.param.param);
+	int ret;
+
+	mutex_lock(&p_csi->cmd_mutex);
+	ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+	mutex_unlock(&p_csi->cmd_mutex);
+
+	return ret;
+}
+
+/* query current CSI bus ownership; *owner is valid only when 0 is returned */
+static int csi_get_owner(void *csi, enum csi_owner *owner)
+{
+	struct mei_csi *p_csi = csi;
+	struct csi_cmd cmd = { .cmd_id = CSI_GET_OWNER };
+	int ret;
+
+	mutex_lock(&p_csi->cmd_mutex);
+	ret = mei_csi_send(p_csi, (uint8_t *)&cmd, sizeof(cmd.cmd_id));
+	if (!ret)
+		*owner = p_csi->notif->cont.cont;
+	mutex_unlock(&p_csi->cmd_mutex);
+
+	return ret;
+}
+
+/* push a new MIPI configuration (frequency + lane count) to firmware */
+static int csi_set_mipi_conf(void *csi, struct mipi_conf *conf)
+{
+	struct csi_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+	struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+	cmd.cmd_id = CSI_SET_MIPI_CONF;
+	cmd.param.conf.freq = conf->freq;
+	cmd.param.conf.lane_num = conf->lane_num;
+	cmd_len += sizeof(cmd.param.conf);
+
+	mutex_lock(&p_csi->cmd_mutex);
+	ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+	mutex_unlock(&p_csi->cmd_mutex);
+
+	return ret;
+}
+
+/* read back the active MIPI configuration; filled only on success */
+static int csi_get_mipi_conf(void *csi, struct mipi_conf *conf)
+{
+	struct csi_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	struct mipi_conf *res;
+	int ret;
+	struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+	cmd.cmd_id = CSI_GET_MIPI_CONF;
+
+	mutex_lock(&p_csi->cmd_mutex);
+	ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+	if (!ret) {
+		/* notif is read under cmd_mutex, before another command runs */
+		res = &p_csi->notif->cont.conf;
+		conf->freq = res->freq;
+		conf->lane_num = res->lane_num;
+	}
+	mutex_unlock(&p_csi->cmd_mutex);
+
+	return ret;
+}
+
+/* set the CSI power state in firmware */
+static int csi_set_power_state(void *csi, enum csi_power_state state)
+{
+	struct csi_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+	struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+	cmd.cmd_id = CSI_SET_POWER_STATE;
+	cmd.param.param = state;
+	cmd_len += sizeof(cmd.param.param);
+
+	mutex_lock(&p_csi->cmd_mutex);
+	ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+	mutex_unlock(&p_csi->cmd_mutex);
+
+	return ret;
+}
+
+/* query the CSI power state; *state is valid only when 0 is returned */
+static int csi_get_power_state(void *csi, enum csi_power_state *state)
+{
+	struct csi_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+	struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+	cmd.cmd_id = CSI_GET_POWER_STATE;
+
+	mutex_lock(&p_csi->cmd_mutex);
+	ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+	if (!ret)
+		*state = p_csi->notif->cont.cont;
+	mutex_unlock(&p_csi->cmd_mutex);
+
+	return ret;
+}
+
+/* publish (callback, handle) atomically with respect to privacy_notify() */
+static void csi_set_privacy_callback(void *csi,
+				     vsc_privacy_callback_t callback,
+				     void *handle)
+{
+	struct mei_csi *p_csi = csi;
+	unsigned long flags;
+
+	spin_lock_irqsave(&p_csi->privacy_lock, flags);
+	p_csi->callback = callback;
+	p_csi->handle = handle;
+	spin_unlock_irqrestore(&p_csi->privacy_lock, flags);
+}
+
+/* ops table handed to the intel_vsc core via vsc_register_csi() */
+static struct vsc_csi_ops csi_ops = {
+	.set_owner = csi_set_owner,
+	.get_owner = csi_get_owner,
+	.set_mipi_conf = csi_set_mipi_conf,
+	.get_mipi_conf = csi_get_mipi_conf,
+	.set_power_state = csi_set_power_state,
+	.get_power_state = csi_get_power_state,
+	.set_privacy_callback = csi_set_privacy_callback,
+};
+
+/* invoke the registered privacy callback (if any) outside the spinlock */
+static void privacy_notify(struct mei_csi *csi, uint8_t state)
+{
+	unsigned long flags;
+	void *handle;
+	vsc_privacy_callback_t callback;
+
+	/* snapshot under the lock so a concurrent update cannot tear the pair */
+	spin_lock_irqsave(&csi->privacy_lock, flags);
+	callback = csi->callback;
+	handle = csi->handle;
+	spin_unlock_irqrestore(&csi->privacy_lock, flags);
+
+	if (callback)
+		callback(handle, state);
+}
+
+/*
+ * callback for command response receive
+ *
+ * Dispatches firmware messages: unsolicited privacy notifications are
+ * forwarded to the registered callback; command responses are copied to
+ * csi->notif and the waiter in mei_csi_send() is woken.
+ */
+static void mei_csi_rx(struct mei_cl_device *cldev)
+{
+	int ret;
+	struct csi_notif notif = {0};
+	struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
+
+	ret = mei_cldev_recv(cldev, (uint8_t *)&notif,
+			     sizeof(struct csi_notif));
+	if (ret < 0) {
+		dev_err(&cldev->dev, "failure in recv %d\n", ret);
+		return;
+	}
+
+	switch (notif.cmd_id) {
+	case CSI_PRIVACY_NOTIF:
+		switch (notif.cont.cont) {
+		case PRIVACY_ON:
+			/*
+			 * NOTE(review): the callback argument is inverted
+			 * w.r.t. the privacy state (0 on PRIVACY_ON, 1 on
+			 * PRIVACY_OFF) — presumably it means "camera
+			 * usable"; confirm against the vsc.h consumer.
+			 */
+			privacy_notify(csi, 0);
+
+			dev_info(&cldev->dev, "privacy on\n");
+			break;
+
+		case PRIVACY_OFF:
+			privacy_notify(csi, 1);
+
+			dev_info(&cldev->dev, "privacy off\n");
+			break;
+
+		default:
+			dev_err(&cldev->dev,
+				"recv privacy wrong state\n");
+			break;
+		}
+		break;
+
+	case CSI_SET_OWNER:
+	case CSI_GET_OWNER:
+	case CSI_SET_MIPI_CONF:
+	case CSI_GET_MIPI_CONF:
+	case CSI_SET_POWER_STATE:
+	case CSI_GET_POWER_STATE:
+		/* ret <= sizeof(struct csi_notif), bounded by the recv above */
+		memcpy(csi->notif, &notif, ret);
+
+		if (!completion_done(&csi->response))
+			complete(&csi->response);
+		break;
+
+	default:
+		dev_err(&cldev->dev,
+			"recv not supported notification(%d)\n",
+			notif.cmd_id);
+		break;
+	}
+}
+
+/*
+ * Probe: allocate the mei_csi context (with the notif buffer appended to
+ * the same allocation), enable the MEI client, register the rx callback
+ * and expose the ops table to the intel_vsc core.
+ */
+static int mei_csi_probe(struct mei_cl_device *cldev,
+			 const struct mei_cl_device_id *id)
+{
+	struct mei_csi *csi;
+	int ret;
+	uint8_t *p;
+	size_t csi_size = sizeof(struct mei_csi);
+
+	/* single allocation: context followed by the response buffer */
+	p = kzalloc(csi_size + sizeof(struct csi_notif), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	csi = (struct mei_csi *)p;
+	csi->notif = (struct csi_notif *)(p + csi_size);
+
+	csi->cldev = cldev;
+
+	mutex_init(&csi->cmd_mutex);
+	init_completion(&csi->response);
+
+	spin_lock_init(&csi->privacy_lock);
+
+	mei_cldev_set_drvdata(cldev, csi);
+
+	ret = mei_cldev_enable(cldev);
+	if (ret < 0) {
+		dev_err(&cldev->dev,
+			"couldn't enable csi client ret=%d\n", ret);
+		goto err_out;
+	}
+
+	ret = mei_cldev_register_rx_cb(cldev, mei_csi_rx);
+	if (ret) {
+		dev_err(&cldev->dev,
+			"couldn't register rx event ret=%d\n", ret);
+		goto err_disable;
+	}
+
+	vsc_register_csi(csi, &csi_ops);
+
+	return 0;
+
+err_disable:
+	mei_cldev_disable(cldev);
+
+err_out:
+	kfree(csi);
+
+	return ret;
+}
+
+/* removal: unregister from the vsc core, wake waiters, drain and free */
+static void mei_csi_remove(struct mei_cl_device *cldev)
+{
+	struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
+
+	vsc_unregister_csi();
+
+	/* wake any sleeping command issuer so it can run to completion */
+	if (!completion_done(&csi->response))
+		complete(&csi->response);
+
+	mei_cldev_disable(cldev);
+
+	/* wait until no buffer access */
+	mutex_lock(&csi->cmd_mutex);
+	mutex_unlock(&csi->cmd_mutex);
+
+	kfree(csi);
+}
+
+/* MEI client UUID the firmware exposes for the CSI interface */
+#define MEI_UUID_CSI UUID_LE(0x92335FCF, 0x3203, 0x4472, \
+			     0xAF, 0x93, 0x7b, 0x44, 0x53, 0xAC, 0x29, 0xDA)
+
+static const struct mei_cl_device_id mei_csi_tbl[] = {
+	{ MEI_CSI_DRIVER_NAME, MEI_UUID_CSI, MEI_CL_VERSION_ANY },
+
+	/* required last entry */
+	{ }
+};
+MODULE_DEVICE_TABLE(mei, mei_csi_tbl);
+
+static struct mei_cl_driver mei_csi_driver = {
+	.id_table = mei_csi_tbl,
+	.name = MEI_CSI_DRIVER_NAME,
+
+	.probe = mei_csi_probe,
+	.remove = mei_csi_remove,
+};
+
+/* register the client driver on the MEI bus */
+static int __init mei_csi_init(void)
+{
+	int ret;
+
+	ret = mei_cldev_driver_register(&mei_csi_driver);
+	if (ret) {
+		pr_err("mei csi driver registration failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit mei_csi_exit(void)
+{
+	mei_cldev_driver_unregister(&mei_csi_driver);
+}
+
+module_init(mei_csi_init);
+module_exit(mei_csi_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device driver for Intel VSC CSI client");
diff --git a/drivers/misc/ivsc/mei_pse.c b/drivers/misc/ivsc/mei_pse.c
new file mode 100644
index 000000000000..fc9049515d78
--- /dev/null
+++ b/drivers/misc/ivsc/mei_pse.c
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+#include <linux/uuid.h>
+
+#define MEI_PSE_DRIVER_NAME "vsc_pse"
+
+#define PSE_TIMEOUT (5 * HZ)
+
+#define CONT_OFFSET offsetof(struct pse_notif, cont)
+#define NOTIF_HEADER_LEN 8
+
+#define MAX_RECV_SIZE 8192
+#define MAX_LOG_SIZE 0x40000000
+#define EM_LOG_FILE "/var/log/vsc_em.log"
+#define SEM_LOG_FILE "/var/log/vsc_sem.log"
+
+#define PM_SUBSYS_MAX 2
+#define PM_STATE_NAME_LEN 16
+#define DEV_NUM 64
+#define DEV_NAME_LEN 32
+
+#define FORMAT "|%16.32s |%12u "
+#define FORMAT_TAIL "|\n"
+#define CONSTRUCTED_FORMAT (FORMAT FORMAT FORMAT FORMAT FORMAT_TAIL)
+#define TITLE "| Device Name | Block Count "
+#define TITLE_TAIL "|"
+#define CONSTRUCTED_TITLE (TITLE TITLE TITLE TITLE TITLE_TAIL)
+
+/* command ids understood by the PSE firmware client */
+enum pse_cmd_id {
+	LOG_ONOFF = 0,
+	SET_WATERMARK = 1,
+	DUMP_TRACE = 2,
+	SET_TIMEOUT = 3,
+	SET_LOG_LEVEL = 4,
+	SET_TIME = 5,
+	GET_TIME = 6,
+	DUMP_POWER_DATA = 7,
+	TRACE_DATA_NOTIF = 8,
+	GET_FW_VER = 10,
+};
+
+/* power states reported per subsystem in the power-data dump */
+enum pm_state {
+	ACTIVE = 0,
+	CORE_HALT,
+	CORE_CLK_GATE,
+	DEEP_SLEEP,
+	STATE_MAX,
+};
+
+struct fw_version {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t hotfix;
+	uint32_t build;
+} __packed;
+
+struct dev_info {
+	char name[DEV_NAME_LEN];
+	uint32_t block_cnt;
+} __packed;
+
+struct dev_list {
+	struct dev_info dev[DEV_NUM];
+	uint32_t dev_cnt;
+} __packed;
+
+struct pm_status {
+	char name[PM_STATE_NAME_LEN];
+	uint64_t start;
+	uint64_t end;
+	uint64_t duration;
+	uint64_t count;
+} __packed;
+
+struct pm_subsys {
+	uint64_t total;
+	struct pm_status status[STATE_MAX];
+	struct dev_list dev;
+	uint16_t crc;
+} __packed;
+
+/* complete power-data payload, assembled from DUMP_POWER_DATA fragments */
+struct pm_data {
+	struct pm_subsys subsys[PM_SUBSYS_MAX];
+} __packed;
+
+/* command sent to firmware; only meaningful bytes are transmitted */
+struct pse_cmd {
+	uint32_t cmd_id;
+	union _cmd_param {
+		uint32_t param;
+		uint64_t time;
+	} param;
+} __packed;
+
+/* response/notification header received from firmware */
+struct pse_notif {
+	uint32_t cmd_id;
+	int8_t status;
+	/* 0 = EM7D, nonzero = SEM trace source */
+	uint8_t source;
+	int16_t size;
+	union _resp_cont {
+		uint64_t time;
+		struct fw_version ver;
+	} cont;
+} __packed;
+
+struct mei_pse {
+	struct mei_cl_device *cldev;
+
+	/* cmd_mutex serializes command send + response consumption */
+	struct mutex cmd_mutex;
+	struct pse_notif notif;
+	struct completion response;
+
+	uint8_t *recv_buf;
+
+	/* accumulation buffer/offset for fragmented power-data dumps */
+	uint8_t *pm_data;
+	uint32_t pm_data_pos;
+
+	loff_t em_pos;
+	struct file *em_file;
+
+	loff_t sem_pos;
+	struct file *sem_file;
+
+	struct dentry *dfs_dir;
+};
+
+/*
+ * Send a command to the PSE firmware client and wait for its response.
+ * Caller must hold pse->cmd_mutex; on success the response is available
+ * in pse->notif. Returns 0 on success or a negative errno.
+ */
+static int mei_pse_send(struct mei_pse *pse, struct pse_cmd *cmd, size_t len)
+{
+	int ret;
+
+	reinit_completion(&pse->response);
+
+	ret = mei_cldev_send(pse->cldev, (uint8_t *)cmd, len);
+	if (ret < 0) {
+		dev_err(&pse->cldev->dev,
+			"send command fail %d\n", ret);
+		return ret;
+	}
+
+	ret = wait_for_completion_killable_timeout(&pse->response, PSE_TIMEOUT);
+	if (ret < 0) {
+		dev_err(&pse->cldev->dev,
+			"command %d response error\n", cmd->cmd_id);
+		return ret;
+	} else if (ret == 0) {
+		dev_err(&pse->cldev->dev,
+			"command %d response timeout\n", cmd->cmd_id);
+		ret = -ETIMEDOUT;
+		return ret;
+	}
+
+	ret = pse->notif.status;
+	if (ret) {
+		dev_err(&pse->cldev->dev,
+			"command %d response fail %d\n", cmd->cmd_id, ret);
+		return ret;
+	}
+
+	if (pse->notif.cmd_id != cmd->cmd_id) {
+		dev_err(&pse->cldev->dev,
+			"command response id mismatch, sent %d but got %d\n",
+			cmd->cmd_id, pse->notif.cmd_id);
+		/* was a bare -1 (-EPERM); return a meaningful errno instead */
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* enable (nonzero) or disable (0) firmware trace logging */
+static int pse_log_onoff(struct mei_pse *pse, uint8_t onoff)
+{
+	struct pse_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+
+	cmd.cmd_id = LOG_ONOFF;
+	cmd.param.param = onoff;
+	cmd_len += sizeof(cmd.param.param);
+
+	mutex_lock(&pse->cmd_mutex);
+	ret = mei_pse_send(pse, &cmd, cmd_len);
+	mutex_unlock(&pse->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs write handler: "echo 0|1 > log_onoff" toggles firmware logging */
+static ssize_t pse_log_onoff_write(struct file *file,
+				   const char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	struct mei_pse *pse = file->private_data;
+	int ret;
+	uint8_t state;
+
+	ret = kstrtou8_from_user(buf, count, 0, &state);
+	if (ret)
+		return ret;
+
+	/* propagate command failure instead of silently claiming success */
+	ret = pse_log_onoff(pse, state);
+	if (ret)
+		return ret < 0 ? ret : -EIO;
+
+	return count;
+}
+
+/* set the trace buffer watermark percentage; -1 disables, range 0..100 */
+static int pse_set_watermark(struct mei_pse *pse, int val)
+{
+	struct pse_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+
+	if (val < -1 || val > 100) {
+		dev_err(&pse->cldev->dev, "error water mark value\n");
+		/* was a bare -1 (-EPERM); invalid argument is -EINVAL */
+		return -EINVAL;
+	}
+
+	cmd.cmd_id = SET_WATERMARK;
+	cmd.param.param = val;
+	cmd_len += sizeof(cmd.param.param);
+
+	mutex_lock(&pse->cmd_mutex);
+	ret = mei_pse_send(pse, &cmd, cmd_len);
+	mutex_unlock(&pse->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs write handler for the trace watermark value */
+static ssize_t pse_watermark_write(struct file *file,
+				   const char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	struct mei_pse *pse = file->private_data;
+	int ret, val;
+
+	ret = kstrtoint_from_user(buf, count, 0, &val);
+	if (ret)
+		return ret;
+
+	/* propagate command failure instead of silently claiming success */
+	ret = pse_set_watermark(pse, val);
+	if (ret)
+		return ret < 0 ? ret : -EIO;
+
+	return count;
+}
+
+/* ask firmware to flush its trace buffer (arrives via TRACE_DATA_NOTIF) */
+static int pse_dump_trace(struct mei_pse *pse)
+{
+	struct pse_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+
+	cmd.cmd_id = DUMP_TRACE;
+
+	mutex_lock(&pse->cmd_mutex);
+	ret = mei_pse_send(pse, &cmd, cmd_len);
+	mutex_unlock(&pse->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs write handler: any nonzero value triggers a trace dump */
+static ssize_t pse_dump_trace_write(struct file *file,
+				    const char __user *buf,
+				    size_t count, loff_t *ppos)
+{
+	struct mei_pse *pse = file->private_data;
+	int ret;
+	uint8_t val;
+
+	ret = kstrtou8_from_user(buf, count, 0, &val);
+	if (ret)
+		return ret;
+
+	if (!val)
+		return -EINVAL;
+
+	pse_dump_trace(pse);
+
+	return count;
+}
+
+/* set the firmware trace flush timeout; -1 disables, range 0..999 */
+static int pse_set_timeout(struct mei_pse *pse, int val)
+{
+	struct pse_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+
+	if (val < -1 || val > 999) {
+		dev_err(&pse->cldev->dev, "error timeout value\n");
+		/* was a bare -1 (-EPERM); invalid argument is -EINVAL */
+		return -EINVAL;
+	}
+
+	cmd.cmd_id = SET_TIMEOUT;
+	cmd.param.param = val;
+	cmd_len += sizeof(cmd.param.param);
+
+	mutex_lock(&pse->cmd_mutex);
+	ret = mei_pse_send(pse, &cmd, cmd_len);
+	mutex_unlock(&pse->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs write handler for the trace flush timeout */
+static ssize_t pse_timeout_write(struct file *file,
+				 const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct mei_pse *pse = file->private_data;
+	int ret, val;
+
+	ret = kstrtoint_from_user(buf, count, 0, &val);
+	if (ret)
+		return ret;
+
+	/* propagate command failure instead of silently claiming success */
+	ret = pse_set_timeout(pse, val);
+	if (ret)
+		return ret < 0 ? ret : -EIO;
+
+	return count;
+}
+
+/* set the firmware log verbosity, range 0..4 */
+static int pse_set_log_level(struct mei_pse *pse, int val)
+{
+	struct pse_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+
+	if (val < 0 || val > 4) {
+		dev_err(&pse->cldev->dev, "unsupported log level\n");
+		/* was a bare -1 (-EPERM); invalid argument is -EINVAL */
+		return -EINVAL;
+	}
+
+	cmd.cmd_id = SET_LOG_LEVEL;
+	cmd.param.param = val;
+	cmd_len += sizeof(cmd.param.param);
+
+	mutex_lock(&pse->cmd_mutex);
+	ret = mei_pse_send(pse, &cmd, cmd_len);
+	mutex_unlock(&pse->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs write handler for the firmware log level */
+static ssize_t pse_log_level_write(struct file *file,
+				   const char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	struct mei_pse *pse = file->private_data;
+	int ret, val;
+
+	ret = kstrtoint_from_user(buf, count, 0, &val);
+	if (ret)
+		return ret;
+
+	/* propagate command failure instead of silently claiming success */
+	ret = pse_set_log_level(pse, val);
+	if (ret)
+		return ret < 0 ? ret : -EIO;
+
+	return count;
+}
+
+/* sync the host monotonic clock (ns) to the PSE firmware */
+static int pse_set_time(struct mei_pse *pse)
+{
+	struct pse_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+
+	cmd.cmd_id = SET_TIME;
+	cmd.param.time = ktime_get_ns();
+	cmd_len += sizeof(cmd.param.time);
+
+	mutex_lock(&pse->cmd_mutex);
+	ret = mei_pse_send(pse, &cmd, cmd_len);
+	mutex_unlock(&pse->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs write handler: any write triggers a time sync */
+static ssize_t pse_time_write(struct file *file,
+			      const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	struct mei_pse *pse = file->private_data;
+	int ret;
+
+	ret = pse_set_time(pse);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* read the PSE firmware clock in nanoseconds into *val */
+static int pse_get_time(struct mei_pse *pse, uint64_t *val)
+{
+	struct pse_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+
+	cmd.cmd_id = GET_TIME;
+
+	mutex_lock(&pse->cmd_mutex);
+	ret = mei_pse_send(pse, &cmd, cmd_len);
+	if (!ret) {
+		/*
+		 * Read the response while still holding cmd_mutex: the
+		 * original read pse->notif after unlocking, racing with a
+		 * concurrent command overwriting the shared notif buffer.
+		 */
+		*val = pse->notif.cont.time;
+
+		dev_info(&pse->cldev->dev,
+			 "time = (%llu) nanoseconds\n", *val);
+	}
+	mutex_unlock(&pse->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs read handler: report the firmware clock as text */
+static ssize_t pse_time_read(struct file *file, char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct mei_pse *pse = file->private_data;
+	int ret, pos;
+	uint64_t val;
+	/* one zeroed page as a scratch text buffer, freed below */
+	unsigned long addr = get_zeroed_page(GFP_KERNEL);
+
+	if (!addr)
+		return -ENOMEM;
+
+	ret = pse_get_time(pse, &val);
+	if (ret)
+		goto out;
+
+	pos = snprintf((char *)addr, PAGE_SIZE,
+		       "pse time = (%llu) nanoseconds\n", val);
+
+	ret = simple_read_from_buffer(buf, count, ppos, (char *)addr, pos);
+
+out:
+	free_page(addr);
+	return ret;
+}
+
+/*
+ * Request a power-data dump; mei_pse_rx() reassembles the fragments into
+ * pse->pm_data and completes the wait when the full struct has arrived.
+ */
+static int pse_dump_power_data(struct mei_pse *pse)
+{
+	struct pse_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+
+	cmd.cmd_id = DUMP_POWER_DATA;
+
+	mutex_lock(&pse->cmd_mutex);
+	ret = mei_pse_send(pse, &cmd, cmd_len);
+	mutex_unlock(&pse->cmd_mutex);
+
+	return ret;
+}
+
+/*
+ * Render the per-subsystem power-state table (duration, count, percentage)
+ * into addr at offset pos, bounded by len. Returns the new offset.
+ */
+static int dump_power_state_data(struct mei_pse *pse,
+				 char *addr, int pos, int len)
+{
+	const char * const names[] = {"EM7D", "SEM"};
+	const char *title =
+		"| power states | duration(ms) | count | percentage(%) |";
+	struct pm_subsys *subsys;
+	uint64_t total_duration, duration, num, frac;
+	int i, j;
+
+	for (i = 0; i < PM_SUBSYS_MAX; i++) {
+		subsys = &((struct pm_data *)pse->pm_data)->subsys[i];
+
+		pos += snprintf((char *)addr + pos,
+				len - pos,
+				"power state of %s:\n",
+				names[i]);
+
+		pos += snprintf((char *)addr + pos,
+				len - pos,
+				"%s\n",
+				title);
+
+		total_duration = 0;
+		for (j = 0; j < STATE_MAX; j++)
+			total_duration += subsys->status[j].duration;
+
+		/* avoid division by zero when no state time was recorded */
+		if (!total_duration)
+			total_duration = 1;
+
+		for (j = 0; j < STATE_MAX; j++) {
+			duration = subsys->status[j].duration * 100;
+			num = duration / total_duration;
+			/* fractional part, rounded to 6 decimal places */
+			frac = (duration % total_duration *
+				10000000 / total_duration + 5) / 10;
+
+			pos += snprintf((char *)addr + pos,
+					len - pos,
+					"|%13.16s |%13llu |%6llu |%7u.%06u |\n",
+					subsys->status[j].name,
+					subsys->status[j].duration,
+					subsys->status[j].count,
+					(uint32_t)num,
+					(uint32_t)frac);
+		}
+
+		pos += snprintf((char *)addr + pos, len - pos, "\n");
+	}
+
+	return pos;
+}
+
+/*
+ * Render the per-subsystem device block-count table, four devices per
+ * output row (the switch pads the final partial row with empty columns).
+ * Returns the new offset into addr.
+ */
+static int dump_dev_power_data(struct mei_pse *pse,
+			       char *addr, int pos, int len)
+{
+	const char * const names[] = {"EM7D", "SEM"};
+	struct pm_subsys *subsys;
+	int i, j;
+	const char *title = CONSTRUCTED_TITLE;
+	const char *format = CONSTRUCTED_FORMAT;
+
+	for (i = 0; i < PM_SUBSYS_MAX; i++) {
+		subsys = &((struct pm_data *)pse->pm_data)->subsys[i];
+
+		pos += snprintf((char *)addr + pos,
+				len - pos,
+				"device list of %s:\n",
+				names[i]);
+
+		pos += snprintf((char *)addr + pos,
+				len - pos,
+				"%s\n",
+				title);
+
+		for (j = 0; j < subsys->dev.dev_cnt; j += 4) {
+			/* pad the last row when fewer than 4 devices remain */
+			switch (subsys->dev.dev_cnt - j) {
+			case 1:
+				pos += snprintf((char *)addr + pos,
+						len - pos,
+						format,
+						subsys->dev.dev[j].name,
+						subsys->dev.dev[j].block_cnt,
+						"", 0,
+						"", 0,
+						"", 0);
+				break;
+
+			case 2:
+				pos += snprintf((char *)addr + pos,
+						len - pos,
+						format,
+						subsys->dev.dev[j].name,
+						subsys->dev.dev[j].block_cnt,
+						subsys->dev.dev[j+1].name,
+						subsys->dev.dev[j+1].block_cnt,
+						"", 0,
+						"", 0);
+				break;
+
+			case 3:
+				pos += snprintf((char *)addr + pos,
+						len - pos,
+						format,
+						subsys->dev.dev[j].name,
+						subsys->dev.dev[j].block_cnt,
+						subsys->dev.dev[j+1].name,
+						subsys->dev.dev[j+1].block_cnt,
+						subsys->dev.dev[j+2].name,
+						subsys->dev.dev[j+2].block_cnt,
+						"", 0);
+				break;
+
+			default:
+				pos += snprintf((char *)addr + pos,
+						len - pos,
+						format,
+						subsys->dev.dev[j].name,
+						subsys->dev.dev[j].block_cnt,
+						subsys->dev.dev[j+1].name,
+						subsys->dev.dev[j+1].block_cnt,
+						subsys->dev.dev[j+2].name,
+						subsys->dev.dev[j+2].block_cnt,
+						subsys->dev.dev[j+3].name,
+						subsys->dev.dev[j+3].block_cnt);
+				break;
+			}
+		}
+
+		if (i < PM_SUBSYS_MAX - 1)
+			pos += snprintf((char *)addr + pos, len - pos, "\n");
+	}
+
+	return pos;
+}
+
+/* debugfs read handler: dump power data as two text tables */
+static ssize_t pse_power_data_read(struct file *file, char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	struct mei_pse *pse = file->private_data;
+	unsigned long addr = get_zeroed_page(GFP_KERNEL);
+	int ret, pos = 0;
+
+	if (!addr)
+		return -ENOMEM;
+
+	ret = pse_dump_power_data(pse);
+	if (ret)
+		goto out;
+
+	/*
+	 * NOTE(review): pm_data is read here without cmd_mutex; presumably
+	 * safe because the dump completed above, but a concurrent dump from
+	 * another reader could overwrite it — confirm.
+	 */
+	pos = dump_power_state_data(pse, (char *)addr, pos, PAGE_SIZE);
+	pos = dump_dev_power_data(pse, (char *)addr, pos, PAGE_SIZE);
+
+	ret = simple_read_from_buffer(buf, count, ppos, (char *)addr, pos);
+
+out:
+	free_page(addr);
+	return ret;
+}
+
+/* read the PSE firmware version; *ver is valid only when 0 is returned */
+static int pse_get_fw_ver(struct mei_pse *pse, struct fw_version *ver)
+{
+	struct pse_cmd cmd;
+	size_t cmd_len = sizeof(cmd.cmd_id);
+	int ret;
+
+	cmd.cmd_id = GET_FW_VER;
+
+	mutex_lock(&pse->cmd_mutex);
+	ret = mei_pse_send(pse, &cmd, cmd_len);
+	if (!ret) {
+		/* notif is read under cmd_mutex, before another command runs */
+		memcpy(ver, &pse->notif.cont.ver, sizeof(*ver));
+
+		dev_info(&pse->cldev->dev,
+			 "fw version: %u.%u.%u.%u\n",
+			 ver->major, ver->minor, ver->hotfix, ver->build);
+	}
+	mutex_unlock(&pse->cmd_mutex);
+
+	return ret;
+}
+
+/* debugfs read handler: report the firmware version as text */
+static ssize_t pse_fw_ver_read(struct file *file, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	struct mei_pse *pse = file->private_data;
+	int ret, pos;
+	struct fw_version ver;
+	unsigned long addr = get_zeroed_page(GFP_KERNEL);
+
+	if (!addr)
+		return -ENOMEM;
+
+	ret = pse_get_fw_ver(pse, &ver);
+	if (ret)
+		goto out;
+
+	pos = snprintf((char *)addr, PAGE_SIZE,
+		       "fw version: %u.%u.%u.%u\n",
+		       ver.major, ver.minor, ver.hotfix, ver.build);
+
+	ret = simple_read_from_buffer(buf, count, ppos, (char *)addr, pos);
+
+out:
+	free_page(addr);
+	return ret;
+}
+
+/* create a debugfs file wired to the pse_dfs_<name>_fops table */
+#define PSE_DFS_ADD_FILE(name)					\
+	debugfs_create_file(#name, 0644, pse->dfs_dir, pse,	\
+			    &pse_dfs_##name##_fops)
+
+/* read/write file_operations built from pse_<name>_read/_write */
+#define PSE_DFS_FILE_OPS(name)					\
+static const struct file_operations pse_dfs_##name##_fops = {	\
+	.read = pse_##name##_read,				\
+	.write = pse_##name##_write,				\
+	.open = simple_open,					\
+}
+
+/* read-only variant */
+#define PSE_DFS_FILE_READ_OPS(name)				\
+static const struct file_operations pse_dfs_##name##_fops = {	\
+	.read = pse_##name##_read,				\
+	.open = simple_open,					\
+}
+
+/* write-only variant */
+#define PSE_DFS_FILE_WRITE_OPS(name)				\
+static const struct file_operations pse_dfs_##name##_fops = {	\
+	.write = pse_##name##_write,				\
+	.open = simple_open,					\
+}
+
+PSE_DFS_FILE_WRITE_OPS(log_onoff);
+PSE_DFS_FILE_WRITE_OPS(watermark);
+PSE_DFS_FILE_WRITE_OPS(dump_trace);
+PSE_DFS_FILE_WRITE_OPS(timeout);
+PSE_DFS_FILE_WRITE_OPS(log_level);
+
+PSE_DFS_FILE_OPS(time);
+
+PSE_DFS_FILE_READ_OPS(fw_ver);
+PSE_DFS_FILE_READ_OPS(power_data);
+
+/*
+ * callback for command response receive
+ *
+ * Dispatches firmware messages: trace data is appended to the log files,
+ * command responses are copied to pse->notif and the waiter in
+ * mei_pse_send() is woken, and fragmented power-data dumps are
+ * reassembled into pse->pm_data.
+ */
+static void mei_pse_rx(struct mei_cl_device *cldev)
+{
+	int ret;
+	struct pse_notif *notif;
+	struct mei_pse *pse = mei_cldev_get_drvdata(cldev);
+	struct file *file;
+	loff_t *pos;
+
+	ret = mei_cldev_recv(cldev, pse->recv_buf, MAX_RECV_SIZE);
+	if (ret < 0) {
+		dev_err(&cldev->dev, "failure in recv %d\n", ret);
+		return;
+	}
+	notif = (struct pse_notif *)pse->recv_buf;
+
+	switch (notif->cmd_id) {
+	case TRACE_DATA_NOTIF:
+		/* source selects which subsystem log this chunk belongs to */
+		if (notif->source) {
+			file = pse->sem_file;
+			pos = &pse->sem_pos;
+		} else {
+			file = pse->em_file;
+			pos = &pse->em_pos;
+		}
+
+		if (*pos < MAX_LOG_SIZE) {
+			ret = kernel_write(file,
+					   pse->recv_buf + CONT_OFFSET,
+					   ret - CONT_OFFSET,
+					   pos);
+			if (ret < 0)
+				dev_err(&cldev->dev,
+					"error in writing log %d\n", ret);
+			else
+				*pos += ret;
+		} else
+			dev_warn(&cldev->dev,
+				 "already exceed max log size\n");
+		break;
+
+	case LOG_ONOFF:
+	case SET_WATERMARK:
+	case DUMP_TRACE:
+	case SET_TIMEOUT:
+	case SET_LOG_LEVEL:
+	case SET_TIME:
+	case GET_TIME:
+	case GET_FW_VER:
+		/*
+		 * Clamp the copy: ret may be up to MAX_RECV_SIZE (8192)
+		 * while pse->notif is a small fixed-size struct; the
+		 * unclamped memcpy could overflow it.
+		 */
+		memcpy(&pse->notif, notif,
+		       min_t(size_t, ret, sizeof(pse->notif)));
+
+		if (!completion_done(&pse->response))
+			complete(&pse->response);
+		break;
+
+	case DUMP_POWER_DATA:
+		/* bound the accumulation so a rogue fragment cannot overflow
+		 * the pm_data buffer (MAX_RECV_SIZE bytes were allocated)
+		 */
+		if (notif->status == 0 &&
+		    pse->pm_data_pos + ret - NOTIF_HEADER_LEN <=
+		    MAX_RECV_SIZE) {
+			memcpy(pse->pm_data + pse->pm_data_pos,
+			       pse->recv_buf + NOTIF_HEADER_LEN,
+			       ret - NOTIF_HEADER_LEN);
+			pse->pm_data_pos += ret - NOTIF_HEADER_LEN;
+
+			if (pse->pm_data_pos >= sizeof(struct pm_data)) {
+				pse->pm_data_pos = 0;
+				memcpy(&pse->notif, notif, NOTIF_HEADER_LEN);
+
+				if (!completion_done(&pse->response))
+					complete(&pse->response);
+			}
+		} else {
+			dev_err(&cldev->dev, "error in recving power data\n");
+
+			pse->pm_data_pos = 0;
+			memcpy(&pse->notif, notif, NOTIF_HEADER_LEN);
+
+			if (!completion_done(&pse->response))
+				complete(&pse->response);
+		}
+		break;
+
+	default:
+		dev_err(&cldev->dev,
+			"recv not supported notification\n");
+		break;
+	}
+}
+
+/*
+ * Probe: allocate context and the receive/power-data buffers, enable the
+ * MEI client, register the rx callback, open the trace log files and
+ * create the debugfs control files.
+ */
+static int mei_pse_probe(struct mei_cl_device *cldev,
+			 const struct mei_cl_device_id *id)
+{
+	struct mei_pse *pse;
+	int ret;
+	uint32_t order = get_order(MAX_RECV_SIZE);
+
+	pse = kzalloc(sizeof(struct mei_pse), GFP_KERNEL);
+	if (!pse)
+		return -ENOMEM;
+
+	pse->recv_buf = (uint8_t *)__get_free_pages(GFP_KERNEL, order);
+	if (!pse->recv_buf) {
+		kfree(pse);
+		return -ENOMEM;
+	}
+
+	/* same order as recv_buf: large enough for struct pm_data */
+	pse->pm_data = (uint8_t *)__get_free_pages(GFP_KERNEL, order);
+	if (!pse->pm_data) {
+		free_pages((unsigned long)pse->recv_buf, order);
+		kfree(pse);
+		return -ENOMEM;
+	}
+	pse->pm_data_pos = 0;
+
+	pse->cldev = cldev;
+	mutex_init(&pse->cmd_mutex);
+	init_completion(&pse->response);
+
+	mei_cldev_set_drvdata(cldev, pse);
+
+	ret = mei_cldev_enable(cldev);
+	if (ret < 0) {
+		dev_err(&cldev->dev,
+			"couldn't enable pse client ret=%d\n", ret);
+		goto err_out;
+	}
+
+	ret = mei_cldev_register_rx_cb(cldev, mei_pse_rx);
+	if (ret) {
+		dev_err(&cldev->dev,
+			"couldn't register rx event ret=%d\n", ret);
+		goto err_disable;
+	}
+
+	pse->em_file = filp_open(EM_LOG_FILE,
+				 O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC,
+				 0600);
+	if (IS_ERR(pse->em_file)) {
+		dev_err(&cldev->dev,
+			"filp_open(%s) failed\n", EM_LOG_FILE);
+		ret = PTR_ERR(pse->em_file);
+		goto err_disable;
+	}
+	pse->em_pos = 0;
+
+	pse->sem_file = filp_open(SEM_LOG_FILE,
+				  O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC,
+				  0600);
+	if (IS_ERR(pse->sem_file)) {
+		dev_err(&cldev->dev,
+			"filp_open(%s) failed\n", SEM_LOG_FILE);
+		ret = PTR_ERR(pse->sem_file);
+		goto err_close;
+	}
+	pse->sem_pos = 0;
+
+	/*
+	 * NOTE(review): debugfs_create_dir() returns an ERR_PTR on failure,
+	 * never NULL, so this truthiness check is effectively always taken
+	 * — confirm intended behavior when debugfs is unavailable.
+	 */
+	pse->dfs_dir = debugfs_create_dir("vsc_pse", NULL);
+	if (pse->dfs_dir) {
+		PSE_DFS_ADD_FILE(log_onoff);
+		PSE_DFS_ADD_FILE(watermark);
+		PSE_DFS_ADD_FILE(dump_trace);
+		PSE_DFS_ADD_FILE(timeout);
+		PSE_DFS_ADD_FILE(log_level);
+		PSE_DFS_ADD_FILE(time);
+		PSE_DFS_ADD_FILE(fw_ver);
+		PSE_DFS_ADD_FILE(power_data);
+	}
+
+	return 0;
+
+err_close:
+	filp_close(pse->em_file, NULL);
+
+err_disable:
+	mei_cldev_disable(cldev);
+
+err_out:
+	free_pages((unsigned long)pse->pm_data, order);
+
+	free_pages((unsigned long)pse->recv_buf, order);
+
+	kfree(pse);
+
+	return ret;
+}
+
+/* removal: wake waiters, tear down debugfs, log files and buffers */
+static void mei_pse_remove(struct mei_cl_device *cldev)
+{
+	struct mei_pse *pse = mei_cldev_get_drvdata(cldev);
+	uint32_t order = get_order(MAX_RECV_SIZE);
+
+	/* wake any sleeping command issuer so it can run to completion */
+	if (!completion_done(&pse->response))
+		complete(&pse->response);
+
+	mei_cldev_disable(cldev);
+
+	debugfs_remove_recursive(pse->dfs_dir);
+
+	filp_close(pse->em_file, NULL);
+	filp_close(pse->sem_file, NULL);
+
+	/* wait until no buffer access */
+	mutex_lock(&pse->cmd_mutex);
+	mutex_unlock(&pse->cmd_mutex);
+
+	free_pages((unsigned long)pse->pm_data, order);
+
+	free_pages((unsigned long)pse->recv_buf, order);
+
+	kfree(pse);
+}
+
+/* MEI client UUID the firmware exposes for the PSE interface */
+#define MEI_UUID_PSE UUID_LE(0xD035E00C, 0x6DAE, 0x4B6D, \
+			     0xB4, 0x7A, 0xF8, 0x8E, 0x30, 0x2A, 0x40, 0x4E)
+
+static const struct mei_cl_device_id mei_pse_tbl[] = {
+	{ MEI_PSE_DRIVER_NAME, MEI_UUID_PSE, MEI_CL_VERSION_ANY },
+
+	/* required last entry */
+	{ }
+};
+MODULE_DEVICE_TABLE(mei, mei_pse_tbl);
+
+static struct mei_cl_driver mei_pse_driver = {
+	.id_table = mei_pse_tbl,
+	.name = MEI_PSE_DRIVER_NAME,
+
+	.probe = mei_pse_probe,
+	.remove = mei_pse_remove,
+};
+
+/* register the client driver on the MEI bus */
+static int __init mei_pse_init(void)
+{
+	int ret;
+
+	ret = mei_cldev_driver_register(&mei_pse_driver);
+	if (ret) {
+		pr_err("mei pse driver registration failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit mei_pse_exit(void)
+{
+	mei_cldev_driver_unregister(&mei_pse_driver);
+}
+
+module_init(mei_pse_init);
+module_exit(mei_pse_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device driver for Intel VSC PSE client");
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index f5fd5b786607..559858e56de9 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -46,4 +46,11 @@ config INTEL_MEI_TXE
Supported SoCs:
Intel Bay Trail
+config INTEL_MEI_VSC
+	tristate "Intel Visual Sensing Controller device with ME interface"
+	select INTEL_MEI
+	depends on X86 && SPI
+	help
+	  MEI over SPI for Intel Visual Sensing Controller device
+
source "drivers/misc/mei/hdcp/Kconfig"
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index f1c76f7ee804..a692dedee4d2 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -22,6 +22,10 @@ obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
mei-txe-objs := pci-txe.o
mei-txe-objs += hw-txe.o
+obj-$(CONFIG_INTEL_MEI_VSC) += mei-vsc.o
+mei-vsc-objs := spi-vsc.o
+mei-vsc-objs += hw-vsc.o
+
mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
CFLAGS_mei-trace.o = -I$(src)
diff --git a/drivers/misc/mei/hw-vsc.c b/drivers/misc/mei/hw-vsc.c
new file mode 100644
index 000000000000..9a9965962dc7
--- /dev/null
+++ b/drivers/misc/mei/hw-vsc.c
@@ -0,0 +1,1624 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/swap.h>
+#include <linux/types.h>
+
+#include "hbm.h"
+#include "hw-vsc.h"
+#include "mei_dev.h"
+#include "mei-trace.h"
+
+/*
+ * Run one full-duplex SPI transfer of @len bytes using the transfer and
+ * message objects embedded in @hw.  Callers serialize access via hw->mutex.
+ */
+static int spi_dev_xfer(struct mei_vsc_hw *hw, void *out_data, void *in_data,
+			int len)
+{
+	hw->xfer.tx_buf = out_data;
+	hw->xfer.rx_buf = in_data;
+	hw->xfer.len = len;
+
+	spi_message_init_with_transfers(&hw->msg, &hw->xfer, 1);
+	return spi_sync_locked(hw->spi, &hw->msg);
+}
+
+/* CRC32 trailer stored immediately after the payload of a packet */
+#define SPI_XFER_PACKET_CRC(pkt) (*(u32 *)((pkt)->buf + (pkt)->hdr.len))
+/*
+ * Validate a packet received from the firmware: check the CRC32 trailer
+ * against header + payload, then decode protocol-level error commands.
+ *
+ * Return: 0 on success, -EINVAL on CRC mismatch, -EIO on a fatal firmware
+ * error, -EAGAIN on NACK/BUSY or a sequence-number mismatch.
+ */
+static int spi_validate_packet(struct mei_vsc_hw *hw,
+			       struct spi_xfer_packet *pkt)
+{
+	u32 base_crc;
+	u32 crc;
+	struct spi_xfer_hdr *hdr = &pkt->hdr;
+
+	base_crc = SPI_XFER_PACKET_CRC(pkt);
+	crc = ~crc32(~0, (u8 *)pkt, sizeof(struct spi_xfer_hdr) + pkt->hdr.len);
+
+	if (base_crc != crc) {
+		dev_err(&hw->spi->dev, "%s crc error cmd %x 0x%x 0x%x\n",
+			__func__, hdr->cmd, base_crc, crc);
+		return -EINVAL;
+	}
+
+	if (hdr->cmd == CMD_SPI_FATAL_ERR) {
+		dev_err(&hw->spi->dev,
+			"receive fatal error from FW cmd %d %d %d.\nCore dump: %s\n",
+			hdr->cmd, hdr->seq, hw->seq, (char *)pkt->buf);
+		return -EIO;
+	} else if (hdr->cmd == CMD_SPI_NACK || hdr->cmd == CMD_SPI_BUSY ||
+		   hdr->seq != hw->seq) {
+		dev_err(&hw->spi->dev, "receive error from FW cmd %d %d %d\n",
+			hdr->cmd, hdr->seq, hw->seq);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/* True when the device has asserted the wakeuphost GPIO (ROM boot phase). */
+static inline bool spi_rom_xfer_asserted(struct mei_vsc_hw *hw)
+{
+	return gpiod_get_value_cansleep(hw->wakeuphost);
+}
+
+/*
+ * True when at least one wakeup request is outstanding (lock_cnt > 0) and
+ * the firmware has asserted the wakeuphost GPIO.
+ */
+static inline bool spi_xfer_asserted(struct mei_vsc_hw *hw)
+{
+	return atomic_read(&hw->lock_cnt) > 0 &&
+	       gpiod_get_value_cansleep(hw->wakeuphost);
+}
+
+/* Request a transfer window: drive wakeupfw low to wake the firmware. */
+static void spi_xfer_lock(struct mei_vsc_hw *hw)
+{
+	gpiod_set_value_cansleep(hw->wakeupfw, 0);
+}
+
+/* End a transfer window: drop one lock reference and deassert wakeupfw. */
+static void spi_xfer_unlock(struct mei_vsc_hw *hw)
+{
+	atomic_dec_if_positive(&hw->lock_cnt);
+	gpiod_set_value_cansleep(hw->wakeupfw, 1);
+}
+
+/* True while wakeupfw is held low, i.e. a host transfer is in progress. */
+static bool spi_xfer_locked(struct mei_vsc_hw *hw)
+{
+	return !gpiod_get_value_cansleep(hw->wakeupfw);
+}
+
+/* Firmware is signalling data ready while no host transfer is in flight. */
+static bool spi_need_read(struct mei_vsc_hw *hw)
+{
+	return spi_xfer_asserted(hw) && !spi_xfer_locked(hw);
+}
+
+#define WAIT_FW_ASSERTED_TIMEOUT (2 * HZ)
+/*
+ * Wait (up to two seconds) for the firmware to assert the wakeuphost line.
+ *
+ * Return: 0 when the line was asserted in time, -ETIME otherwise.
+ */
+static int spi_xfer_wait_asserted(struct mei_vsc_hw *hw)
+{
+	wait_event_interruptible_timeout(hw->xfer_wait, spi_xfer_asserted(hw),
+					 WAIT_FW_ASSERTED_TIMEOUT);
+
+	dev_dbg(&hw->spi->dev, "%s %d %d %d\n", __func__,
+		atomic_read(&hw->lock_cnt),
+		gpiod_get_value_cansleep(hw->wakeupfw),
+		gpiod_get_value_cansleep(hw->wakeuphost));
+
+	/* re-check the condition itself; the wait may have been interrupted */
+	return spi_xfer_asserted(hw) ? 0 : -ETIME;
+}
+
+/*
+ * Wake the firmware and wait for it to signal readiness.
+ *
+ * Return: 0 when the firmware asserted wakeuphost in time, -ETIME otherwise.
+ */
+static int spi_wakeup_request(struct mei_vsc_hw *hw)
+{
+	/* wakeup spi slave and wait for response */
+	spi_xfer_lock(hw);
+	return spi_xfer_wait_asserted(hw);
+}
+
+/* Drop the wakeup request: decrement lock count and deassert wakeupfw. */
+static void spi_wakeup_release(struct mei_vsc_hw *hw)
+{
+	/*
+	 * Plain call instead of "return <void expression>": the latter is a
+	 * constraint violation in ISO C (accepted only as a GNU extension).
+	 */
+	spi_xfer_unlock(hw);
+}
+
+/*
+ * Locate the first PACKET_SYNC byte in @buf.
+ *
+ * Return: its offset, or -1 when not found within @len bytes.
+ */
+static int find_sync_byte(u8 *buf, int len)
+{
+	int pos = 0;
+
+	while (pos < len) {
+		if (buf[pos] == PACKET_SYNC)
+			return pos;
+		pos++;
+	}
+
+	return -1;
+}
+
+#define PACKET_PADDING_SIZE 1
+#define MAX_XFER_COUNT 5
+/*
+ * Send @pkt to the firmware and reassemble the reply into @ack_pkt.
+ * The reply may arrive unaligned, so the receive stream is scanned for a
+ * PACKET_SYNC byte first and then accumulated across up to MAX_XFER_COUNT
+ * transfers, resizing each next transfer from the decoded header length.
+ *
+ * Return: 0 on a validated reply, -EAGAIN when the reply never completed,
+ * or the SPI/validation error.
+ */
+static int mei_vsc_xfer_internal(struct mei_vsc_hw *hw,
+				 struct spi_xfer_packet *pkt,
+				 struct spi_xfer_packet *ack_pkt)
+{
+	u8 *rx_buf = hw->rx_buf1;
+	u8 *tx_buf = hw->tx_buf1;
+	int next_xfer_len = PACKET_SIZE(pkt) + XFER_TIMEOUT_BYTES;
+	int offset = 0;
+	bool synced = false;
+	int len;
+	int count_down = MAX_XFER_COUNT;
+	int ret = 0;
+	int i;
+
+	dev_dbg(&hw->spi->dev, "spi tx pkt begin: %s %d %d\n", __func__,
+		spi_xfer_asserted(hw), gpiod_get_value_cansleep(hw->wakeupfw));
+	memcpy(tx_buf, pkt, PACKET_SIZE(pkt));
+	memset(rx_buf, 0, MAX_XFER_BUFFER_SIZE);
+
+	do {
+		dev_dbg(&hw->spi->dev,
+			"spi tx pkt partial ing: %s %d %d %d %d\n", __func__,
+			spi_xfer_asserted(hw),
+			gpiod_get_value_cansleep(hw->wakeupfw), next_xfer_len,
+			synced);
+
+		count_down--;
+		ret = spi_dev_xfer(hw, tx_buf, rx_buf, next_xfer_len);
+		if (ret)
+			return ret;
+
+		memset(tx_buf, 0, MAX_XFER_BUFFER_SIZE);
+		if (!synced) {
+			i = find_sync_byte(rx_buf, next_xfer_len);
+			if (i < 0)
+				continue;
+			synced = true;
+			/* clamp to the ack buffer so a bogus stream cannot overrun it */
+			len = min_t(int, next_xfer_len - i,
+				    (int)sizeof(*ack_pkt) - offset);
+		} else {
+			i = 0;
+			len = min_t(int, next_xfer_len,
+				    (int)sizeof(*ack_pkt) - offset);
+		}
+
+		/*
+		 * @offset counts bytes, so index the ack as raw bytes; the
+		 * previous &ack_pkt[offset] scaled by sizeof(*ack_pkt) and
+		 * wrote far past the ack buffer for any offset > 0.
+		 */
+		memcpy((u8 *)ack_pkt + offset, &rx_buf[i], len);
+		offset += len;
+
+		/* once the header is complete, size the remaining transfer */
+		if (offset >= sizeof(ack_pkt->hdr))
+			next_xfer_len = PACKET_SIZE(ack_pkt) - offset +
+					PACKET_PADDING_SIZE;
+
+	} while (next_xfer_len > 0 && count_down > 0);
+
+	dev_dbg(&hw->spi->dev, "spi tx pkt done: %s %d %d cmd %d %d %d %d\n",
+		__func__, next_xfer_len, count_down, ack_pkt->hdr.sync,
+		ack_pkt->hdr.cmd, ack_pkt->hdr.len, ack_pkt->hdr.seq);
+
+	if (next_xfer_len > 0)
+		return -EAGAIN;
+
+	return spi_validate_packet(hw, ack_pkt);
+}
+
+/*
+ * Build a framed, CRC-protected command packet, wake the firmware, run the
+ * transaction and copy the reply payload to @rx (truncated to @rx_max_len).
+ *
+ * @rx_len: optional out-parameter for the number of reply bytes copied.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int mei_vsc_xfer(struct mei_vsc_hw *hw, u8 cmd, void *tx, u32 tx_len,
+			void *rx, int rx_max_len, u32 *rx_len)
+{
+	struct spi_xfer_packet *pkt;
+	struct spi_xfer_packet *ack_pkt;
+	u32 *crc;
+	int ret;
+
+	if (!tx || !rx || tx_len > MAX_SPI_MSG_SIZE)
+		return -EINVAL;
+
+	if (rx_len)
+		*rx_len = 0;
+
+	pkt = kzalloc(sizeof(*pkt) + sizeof(*ack_pkt), GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	/*
+	 * The ack packet lives right behind the request in the same
+	 * allocation; compute it only after the NULL check (pointer
+	 * arithmetic on NULL is undefined behavior).
+	 */
+	ack_pkt = pkt + 1;
+
+	pkt->hdr.sync = PACKET_SYNC;
+	pkt->hdr.cmd = cmd;
+	pkt->hdr.seq = ++hw->seq;
+	pkt->hdr.len = tx_len;
+
+	memcpy(pkt->buf, tx, tx_len);
+	crc = (u32 *)(pkt->buf + tx_len);
+	*crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt->hdr) + tx_len);
+
+	mutex_lock(&hw->mutex);
+
+	ret = spi_wakeup_request(hw);
+	if (ret) {
+		dev_err(&hw->spi->dev, "wakeup vsc FW failed\n");
+		goto out;
+	}
+
+	ret = mei_vsc_xfer_internal(hw, pkt, ack_pkt);
+	if (ret)
+		goto out;
+
+	if (ack_pkt->hdr.len > 0) {
+		int len;
+
+		len = (ack_pkt->hdr.len < rx_max_len) ? ack_pkt->hdr.len :
+							rx_max_len;
+		memcpy(rx, ack_pkt->buf, len);
+		if (rx_len)
+			*rx_len = len;
+	}
+
+out:
+	spi_wakeup_release(hw);
+	mutex_unlock(&hw->mutex);
+	kfree(pkt);
+	return ret;
+}
+
+/*
+ * Read one MEI message from the firmware via CMD_SPI_READ.  The current
+ * host wall-clock and boot-time nanoseconds are sent with the request —
+ * presumably so the firmware can align its timestamps to the host clock;
+ * confirm against the VSC firmware interface spec.
+ *
+ * Return: 0 on success; *@len receives the number of bytes copied to @buf.
+ */
+static int mei_vsc_read_raw(struct mei_vsc_hw *hw, u8 *buf, u32 max_len,
+			    u32 *len)
+{
+	struct host_timestamp ts = { 0 };
+
+	ts.realtime = ktime_to_ns(ktime_get_real());
+	ts.boottime = ktime_to_ns(ktime_get_boottime());
+
+	return mei_vsc_xfer(hw, CMD_SPI_READ, &ts, sizeof(ts), buf, max_len,
+			    len);
+}
+
+/*
+ * Write one MEI message to the firmware via CMD_SPI_WRITE.  The firmware
+ * acks with a one-byte status that is read but not interpreted here.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int mei_vsc_write_raw(struct mei_vsc_hw *hw, u8 *buf, u32 len)
+{
+	u8 status = 0;
+	u32 rx_len;	/* was int: mei_vsc_xfer() takes a u32 * */
+
+	return mei_vsc_xfer(hw, CMD_SPI_WRITE, buf, len, &status,
+			    sizeof(status), &rx_len);
+}
+
+#define LOADER_XFER_RETRY_COUNT 25
+/*
+ * Raw transfer used while the device still runs its ROM/bootloader.
+ * 32-bit words are byte-swapped in place before the transfer and the
+ * response swapped back — presumably the ROM speaks big-endian words;
+ * confirm against the VSC boot protocol spec.  Waits up to
+ * 25 * 20 ms for the ROM to deassert wakeuphost before transferring.
+ *
+ * Return: 0 on success, -EINVAL for a non-word-multiple @len, -EAGAIN
+ * when the ROM kept wakeuphost asserted, or the SPI core error.
+ */
+static int spi_rom_dev_xfer(struct mei_vsc_hw *hw, void *out_data,
+			    void *in_data, int len)
+{
+	int ret;
+	int i;
+	u32 *tmp = out_data;
+	int retry = 0;
+
+	if (len % 4 != 0)
+		return -EINVAL;
+
+	/* NOTE(review): ___constant_swab32 on runtime data; swab32() is the
+	 * conventional helper for non-constant values.
+	 */
+	for (i = 0; i < len / 4; i++)
+		tmp[i] = ___constant_swab32(tmp[i]);
+
+	mutex_lock(&hw->mutex);
+	while (retry < LOADER_XFER_RETRY_COUNT) {
+		if (!spi_rom_xfer_asserted(hw))
+			break;
+
+		msleep(20);
+		retry++;
+	}
+
+	if (retry >= LOADER_XFER_RETRY_COUNT) {
+		dev_err(&hw->spi->dev, "%s retry %d times gpio %d\n", __func__,
+			retry, spi_rom_xfer_asserted(hw));
+		mutex_unlock(&hw->mutex);
+		return -EAGAIN;
+	}
+
+	ret = spi_dev_xfer(hw, out_data, in_data, len);
+	mutex_unlock(&hw->mutex);
+	if (!in_data || ret)
+		return ret;
+
+	/* swap the response back to host order */
+	tmp = in_data;
+	for (i = 0; i < len / 4; i++)
+		tmp[i] = ___constant_swab32(tmp[i]);
+
+	return 0;
+}
+
+#define VSC_RESET_PIN_TOGGLE_INTERVAL 20
+#define VSC_ROM_BOOTUP_DELAY_TIME 10
+/*
+ * Hard-reset the VSC by pulsing the resetfw GPIO (high-low-high with 20 ms
+ * settle times), then give the ROM 10 ms to boot.
+ */
+static int vsc_reset(struct mei_device *dev)
+{
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+	gpiod_set_value_cansleep(hw->resetfw, 1);
+	msleep(VSC_RESET_PIN_TOGGLE_INTERVAL);
+	gpiod_set_value_cansleep(hw->resetfw, 0);
+	msleep(VSC_RESET_PIN_TOGGLE_INTERVAL);
+	gpiod_set_value_cansleep(hw->resetfw, 1);
+	msleep(VSC_ROM_BOOTUP_DELAY_TIME);
+	/* set default host wake pin to 1, which try to avoid unexpected host irq interrupt */
+	gpiod_set_value_cansleep(hw->wakeupfw, 1);
+	return 0;
+}
+
+/*
+ * Firmware file sets, selected by silicon strap key in check_silicon():
+ * [0] = debug-keyed silicon, [1] = production-keyed silicon.
+ * Columns: main FW image, sensor package, SKU config.
+ */
+static char *fw_name[][3] = {
+	{
+		"vsc/soc_a1/ivsc_fw_a1.bin",
+		"vsc/soc_a1/ivsc_pkg_ovti01as_0_a1.bin",
+		"vsc/soc_a1/ivsc_skucfg_ovti01as_0_1_a1.bin",
+	},
+	{
+		"vsc/soc_a1_prod/ivsc_fw_a1_prod.bin",
+		"vsc/soc_a1_prod/ivsc_pkg_ovti01as_0_a1_prod.bin",
+		"vsc/soc_a1_prod/ivsc_skucfg_ovti01as_0_1_a1_prod.bin",
+	},
+};
+
+/*
+ * Query the ROM for the silicon stepping (efuse) and strap key source, and
+ * pick the matching firmware file set.
+ *
+ * Return: 0 on success, -EIO/-EINVAL on a failed query or unsupported
+ * silicon.
+ */
+static int check_silicon(struct mei_device *dev)
+{
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+	struct vsc_rom_master_frame *frame =
+		(struct vsc_rom_master_frame *)hw->fw.tx_buf;
+	struct vsc_rom_slave_token *token =
+		(struct vsc_rom_slave_token *)hw->fw.rx_buf;
+	int ret;
+	u32 efuse1;
+	u32 strap;
+
+	dev_dbg(dev->dev, "%s size %zu %zu\n", __func__, sizeof(*frame),
+		sizeof(*token));
+	frame->magic = VSC_MAGIC_NUM;
+	frame->cmd = VSC_CMD_DUMP_MEM;
+
+	frame->data.dump_mem.addr = EFUSE1_ADDR;
+	frame->data.dump_mem.len = 4;
+
+	ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+	if (ret || token->token == VSC_TOKEN_ERROR) {
+		dev_err(dev->dev, "%s %d %d %d\n", __func__, __LINE__, ret,
+			token->token);
+		/* a token error with ret == 0 must not report success */
+		return ret ? ret : -EIO;
+	}
+
+	memset(frame, 0, sizeof(*frame));
+	memset(token, 0, sizeof(*token));
+	frame->magic = VSC_MAGIC_NUM;
+	frame->cmd = VSC_CMD_RESERVED;
+	ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+	if (ret || token->token == VSC_TOKEN_ERROR ||
+	    token->token != VSC_TOKEN_DUMP_RESP) {
+		dev_err(dev->dev, "%s %d %d %d\n", __func__, __LINE__, ret,
+			token->token);
+		return -EIO;
+	}
+
+	efuse1 = *(u32 *)token->payload;
+	dev_dbg(dev->dev, "%s efuse1=%d\n", __func__, efuse1);
+
+	/* to check the silicon main and sub version */
+	hw->fw.main_ver = (efuse1 >> SI_MAINSTEPPING_VERSION_OFFSET) &
+			  SI_MAINSTEPPING_VERSION_MASK;
+	hw->fw.sub_ver = (efuse1 >> SI_SUBSTEPPING_VERSION_OFFSET) &
+			 SI_SUBSTEPPING_VERSION_MASK;
+	if (hw->fw.main_ver != SI_MAINSTEPPING_VERSION_A) {
+		dev_err(dev->dev, "%s: silicon main version error(%d)\n",
+			__func__, hw->fw.main_ver);
+		return -EINVAL;
+	}
+	if (hw->fw.sub_ver != SI_SUBSTEPPING_VERSION_0 &&
+	    hw->fw.sub_ver != SI_SUBSTEPPING_VERSION_1) {
+		dev_dbg(dev->dev, "%s: silicon sub version error(%d)\n", __func__,
+			hw->fw.sub_ver);
+		return -EINVAL;
+	}
+
+	/* to get the silicon strap key: debug or production ? */
+	memset(frame, 0, sizeof(*frame));
+	memset(token, 0, sizeof(*token));
+	frame->magic = VSC_MAGIC_NUM;
+	frame->cmd = VSC_CMD_DUMP_MEM;
+	frame->data.dump_mem.addr = STRAP_ADDR;
+	frame->data.dump_mem.len = 4;
+
+	ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+	if (ret || token->token == VSC_TOKEN_ERROR) {
+		dev_err(dev->dev, "%s: transfer failed or invalid token\n",
+			__func__);
+		/* same as above: never return 0 on a token error */
+		return ret ? ret : -EIO;
+	}
+
+	frame->magic = VSC_MAGIC_NUM;
+	frame->cmd = VSC_CMD_RESERVED;
+	ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+	if (ret || token->token == VSC_TOKEN_ERROR ||
+	    token->token != VSC_TOKEN_DUMP_RESP) {
+		dev_err(dev->dev,
+			"%s: transfer failed or invalid token-> (token = %d)\n",
+			__func__, token->token);
+		return -EINVAL;
+	}
+
+	dev_dbg(dev->dev,
+		"%s: getting the memory(0x%0x), step 2 payload: 0x%0x\n",
+		__func__, STRAP_ADDR, *(u32 *)token->payload);
+
+	strap = *(u32 *)token->payload;
+	dev_dbg(dev->dev, "%s: strap = 0x%x\n", __func__, strap);
+
+	/* to check the silicon strap key source */
+	hw->fw.key_src =
+		(strap >> SI_STRAP_KEY_SRC_OFFSET) & SI_STRAP_KEY_SRC_MASK;
+
+	dev_dbg(dev->dev, "%s: silicon version check done: %s%s\n", __func__,
+		hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_0 ? "A0" : "A1",
+		hw->fw.key_src == SI_STRAP_KEY_SRC_DEBUG ? "" : "-prod");
+	/*
+	 * NOTE(review): file names are only assigned for A1 silicon; A0
+	 * leaves them NULL and init_hw() would pass NULL to
+	 * request_firmware() — confirm A0 is intentionally unsupported.
+	 */
+	if (hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_1) {
+		if (hw->fw.key_src == SI_STRAP_KEY_SRC_DEBUG) {
+			hw->fw.fw_file_name = fw_name[0][0];
+			hw->fw.sensor_file_name = fw_name[0][1];
+			hw->fw.sku_cnf_file_name = fw_name[0][2];
+		} else {
+			hw->fw.fw_file_name = fw_name[1][0];
+			hw->fw.sensor_file_name = fw_name[1][1];
+			hw->fw.sku_cnf_file_name = fw_name[1][2];
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the main firmware container: validate the container and per-image
+ * magics, check the security version number (SVN) against the silicon
+ * stepping, and fill hw->fw.frags[] with the bootloader, ARC-SEM, optional
+ * ACE runtime/vision/config, and EM7D fragments.
+ *
+ * Return: 0 on success, -EINVAL on any malformed or mismatched image.
+ */
+static int parse_main_fw(struct mei_device *dev, const struct firmware *fw)
+{
+	struct bootloader_sign *bootloader = NULL;
+	struct firmware_sign *arc_sem = NULL;
+	struct firmware_sign *em7d = NULL;
+	struct firmware_sign *ace_run = NULL;
+	struct firmware_sign *ace_vis = NULL;
+	struct firmware_sign *ace_conf = NULL;
+	struct vsc_boot_img *img = (struct vsc_boot_img *)fw->data;
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+	struct manifest *man = NULL;
+	struct fragment *bootl_frag = &hw->fw.frags[BOOT_IMAGE_TYPE];
+	struct fragment *arcsem_frag = &hw->fw.frags[ARC_SEM_IMG_TYPE];
+	struct fragment *acer_frag = &hw->fw.frags[ACER_IMG_TYPE];
+	struct fragment *acev_frag = &hw->fw.frags[ACEV_IMG_TYPE];
+	struct fragment *acec_frag = &hw->fw.frags[ACEC_IMG_TYPE];
+	struct fragment *em7d_frag = &hw->fw.frags[EM7D_IMG_TYPE];
+	struct firmware_sign *firmwares[IMG_CNT_MAX];
+	int i;
+
+	if (!img || img->magic != VSC_FILE_MAGIC) {
+		dev_err(dev->dev, "image file error\n");
+		return -EINVAL;
+	}
+
+	if (img->image_count < IMG_BOOT_ARC_EM7D ||
+	    img->image_count > IMG_CNT_MAX) {
+		dev_err(dev->dev, "%s: image count error: image_count=0x%x\n",
+			__func__, img->image_count);
+		return -EINVAL;
+	}
+
+	dev_dbg(dev->dev, "%s: img->image_count=%d\n", __func__,
+		img->image_count);
+
+	/* only two lower bytes are used */
+	hw->fw.fw_option = img->option & 0xFFFF;
+	/* image not include bootloader */
+	hw->fw.fw_cnt = img->image_count - 1;
+
+	bootloader =
+		(struct bootloader_sign *)(img->image_loc + img->image_count);
+	if ((u8 *)bootloader > (fw->data + fw->size))
+		return -EINVAL;
+
+	if (bootloader->magic != VSC_FW_MAGIC) {
+		dev_err(dev->dev,
+			"bootloader signed magic error! magic number 0x%08x, image size 0x%08x\n",
+			bootloader->magic, bootloader->image_size);
+		return -EINVAL;
+	}
+
+	/* the manifest sits just before the signature at the image tail */
+	man = (struct manifest *)((char *)bootloader->image +
+				  bootloader->image_size - SIG_SIZE -
+				  sizeof(struct manifest) - CSSHEADER_SIZE);
+	if (man->svn == MAX_SVN_VALUE)
+		hw->fw.svn = MAX_SVN_VALUE;
+	else if (hw->fw.svn == 0)
+		hw->fw.svn = man->svn;
+
+	dev_dbg(dev->dev, "%s: svn: 0x%08X", __func__, hw->fw.svn);
+	/* currently only support silicon version A0 | A1 */
+	if ((hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_0 &&
+	     hw->fw.svn != MAX_SVN_VALUE) ||
+	    (hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_1 &&
+	     hw->fw.svn == MAX_SVN_VALUE)) {
+		dev_err(dev->dev,
+			"silicon version and image svn not matched(A%s:0x%x)\n",
+			hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_0 ? "0" : "1",
+			hw->fw.svn);
+		return -EINVAL;
+	}
+
+	/* walk the signed images that follow the bootloader */
+	for (i = 0; i < img->image_count - 1; i++) {
+		if (i == 0) {
+			firmwares[i] =
+				(struct firmware_sign *)(bootloader->image +
+							 bootloader->image_size);
+			dev_dbg(dev->dev,
+				"FW (%d/%d) magic number 0x%08x, image size 0x%08x\n",
+				i, img->image_count, firmwares[i]->magic,
+				firmwares[i]->image_size);
+			continue;
+		}
+
+		firmwares[i] =
+			(struct firmware_sign *)(firmwares[i - 1]->image +
+						 firmwares[i - 1]->image_size);
+
+		if ((u8 *)firmwares[i] > fw->data + fw->size)
+			return -EINVAL;
+
+		dev_dbg(dev->dev,
+			"FW (%d/%d) magic number 0x%08x, image size 0x%08x\n", i,
+			img->image_count, firmwares[i]->magic,
+			firmwares[i]->image_size);
+		if (firmwares[i]->magic != VSC_FW_MAGIC) {
+			dev_err(dev->dev,
+				"FW (%d/%d) magic error! magic number 0x%08x, image size 0x%08x\n",
+				i, img->image_count, firmwares[i]->magic,
+				firmwares[i]->image_size);
+
+			return -EINVAL;
+		}
+	}
+
+	arc_sem = firmwares[0];
+	if (img->image_count >= IMG_BOOT_ARC_EM7D)
+		em7d = firmwares[img->image_count - 2];
+
+	if (img->image_count >= IMG_BOOT_ARC_ACER_EM7D)
+		ace_run = firmwares[1];
+
+	if (img->image_count >= IMG_BOOT_ARC_ACER_ACEV_EM7D)
+		ace_vis = firmwares[2];
+
+	if (img->image_count >= IMG_BOOT_ARC_ACER_ACEV_ACECNF_EM7D)
+		ace_conf = firmwares[3];
+
+	bootl_frag->data = bootloader->image;
+	bootl_frag->size = bootloader->image_size;
+	bootl_frag->location = img->image_loc[0];
+	if (!bootl_frag->location)
+		return -EINVAL;
+
+	if (!arc_sem)
+		return -EINVAL;
+	arcsem_frag->data = arc_sem->image;
+	arcsem_frag->size = arc_sem->image_size;
+	arcsem_frag->location = img->image_loc[1];
+	if (!arcsem_frag->location)
+		return -EINVAL;
+
+	if (ace_run) {
+		acer_frag->data = ace_run->image;
+		acer_frag->size = ace_run->image_size;
+		acer_frag->location = img->image_loc[2];
+		if (!acer_frag->location)
+			return -EINVAL;
+
+		if (ace_vis) {
+			acev_frag->data = ace_vis->image;
+			acev_frag->size = ace_vis->image_size;
+			/* Align to 4K boundary */
+			acev_frag->location = ((acer_frag->location +
+						acer_frag->size + 0xFFF) &
+					       ~(0xFFF));
+			/* compare the vision fragment, not the runtime one */
+			if (img->image_loc[3] &&
+			    acev_frag->location != img->image_loc[3]) {
+				dev_err(dev->dev,
+					"ACE vision image location error. img->image_loc[3] = 0x%x, calculated is 0x%x\n",
+					img->image_loc[3], acev_frag->location);
+				/* when location mismatch, use the one from image file. */
+				acev_frag->location = img->image_loc[3];
+			}
+		}
+
+		if (ace_conf) {
+			acec_frag->data = ace_conf->image;
+			acec_frag->size = ace_conf->image_size;
+			/* Align to 4K boundary */
+			acec_frag->location = ((acev_frag->location +
+						acev_frag->size + 0xFFF) &
+					       ~(0xFFF));
+			if (img->image_loc[4] &&
+			    acec_frag->location != img->image_loc[4]) {
+				dev_err(dev->dev,
+					"ACE config image location error. img->image_loc[4] = 0x%x, calculated is 0x%x\n",
+					img->image_loc[4], acec_frag->location);
+				/* when location mismatch, use the one from image file. */
+				acec_frag->location = img->image_loc[4];
+			}
+		}
+	}
+
+	/* em7d is guaranteed non-NULL by the image_count check above */
+	em7d_frag->data = em7d->image;
+	em7d_frag->size = em7d->image_size;
+	/* em7d is the last firmware */
+	em7d_frag->location = img->image_loc[img->image_count - 1];
+	if (!em7d_frag->location)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Parse the separate sensor firmware package, which carries the ACE vision
+ * and ACE config images when the main container did not include them.
+ * Locations are derived by 4K-aligning past the previously parsed
+ * fragments, with the file's own locations winning on mismatch.
+ *
+ * Return: 0 on success, -EINVAL on any malformed image.
+ */
+static int parse_sensor_fw(struct mei_device *dev, const struct firmware *fw)
+{
+	struct firmware_sign *ace_vis = NULL;
+	struct firmware_sign *ace_conf = NULL;
+	struct vsc_boot_img *img = (struct vsc_boot_img *)fw->data;
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+	struct fragment *acer_frag = &hw->fw.frags[ACER_IMG_TYPE];
+	struct fragment *acev_frag = &hw->fw.frags[ACEV_IMG_TYPE];
+	struct fragment *acec_frag = &hw->fw.frags[ACEC_IMG_TYPE];
+
+	if (!img || img->magic != VSC_FILE_MAGIC ||
+	    img->image_count < IMG_ACEV_ACECNF ||
+	    img->image_count > IMG_CNT_MAX)
+		return -EINVAL;
+
+	dev_dbg(dev->dev, "%s: img->image_count=%d\n", __func__,
+		img->image_count);
+
+	hw->fw.fw_cnt += img->image_count;
+	if (hw->fw.fw_cnt > IMG_CNT_MAX)
+		return -EINVAL;
+
+	ace_vis = (struct firmware_sign *)(img->image_loc + img->image_count);
+	ace_conf =
+		(struct firmware_sign *)(ace_vis->image + ace_vis->image_size);
+
+	dev_dbg(dev->dev,
+		"ACE vision signed magic number 0x%08x, image size 0x%08x\n",
+		ace_vis->magic, ace_vis->image_size);
+	if (ace_vis->magic != VSC_FW_MAGIC) {
+		dev_err(dev->dev,
+			"ACE vision signed magic error! magic number 0x%08x, image size 0x%08x\n",
+			ace_vis->magic, ace_vis->image_size);
+		return -EINVAL;
+	}
+
+	acev_frag->data = ace_vis->image;
+	acev_frag->size = ace_vis->image_size;
+	/* Align to 4K boundary */
+	acev_frag->location =
+		((acer_frag->location + acer_frag->size + 0xFFF) & ~(0xFFF));
+	/* compare the vision fragment itself, not the ACE runtime one */
+	if (img->image_loc[0] && acev_frag->location != img->image_loc[0]) {
+		dev_err(dev->dev,
+			"ACE vision image location error. img->image_loc[0] = 0x%x, calculated is 0x%x\n",
+			img->image_loc[0], acev_frag->location);
+		/* when location mismatch, use the one from image file. */
+		acev_frag->location = img->image_loc[0];
+	}
+
+	dev_dbg(dev->dev,
+		"ACE config signed magic number 0x%08x, image size 0x%08x\n",
+		ace_conf->magic, ace_conf->image_size);
+	if (ace_conf->magic != VSC_FW_MAGIC) {
+		dev_err(dev->dev,
+			"ACE config signed magic error! magic number 0x%08x, image size 0x%08x\n",
+			ace_conf->magic, ace_conf->image_size);
+		return -EINVAL;
+	}
+
+	acec_frag->data = ace_conf->image;
+	acec_frag->size = ace_conf->image_size;
+	/* Align to 4K boundary */
+	acec_frag->location =
+		((acev_frag->location + acev_frag->size + 0xFFF) & ~(0xFFF));
+	if (img->image_loc[1] && acec_frag->location != img->image_loc[1]) {
+		dev_err(dev->dev,
+			"ACE config image location error. img->image_loc[1] = 0x%x, calculated is 0x%x\n",
+			img->image_loc[1], acec_frag->location);
+		/* when location mismatch, use the one from image file. */
+		acec_frag->location = img->image_loc[1];
+	}
+
+	return 0;
+}
+
+/*
+ * Validate the SKU config blob: its first u32 is the payload size, and the
+ * file size must equal payload + 4-byte header and not exceed SKU_MAX_SIZE.
+ * The SKU config always loads at the fixed SKU_CONFIG_LOC address.
+ */
+static int parse_sku_cnf_fw(struct mei_device *dev, const struct firmware *fw)
+{
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+	struct fragment *skucnf_frag = &hw->fw.frags[SKU_CONF_TYPE];
+
+	if (fw->size <= sizeof(u32))
+		return -EINVAL;
+
+	skucnf_frag->data = fw->data;
+	skucnf_frag->size = *((u32 *)fw->data) + sizeof(u32);
+	/* SKU config use fixed location */
+	skucnf_frag->location = SKU_CONFIG_LOC;
+	if (fw->size != skucnf_frag->size || fw->size > SKU_MAX_SIZE) {
+		dev_err(dev->dev,
+			"sku config file size is not config size + 4, config size = 0x%x, file size=0x%zx\n",
+			skucnf_frag->size, fw->size);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Additive byte checksum over @size bytes ("CRC" in protocol naming only). */
+static u32 sum_CRC(void *data, int size)
+{
+	const u8 *bytes = data;
+	u32 sum = 0;
+	int idx;
+
+	for (idx = 0; idx < size; idx++)
+		sum += bytes[idx];
+
+	return sum;
+}
+
+/*
+ * Stream a bootloader payload to the ROM as a sequence of VSC_CMD_DL_CONT
+ * frames; end_flag marks the final chunk.
+ *
+ * Return: 0 on success, or the first transfer error.
+ */
+static int load_boot(struct mei_device *dev, const void *data, int size)
+{
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+	struct vsc_rom_master_frame *frame =
+		(struct vsc_rom_master_frame *)hw->fw.tx_buf;
+	struct vsc_rom_slave_token *token =
+		(struct vsc_rom_slave_token *)hw->fw.rx_buf;
+	const u8 *ptr = data;
+	u32 remain;
+	int ret;
+
+	if (!data || !size)
+		return -EINVAL;
+
+	dev_dbg(dev->dev, "==== %s: image payload size : %d\n", __func__, size);
+	remain = size;
+	while (remain > 0) {
+		/* chunk size is bounded by the frame's payload capacity */
+		u32 max_len = sizeof(frame->data.dl_cont.payload);
+		u32 len = remain > max_len ? max_len : remain;
+
+		memset(frame, 0, sizeof(*frame));
+		memset(token, 0, sizeof(*token));
+		frame->magic = VSC_MAGIC_NUM;
+		frame->cmd = VSC_CMD_DL_CONT;
+
+		frame->data.dl_cont.len = (u16)len;
+		frame->data.dl_cont.end_flag = (remain == len ? 1 : 0);
+		memcpy(frame->data.dl_cont.payload, ptr, len);
+
+		ret = spi_rom_dev_xfer(hw, frame, NULL, VSC_ROM_SPI_PKG_SIZE);
+		if (ret) {
+			dev_err(dev->dev, "%s: transfer failed\n", __func__);
+			break;
+		}
+
+		ptr += len;
+		remain -= len;
+	}
+
+	return ret;
+}
+
+/*
+ * Ask the ROM for its token, verify it is expecting a bootloader, then send
+ * a VSC_CMD_DL_START descriptor followed by the bootloader image payload.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int load_bootloader(struct mei_device *dev)
+{
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+	struct vsc_rom_master_frame *frame =
+		(struct vsc_rom_master_frame *)hw->fw.tx_buf;
+	struct vsc_rom_slave_token *token =
+		(struct vsc_rom_slave_token *)hw->fw.rx_buf;
+	struct fragment *fragment = &hw->fw.frags[BOOT_IMAGE_TYPE];
+	int ret;
+
+	if (!fragment->size)
+		return -EINVAL;
+
+	dev_dbg(dev->dev, "verify bootloader token ...\n");
+	frame->magic = VSC_MAGIC_NUM;
+	frame->cmd = VSC_CMD_QUERY;
+	ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+	if (ret)
+		return ret;
+
+	if (token->token != VSC_TOKEN_BOOTLOADER_REQ &&
+	    token->token != VSC_TOKEN_DUMP_RESP) {
+		dev_err(dev->dev,
+			"failed to load bootloader, invalid token 0x%x\n",
+			token->token);
+		return -EINVAL;
+	}
+	dev_dbg(dev->dev, "bootloader token has been verified\n");
+
+	dev_dbg(dev->dev, "start download, image len: %u ...\n", fragment->size);
+	memset(frame, 0, sizeof(*frame));
+	memset(token, 0, sizeof(*token));
+
+	frame->magic = VSC_MAGIC_NUM;
+	frame->cmd = VSC_CMD_DL_START;
+	frame->data.dl_start.img_type = IMG_BOOTLOADER;
+	frame->data.dl_start.img_len = fragment->size;
+	frame->data.dl_start.img_loc = fragment->location;
+	frame->data.dl_start.option = (u16)hw->fw.fw_option;
+	/* checksum covers the frame up to (excluding) the crc field */
+	frame->data.dl_start.crc =
+		sum_CRC(frame, (int)offsetof(struct vsc_rom_master_frame,
+					     data.dl_start.crc));
+	ret = spi_rom_dev_xfer(hw, frame, NULL, VSC_ROM_SPI_PKG_SIZE);
+	if (ret)
+		return ret;
+
+	dev_dbg(dev->dev, "load bootloader payload ...\n");
+	ret = load_boot(dev, fragment->data, fragment->size);
+	if (ret)
+		dev_err(dev->dev, "failed to load bootloader, err : 0x%0x\n",
+			ret);
+
+	return ret;
+}
+
+/*
+ * Stream a firmware image to the bootloader in raw FW_SPI_PKG_SIZE chunks
+ * (no per-chunk framing, unlike load_boot() which talks to the ROM).
+ *
+ * Return: 0 on success, or the first transfer error.
+ */
+static int load_fw_bin(struct mei_device *dev, const void *data, int size)
+{
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+	struct vsc_master_frame_fw_cont *frame =
+		(struct vsc_master_frame_fw_cont *)hw->fw.tx_buf;
+	struct vsc_bol_slave_token *token =
+		(struct vsc_bol_slave_token *)hw->fw.rx_buf;
+	const u8 *ptr = data;
+	int ret;
+	u32 remain;
+
+	if (!data || !size)
+		return -EINVAL;
+
+	dev_dbg(dev->dev, "==== %s: image payload size : %d\n", __func__, size);
+	remain = size;
+	while (remain > 0) {
+		u32 len = remain > FW_SPI_PKG_SIZE ? FW_SPI_PKG_SIZE : remain;
+
+		memset(frame, 0, sizeof(*frame));
+		memset(token, 0, sizeof(*token));
+		memcpy(frame->payload, ptr, len);
+
+		ret = spi_rom_dev_xfer(hw, frame, NULL, FW_SPI_PKG_SIZE);
+		if (ret) {
+			dev_err(dev->dev, "transfer failed\n");
+			break;
+		}
+
+		ptr += len;
+		remain -= len;
+	}
+
+	return ret;
+}
+
+/*
+ * Send a VSC_CMD_DL_START descriptor for @frag, then stream its payload
+ * with load_fw_bin().
+ *
+ * NOTE(review): the ack token is taken from hw->rx_buf here, while the
+ * other download helpers use hw->fw.rx_buf — confirm both point at the
+ * intended buffer.
+ */
+static int load_fw_frag(struct mei_device *dev, struct fragment *frag, int type)
+{
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+	struct vsc_fw_master_frame *frame =
+		(struct vsc_fw_master_frame *)hw->fw.tx_buf;
+	struct vsc_bol_slave_token *token =
+		(struct vsc_bol_slave_token *)hw->rx_buf;
+	int ret;
+
+	dev_dbg(dev->dev,
+		"start download firmware type %d ... loc:0x%08x, size:0x%08x\n",
+		type, frag->location, frag->size);
+	memset(frame, 0, sizeof(*frame));
+	memset(token, 0, sizeof(*token));
+	frame->magic = VSC_MAGIC_NUM;
+	frame->cmd = VSC_CMD_DL_START;
+	frame->data.dl_start.img_type = type;
+	frame->data.dl_start.img_len = frag->size;
+	frame->data.dl_start.img_loc = frag->location;
+	frame->data.dl_start.option = (u16)hw->fw.fw_option;
+	/* checksum covers the frame up to (excluding) the crc field */
+	frame->data.dl_start.crc = sum_CRC(
+		frame, offsetof(struct vsc_fw_master_frame, data.dl_start.crc));
+	ret = spi_rom_dev_xfer(hw, frame, NULL, FW_SPI_PKG_SIZE);
+	if (ret)
+		return ret;
+
+	return load_fw_bin(dev, frag->data, frag->size);
+}
+
+/*
+ * Download all parsed firmware fragments: publish the memory layout with a
+ * VSC_CMD_DL_SET frame, stream each image (ARC-SEM and EM7D are mandatory,
+ * the ACE images and SKU config optional), then issue the boot command.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int load_fw(struct mei_device *dev)
+{
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+	struct vsc_fw_master_frame *frame =
+		(struct vsc_fw_master_frame *)hw->fw.tx_buf;
+	struct vsc_bol_slave_token *token =
+		(struct vsc_bol_slave_token *)hw->rx_buf;
+	struct fragment *arcsem_frag = NULL;
+	struct fragment *em7d_frag = NULL;
+	struct fragment *acer_frag = NULL;
+	struct fragment *acev_frag = NULL;
+	struct fragment *acec_frag = NULL;
+	struct fragment *skucnf_frag = NULL;
+	int index = 0;
+	int ret;
+
+	if (hw->fw.frags[ARC_SEM_IMG_TYPE].size > 0)
+		arcsem_frag = &hw->fw.frags[ARC_SEM_IMG_TYPE];
+
+	if (hw->fw.frags[EM7D_IMG_TYPE].size > 0)
+		em7d_frag = &hw->fw.frags[EM7D_IMG_TYPE];
+
+	if (hw->fw.frags[ACER_IMG_TYPE].size > 0)
+		acer_frag = &hw->fw.frags[ACER_IMG_TYPE];
+
+	if (hw->fw.frags[ACEV_IMG_TYPE].size > 0)
+		acev_frag = &hw->fw.frags[ACEV_IMG_TYPE];
+
+	if (hw->fw.frags[ACEC_IMG_TYPE].size > 0)
+		acec_frag = &hw->fw.frags[ACEC_IMG_TYPE];
+
+	if (hw->fw.frags[SKU_CONF_TYPE].size > 0)
+		skucnf_frag = &hw->fw.frags[SKU_CONF_TYPE];
+
+	if (!arcsem_frag || !em7d_frag) {
+		dev_err(dev->dev, "invalid image or signature data\n");
+		return -EINVAL;
+	}
+
+	/* send dl_set frame */
+	dev_dbg(dev->dev, "send dl_set frame ...\n");
+	memset(frame, 0, sizeof(*frame));
+	memset(token, 0, sizeof(*token));
+
+	frame->magic = VSC_MAGIC_NUM;
+	frame->cmd = VSC_CMD_DL_SET;
+	frame->data.dl_set.option = (u16)hw->fw.fw_option;
+	frame->data.dl_set.img_cnt = (u8)hw->fw.fw_cnt;
+	dev_dbg(dev->dev, "%s: img_cnt = %d ...\n", __func__,
+		frame->data.dl_set.img_cnt);
+
+	/* payload is (location, size) pairs, terminated by a checksum */
+	frame->data.dl_set.payload[index++] = arcsem_frag->location;
+	frame->data.dl_set.payload[index++] = arcsem_frag->size;
+	if (acer_frag) {
+		frame->data.dl_set.payload[index++] = acer_frag->location;
+		frame->data.dl_set.payload[index++] = acer_frag->size;
+		if (acev_frag) {
+			frame->data.dl_set.payload[index++] =
+				acev_frag->location;
+			frame->data.dl_set.payload[index++] = acev_frag->size;
+		}
+		if (acec_frag) {
+			frame->data.dl_set.payload[index++] =
+				acec_frag->location;
+			frame->data.dl_set.payload[index++] = acec_frag->size;
+		}
+	}
+	frame->data.dl_set.payload[index++] = em7d_frag->location;
+	frame->data.dl_set.payload[index++] = em7d_frag->size;
+	frame->data.dl_set.payload[hw->fw.fw_cnt * 2] = sum_CRC(
+		frame, (int)offsetof(struct vsc_fw_master_frame,
+				     data.dl_set.payload[hw->fw.fw_cnt * 2]));
+
+	ret = spi_rom_dev_xfer(hw, frame, NULL, FW_SPI_PKG_SIZE);
+	if (ret)
+		return ret;
+
+	/* load ARC-SEM FW image */
+	if (arcsem_frag) {
+		ret = load_fw_frag(dev, arcsem_frag, IMG_ARCSEM);
+		if (ret)
+			return ret;
+	}
+
+	/* load ACE FW image */
+	if (acer_frag) {
+		ret = load_fw_frag(dev, acer_frag, IMG_ACE_RUNTIME);
+		if (ret)
+			return ret;
+	}
+
+	if (acev_frag) {
+		ret = load_fw_frag(dev, acev_frag, IMG_ACE_VISION);
+		if (ret)
+			return ret;
+	}
+
+	if (acec_frag) {
+		ret = load_fw_frag(dev, acec_frag, IMG_ACE_CONFIG);
+		if (ret)
+			return ret;
+	}
+
+	/* load EM7D FW image */
+	if (em7d_frag) {
+		ret = load_fw_frag(dev, em7d_frag, IMG_EM7D);
+		if (ret)
+			return ret;
+	}
+
+	/* load SKU Config */
+	if (skucnf_frag) {
+		ret = load_fw_frag(dev, skucnf_frag, IMG_SKU_CONFIG);
+		if (ret)
+			return ret;
+	}
+
+	memset(frame, 0, sizeof(*frame));
+	frame->magic = VSC_MAGIC_NUM;
+	/* NOTE(review): a VSC_TOKEN_* value used as a command code —
+	 * confirm against the VSC_CMD_* definitions in hw-vsc.h.
+	 */
+	frame->cmd = VSC_TOKEN_CAM_BOOT;
+	/* NOTE(review): checksum offset uses data.dl_start.crc rather than
+	 * data.boot.check_sum — confirm this is the intended coverage.
+	 */
+	frame->data.boot.check_sum = sum_CRC(
+		frame, offsetof(struct vsc_fw_master_frame, data.dl_start.crc));
+	ret = spi_rom_dev_xfer(hw, frame, NULL, FW_SPI_PKG_SIZE);
+	if (ret)
+		dev_err(dev->dev, "failed to boot fw, err : 0x%x\n", ret);
+
+	return ret;
+}
+
+/*
+ * Bring up the VSC: identify the silicon, fetch and parse the main, sensor
+ * and SKU-config firmware files, then download the bootloader and firmware.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int init_hw(struct mei_device *dev)
+{
+	int ret;
+	const struct firmware *fw = NULL;
+	const struct firmware *sensor_fw = NULL;
+	const struct firmware *sku_cnf_fw = NULL;
+	struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+	ret = check_silicon(dev);
+	if (ret)
+		return ret;
+
+	dev_dbg(dev->dev,
+		"%s: FW files. Firmware Boot File: %s, Sensor FW File: %s, Sku Config File: %s\n",
+		__func__, hw->fw.fw_file_name, hw->fw.sensor_file_name,
+		hw->fw.sku_cnf_file_name);
+	ret = request_firmware(&fw, hw->fw.fw_file_name, dev->dev);
+	if (ret < 0 || !fw) {
+		dev_err(&hw->spi->dev, "file not found %s\n",
+			hw->fw.fw_file_name);
+		return ret;
+	}
+
+	ret = parse_main_fw(dev, fw);
+	if (ret) {
+		dev_err(&hw->spi->dev, "parse fw %s failed\n",
+			hw->fw.fw_file_name);
+		goto release;
+	}
+
+	/* the sensor package is only needed when the main file lacks it */
+	if (hw->fw.fw_cnt < IMG_ARC_ACER_ACEV_ACECNF_EM7D) {
+		ret = request_firmware(&sensor_fw, hw->fw.sensor_file_name,
+				       dev->dev);
+		if (ret < 0 || !sensor_fw) {
+			dev_err(&hw->spi->dev, "file not found %s\n",
+				hw->fw.sensor_file_name);
+			goto release;
+		}
+		ret = parse_sensor_fw(dev, sensor_fw);
+		if (ret) {
+			dev_err(&hw->spi->dev, "parse fw %s failed\n",
+				hw->fw.sensor_file_name);
+			goto release_sensor;
+		}
+	}
+
+	ret = request_firmware(&sku_cnf_fw, hw->fw.sku_cnf_file_name, dev->dev);
+	if (ret < 0 || !sku_cnf_fw) {
+		dev_err(&hw->spi->dev, "file not found %s\n",
+			hw->fw.sku_cnf_file_name);
+		goto release_sensor;
+	}
+
+	ret = parse_sku_cnf_fw(dev, sku_cnf_fw);
+	if (ret) {
+		/* report the SKU config file, not the sensor file */
+		dev_err(&hw->spi->dev, "parse fw %s failed\n",
+			hw->fw.sku_cnf_file_name);
+		goto release_cnf;
+	}
+
+	ret = load_bootloader(dev);
+	if (ret)
+		goto release_cnf;
+
+	ret = load_fw(dev);
+	if (ret)
+		goto release_cnf;
+
+	/*
+	 * NOTE(review): on success the three firmware blobs are kept;
+	 * hw->fw.frags[] still point into their data — confirm whether a
+	 * later reset path reuses them or this is a leak.
+	 */
+	return 0;
+
+release_cnf:
+	release_firmware(sku_cnf_fw);
+release_sensor:
+	release_firmware(sensor_fw);
+release:
+	release_firmware(fw);
+	return ret;
+}
+
+/**
+ * mei_vsc_fw_status - read fw status
+ *
+ * @dev: mei device
+ * @fw_status: fw status
+ *
+ * The SPI transport exposes no firmware status registers, so the count is
+ * simply zeroed (no PCI config space is involved, unlike other transports).
+ *
+ * Return: 0 on success, error otherwise
+ */
+static int mei_vsc_fw_status(struct mei_device *dev,
+			     struct mei_fw_status *fw_status)
+{
+	if (!fw_status)
+		return -EINVAL;
+
+	fw_status->count = 0;
+	return 0;
+}
+
+/**
+ * mei_vsc_pg_state - translate internal pg state
+ *   to the mei power gating state
+ *
+ * @dev: mei device
+ *
+ * Power gating is not tracked on this transport.
+ *
+ * Return: always MEI_PG_OFF
+ */
+static inline enum mei_pg_state mei_vsc_pg_state(struct mei_device *dev)
+{
+	return MEI_PG_OFF;
+}
+
+/**
+ * mei_vsc_intr_enable - enables mei device interrupts
+ *
+ * Re-enables the wakeuphost GPIO interrupt line; pairs with
+ * mei_vsc_intr_disable().
+ *
+ * @dev: the device structure
+ */
+static void mei_vsc_intr_enable(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ enable_irq(hw->wakeuphostint);
+}
+
+/**
+ * mei_vsc_intr_disable - disables mei device interrupts
+ *
+ * Masks the wakeuphost GPIO interrupt line; pairs with
+ * mei_vsc_intr_enable().
+ *
+ * @dev: the device structure
+ */
+static void mei_vsc_intr_disable(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ disable_irq(hw->wakeuphostint);
+}
+
+/**
+ * mei_vsc_intr_clear - clear and stop interrupts
+ *
+ * Intentionally a no-op: the GPIO interrupt has no status register
+ * to acknowledge; masking is done via mei_vsc_intr_disable().
+ *
+ * @dev: the device structure
+ */
+static void mei_vsc_intr_clear(struct mei_device *dev)
+{
+ ;
+}
+
+/**
+ * mei_vsc_synchronize_irq - wait for pending IRQ handlers
+ *
+ * Blocks until any in-flight handler of the wakeuphost interrupt
+ * has finished executing.
+ *
+ * @dev: the device structure
+ */
+static void mei_vsc_synchronize_irq(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ synchronize_irq(hw->wakeuphostint);
+}
+
+/**
+ * mei_vsc_hw_config - configure hw dependent settings
+ *
+ * No hw-specific configuration is needed for the SPI transport.
+ *
+ * @dev: mei device
+ *
+ * Return: always 0
+ */
+static int mei_vsc_hw_config(struct mei_device *dev)
+{
+ return 0;
+}
+
+/**
+ * mei_vsc_host_set_ready - enable device
+ *
+ * Only records the ready state in software; there is no host-ready
+ * register to program on this transport.
+ *
+ * @dev: mei device
+ */
+static void mei_vsc_host_set_ready(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ hw->host_ready = true;
+}
+
+/**
+ * mei_vsc_host_is_ready - check whether the host has turned ready
+ *
+ * Reads back the software flag set by mei_vsc_host_set_ready().
+ *
+ * @dev: mei device
+ * Return: bool
+ */
+static bool mei_vsc_host_is_ready(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ return hw->host_ready;
+}
+
+/**
+ * mei_vsc_hw_is_ready - check whether the me(hw) has turned ready
+ *
+ * Reads back the flag set once mei_vsc_hw_start() has observed a
+ * successful raw read from the firmware.
+ *
+ * @dev: mei device
+ * Return: bool
+ */
+static bool mei_vsc_hw_is_ready(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ return hw->fw_ready;
+}
+
+/**
+ * mei_vsc_hw_start - hw start routine
+ *
+ * Polls the SPI link until the firmware answers a raw read, i.e.
+ * until the device signals it is ready, or the timeout budget is
+ * exhausted.
+ *
+ * @dev: mei device
+ * Return: 0 on success, -ENODEV if the firmware never became ready
+ */
+#define MEI_SPI_START_TIMEOUT 200
+static int mei_vsc_hw_start(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ u8 buf;
+ int len;
+ int ret = -ENODEV;
+ int timeout = MEI_SPI_START_TIMEOUT;
+
+ mei_vsc_host_set_ready(dev);
+ atomic_set(&hw->lock_cnt, 0);
+ mei_vsc_intr_enable(dev);
+
+ /* wait for FW ready */
+ while (timeout > 0) {
+ msleep(50);
+ timeout -= 50;
+ ret = mei_vsc_read_raw(hw, &buf, sizeof(buf), &len);
+ if (!ret)
+ break;
+ }
+
+ /*
+ * Judge success by the read result, not by the remaining time:
+ * a read that succeeds on the last iteration (timeout now <= 0)
+ * must not be reported as a failure.
+ */
+ if (ret)
+ return -ENODEV;
+
+ dev_dbg(dev->dev, "hw is ready\n");
+ hw->fw_ready = true;
+ return 0;
+}
+
+/**
+ * mei_vsc_hbuf_is_ready - checks if host buf is empty.
+ *
+ * The host buffer counts as ready when no write is currently in
+ * flight (see mei_vsc_write()).
+ *
+ * @dev: the device structure
+ *
+ * Return: true if empty, false - otherwise.
+ */
+static bool mei_vsc_hbuf_is_ready(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ return !hw->write_lock_cnt;
+}
+
+/**
+ * mei_vsc_hbuf_empty_slots - counts write empty slots.
+ *
+ * The SPI transport has no real slot accounting; the full buffer
+ * capacity is always reported.
+ *
+ * @dev: the device structure
+ *
+ * Return: empty slots count
+ */
+static int mei_vsc_hbuf_empty_slots(struct mei_device *dev)
+{
+ return MAX_MEI_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+/**
+ * mei_vsc_hbuf_depth - returns depth of the hw buf.
+ *
+ * Fixed capacity; matches mei_vsc_hbuf_empty_slots().
+ *
+ * @dev: the device structure
+ *
+ * Return: size of hw buf in slots
+ */
+static u32 mei_vsc_hbuf_depth(const struct mei_device *dev)
+{
+ return MAX_MEI_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+/**
+ * mei_vsc_write - writes a message to FW.
+ *
+ * Copies header and payload back-to-back into the transmit buffer
+ * and hands them to the raw SPI writer in one transfer.
+ *
+ * @dev: the device structure
+ * @hdr: header of message
+ * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes)
+ * @data: payload
+ * @data_len: payload length in bytes
+ *
+ * Return: 0 if success, < 0 - otherwise.
+ */
+static int mei_vsc_write(struct mei_device *dev, const void *hdr,
+ size_t hdr_len, const void *data, size_t data_len)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ int ret;
+ char *buf = hw->tx_buf;
+
+ /*
+ * tx_buf holds header plus payload, so bound the combined
+ * length - limiting data_len alone would still allow the
+ * second memcpy below to overflow tx_buf.
+ */
+ if (WARN_ON(!hdr || !data || hdr_len & 0x3 ||
+ hdr_len + data_len > MAX_SPI_MSG_SIZE)) {
+ dev_err(dev->dev,
+ "%s error write msg hdr_len %zu data_len %zu\n",
+ __func__, hdr_len, data_len);
+ return -EINVAL;
+ }
+
+ hw->write_lock_cnt++;
+ memcpy(buf, hdr, hdr_len);
+ memcpy(buf + hdr_len, data, data_len);
+ dev_dbg(dev->dev, "%s %d" MEI_HDR_FMT, __func__, hw->write_lock_cnt,
+ MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
+
+ ret = mei_vsc_write_raw(hw, buf, hdr_len + data_len);
+ if (ret)
+ dev_err(dev->dev, MEI_HDR_FMT "hdr_len %zu data len %zu\n",
+ MEI_HDR_PRM((struct mei_msg_hdr *)hdr), hdr_len,
+ data_len);
+
+ hw->write_lock_cnt--;
+ return ret;
+}
+
+/**
+ * mei_vsc_read
+ * read spi message
+ *
+ * Fills hw->rx_buf/hw->rx_len with a full raw SPI message; the
+ * payload is consumed later by mei_vsc_read_slots().
+ *
+ * @dev: the device structure
+ *
+ * Return: mei hdr value (u32), or 0 when the read failed or was
+ * too short to contain a header
+ */
+static inline u32 mei_vsc_read(const struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ int ret;
+
+ ret = mei_vsc_read_raw(hw, hw->rx_buf, sizeof(hw->rx_buf), &hw->rx_len);
+ if (ret || hw->rx_len < sizeof(u32))
+ return 0;
+
+ return *(u32 *)hw->rx_buf;
+}
+
+/**
+ * mei_vsc_count_full_read_slots - counts read full slots.
+ *
+ * No real slot accounting on SPI; the maximum capacity is always
+ * reported and -EOVERFLOW is never returned here.
+ *
+ * @dev: the device structure
+ *
+ * Return: filled slots count
+ */
+static int mei_vsc_count_full_read_slots(struct mei_device *dev)
+{
+ return MAX_MEI_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+/**
+ * mei_vsc_read_slots - reads a message from mei device.
+ *
+ * Copies the payload of the message previously fetched into
+ * hw->rx_buf by mei_vsc_read(); assumes that read has already
+ * happened for this message.
+ *
+ * @dev: the device structure
+ * @buf: message buf will be written
+ * @len: message size will be read
+ *
+ * Return: always 0
+ */
+static int mei_vsc_read_slots(struct mei_device *dev, unsigned char *buf,
+ unsigned long len)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct mei_msg_hdr *hdr;
+
+ /* sanity: caller's length must match the buffered header */
+ hdr = (struct mei_msg_hdr *)hw->rx_buf;
+ WARN_ON(len != hdr->length || hdr->length + sizeof(*hdr) != hw->rx_len);
+ memcpy(buf, hw->rx_buf + sizeof(*hdr), len);
+ return 0;
+}
+
+/**
+ * mei_vsc_pg_in_transition - is device now in pg transition
+ *
+ * A pg transition is in progress while the current event lies in
+ * the [WAIT, INTR_WAIT] range.
+ *
+ * @dev: the device structure
+ *
+ * Return: true if in pg transition, false otherwise
+ */
+static bool mei_vsc_pg_in_transition(struct mei_device *dev)
+{
+ if (dev->pg_event < MEI_PG_EVENT_WAIT)
+ return false;
+
+ return dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
+}
+
+/**
+ * mei_vsc_pg_is_enabled - detect if PG is supported by HW
+ *
+ * Power gating is not implemented for the SPI transport.
+ *
+ * @dev: the device structure
+ *
+ * Return: always false
+ */
+static bool mei_vsc_pg_is_enabled(struct mei_device *dev)
+{
+ return false;
+}
+
+/**
+ * mei_vsc_hw_reset - resets fw.
+ *
+ * Masks interrupts, resets the device and - unless we are on a
+ * disconnect path (suspend/remove/shutdown) - re-runs the hw
+ * bring-up and restarts the transport sequence counter.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupt should be enabled after reset.
+ *
+ * Return: 0 on success an error code otherwise
+ */
+static int mei_vsc_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ int ret;
+
+ mei_vsc_intr_disable(dev);
+ ret = vsc_reset(dev);
+ if (ret)
+ return ret;
+
+ /* going away: skip re-initialization */
+ if (hw->disconnect)
+ return 0;
+
+ ret = init_hw(dev);
+ if (ret)
+ return -ENODEV;
+
+ /* fresh link: restart the SPI transport sequence numbering */
+ hw->seq = 0;
+ return 0;
+}
+
+/**
+ * mei_vsc_irq_quick_handler - The ISR of the MEI device
+ *
+ * Records the event and wakes any raw-transfer waiter; defers the
+ * heavy lifting to the threaded handler except during init/reset,
+ * when the waiter alone consumes the event.
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: irqreturn_t
+ */
+irqreturn_t mei_vsc_irq_quick_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *)dev_id;
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ dev_dbg(dev->dev, "interrupt top half lock_cnt %d state %d\n",
+ atomic_read(&hw->lock_cnt), dev->dev_state);
+
+ atomic_inc(&hw->lock_cnt);
+ wake_up(&hw->xfer_wait);
+ /* during init/reset the xfer path handles the event itself */
+ if (dev->dev_state == MEI_DEV_INITIALIZING ||
+ dev->dev_state == MEI_DEV_RESETTING)
+ return IRQ_HANDLED;
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mei_vsc_irq_thread_handler - function called after ISR to handle the interrupt
+ * processing.
+ *
+ * Drains all pending reads, services pending writes, completes
+ * finished requests and loops back if the device signalled more
+ * data while we were writing.
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: irqreturn_t
+ *
+ */
+irqreturn_t mei_vsc_irq_thread_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *)dev_id;
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct list_head cmpl_list;
+ s32 slots;
+ int rets = 0;
+
+ dev_dbg(dev->dev,
+ "function called after ISR to handle the interrupt processing dev->dev_state=%d.\n",
+ dev->dev_state);
+
+ /* initialize our complete list */
+ mutex_lock(&dev->device_lock);
+ INIT_LIST_HEAD(&cmpl_list);
+
+ /* check slots available for reading */
+ slots = mei_count_full_read_slots(dev);
+ dev_dbg(dev->dev, "slots to read = %08x\n", slots);
+
+reread:
+ while (spi_need_read(hw)) {
+ dev_dbg(dev->dev, "slots to read in = %08x\n", slots);
+ rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
+ /* There is a race between ME write and interrupt delivery:
+ * Not all data is always available immediately after the
+ * interrupt, so try to read again on the next interrupt.
+ */
+ if (rets == -ENODATA)
+ break;
+
+ /* unexpected read error outside reset/power-down: recover
+ * via a scheduled full reset
+ */
+ if (rets && (dev->dev_state != MEI_DEV_RESETTING &&
+ dev->dev_state != MEI_DEV_POWER_DOWN)) {
+ dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
+ rets);
+ schedule_work(&dev->reset_work);
+ goto end;
+ }
+ }
+ dev_dbg(dev->dev, "slots to read out = %08x\n", slots);
+
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ rets = mei_irq_write_handler(dev, &cmpl_list);
+
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ mei_irq_compl_handler(dev, &cmpl_list);
+
+ /* writes may have triggered more incoming data - drain again */
+ if (spi_need_read(hw))
+ goto reread;
+
+end:
+ dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+}
+
+static const struct mei_hw_ops mei_vsc_hw_ops = {
+
+ .fw_status = mei_vsc_fw_status,
+ .pg_state = mei_vsc_pg_state,
+
+ .host_is_ready = mei_vsc_host_is_ready,
+
+ .hw_is_ready = mei_vsc_hw_is_ready,
+ .hw_reset = mei_vsc_hw_reset,
+ .hw_config = mei_vsc_hw_config,
+ .hw_start = mei_vsc_hw_start,
+
+ .pg_in_transition = mei_vsc_pg_in_transition,
+ .pg_is_enabled = mei_vsc_pg_is_enabled,
+
+ .intr_clear = mei_vsc_intr_clear,
+ .intr_enable = mei_vsc_intr_enable,
+ .intr_disable = mei_vsc_intr_disable,
+ .synchronize_irq = mei_vsc_synchronize_irq,
+
+ .hbuf_free_slots = mei_vsc_hbuf_empty_slots,
+ .hbuf_is_ready = mei_vsc_hbuf_is_ready,
+ .hbuf_depth = mei_vsc_hbuf_depth,
+ .write = mei_vsc_write,
+
+ .rdbuf_full_slots = mei_vsc_count_full_read_slots,
+ .read_hdr = mei_vsc_read,
+ .read = mei_vsc_read_slots
+};
+
+/**
+ * mei_vsc_dev_init - allocates and initializes the mei device structure
+ *
+ * Allocates mei_device and mei_vsc_hw in one devm allocation (hw
+ * lives directly behind the device struct and is reached through
+ * the to_vsc_hw() accessor).
+ *
+ * @parent: device associated with physical device (spi/platform)
+ *
+ * Return: The mei_device pointer on success, NULL on failure.
+ */
+struct mei_device *mei_vsc_dev_init(struct device *parent)
+{
+ struct mei_device *dev;
+ struct mei_vsc_hw *hw;
+
+ dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ mei_device_init(dev, parent, &mei_vsc_hw_ops);
+ dev->fw_f_fw_ver_supported = 0;
+ dev->kind = 0;
+ return dev;
+}
diff --git a/drivers/misc/mei/hw-vsc.h b/drivers/misc/mei/hw-vsc.h
new file mode 100644
index 000000000000..228edb77fab3
--- /dev/null
+++ b/drivers/misc/mei/hw-vsc.h
@@ -0,0 +1,377 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+
+#ifndef _MEI_HW_SPI_H_
+#define _MEI_HW_SPI_H_
+
+#include <linux/irqreturn.h>
+#include <linux/spi/spi.h>
+#include <linux/mei.h>
+#include <linux/types.h>
+
+#include "mei_dev.h"
+#include "client.h"
+
+struct mei_cfg {
+ const struct mei_fw_status fw_status;
+ const char *kind;
+ u32 fw_ver_supported : 1;
+ u32 hw_trc_supported : 1;
+};
+
+enum FRAG_TYPE {
+ BOOT_IMAGE_TYPE,
+ ARC_SEM_IMG_TYPE,
+ EM7D_IMG_TYPE,
+ ACER_IMG_TYPE,
+ ACEV_IMG_TYPE,
+ ACEC_IMG_TYPE,
+ SKU_CONF_TYPE,
+ FRAGMENT_TYPE_MAX,
+};
+
+struct fragment {
+ enum FRAG_TYPE type;
+ u32 location;
+ const u8 *data;
+ u32 size;
+};
+
+irqreturn_t mei_vsc_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_vsc_irq_thread_handler(int irq, void *dev_id);
+struct mei_device *mei_vsc_dev_init(struct device *parent);
+
+#define VSC_MAGIC_NUM 0x49505343
+#define VSC_FILE_MAGIC 0x46564353
+#define VSC_FW_MAGIC 0x49574653
+#define VSC_ROM_SPI_PKG_SIZE 256
+#define FW_SPI_PKG_SIZE 512
+
+#define IMG_MAX_LOC (0x50FFFFFF)
+#define FW_MAX_SIZE (0x200000)
+#define SKU_CONFIG_LOC (0x5001A000)
+#define SKU_MAX_SIZE (4100)
+
+#define IMG_DMA_ENABLE_OPTION (1 << 0)
+
+#define SIG_SIZE 384
+#define PUBKEY_SIZE 384
+#define CSSHEADER_SIZE 128
+
+#define VSC_CMD_QUERY 0
+#define VSC_CMD_DL_SET 1
+#define VSC_CMD_DL_START 2
+#define VSC_CMD_DL_CONT 3
+#define VSC_CMD_DUMP_MEM 4
+#define VSC_CMD_SET_REG 5
+#define VSC_CMD_PRINT_ROM_VERSION 6
+#define VSC_CMD_WRITE_FLASH 7
+#define VSC_CMD_RESERVED 8
+
+enum IMAGE_TYPE {
+ IMG_DEBUG,
+ IMG_BOOTLOADER,
+ IMG_EM7D,
+ IMG_ARCSEM,
+ IMG_ACE_RUNTIME,
+ IMG_ACE_VISION,
+ IMG_ACE_CONFIG,
+ IMG_SKU_CONFIG
+};
+
+/* image count defines; refer to Clover Fall Boot ROM HLD 1.0 */
+#define IMG_ACEV_ACECNF 2
+#define IMG_BOOT_ARC_EM7D 3
+#define IMG_BOOT_ARC_ACER_EM7D 4
+#define IMG_BOOT_ARC_ACER_ACEV_EM7D 5
+#define IMG_BOOT_ARC_ACER_ACEV_ACECNF_EM7D 6
+#define IMG_ARC_ACER_ACEV_ACECNF_EM7D (IMG_BOOT_ARC_ACER_ACEV_ACECNF_EM7D - 1)
+#define IMG_CNT_MAX IMG_BOOT_ARC_ACER_ACEV_ACECNF_EM7D
+
+#define VSC_TOKEN_BOOTLOADER_REQ 1
+#define VSC_TOKEN_FIRMWARE_REQ 2
+#define VSC_TOKEN_DOWNLOAD_CONT 3
+#define VSC_TOKEN_DUMP_RESP 4
+#define VSC_TOKEN_DUMP_CONT 5
+#define VSC_TOKEN_SKU_CONFIG_REQ 6
+#define VSC_TOKEN_ERROR 7
+#define VSC_TOKEN_DUMMY 8
+#define VSC_TOKEN_CAM_STATUS_RESP 9
+#define VSC_TOKEN_CAM_BOOT 10
+
+#define MAX_SVN_VALUE (0xFFFFFFFE)
+
+#define EFUSE1_ADDR (0xE0030000 + 0x38)
+#define STRAP_ADDR (0xE0030000 + 0x100)
+
+#define SI_MAINSTEPPING_VERSION_OFFSET (4)
+#define SI_MAINSTEPPING_VERSION_MASK (0xF)
+#define SI_MAINSTEPPING_VERSION_A (0x0)
+#define SI_MAINSTEPPING_VERSION_B (0x1)
+#define SI_MAINSTEPPING_VERSION_C (0x2)
+
+#define SI_SUBSTEPPING_VERSION_OFFSET (0x0)
+#define SI_SUBSTEPPING_VERSION_MASK (0xF)
+#define SI_SUBSTEPPING_VERSION_0 (0x0)
+#define SI_SUBSTEPPING_VERSION_0_PRIME (0x1)
+#define SI_SUBSTEPPING_VERSION_1 (0x2)
+#define SI_SUBSTEPPING_VERSION_1_PRIME (0x3)
+
+#define SI_STRAP_KEY_SRC_OFFSET (16)
+#define SI_STRAP_KEY_SRC_MASK (0x1)
+
+#define SI_STRAP_KEY_SRC_DEBUG (0x0)
+#define SI_STRAP_KEY_SRC_PRODUCT (0x1)
+
+struct vsc_rom_master_frame {
+ u32 magic;
+ u8 cmd;
+ union {
+ struct {
+ u8 img_type;
+ u16 option;
+ u32 img_len;
+ u32 img_loc;
+ u32 crc;
+ u8 res[0];
+ } __packed dl_start;
+ struct {
+ u8 option;
+ u16 img_cnt;
+ u32 payload[(VSC_ROM_SPI_PKG_SIZE - 8) / 4];
+ } __packed dl_set;
+ struct {
+ u8 end_flag;
+ u16 len;
+ u8 payload[VSC_ROM_SPI_PKG_SIZE - 8];
+ } __packed dl_cont;
+ struct {
+ u8 res;
+ u16 len;
+ u32 addr;
+#define ROM_DUMP_MEM_RESERVE_SIZE 12
+ u8 payload[VSC_ROM_SPI_PKG_SIZE -
+ ROM_DUMP_MEM_RESERVE_SIZE];
+ } __packed dump_mem;
+ struct {
+ u8 res[3];
+ u32 addr;
+ u32 val;
+#define ROM_SET_REG_RESERVE_SIZE 16
+ u8 payload[VSC_ROM_SPI_PKG_SIZE -
+ ROM_SET_REG_RESERVE_SIZE];
+ } __packed set_reg;
+ struct {
+ u8 ins[0];
+ } __packed undoc_f1;
+ struct {
+ u32 addr;
+ u32 len;
+ u8 payload[0];
+ } __packed os_dump_mem;
+ u8 reserve[VSC_ROM_SPI_PKG_SIZE - 5];
+ } data;
+} __packed;
+
+struct vsc_fw_master_frame {
+ u32 magic;
+ u8 cmd;
+ union {
+ struct {
+ u16 option;
+ u8 img_type;
+ u32 img_len;
+ u32 img_loc;
+ u32 crc;
+ u8 res[0];
+ } __packed dl_start;
+ struct {
+ u16 option;
+ u8 img_cnt;
+ u32 payload[(FW_SPI_PKG_SIZE - 8) / 4];
+ } __packed dl_set;
+ struct {
+ u8 end_flag;
+ u16 len;
+ u8 payload[FW_SPI_PKG_SIZE - 8];
+ } __packed dl_cont;
+ struct {
+ u32 addr;
+ u8 len;
+ u8 payload[0];
+ } __packed dump_mem;
+ struct {
+ u32 addr;
+ u32 val;
+ u8 payload[0];
+ } __packed set_reg;
+ struct {
+ u8 ins[0];
+ } __packed undoc_f1;
+ struct {
+ u32 addr;
+ u32 len;
+ u8 payload[0];
+ } __packed os_dump_mem;
+ struct {
+ u8 resv[3];
+ u32 check_sum;
+#define LOADER_BOOT_RESERVE_SIZE 12
+ u8 payload[FW_SPI_PKG_SIZE - LOADER_BOOT_RESERVE_SIZE];
+ } __packed boot;
+ u8 reserve[FW_SPI_PKG_SIZE - 5];
+ } data;
+} __packed;
+
+struct vsc_master_frame_fw_cont {
+ u8 payload[FW_SPI_PKG_SIZE];
+} __packed;
+
+struct vsc_rom_slave_token {
+ u32 magic;
+ u8 token;
+ u8 type;
+ u8 res[2];
+ u8 payload[VSC_ROM_SPI_PKG_SIZE - 8];
+} __packed;
+
+struct vsc_bol_slave_token {
+ u32 magic;
+ u8 token;
+ u8 type;
+ u8 res[2];
+ u8 payload[FW_SPI_PKG_SIZE - 8];
+} __packed;
+
+struct vsc_boot_img {
+ u32 magic;
+ u32 option;
+ u32 image_count;
+ u32 image_loc[IMG_CNT_MAX];
+} __packed;
+
+struct vsc_sensor_img_t {
+ u32 magic;
+ u32 option;
+ u32 image_count;
+ u32 image_loc[IMG_ACEV_ACECNF];
+} __packed;
+
+struct bootloader_sign {
+ u32 magic;
+ u32 image_size;
+ u8 image[0];
+} __packed;
+
+struct manifest {
+ u32 svn;
+ u32 header_ver;
+ u32 comp_flags;
+ u32 comp_name;
+ u32 comp_vendor_name;
+ u32 module_size;
+ u32 module_addr;
+} __packed;
+
+struct firmware_sign {
+ u32 magic;
+ u32 image_size;
+ u8 image[1];
+} __packed;
+
+/* spi transport layer */
+#define PACKET_SYNC 0x31
+#define MAX_SPI_MSG_SIZE 2048
+#define MAX_MEI_MSG_SIZE 512
+
+#define CRC_SIZE sizeof(u32)
+#define PACKET_SIZE(pkt) (sizeof(pkt->hdr) + (pkt->hdr.len) + (CRC_SIZE))
+#define MAX_PACKET_SIZE \
+ (sizeof(struct spi_xfer_hdr) + MAX_SPI_MSG_SIZE + (CRC_SIZE))
+
+/* SPI xfer timeout size definition */
+#define XFER_TIMEOUT_BYTES 700
+#define MAX_XFER_BUFFER_SIZE ((MAX_PACKET_SIZE) + (XFER_TIMEOUT_BYTES))
+
+struct spi_xfer_hdr {
+ u8 sync;
+ u8 cmd;
+ u16 len;
+ u32 seq;
+} __packed;
+
+struct spi_xfer_packet {
+ struct spi_xfer_hdr hdr;
+ u8 buf[MAX_XFER_BUFFER_SIZE - sizeof(struct spi_xfer_hdr)];
+} __packed;
+
+#define CMD_SPI_WRITE 0x01
+#define CMD_SPI_READ 0x02
+#define CMD_SPI_RESET_NOTIFY 0x04
+
+#define CMD_SPI_ACK 0x10
+#define CMD_SPI_NACK 0x11
+#define CMD_SPI_BUSY 0x12
+#define CMD_SPI_FATAL_ERR 0x13
+
+struct host_timestamp {
+ u64 realtime;
+ u64 boottime;
+} __packed;
+
+struct vsc_boot_fw {
+ u32 main_ver;
+ u32 sub_ver;
+ u32 key_src;
+ u32 svn;
+
+ u8 tx_buf[FW_SPI_PKG_SIZE];
+ u8 rx_buf[FW_SPI_PKG_SIZE];
+
+ /* FirmwareBootFile */
+ char *fw_file_name;
+ /* PkgBootFile */
+ char *sensor_file_name;
+ /* SkuConfigBootFile */
+ char *sku_cnf_file_name;
+
+ u32 fw_option;
+ u32 fw_cnt;
+ struct fragment frags[FRAGMENT_TYPE_MAX];
+};
+
+struct mei_vsc_hw {
+ struct spi_device *spi;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ /* raw SPI message buffers and last received length */
+ u8 rx_buf[MAX_SPI_MSG_SIZE];
+ u8 tx_buf[MAX_SPI_MSG_SIZE];
+ u32 rx_len;
+
+ /* wakeuphost interrupt line and the GPIOs driving the link */
+ int wakeuphostint;
+ struct gpio_desc *wakeuphost;
+ struct gpio_desc *resetfw;
+ struct gpio_desc *wakeupfw;
+
+ /* firmware download state */
+ struct vsc_boot_fw fw;
+ bool host_ready;
+ bool fw_ready;
+
+ /* mei transport layer */
+ u32 seq;
+ u8 tx_buf1[MAX_XFER_BUFFER_SIZE];
+ u8 rx_buf1[MAX_XFER_BUFFER_SIZE];
+
+ struct mutex mutex;
+ /* set on suspend/remove/shutdown to skip re-init after reset */
+ bool disconnect;
+ /* pending device events; incremented in the ISR */
+ atomic_t lock_cnt;
+ /* non-zero while a write is in flight (see mei_vsc_write) */
+ int write_lock_cnt;
+ wait_queue_head_t xfer_wait;
+};
+
+#define to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw))
+
+#endif
diff --git a/drivers/misc/mei/spi-vsc.c b/drivers/misc/mei/spi-vsc.c
new file mode 100644
index 000000000000..7a8c1e6d3eca
--- /dev/null
+++ b/drivers/misc/mei/spi-vsc.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mei.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#include "client.h"
+#include "hw-vsc.h"
+#include "mei_dev.h"
+
+/* gpio resources*/
+static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
+static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
+static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
+static const struct acpi_gpio_params wakeupfw = { 3, 0, false };
+static const struct acpi_gpio_mapping mei_vsc_acpi_gpios[] = {
+ { "wakeuphost-gpios", &wakeuphost_gpio, 1 },
+ { "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
+ { "resetfw-gpios", &resetfw_gpio, 1 },
+ { "wakeupfw-gpios", &wakeupfw, 1 },
+ {}
+};
+
+/**
+ * mei_vsc_probe - SPI device probe routine
+ *
+ * Allocates the mei device, maps the ACPI GPIOs, installs the
+ * wakeuphost interrupt and starts/registers the mei stack.
+ *
+ * @spi: SPI device structure
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int mei_vsc_probe(struct spi_device *spi)
+{
+ struct mei_vsc_hw *hw;
+ struct mei_device *dev;
+ int ret;
+
+ dev = mei_vsc_dev_init(&spi->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ hw = to_vsc_hw(dev);
+ mutex_init(&hw->mutex);
+ init_waitqueue_head(&hw->xfer_wait);
+ hw->spi = spi;
+ spi_set_drvdata(spi, dev);
+
+ ret = devm_acpi_dev_add_driver_gpios(&spi->dev, mei_vsc_acpi_gpios);
+ if (ret) {
+ dev_err(&spi->dev, "%s: fail to add gpio\n", __func__);
+ return -EBUSY;
+ }
+
+ /* propagate the gpiod error instead of collapsing to -EINVAL */
+ hw->wakeuphost = devm_gpiod_get(&spi->dev, "wakeuphost", GPIOD_IN);
+ if (IS_ERR(hw->wakeuphost)) {
+ dev_err(&spi->dev, "gpio get irq failed\n");
+ return PTR_ERR(hw->wakeuphost);
+ }
+ hw->resetfw = devm_gpiod_get(&spi->dev, "resetfw", GPIOD_OUT_HIGH);
+ if (IS_ERR(hw->resetfw)) {
+ dev_err(&spi->dev, "gpio get resetfw failed\n");
+ return PTR_ERR(hw->resetfw);
+ }
+ hw->wakeupfw = devm_gpiod_get(&spi->dev, "wakeupfw", GPIOD_OUT_HIGH);
+ if (IS_ERR(hw->wakeupfw)) {
+ dev_err(&spi->dev, "gpio get wakeupfw failed\n");
+ return PTR_ERR(hw->wakeupfw);
+ }
+
+ ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&spi->dev),
+ "wakeuphostint-gpios", 0);
+ if (ret < 0)
+ return ret;
+
+ hw->wakeuphostint = ret;
+ irq_set_status_flags(hw->wakeuphostint, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(hw->wakeuphostint, mei_vsc_irq_quick_handler,
+ mei_vsc_irq_thread_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ KBUILD_MODNAME, dev);
+ /* the original code ignored this result and used the irq anyway */
+ if (ret) {
+ dev_err(&spi->dev, "request_threaded_irq failed: irq = %d.\n",
+ hw->wakeuphostint);
+ return ret;
+ }
+
+ if (mei_start(dev)) {
+ dev_err(&spi->dev, "init hw failure.\n");
+ ret = -ENODEV;
+ goto release_irq;
+ }
+
+ ret = mei_register(dev, &spi->dev);
+ if (ret)
+ goto stop;
+
+ pm_runtime_enable(dev->dev);
+ dev_dbg(&spi->dev, "initialization successful.\n");
+ return 0;
+
+stop:
+ mei_stop(dev);
+release_irq:
+ mei_cancel_work(dev);
+ mei_disable_interrupts(dev);
+ free_irq(hw->wakeuphostint, dev);
+ return ret;
+}
+
+/**
+ * mei_vsc_suspend - system sleep callback
+ *
+ * Marks the link as disconnecting, stops the mei stack and
+ * releases the wakeuphost interrupt (re-acquired in resume).
+ *
+ * @device: device structure
+ *
+ * Return: 0 on success, -ENODEV if no drvdata is attached
+ */
+static int __maybe_unused mei_vsc_suspend(struct device *device)
+{
+ struct spi_device *spi = to_spi_device(device);
+ struct mei_device *dev = spi_get_drvdata(spi);
+ struct mei_vsc_hw *hw;
+
+ /* check dev before dereferencing it via to_vsc_hw() */
+ if (!dev)
+ return -ENODEV;
+
+ hw = to_vsc_hw(dev);
+
+ dev_dbg(dev->dev, "%s\n", __func__);
+
+ hw->disconnect = true;
+ mei_stop(dev);
+ mei_disable_interrupts(dev);
+ free_irq(hw->wakeuphostint, dev);
+ return 0;
+}
+
+/**
+ * mei_vsc_resume - system wake callback
+ *
+ * Re-requests the interrupt released in suspend, clears the
+ * disconnect flag and restarts the mei stack.
+ *
+ * @device: device structure
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int __maybe_unused mei_vsc_resume(struct device *device)
+{
+ struct spi_device *spi = to_spi_device(device);
+ struct mei_device *dev = spi_get_drvdata(spi);
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ int ret;
+
+ dev_dbg(dev->dev, "%s\n", __func__);
+ irq_set_status_flags(hw->wakeuphostint, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(hw->wakeuphostint, mei_vsc_irq_quick_handler,
+ mei_vsc_irq_thread_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ KBUILD_MODNAME, dev);
+ if (ret) {
+ dev_err(device, "request_threaded_irq failed: irq = %d.\n",
+ hw->wakeuphostint);
+ return ret;
+ }
+
+ hw->disconnect = false;
+ ret = mei_restart(dev);
+ if (ret)
+ return ret;
+
+ /* Start timer if stopped in suspend */
+ schedule_delayed_work(&dev->timer_work, HZ);
+ return 0;
+}
+
+/**
+ * mei_vsc_remove - SPI device removal routine
+ *
+ * Tears down in reverse-probe order: runtime PM, mei stack,
+ * interrupt, registration, then the hw mutex.
+ *
+ * @spi: SPI device structure
+ *
+ * Return: always 0
+ */
+static int mei_vsc_remove(struct spi_device *spi)
+{
+ struct mei_device *dev = spi_get_drvdata(spi);
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ dev_info(&spi->dev, "%s %d", __func__, hw->wakeuphostint);
+
+ pm_runtime_disable(dev->dev);
+ hw->disconnect = true;
+ mei_stop(dev);
+ mei_disable_interrupts(dev);
+ free_irq(hw->wakeuphostint, dev);
+ mei_deregister(dev);
+ mutex_destroy(&hw->mutex);
+ return 0;
+}
+
+/**
+ * mei_vsc_shutdown - Device Removal Routine
+ *
+ * @spi: SPI device structure
+ *
+ * mei_vsc_shutdown is called from the reboot notifier
+ * it's a simplified version of remove so we go down
+ * faster.
+ */
+static void mei_vsc_shutdown(struct spi_device *spi)
+{
+ struct mei_device *dev = spi_get_drvdata(spi);
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ dev_dbg(dev->dev, "shutdown\n");
+ /* skip deregister/mutex teardown - we're rebooting anyway */
+ hw->disconnect = true;
+ mei_stop(dev);
+
+ mei_disable_interrupts(dev);
+ free_irq(hw->wakeuphostint, dev);
+}
+
+static const struct dev_pm_ops mei_vsc_pm_ops = {
+
+ SET_SYSTEM_SLEEP_PM_OPS(mei_vsc_suspend, mei_vsc_resume)
+};
+
+static const struct acpi_device_id mei_vsc_acpi_ids[] = {
+ { "INTC1058", 1 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, mei_vsc_acpi_ids);
+
+static struct spi_driver mei_vsc_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .acpi_match_table = ACPI_PTR(mei_vsc_acpi_ids),
+ .pm = &mei_vsc_pm_ops,
+ },
+ .probe = mei_vsc_probe,
+ .remove = mei_vsc_remove,
+ .shutdown = mei_vsc_shutdown,
+ .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+};
+module_spi_driver(mei_vsc_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye at intel.com>");
+MODULE_DESCRIPTION("Intel MEI VSC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index e71a4c514f7b..73d870b426b6 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -413,6 +413,16 @@ config SPI_JCORE
This enables support for the SPI master controller in the J-Core
synthesizable, open source SoC.
+config SPI_LJCA
+ tristate "INTEL La Jolla Cove Adapter SPI support"
+ depends on MFD_LJCA
+ help
+ Select this option to enable SPI driver for the INTEL
+ La Jolla Cove Adapter (LJCA) board.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-ljca.
+
config SPI_LM70_LLP
tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
depends on PARPORT
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 13e54c45e9df..382dbb15716b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
+obj-$(CONFIG_SPI_LJCA) += spi-ljca.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
diff --git a/drivers/spi/spi-ljca.c b/drivers/spi/spi-ljca.c
new file mode 100644
index 000000000000..0ff3eea40687
--- /dev/null
+++ b/drivers/spi/spi-ljca.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB-SPI driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/mfd/ljca.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* SPI commands */
+enum ljca_spi_cmd {
+ LJCA_SPI_INIT = 1,
+ LJCA_SPI_READ,
+ LJCA_SPI_WRITE,
+ LJCA_SPI_WRITEREAD,
+ LJCA_SPI_DEINIT,
+};
+
+#define LJCA_SPI_BUS_MAX_HZ 48000000
+enum {
+ LJCA_SPI_BUS_SPEED_24M,
+ LJCA_SPI_BUS_SPEED_12M,
+ LJCA_SPI_BUS_SPEED_8M,
+ LJCA_SPI_BUS_SPEED_6M,
+ LJCA_SPI_BUS_SPEED_4_8M, /*4.8MHz*/
+ LJCA_SPI_BUS_SPEED_MIN = LJCA_SPI_BUS_SPEED_4_8M,
+};
+
+enum {
+ LJCA_SPI_CLOCK_LOW_POLARITY,
+ LJCA_SPI_CLOCK_HIGH_POLARITY,
+};
+
+enum {
+ LJCA_SPI_CLOCK_FIRST_PHASE,
+ LJCA_SPI_CLOCK_SECOND_PHASE,
+};
+
+#define LJCA_SPI_BUF_SIZE 60
+#define LJCA_SPI_MAX_XFER_SIZE \
+ (LJCA_SPI_BUF_SIZE - sizeof(struct spi_xfer_packet))
+union spi_clock_mode {
+ struct {
+ u8 polarity : 1;
+ u8 phrase : 1;
+ u8 reserved : 6;
+ } u;
+
+ u8 mode;
+} __packed;
+
+struct spi_init_packet {
+ u8 index;
+ u8 speed;
+ union spi_clock_mode mode;
+} __packed;
+
+struct spi_xfer_indicator {
+ u8 id : 6;
+ u8 cmpl : 1;
+ u8 index : 1;
+};
+
+struct spi_xfer_packet {
+ struct spi_xfer_indicator indicator;
+ s8 len;
+ u8 data[];
+} __packed;
+
+struct ljca_spi_dev {
+ struct platform_device *pdev;
+ struct ljca_spi_info *ctr_info;
+ struct spi_master *master;
+ u8 speed;
+ u8 mode;
+
+ u8 obuf[LJCA_SPI_BUF_SIZE];
+ u8 ibuf[LJCA_SPI_BUF_SIZE];
+};
+
+/**
+ * ljca_spi_read_write - exchange one fragment with the LJCA bridge
+ *
+ * Builds a spi_xfer_packet in the device's obuf, runs the USB
+ * transfer and copies any returned payload to @r_data.
+ *
+ * @ljca_spi: LJCA SPI device
+ * @w_data: data to write, or NULL for a pure read
+ * @r_data: buffer for read data (at least @len bytes), may be NULL
+ * @len: fragment length in bytes (<= LJCA_SPI_MAX_XFER_SIZE)
+ * @id: fragment sequence id
+ * @complete: non-zero on the last fragment of a transfer
+ * @cmd: one of LJCA_SPI_READ/WRITE/WRITEREAD
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int ljca_spi_read_write(struct ljca_spi_dev *ljca_spi, const u8 *w_data,
+ u8 *r_data, int len, int id, int complete,
+ int cmd)
+{
+ struct spi_xfer_packet *w_packet =
+ (struct spi_xfer_packet *)ljca_spi->obuf;
+ struct spi_xfer_packet *r_packet =
+ (struct spi_xfer_packet *)ljca_spi->ibuf;
+ int ret;
+ int ibuf_len;
+
+ w_packet->indicator.index = ljca_spi->ctr_info->id;
+ w_packet->indicator.id = id;
+ w_packet->indicator.cmpl = complete;
+
+ if (cmd == LJCA_SPI_READ) {
+ /* a read carries only the requested length as payload */
+ w_packet->len = sizeof(u16);
+ *(u16 *)&w_packet->data[0] = len;
+ } else {
+ w_packet->len = len;
+ memcpy(w_packet->data, w_data, len);
+ }
+
+ ret = ljca_transfer(ljca_spi->pdev, cmd, w_packet,
+ sizeof(*w_packet) + w_packet->len, r_packet,
+ &ibuf_len);
+ if (ret)
+ return ret;
+
+ if (ibuf_len < sizeof(*r_packet) || r_packet->len <= 0) {
+ dev_err(&ljca_spi->pdev->dev, "receive packet error len %d\n",
+ r_packet->len);
+ return -EIO;
+ }
+
+ /*
+ * r_packet->len is device-supplied; clamp it to the caller's
+ * fragment size so a misbehaving device can't overflow r_data.
+ */
+ if (r_data)
+ memcpy(r_data, r_packet->data,
+ min_t(int, r_packet->len, len));
+
+ return 0;
+}
+
+/**
+ * ljca_spi_init - program bus speed and SPI mode on the bridge
+ *
+ * Sends an LJCA_SPI_INIT packet unless the requested divisor and
+ * mode are already active (cached in the device struct).
+ *
+ * @ljca_spi: LJCA SPI device
+ * @div: bus speed divisor index
+ * @mode: SPI mode flags (SPI_CPOL/SPI_CPHA)
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int ljca_spi_init(struct ljca_spi_dev *ljca_spi, int div, int mode)
+{
+ struct spi_init_packet w_packet = { 0 };
+ int ret;
+
+ /* skip the USB round-trip if nothing changed */
+ if (ljca_spi->mode == mode && ljca_spi->speed == div)
+ return 0;
+
+ if (mode & SPI_CPOL)
+ w_packet.mode.u.polarity = LJCA_SPI_CLOCK_HIGH_POLARITY;
+ else
+ w_packet.mode.u.polarity = LJCA_SPI_CLOCK_LOW_POLARITY;
+
+ if (mode & SPI_CPHA)
+ w_packet.mode.u.phrase = LJCA_SPI_CLOCK_SECOND_PHASE;
+ else
+ w_packet.mode.u.phrase = LJCA_SPI_CLOCK_FIRST_PHASE;
+
+ w_packet.index = ljca_spi->ctr_info->id;
+ w_packet.speed = div;
+ ret = ljca_transfer(ljca_spi->pdev, LJCA_SPI_INIT, &w_packet,
+ sizeof(w_packet), NULL, NULL);
+ if (ret)
+ return ret;
+
+ /* cache the applied settings for the fast path above */
+ ljca_spi->mode = mode;
+ ljca_spi->speed = div;
+ return 0;
+}
+
+/**
+ * ljca_spi_deinit - tear down the SPI function on the bridge
+ *
+ * @ljca_spi: LJCA SPI device
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int ljca_spi_deinit(struct ljca_spi_dev *ljca_spi)
+{
+ struct spi_init_packet w_packet = { 0 };
+
+ w_packet.index = ljca_spi->ctr_info->id;
+ return ljca_transfer(ljca_spi->pdev, LJCA_SPI_DEINIT, &w_packet,
+ sizeof(w_packet), NULL, NULL);
+}
+
+/**
+ * ljca_spi_transfer - run one SPI transfer, fragmenting as needed
+ *
+ * Splits the transfer into LJCA_SPI_MAX_XFER_SIZE fragments and
+ * issues a write, read or write-read per fragment; the last
+ * fragment carries the completion flag.
+ *
+ * @ljca_spi: LJCA SPI device
+ * @tx_data: transmit buffer, may be NULL (pure read)
+ * @rx_data: receive buffer, may be NULL (pure write)
+ * @len: total transfer length in bytes
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int ljca_spi_transfer(struct ljca_spi_dev *ljca_spi, const u8 *tx_data,
+ u8 *rx_data, u16 len)
+{
+ int ret = 0; /* was uninitialized: read by dev_dbg and returned for len == 0 */
+ int remaining = len;
+ int offset = 0;
+ int cur_len;
+ int complete = 0;
+ int i;
+
+ for (i = 0; remaining > 0;
+ offset += cur_len, remaining -= cur_len, i++) {
+ if (remaining > LJCA_SPI_MAX_XFER_SIZE) {
+ cur_len = LJCA_SPI_MAX_XFER_SIZE;
+ } else {
+ cur_len = remaining;
+ complete = 1;
+ }
+
+ if (tx_data && rx_data)
+ ret = ljca_spi_read_write(ljca_spi, tx_data + offset,
+ rx_data + offset, cur_len, i,
+ complete, LJCA_SPI_WRITEREAD);
+ else if (tx_data)
+ ret = ljca_spi_read_write(ljca_spi, tx_data + offset,
+ NULL, cur_len, i, complete,
+ LJCA_SPI_WRITE);
+ else if (rx_data)
+ ret = ljca_spi_read_write(ljca_spi, NULL,
+ rx_data + offset, cur_len, i,
+ complete, LJCA_SPI_READ);
+ else
+ return -EINVAL;
+
+ /* don't keep sending fragments after a failure */
+ if (ret)
+ return ret;
+
+ dev_dbg(&ljca_spi->pdev->dev,
+ "fragment %d offset %d remaining %d ret %d\n", i,
+ offset, remaining, ret);
+ }
+
+ return ret;
+}
+
+/**
+ * ljca_spi_prepare_message - spi core prepare_message callback
+ *
+ * Nothing to program here; only logs the chip select for debug.
+ *
+ * @master: SPI controller
+ * @message: message about to be transferred
+ *
+ * Return: always 0
+ */
+static int ljca_spi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct ljca_spi_dev *ljca_spi = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+
+ dev_dbg(&ljca_spi->pdev->dev, "cs %d\n", spi->chip_select);
+ return 0;
+}
+
+/**
+ * ljca_spi_transfer_one - spi core transfer_one callback
+ *
+ * Derives a speed-divisor index from the requested rate, applies
+ * speed/mode via ljca_spi_init() and runs the transfer.
+ *
+ * @master: SPI controller
+ * @spi: target SPI device
+ * @xfer: transfer to execute
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int ljca_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct ljca_spi_dev *ljca_spi = spi_master_get_devdata(master);
+ int ret;
+ int div;
+
+ /* map requested rate onto the bridge's divisor table, clamped */
+ div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz) / 2 - 1;
+ if (div > LJCA_SPI_BUS_SPEED_MIN)
+ div = LJCA_SPI_BUS_SPEED_MIN;
+
+ ret = ljca_spi_init(ljca_spi, div, spi->mode);
+ if (ret < 0) {
+ dev_err(&ljca_spi->pdev->dev,
+ "cannot initialize transfer ret %d\n", ret);
+ return ret;
+ }
+
+ ret = ljca_spi_transfer(ljca_spi, xfer->tx_buf, xfer->rx_buf,
+ xfer->len);
+ if (ret < 0)
+ dev_err(&ljca_spi->pdev->dev, "ljca spi transfer failed!\n");
+
+ return ret;
+}
+
+/**
+ * ljca_spi_probe - platform probe for the LJCA SPI function
+ *
+ * Allocates an SPI master bound to the LJCA bridge cell described
+ * by the platform data and registers it with the SPI core.
+ *
+ * @pdev: platform device created by the LJCA MFD parent
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int ljca_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct ljca_spi_dev *ljca_spi;
+ struct ljca_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*ljca_spi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+ ljca_spi = spi_master_get_devdata(master);
+
+ ljca_spi->ctr_info = &pdata->spi_info;
+ ljca_spi->master = master;
+ ljca_spi->master->dev.of_node = pdev->dev.of_node;
+ ljca_spi->pdev = pdev;
+
+ /* expose the parent's ACPI node so children can be enumerated */
+ ACPI_COMPANION_SET(&ljca_spi->master->dev, ACPI_COMPANION(&pdev->dev));
+
+ master->bus_num = -1;
+ master->mode_bits = SPI_CPHA | SPI_CPOL;
+ master->prepare_message = ljca_spi_prepare_message;
+ master->transfer_one = ljca_spi_transfer_one;
+ master->auto_runtime_pm = false;
+ master->max_speed_hz = LJCA_SPI_BUS_MAX_HZ;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register master\n");
+ goto exit_free_master;
+ }
+
+ return ret;
+
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+/**
+ * ljca_spi_dev_remove - platform remove for the LJCA SPI function
+ *
+ * Deinitializes the bridge's SPI engine; the master itself is
+ * unregistered by devm.
+ *
+ * @pdev: platform device
+ *
+ * Return: always 0
+ */
+static int ljca_spi_dev_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct ljca_spi_dev *ljca_spi = spi_master_get_devdata(master);
+
+ ljca_spi_deinit(ljca_spi);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* system sleep: quiesce the SPI controller queue */
+static int ljca_spi_dev_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+/* system wake: restart the SPI controller queue */
+static int ljca_spi_dev_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ljca_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(ljca_spi_dev_suspend, ljca_spi_dev_resume)
+};
+
+static struct platform_driver spi_ljca_driver = {
+ .driver = {
+ .name = "ljca-spi",
+ .pm = &ljca_spi_pm,
+ },
+ .probe = ljca_spi_probe,
+ .remove = ljca_spi_dev_remove,
+};
+
+module_platform_driver(spi_ljca_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye at intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-SPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ljca-spi");
diff --git a/include/linux/mfd/ljca.h b/include/linux/mfd/ljca.h
new file mode 100644
index 000000000000..2c19f2de58ce
--- /dev/null
+++ b/include/linux/mfd/ljca.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public interface of the Intel La Jolla Cove Adapter (LJCA) USB MFD core,
+ * consumed by its gpio/i2c/spi cell drivers.
+ *
+ * NOTE(review): the guard says USB but the header lives under mfd/ —
+ * consider renaming to __LINUX_MFD_LJCA_H for consistency.
+ */
+#ifndef __LINUX_USB_LJCA_H
+#define __LINUX_USB_LJCA_H
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/* Upper bound on GPIO pins a single LJCA device can expose. */
+#define MAX_GPIO_NUM 64
+
+/* GPIO function description passed to the gpio-ljca cell driver. */
+struct ljca_gpio_info {
+	int num;	/* number of GPIO pins */
+	DECLARE_BITMAP(valid_pin_map, MAX_GPIO_NUM);	/* set bit => pin usable */
+};
+
+/* I2C controller description passed to the i2c-ljca cell driver. */
+struct ljca_i2c_info {
+	u8 id;		/* controller index on the device */
+	u8 capacity;	/* NOTE(review): units not visible here — confirm with MFD core */
+	u8 intr_pin;	/* pin used for the controller interrupt */
+};
+
+/* SPI controller description passed to the spi-ljca cell driver. */
+struct ljca_spi_info {
+	u8 id;		/* controller index on the device */
+	u8 capacity;	/* NOTE(review): units not visible here — confirm with MFD core */
+};
+
+/*
+ * Per-cell platform data handed from the LJCA MFD core to each cell driver;
+ * @type selects which union member is valid.
+ */
+struct ljca_platform_data {
+	int type;
+	union {
+		struct ljca_gpio_info gpio_info;
+		struct ljca_i2c_info i2c_info;
+		struct ljca_spi_info spi_info;
+	};
+};
+
+/*
+ * Callback invoked by the LJCA core when the device reports an unsolicited
+ * event for @pdev: @cmd is the event code, @evt_data/@len the payload.
+ */
+typedef void (*ljca_event_cb_t)(struct platform_device *pdev, u8 cmd,
+				const void *evt_data, int len);
+
+/* Register/unregister the event callback for a cell's platform device. */
+int ljca_register_event_cb(struct platform_device *pdev,
+			   ljca_event_cb_t event_cb);
+void ljca_unregister_event_cb(struct platform_device *pdev);
+/*
+ * Synchronous command transfer: send @cmd with @obuf/@obuf_len, and — for
+ * ljca_transfer() — wait for the reply into @ibuf, with *ibuf_len updated
+ * to the received length. ljca_transfer_noack() expects no reply.
+ */
+int ljca_transfer(struct platform_device *pdev, u8 cmd, const void *obuf,
+		  int obuf_len, void *ibuf, int *ibuf_len);
+int ljca_transfer_noack(struct platform_device *pdev, u8 cmd, const void *obuf,
+			int obuf_len);
+
+#endif
diff --git a/include/linux/vsc.h b/include/linux/vsc.h
new file mode 100644
index 000000000000..0ce8ccb0bc52
--- /dev/null
+++ b/include/linux/vsc.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Kernel-internal interface to the Intel Visual Sensing Controller (VSC):
+ * lets the IPU camera driver acquire/release camera sensor ownership and
+ * receive privacy-status notifications.
+ */
+#ifndef _LINUX_VSC_H_
+#define _LINUX_VSC_H_
+
+#include <linux/types.h>
+
+/**
+ * @brief VSC camera ownership definition
+ */
+enum vsc_camera_owner {
+	VSC_CAMERA_NONE = 0,
+	VSC_CAMERA_CVF,
+	VSC_CAMERA_IPU,
+};
+
+/**
+ * @brief VSC privacy status definition
+ */
+enum vsc_privacy_status {
+	VSC_PRIVACY_ON = 0,
+	VSC_PRIVACY_OFF,
+};
+
+/**
+ * @brief VSC MIPI configuration definition
+ *
+ * Use kernel fixed-width types (u32) rather than userspace uint32_t,
+ * per kernel coding style for in-kernel headers.
+ */
+struct vsc_mipi_config {
+	u32 freq;	/* NOTE(review): presumably link frequency in Hz — confirm */
+	u32 lane_num;	/* number of MIPI data lanes */
+};
+
+/**
+ * @brief VSC camera status definition
+ */
+struct vsc_camera_status {
+	enum vsc_camera_owner owner;
+	enum vsc_privacy_status status;
+	u32 exposure_level;
+};
+
+/**
+ * @brief VSC privacy callback type definition
+ *
+ * @param handle Privacy callback handle supplied at registration time
+ * @param status Current privacy status
+ */
+typedef void (*vsc_privacy_callback_t)(void *handle,
+				       enum vsc_privacy_status status);
+
+/**
+ * @brief Acquire camera sensor ownership to IPU
+ *
+ * @param config[IN] The pointer of MIPI configuration going to set
+ * @param callback[IN] The pointer of privacy callback function
+ * @param handle[IN] Privacy callback function runtime handle from IPU driver
+ * @param status[OUT] The pointer of camera status after the acquire
+ *
+ * @retval 0 If success
+ * @retval -EIO IO error
+ * @retval -EINVAL Invalid argument
+ * @retval negative values for other errors
+ */
+int vsc_acquire_camera_sensor(struct vsc_mipi_config *config,
+			      vsc_privacy_callback_t callback,
+			      void *handle,
+			      struct vsc_camera_status *status);
+
+/**
+ * @brief Release camera sensor ownership
+ *
+ * @param status[OUT] Camera status after the release
+ *
+ * @retval 0 If success
+ * @retval -EIO IO error
+ * @retval -EINVAL Invalid argument
+ * @retval negative values for other errors
+ */
+int vsc_release_camera_sensor(struct vsc_camera_status *status);
+
+#endif
--
2.33.1
More information about the kernel-team
mailing list