From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Patrick Yavitz <pyavitz@armbian.com>
Date: Fri, 21 Jun 2024 11:54:06 -0400
Subject: add spacemit patch set

source: https://gitee.com/bianbu-linux/linux-6.1

Signed-off-by: Patrick Yavitz <pyavitz@armbian.com>
---
 drivers/usb/dwc2/hcd.c                |    4 +-
 drivers/usb/dwc2/params.c             |   21 +-
 drivers/usb/dwc2/platform.c           |    2 +-
 drivers/usb/dwc3/Kconfig              |    7 +
 drivers/usb/dwc3/Makefile             |    1 +
 drivers/usb/dwc3/core.c               |   11 +
 drivers/usb/dwc3/dwc3-spacemit.c      |  351 ++
 drivers/usb/gadget/function/f_tcm.c   |  583 +-
 drivers/usb/gadget/function/tcm.h     |   19 +-
 drivers/usb/gadget/udc/Kconfig        |    7 +
 drivers/usb/gadget/udc/Makefile       |    1 +
 drivers/usb/gadget/udc/k1x_ci_udc.h   |  351 ++
 drivers/usb/gadget/udc/k1x_udc_core.c | 2690 ++++
 drivers/usb/host/Kconfig              |    6 +
 drivers/usb/host/ehci-hcd.c           |    7 +
 drivers/usb/host/ehci-k1x-ci.c        |  497 ++
 drivers/usb/host/xhci-hub.c           |    9 +
 17 files changed, 4413 insertions(+), 154 deletions(-)

diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 111111111111..222222222222 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5146,9 +5146,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
 
 	/* Set device flags indicating whether the HCD supports DMA */
 	if (hsotg->params.host_dma) {
-		if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
+		if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(40)) < 0)
 			dev_warn(hsotg->dev, "can't set DMA mask\n");
-		if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
+		if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(40)) < 0)
 			dev_warn(hsotg->dev, "can't set coherent DMA mask\n");
 	}
 
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 111111111111..222222222222 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -230,6 +230,23 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
 	p->hird_threshold_en = false;
 }
 
+static void dwc2_set_spacemit_params(struct dwc2_hsotg *hsotg)
+{
+	struct dwc2_core_params *p = &hsotg->params;
+
+	p->otg_caps.hnp_support = false;
+	p->otg_caps.srp_support = false;
+	p->speed = DWC2_SPEED_PARAM_HIGH;
+	p->host_rx_fifo_size = 790;
+	p->host_nperio_tx_fifo_size = 384;
+	p->host_perio_tx_fifo_size = 768;
+	p->phy_type = DWC2_PHY_TYPE_PARAM_ULPI;
+	//p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+	//p->phy_utmi_width = 16;
+	p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
+		GAHBCFG_HBSTLEN_SHIFT;
+}
+
 const struct of_device_id dwc2_of_match_table[] = {
 	{ .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
 	{ .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
@@ -267,6 +284,8 @@ const struct of_device_id dwc2_of_match_table[] = {
 	  .data = dwc2_set_stm32mp15_hsotg_params },
 	{ .compatible = "intel,socfpga-agilex-hsotg",
 	  .data = dwc2_set_socfpga_agilex_params },
+	{ .compatible = "spacemit,k1-pro-usb",
+	  .data = dwc2_set_spacemit_params },
 	{},
 };
 MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
@@ -559,7 +578,7 @@ static void dwc2_check_param_phy_type(struct dwc2_hsotg *hsotg)
 		valid = 1;
 		break;
 	case DWC2_PHY_TYPE_PARAM_ULPI:
-		if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI) ||
+		if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI) ||
 		    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
 			valid = 1;
 		break;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 111111111111..222222222222 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -419,7 +419,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
 	 */
 	if (!dev->dev.dma_mask)
 		dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
-	retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+	retval = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(40));
 	if (retval) {
 		dev_err(&dev->dev, "can't set coherent DMA mask: %d\n", retval);
 		return retval;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 111111111111..222222222222 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -168,4 +168,11 @@ config USB_DWC3_AM62
 	  The Designware Core USB3 IP is programmed to operate in
 	  in USB 2.0 mode only.
 	  Say 'Y' or 'M' here if you have one such device
+
+config USB_DWC3_SPACEMIT
+	tristate "Spacemit Platforms"
+	default USB_DWC3
+	help
+	  Support SPACEMIT platforms with DesignWare Core USB3 IP.
+	  Say 'Y' or 'M' here if you have one such device
 endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 111111111111..222222222222 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
 obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
 obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o
 obj-$(CONFIG_USB_DWC3_XILINX) += dwc3-xilinx.o
+obj-$(CONFIG_USB_DWC3_SPACEMIT) += dwc3-spacemit.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 111111111111..222222222222 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1936,6 +1936,17 @@ static int dwc3_probe(struct platform_device *pdev)
 		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
 		if (ret)
 			goto disable_clks;
+	} else if (!dwc->sysdev_is_parent &&
+		   IS_ENABLED(CONFIG_SOC_SPACEMIT_K1PRO)) {
+		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(40));
+		if (ret)
+			goto disable_clks;
+	}
+
+	if (IS_ENABLED(CONFIG_SOC_SPACEMIT_K1X)) {
+		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(32));
+		if (ret)
+			goto disable_clks;
 	}
 
 	spin_lock_init(&dwc->lock);
diff --git a/drivers/usb/dwc3/dwc3-spacemit.c b/drivers/usb/dwc3/dwc3-spacemit.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/dwc3/dwc3-spacemit.c
|
|
@@ -0,0 +1,351 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * dwc3-spacemit.c - Spacemit DWC3 Specific Glue layer
|
|
+ *
|
|
+ * Copyright (c) 2023 Spacemit Co., Ltd.
|
|
+ *
|
|
+ * Author: Wilson <long.wan@spacemit.com>
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/usb/phy.h>
|
|
+#include <linux/phy/phy.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/of_platform.h>
|
|
+#include <linux/reset.h>
|
|
+#include <linux/of_address.h>
|
|
+
|
|
+#define DWC3_SPACEMIT_MAX_CLOCKS 4
|
|
+
|
|
+struct dwc3_spacemit_driverdata {
|
|
+ const char *clk_names[DWC3_SPACEMIT_MAX_CLOCKS];
|
|
+ int num_clks;
|
|
+ int suspend_clk_idx;
|
|
+ bool need_notify_disconnect;
|
|
+};
|
|
+
|
|
+struct dwc3_spacemit {
|
|
+ struct device *dev;
|
|
+ struct reset_control *resets;
|
|
+
|
|
+ const char **clk_names;
|
|
+ struct clk *clks[DWC3_SPACEMIT_MAX_CLOCKS];
|
|
+ int num_clks;
|
|
+ int suspend_clk_idx;
|
|
+ bool reset_on_resume;
|
|
+
|
|
+ struct usb_phy *usb2_phy;
|
|
+ struct usb_phy *usb3_phy;
|
|
+ struct phy *usb2_generic_phy;
|
|
+ struct phy *usb3_generic_phy;
|
|
+
|
|
+ bool need_notify_disconnect;
|
|
+};
|
|
+
|
|
+void dwc3_spacemit_clear_disconnect(struct device *dev)
+{
+	struct platform_device *pdev;
+	struct dwc3_spacemit *spacemit;
+	if (!dev)
+		return;
+	dev_dbg(dev, "%s: clear disconnect\n", __func__);
+	pdev = to_platform_device(dev);
+	if (IS_ERR_OR_NULL(pdev))
+		return;
+	spacemit = platform_get_drvdata(pdev);
+	if (!spacemit->need_notify_disconnect)
+		return;
+	usb_phy_notify_disconnect(spacemit->usb2_phy, USB_SPEED_HIGH);
+}
|
|
+
|
|
+static int dwc3_spacemit_get_phy(struct dwc3_spacemit *spacemit)
|
|
+{
|
|
+ struct device *dev = spacemit->dev;
|
|
+ int ret;
|
|
+
|
|
+ spacemit->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
|
|
+ spacemit->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
|
|
+ if (IS_ERR(spacemit->usb2_phy)) {
|
|
+ ret = PTR_ERR(spacemit->usb2_phy);
|
|
+ if (ret == -ENXIO || ret == -ENODEV)
|
|
+ spacemit->usb2_phy = NULL;
|
|
+ else
|
|
+ return dev_err_probe(dev, ret, "no usb2 phy configured\n");
|
|
+ }
|
|
+
|
|
+ if (IS_ERR(spacemit->usb3_phy)) {
|
|
+ ret = PTR_ERR(spacemit->usb3_phy);
|
|
+ if (ret == -ENXIO || ret == -ENODEV)
|
|
+ spacemit->usb3_phy = NULL;
|
|
+ else
|
|
+ return dev_err_probe(dev, ret, "no usb3 phy configured\n");
|
|
+ }
|
|
+
|
|
+ spacemit->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
|
|
+ if (IS_ERR(spacemit->usb2_generic_phy)) {
|
|
+ ret = PTR_ERR(spacemit->usb2_generic_phy);
|
|
+ if (ret == -ENOSYS || ret == -ENODEV)
|
|
+ spacemit->usb2_generic_phy = NULL;
|
|
+ else
|
|
+ return dev_err_probe(dev, ret, "no usb2 phy configured\n");
|
|
+ }
|
|
+
|
|
+ spacemit->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
|
|
+ if (IS_ERR(spacemit->usb3_generic_phy)) {
|
|
+ ret = PTR_ERR(spacemit->usb3_generic_phy);
|
|
+ if (ret == -ENOSYS || ret == -ENODEV)
|
|
+ spacemit->usb3_generic_phy = NULL;
|
|
+ else
|
|
+ return dev_err_probe(dev, ret, "no usb3 phy configured\n");
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int dwc3_spacemit_phy_setup(struct dwc3_spacemit *spacemit, bool enable)
|
|
+{
|
|
+ if (enable) {
|
|
+ usb_phy_init(spacemit->usb2_phy);
|
|
+ usb_phy_init(spacemit->usb3_phy);
|
|
+ phy_init(spacemit->usb2_generic_phy);
|
|
+ phy_init(spacemit->usb3_generic_phy);
|
|
+ } else {
|
|
+ usb_phy_shutdown(spacemit->usb2_phy);
|
|
+ usb_phy_shutdown(spacemit->usb3_phy);
|
|
+ phy_exit(spacemit->usb2_generic_phy);
|
|
+ phy_exit(spacemit->usb3_generic_phy);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int dwc3_spacemit_init(struct dwc3_spacemit *data)
|
|
+{
|
|
+ struct device *dev = data->dev;
|
|
+ int ret = 0, i;
|
|
+
|
|
+ for (i = 0; i < data->num_clks; i++) {
|
|
+ ret = clk_prepare_enable(data->clks[i]);
|
|
+ if (ret) {
|
|
+ while (i-- > 0)
|
|
+ clk_disable_unprepare(data->clks[i]);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (data->suspend_clk_idx >= 0)
|
|
+ clk_prepare_enable(data->clks[data->suspend_clk_idx]);
|
|
+
|
|
+ ret = reset_control_assert(data->resets);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "failed to assert resets, err=%d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = reset_control_deassert(data->resets);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "failed to deassert resets, err=%d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ dwc3_spacemit_phy_setup(data, true);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int dwc3_spacemit_exit(struct dwc3_spacemit *data)
|
|
+{
|
|
+ struct device *dev = data->dev;
|
|
+ int ret = 0, i;
|
|
+
|
|
+ dwc3_spacemit_phy_setup(data, false);
|
|
+
|
|
+ ret = reset_control_assert(data->resets);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "failed to assert resets, err=%d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (data->suspend_clk_idx >= 0)
|
|
+ clk_disable_unprepare(data->clks[data->suspend_clk_idx]);
|
|
+
|
|
+ for (i = data->num_clks - 1; i >= 0; i--)
|
|
+ clk_disable_unprepare(data->clks[i]);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int dwc3_spacemit_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct dwc3_spacemit *spacemit;
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct device_node *node = dev->of_node;
|
|
+ const struct dwc3_spacemit_driverdata *driver_data;
|
|
+ int i, ret;
|
|
+
|
|
+ spacemit = devm_kzalloc(dev, sizeof(*spacemit), GFP_KERNEL);
|
|
+ if (!spacemit)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ driver_data = of_device_get_match_data(dev);
|
|
+ spacemit->dev = dev;
|
|
+ spacemit->num_clks = driver_data->num_clks;
|
|
+ spacemit->clk_names = (const char **)driver_data->clk_names;
|
|
+ spacemit->suspend_clk_idx = driver_data->suspend_clk_idx;
|
|
+ spacemit->need_notify_disconnect = driver_data->need_notify_disconnect;
|
|
+ spacemit->reset_on_resume = device_property_read_bool(&pdev->dev, "reset-on-resume");
|
|
+
|
|
+ platform_set_drvdata(pdev, spacemit);
|
|
+
|
|
+ for (i = 0; i < spacemit->num_clks; i++) {
|
|
+ spacemit->clks[i] = devm_clk_get(dev, spacemit->clk_names[i]);
|
|
+ if (IS_ERR(spacemit->clks[i])) {
|
|
+ dev_err(dev, "failed to get clock: %s\n",
|
|
+ spacemit->clk_names[i]);
|
|
+ return PTR_ERR(spacemit->clks[i]);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ spacemit->resets = devm_reset_control_array_get_optional_exclusive(dev);
|
|
+ if (IS_ERR(spacemit->resets)) {
|
|
+ ret = PTR_ERR(spacemit->resets);
|
|
+ dev_err(dev, "failed to get resets, err=%d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = dwc3_spacemit_get_phy(spacemit);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = dwc3_spacemit_init(spacemit);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "failed to init spacemit\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (node) {
|
|
+ ret = of_platform_populate(node, NULL, NULL, dev);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "failed to add dwc3 core\n");
|
|
+ goto populate_err;
|
|
+ }
|
|
+ } else {
|
|
+ dev_err(dev, "no device node, failed to add dwc3 core\n");
|
|
+ ret = -ENODEV;
|
|
+ goto populate_err;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+populate_err:
|
|
+ dwc3_spacemit_exit(spacemit);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int dwc3_spacemit_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct dwc3_spacemit *spacemit = platform_get_drvdata(pdev);
|
|
+
|
|
+ of_platform_depopulate(&pdev->dev);
|
|
+ dwc3_spacemit_exit(spacemit);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct dwc3_spacemit_driverdata spacemit_k1pro_drvdata = {
|
|
+ .clk_names = { "usbdrd30" },
|
|
+ .num_clks = 0,
|
|
+ .suspend_clk_idx = -1,
|
|
+ .need_notify_disconnect = false,
|
|
+};
|
|
+
|
|
+static const struct dwc3_spacemit_driverdata spacemit_k1x_drvdata = {
|
|
+ .clk_names = { "usbdrd30" },
|
|
+ .num_clks = 1,
|
|
+ .suspend_clk_idx = -1,
|
|
+ .need_notify_disconnect = true,
|
|
+};
|
|
+
|
|
+static const struct of_device_id spacemit_dwc3_match[] = {
|
|
+ {
|
|
+ .compatible = "spacemit,k1-pro-dwc3",
|
|
+ .data = &spacemit_k1pro_drvdata,
|
|
+ },
|
|
+ {
|
|
+ .compatible = "spacemit,k1-x-dwc3",
|
|
+ .data = &spacemit_k1x_drvdata,
|
|
+ },
|
|
+ { /* Sentinel */ }
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, spacemit_dwc3_match);
|
|
+
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+static int dwc3_spacemit_suspend(struct device *dev)
|
|
+{
|
|
+ struct dwc3_spacemit *spacemit = dev_get_drvdata(dev);
|
|
+ int i, ret;
|
|
+
|
|
+ dwc3_spacemit_phy_setup(spacemit, false);
|
|
+ if (spacemit->reset_on_resume){
|
|
+ ret = reset_control_assert(spacemit->resets);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ dev_info(spacemit->dev, "Will reset controller and phy on resume\n");
|
|
+ }
|
|
+ for (i = spacemit->num_clks - 1; i >= 0; i--)
|
|
+ clk_disable_unprepare(spacemit->clks[i]);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int dwc3_spacemit_resume(struct device *dev)
|
|
+{
|
|
+ struct dwc3_spacemit *spacemit = dev_get_drvdata(dev);
|
|
+ int i, ret;
|
|
+
|
|
+ for (i = 0; i < spacemit->num_clks; i++) {
|
|
+ ret = clk_prepare_enable(spacemit->clks[i]);
|
|
+ if (ret) {
|
|
+ while (i-- > 0)
|
|
+ clk_disable_unprepare(spacemit->clks[i]);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ if (spacemit->reset_on_resume){
|
|
+ dev_info(spacemit->dev, "Resetting controller and phy\n");
|
|
+ ret = reset_control_deassert(spacemit->resets);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+ dwc3_spacemit_phy_setup(spacemit, true);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct dev_pm_ops dwc3_spacemit_dev_pm_ops = {
|
|
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_spacemit_suspend, dwc3_spacemit_resume)
|
|
+};
|
|
+
|
|
+#define DEV_PM_OPS (&dwc3_spacemit_dev_pm_ops)
|
|
+#else
|
|
+#define DEV_PM_OPS NULL
|
|
+#endif /* CONFIG_PM_SLEEP */
|
|
+
|
|
+static struct platform_driver dwc3_spacemit_driver = {
|
|
+ .probe = dwc3_spacemit_probe,
|
|
+ .remove = dwc3_spacemit_remove,
|
|
+ .driver = {
|
|
+ .name = "spacemit-dwc3",
|
|
+ .of_match_table = spacemit_dwc3_match,
|
|
+ .pm = DEV_PM_OPS,
|
|
+ },
|
|
+};
|
|
+
|
|
+module_platform_driver(dwc3_spacemit_driver);
|
|
+
|
|
+MODULE_AUTHOR("Wilson <long.wan@spacemit.com>");
|
|
+MODULE_LICENSE("GPL v2");
|
|
+MODULE_DESCRIPTION("DesignWare USB3 Spacemit Glue Layer");
|
|
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/gadget/function/f_tcm.c
|
|
+++ b/drivers/usb/gadget/function/f_tcm.c
|
|
@@ -12,6 +12,7 @@
|
|
#include <linux/string.h>
|
|
#include <linux/configfs.h>
|
|
#include <linux/ctype.h>
|
|
+#include <linux/delay.h>
|
|
#include <linux/usb/ch9.h>
|
|
#include <linux/usb/composite.h>
|
|
#include <linux/usb/gadget.h>
|
|
@@ -50,7 +51,7 @@ static int bot_enqueue_cmd_cbw(struct f_uas *fu)
|
|
if (fu->flags & USBG_BOT_CMD_PEND)
|
|
return 0;
|
|
|
|
- ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
|
|
+ ret = usb_ep_queue(fu->ep_out, fu->cmd[0].req, GFP_ATOMIC);
|
|
if (!ret)
|
|
fu->flags |= USBG_BOT_CMD_PEND;
|
|
return ret;
|
|
@@ -136,7 +137,7 @@ static void bot_send_bad_status(struct usbg_cmd *cmd)
|
|
}
|
|
req->complete = bot_err_compl;
|
|
req->context = cmd;
|
|
- req->buf = fu->cmd.buf;
|
|
+ req->buf = fu->cmd[0].buf;
|
|
usb_ep_queue(ep, req, GFP_KERNEL);
|
|
} else {
|
|
bot_enqueue_sense_code(fu, cmd);
|
|
@@ -245,7 +246,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
|
|
{
|
|
struct f_uas *fu = cmd->fu;
|
|
struct se_cmd *se_cmd = &cmd->se_cmd;
|
|
- struct usb_gadget *gadget = fuas_to_gadget(fu);
|
|
int ret;
|
|
|
|
init_completion(&cmd->write_complete);
|
|
@@ -256,18 +256,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (!gadget->sg_supported) {
|
|
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
|
|
- if (!cmd->data_buf)
|
|
- return -ENOMEM;
|
|
-
|
|
- fu->bot_req_out->buf = cmd->data_buf;
|
|
- } else {
|
|
- fu->bot_req_out->buf = NULL;
|
|
- fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
|
|
- fu->bot_req_out->sg = se_cmd->t_data_sg;
|
|
- }
|
|
-
|
|
fu->bot_req_out->complete = usbg_data_write_cmpl;
|
|
fu->bot_req_out->length = se_cmd->data_length;
|
|
fu->bot_req_out->context = cmd;
|
|
@@ -314,8 +302,8 @@ static int bot_prepare_reqs(struct f_uas *fu)
|
|
if (!fu->bot_req_out)
|
|
goto err_out;
|
|
|
|
- fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
|
|
- if (!fu->cmd.req)
|
|
+ fu->cmd[0].req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
|
|
+ if (!fu->cmd[0].req)
|
|
goto err_cmd;
|
|
|
|
fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
|
|
@@ -327,27 +315,27 @@ static int bot_prepare_reqs(struct f_uas *fu)
|
|
fu->bot_status.req->complete = bot_status_complete;
|
|
fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
|
|
|
|
- fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
|
|
- if (!fu->cmd.buf)
|
|
+ fu->cmd[0].buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
|
|
+ if (!fu->cmd[0].buf)
|
|
goto err_buf;
|
|
|
|
- fu->cmd.req->complete = bot_cmd_complete;
|
|
- fu->cmd.req->buf = fu->cmd.buf;
|
|
- fu->cmd.req->length = fu->ep_out->maxpacket;
|
|
- fu->cmd.req->context = fu;
|
|
+ fu->cmd[0].req->complete = bot_cmd_complete;
|
|
+ fu->cmd[0].req->buf = fu->cmd[0].buf;
|
|
+ fu->cmd[0].req->length = fu->ep_out->maxpacket;
|
|
+ fu->cmd[0].req->context = fu;
|
|
|
|
ret = bot_enqueue_cmd_cbw(fu);
|
|
if (ret)
|
|
goto err_queue;
|
|
return 0;
|
|
err_queue:
|
|
- kfree(fu->cmd.buf);
|
|
- fu->cmd.buf = NULL;
|
|
+ kfree(fu->cmd[0].buf);
|
|
+ fu->cmd[0].buf = NULL;
|
|
err_buf:
|
|
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
|
|
err_sts:
|
|
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
|
|
- fu->cmd.req = NULL;
|
|
+ usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
|
|
+ fu->cmd[0].req = NULL;
|
|
err_cmd:
|
|
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
|
|
fu->bot_req_out = NULL;
|
|
@@ -372,16 +360,16 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
|
|
|
|
usb_ep_free_request(fu->ep_in, fu->bot_req_in);
|
|
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
|
|
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
|
|
+ usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
|
|
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
|
|
|
|
- kfree(fu->cmd.buf);
|
|
+ kfree(fu->cmd[0].buf);
|
|
|
|
fu->bot_req_in = NULL;
|
|
fu->bot_req_out = NULL;
|
|
- fu->cmd.req = NULL;
|
|
+ fu->cmd[0].req = NULL;
|
|
fu->bot_status.req = NULL;
|
|
- fu->cmd.buf = NULL;
|
|
+ fu->cmd[0].buf = NULL;
|
|
}
|
|
|
|
static void bot_set_alt(struct f_uas *fu)
|
|
@@ -465,6 +453,51 @@ static int usbg_bot_setup(struct usb_function *f,
|
|
|
|
/* Start uas.c code */
|
|
|
|
+static int tcm_to_uasp_response(enum tcm_tmrsp_table code)
|
|
+{
|
|
+ switch (code) {
|
|
+ case TMR_FUNCTION_FAILED:
|
|
+ return RC_TMF_FAILED;
|
|
+ case TMR_FUNCTION_COMPLETE:
|
|
+ case TMR_TASK_DOES_NOT_EXIST:
|
|
+ return RC_TMF_COMPLETE;
|
|
+ case TMR_LUN_DOES_NOT_EXIST:
|
|
+ return RC_INCORRECT_LUN;
|
|
+ case TMR_OVERLAPPED_TAG_ATTEMPTED:
|
|
+ return RC_OVERLAPPED_TAG;
|
|
+ case TMR_FUNCTION_REJECTED:
|
|
+ case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
|
|
+ default:
|
|
+ return RC_TMF_NOT_SUPPORTED;
|
|
+ }
|
|
+}
|
|
+
|
|
+static unsigned char uasp_to_tcm_func(int code)
|
|
+{
|
|
+ switch (code) {
|
|
+ case TMF_ABORT_TASK:
|
|
+ return TMR_ABORT_TASK;
|
|
+ case TMF_ABORT_TASK_SET:
|
|
+ return TMR_ABORT_TASK_SET;
|
|
+ case TMF_CLEAR_TASK_SET:
|
|
+ return TMR_CLEAR_TASK_SET;
|
|
+ case TMF_LOGICAL_UNIT_RESET:
|
|
+ return TMR_LUN_RESET;
|
|
+ case TMF_I_T_NEXUS_RESET:
|
|
+ return TMR_I_T_NEXUS_RESET;
|
|
+ case TMF_CLEAR_ACA:
|
|
+ return TMR_CLEAR_ACA;
|
|
+ case TMF_QUERY_TASK:
|
|
+ return TMR_QUERY_TASK;
|
|
+ case TMF_QUERY_TASK_SET:
|
|
+ return TMR_QUERY_TASK_SET;
|
|
+ case TMF_QUERY_ASYNC_EVENT:
|
|
+ return TMR_QUERY_ASYNC_EVENT;
|
|
+ default:
|
|
+ return TMR_UNKNOWN;
|
|
+ }
|
|
+}
|
|
+
|
|
static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
|
|
{
|
|
/* We have either all three allocated or none */
|
|
@@ -482,15 +515,21 @@ static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
|
|
|
|
static void uasp_free_cmdreq(struct f_uas *fu)
|
|
{
|
|
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
|
|
- kfree(fu->cmd.buf);
|
|
- fu->cmd.req = NULL;
|
|
- fu->cmd.buf = NULL;
|
|
+ int i;
|
|
+ int num_cmds = fu->num_cmds;
|
|
+
|
|
+ for (i = 0; i < num_cmds; i++) {
|
|
+ usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
|
|
+ kfree(fu->cmd[i].buf);
|
|
+ fu->cmd[i].req = NULL;
|
|
+ fu->cmd[i].buf = NULL;
|
|
+ }
|
|
}
|
|
|
|
static void uasp_cleanup_old_alt(struct f_uas *fu)
|
|
{
|
|
int i;
|
|
+ int num_cmds = fu->num_cmds;
|
|
|
|
if (!(fu->flags & USBG_ENABLED))
|
|
return;
|
|
@@ -500,11 +539,27 @@ static void uasp_cleanup_old_alt(struct f_uas *fu)
|
|
usb_ep_disable(fu->ep_status);
|
|
usb_ep_disable(fu->ep_cmd);
|
|
|
|
- for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
|
|
+ for (i = 0; i < num_cmds; i++)
|
|
uasp_cleanup_one_stream(fu, &fu->stream[i]);
|
|
uasp_free_cmdreq(fu);
|
|
}
|
|
|
|
+static struct uas_stream *uasp_get_stream_by_tag(struct f_uas *fu, u16 tag)
|
|
+{
|
|
+ /*
|
|
+ * For simplicity, we use mod operation to quickly find an in-progress
|
|
+ * matching command tag to check for overlapped command. The assumption
|
|
+ * is that the UASP class driver will limit to using tag id from 1 to
|
|
+ * USBG_NUM_CMDS. This is based on observation from the Windows and
|
|
+ * Linux UASP storage class driver behavior. If an unusual UASP class
|
|
+ * driver uses a tag greater than USBG_NUM_CMDS, then this method may no
|
|
+ * longer work due to possible stream id collision. In that case, we
|
|
+ * need to use a proper algorithm to fetch the stream (or simply walk
|
|
+ * through all active streams to check for overlap).
|
|
+ */
|
|
+ return &fu->stream[tag % fu->num_cmds];
|
|
+}
|
|
+
|
|
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
|
|
|
|
static int uasp_prepare_r_request(struct usbg_cmd *cmd)
|
|
@@ -512,8 +567,10 @@ static int uasp_prepare_r_request(struct usbg_cmd *cmd)
|
|
struct se_cmd *se_cmd = &cmd->se_cmd;
|
|
struct f_uas *fu = cmd->fu;
|
|
struct usb_gadget *gadget = fuas_to_gadget(fu);
|
|
- struct uas_stream *stream = cmd->stream;
|
|
-
|
|
+ struct uas_stream *stream = uasp_get_stream_by_tag(fu, cmd->tag);
|
|
+ if (!stream->req_in){
|
|
+ return -ESHUTDOWN;
|
|
+ }
|
|
if (!gadget->sg_supported) {
|
|
cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
|
|
if (!cmd->data_buf)
|
|
@@ -532,6 +589,7 @@ static int uasp_prepare_r_request(struct usbg_cmd *cmd)
|
|
}
|
|
|
|
stream->req_in->is_last = 1;
|
|
+ stream->req_in->stream_id = cmd->tag;
|
|
stream->req_in->complete = uasp_status_data_cmpl;
|
|
stream->req_in->length = se_cmd->data_length;
|
|
stream->req_in->context = cmd;
|
|
@@ -544,7 +602,11 @@ static void uasp_prepare_status(struct usbg_cmd *cmd)
|
|
{
|
|
struct se_cmd *se_cmd = &cmd->se_cmd;
|
|
struct sense_iu *iu = &cmd->sense_iu;
|
|
- struct uas_stream *stream = cmd->stream;
|
|
+ struct uas_stream *stream;
|
|
+ stream = uasp_get_stream_by_tag(cmd->fu, cmd->tag);
|
|
+ if (!stream->req_status) {
|
|
+ return;
|
|
+ }
|
|
|
|
cmd->state = UASP_QUEUE_COMMAND;
|
|
iu->iu_id = IU_ID_STATUS;
|
|
@@ -555,21 +617,75 @@ static void uasp_prepare_status(struct usbg_cmd *cmd)
|
|
*/
|
|
iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
|
|
iu->status = se_cmd->scsi_status;
|
|
+
|
|
stream->req_status->is_last = 1;
|
|
+ stream->req_status->stream_id = cmd->tag;
|
|
stream->req_status->context = cmd;
|
|
stream->req_status->length = se_cmd->scsi_sense_length + 16;
|
|
stream->req_status->buf = iu;
|
|
stream->req_status->complete = uasp_status_data_cmpl;
|
|
+
|
|
+}
|
|
+
|
|
+static void uasp_prepare_response(struct usbg_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct response_iu *rsp_iu = &cmd->response_iu;
+	struct uas_stream *stream = uasp_get_stream_by_tag(cmd->fu, cmd->tag);
+
+	cmd->state = UASP_QUEUE_COMMAND;
+	rsp_iu->iu_id = IU_ID_RESPONSE;
+	rsp_iu->tag = cpu_to_be16(cmd->tag);
+
+	if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
+		rsp_iu->response_code = cmd->tmr_rsp;
+	else
+		rsp_iu->response_code =
+			tcm_to_uasp_response(se_cmd->se_tmr_req->response);
+
+	/*
+	 * The UASP driver must support all the task management functions listed
+	 * in Table 20 of UAS-r04. To remain compliant while indicating that the
+	 * TMR did not go through, report RC_TMF_FAILED instead of
+	 * RC_TMF_NOT_SUPPORTED and print a warning to the user.
+	 */
|
|
+ switch (cmd->tmr_func) {
|
|
+ case TMR_ABORT_TASK:
|
|
+ case TMR_ABORT_TASK_SET:
|
|
+ case TMR_CLEAR_TASK_SET:
|
|
+ case TMR_LUN_RESET:
|
|
+ case TMR_I_T_NEXUS_RESET:
|
|
+ case TMR_CLEAR_ACA:
|
|
+ case TMR_QUERY_TASK:
|
|
+ case TMR_QUERY_TASK_SET:
|
|
+ case TMR_QUERY_ASYNC_EVENT:
|
|
+ if (rsp_iu->response_code == RC_TMF_NOT_SUPPORTED) {
|
|
+ pr_warn("TMR function %d not supported\n",
|
|
+ cmd->tmr_func);
|
|
+ rsp_iu->response_code = RC_TMF_FAILED;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ stream->req_status->is_last = 1;
|
|
+ stream->req_status->stream_id = cmd->tag;
|
|
+ stream->req_status->context = cmd;
|
|
+ stream->req_status->length = sizeof(struct response_iu);
|
|
+ stream->req_status->buf = rsp_iu;
|
|
+ stream->req_status->complete = uasp_status_data_cmpl;
|
|
}
|
|
|
|
+static void usbg_release_cmd(struct se_cmd *se_cmd);
|
|
+static int uasp_send_tm_response(struct usbg_cmd *cmd);
|
|
+
|
|
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
|
|
{
|
|
struct usbg_cmd *cmd = req->context;
|
|
- struct uas_stream *stream = cmd->stream;
|
|
struct f_uas *fu = cmd->fu;
|
|
+ struct uas_stream *stream = uasp_get_stream_by_tag(fu, cmd->tag);
|
|
int ret;
|
|
|
|
- if (req->status < 0)
|
|
+ if (req->status == -ESHUTDOWN)
|
|
goto cleanup;
|
|
|
|
switch (cmd->state) {
|
|
@@ -600,8 +716,36 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
|
|
break;
|
|
|
|
case UASP_QUEUE_COMMAND:
|
|
- transport_generic_free_cmd(&cmd->se_cmd, 0);
|
|
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
|
|
+ /*
|
|
+ * Overlapped command detected and cancelled.
|
|
+ * So send overlapped attempted status.
|
|
+ */
|
|
+ if (cmd->tmr_rsp == RC_OVERLAPPED_TAG &&
|
|
+ req->status == -ECONNRESET) {
|
|
+ uasp_send_tm_response(cmd);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ stream->cmd = NULL;
|
|
+
|
|
+ /*
|
|
+ * If no command submitted to target core here, just free the
|
|
+ * bitmap index. This is for the cases where f_tcm handles
|
|
+ * status response instead of the target core.
|
|
+ */
|
|
+ if (cmd->tmr_rsp != RC_OVERLAPPED_TAG &&
|
|
+ cmd->tmr_rsp != RC_RESPONSE_UNKNOWN) {
|
|
+ struct se_session *se_sess;
|
|
+
|
|
+ se_sess = fu->tpg->tpg_nexus->tvn_se_sess;
|
|
+ sbitmap_queue_clear(&se_sess->sess_tag_pool,
|
|
+ cmd->se_cmd.map_tag,
|
|
+ cmd->se_cmd.map_cpu);
|
|
+ } else {
|
|
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
|
|
+ }
|
|
+
|
|
+ usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
|
|
break;
|
|
|
|
default:
|
|
@@ -610,27 +754,38 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
|
|
return;
|
|
|
|
cleanup:
|
|
+ stream->cmd = NULL;
|
|
transport_generic_free_cmd(&cmd->se_cmd, 0);
|
|
}
|
|
|
|
static int uasp_send_status_response(struct usbg_cmd *cmd)
|
|
{
|
|
struct f_uas *fu = cmd->fu;
|
|
- struct uas_stream *stream = cmd->stream;
|
|
+ struct uas_stream *stream = uasp_get_stream_by_tag(fu, cmd->tag);
|
|
struct sense_iu *iu = &cmd->sense_iu;
|
|
|
|
iu->tag = cpu_to_be16(cmd->tag);
|
|
- stream->req_status->complete = uasp_status_data_cmpl;
|
|
- stream->req_status->context = cmd;
|
|
cmd->fu = fu;
|
|
uasp_prepare_status(cmd);
|
|
return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
|
|
}
|
|
|
|
+static int uasp_send_tm_response(struct usbg_cmd *cmd)
|
|
+{
|
|
+ struct f_uas *fu = cmd->fu;
|
|
+ struct uas_stream *stream = uasp_get_stream_by_tag(fu, cmd->tag);
|
|
+ struct response_iu *iu = &cmd->response_iu;
|
|
+
|
|
+ iu->tag = cpu_to_be16(cmd->tag);
|
|
+ cmd->fu = fu;
|
|
+ uasp_prepare_response(cmd);
|
|
+ return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
|
|
+}
|
|
+
|
|
static int uasp_send_read_response(struct usbg_cmd *cmd)
|
|
{
|
|
struct f_uas *fu = cmd->fu;
|
|
- struct uas_stream *stream = cmd->stream;
|
|
+ struct uas_stream *stream = uasp_get_stream_by_tag(fu, cmd->tag);
|
|
struct sense_iu *iu = &cmd->sense_iu;
|
|
int ret;
|
|
|
|
@@ -660,7 +815,6 @@ static int uasp_send_read_response(struct usbg_cmd *cmd)
|
|
cmd->state = UASP_SEND_DATA;
|
|
stream->req_status->buf = iu;
|
|
stream->req_status->length = sizeof(struct iu);
|
|
-
|
|
ret = usb_ep_queue(fu->ep_status, stream->req_status,
|
|
GFP_ATOMIC);
|
|
if (ret)
|
|
@@ -674,7 +828,7 @@ static int uasp_send_write_request(struct usbg_cmd *cmd)
|
|
{
|
|
struct f_uas *fu = cmd->fu;
|
|
struct se_cmd *se_cmd = &cmd->se_cmd;
|
|
- struct uas_stream *stream = cmd->stream;
|
|
+ struct uas_stream *stream = uasp_get_stream_by_tag(fu, cmd->tag);
|
|
struct sense_iu *iu = &cmd->sense_iu;
|
|
int ret;
|
|
|
|
@@ -711,31 +865,27 @@ static int uasp_send_write_request(struct usbg_cmd *cmd)
|
|
}
|
|
|
|
wait_for_completion(&cmd->write_complete);
|
|
+
|
|
target_execute_cmd(se_cmd);
|
|
cleanup:
|
|
return ret;
|
|
}
|
|
|
|
-static int usbg_submit_command(struct f_uas *, void *, unsigned int);
|
|
+static int usbg_submit_command(struct f_uas *, struct usb_request *);
|
|
|
|
static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
|
|
{
|
|
struct f_uas *fu = req->context;
|
|
- int ret;
|
|
|
|
- if (req->status < 0)
|
|
+ if (req->status == -ESHUTDOWN)
|
|
return;
|
|
|
|
- ret = usbg_submit_command(fu, req->buf, req->actual);
|
|
- /*
|
|
- * Once we tune for performance enqueue the command req here again so
|
|
- * we can receive a second command while we processing this one. Pay
|
|
- * attention to properly sync STAUS endpoint with DATA IN + OUT so you
|
|
- * don't break HS.
|
|
- */
|
|
- if (!ret)
|
|
+ if (req->status < 0) {
|
|
+ usb_ep_queue(fu->ep_cmd, req, GFP_ATOMIC);
|
|
return;
|
|
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
|
|
+ }
|
|
+
|
|
+ usbg_submit_command(fu, req);
|
|
}
|
|
|
|
static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
|
|
@@ -764,66 +914,49 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
|
|
return -ENOMEM;
|
|
}
|
|
|
|
-static int uasp_alloc_cmd(struct f_uas *fu)
|
|
+static int uasp_alloc_cmd(struct f_uas *fu, int i)
|
|
{
|
|
- fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
|
|
- if (!fu->cmd.req)
|
|
+ fu->cmd[i].req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
|
|
+ if (!fu->cmd[i].req)
|
|
goto err;
|
|
|
|
- fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
|
|
- if (!fu->cmd.buf)
|
|
+ fu->cmd[i].buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
|
|
+ if (!fu->cmd[i].buf)
|
|
goto err_buf;
|
|
|
|
- fu->cmd.req->complete = uasp_cmd_complete;
|
|
- fu->cmd.req->buf = fu->cmd.buf;
|
|
- fu->cmd.req->length = fu->ep_cmd->maxpacket;
|
|
- fu->cmd.req->context = fu;
|
|
+ fu->cmd[i].req->complete = uasp_cmd_complete;
|
|
+ fu->cmd[i].req->buf = fu->cmd[i].buf;
|
|
+ fu->cmd[i].req->length = fu->ep_cmd->maxpacket;
|
|
+ fu->cmd[i].req->context = fu;
|
|
return 0;
|
|
|
|
err_buf:
|
|
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
|
|
+ usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
|
|
err:
|
|
return -ENOMEM;
|
|
}
|
|
|
|
-static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
|
|
-{
|
|
- int i;
|
|
-
|
|
- for (i = 0; i < max_streams; i++) {
|
|
- struct uas_stream *s = &fu->stream[i];
|
|
-
|
|
- s->req_in->stream_id = i + 1;
|
|
- s->req_out->stream_id = i + 1;
|
|
- s->req_status->stream_id = i + 1;
|
|
- }
|
|
-}
|
|
-
|
|
static int uasp_prepare_reqs(struct f_uas *fu)
|
|
{
|
|
int ret;
|
|
int i;
|
|
- int max_streams;
|
|
-
|
|
- if (fu->flags & USBG_USE_STREAMS)
|
|
- max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
|
|
- else
|
|
- max_streams = 1;
|
|
+ int num_cmds = fu->num_cmds;
|
|
|
|
- for (i = 0; i < max_streams; i++) {
|
|
+ for (i = 0; i < num_cmds; i++) {
|
|
ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
|
|
if (ret)
|
|
goto err_cleanup;
|
|
}
|
|
|
|
- ret = uasp_alloc_cmd(fu);
|
|
- if (ret)
|
|
- goto err_free_stream;
|
|
- uasp_setup_stream_res(fu, max_streams);
|
|
+ for (i = 0; i < num_cmds; i++) {
|
|
+ ret = uasp_alloc_cmd(fu, i);
|
|
+ if (ret)
|
|
+ goto err_free_stream;
|
|
|
|
- ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
|
|
- if (ret)
|
|
- goto err_free_stream;
|
|
+ ret = usb_ep_queue(fu->ep_cmd, fu->cmd[i].req, GFP_ATOMIC);
|
|
+ if (ret)
|
|
+ goto err_free_stream;
|
|
+ }
|
|
|
|
return 0;
|
|
|
|
@@ -852,6 +985,11 @@ static void uasp_set_alt(struct f_uas *fu)
|
|
if (gadget->speed >= USB_SPEED_SUPER)
|
|
fu->flags |= USBG_USE_STREAMS;
|
|
|
|
+ if (fu->flags & USBG_USE_STREAMS)
|
|
+ fu->num_cmds = USBG_NUM_CMDS;
|
|
+ else
|
|
+ fu->num_cmds = 1;
|
|
+
|
|
config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_UAS);
|
|
ret = usb_ep_enable(fu->ep_in);
|
|
if (ret)
|
|
@@ -876,7 +1014,8 @@ static void uasp_set_alt(struct f_uas *fu)
|
|
goto err_wq;
|
|
fu->flags |= USBG_ENABLED;
|
|
|
|
- pr_info("Using the UAS protocol\n");
|
|
+ pr_info("Using the UAS protocol, TCQ %s\n",
|
|
+ (fu->flags & USBG_USE_STREAMS) ? "Supported" : "Not Supported");
|
|
return;
|
|
err_wq:
|
|
usb_ep_disable(fu->ep_status);
|
|
@@ -914,6 +1053,8 @@ static int get_cmd_dir(const unsigned char *cdb)
|
|
case READ_TOC:
|
|
case READ_FORMAT_CAPACITIES:
|
|
case REQUEST_SENSE:
|
|
+ case ATA_12:
|
|
+ case ATA_16:
|
|
ret = DMA_FROM_DEVICE;
|
|
break;
|
|
|
|
@@ -956,8 +1097,18 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
|
|
{
|
|
struct usbg_cmd *cmd = req->context;
|
|
struct se_cmd *se_cmd = &cmd->se_cmd;
|
|
+ struct uas_stream *stream = uasp_get_stream_by_tag(cmd->fu, cmd->tag);
|
|
|
|
- if (req->status < 0) {
|
|
+ cmd->state = UASP_QUEUE_COMMAND;
|
|
+
|
|
+ if (req->status == -ESHUTDOWN) {
|
|
+ stream->cmd = NULL;
|
|
+ target_put_sess_cmd(se_cmd);
|
|
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (req->status) {
|
|
pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
|
|
goto cleanup;
|
|
}
|
|
@@ -973,7 +1124,15 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
|
|
return;
|
|
|
|
cleanup:
|
|
- transport_generic_free_cmd(&cmd->se_cmd, 0);
|
|
+ target_put_sess_cmd(se_cmd);
|
|
+ /* Command was aborted due to overlapped tag */
|
|
+ if (cmd->state == UASP_QUEUE_COMMAND &&
|
|
+ cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
|
|
+ uasp_send_tm_response(cmd);
|
|
+ return;
|
|
+ }
|
|
+ transport_send_check_condition_and_sense(se_cmd,
|
|
+ TCM_CHECK_CONDITION_ABORT_CMD, 0);
|
|
}
|
|
|
|
static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
|
|
@@ -995,9 +1154,12 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
|
|
}
|
|
|
|
req->is_last = 1;
|
|
+ req->stream_id = cmd->tag;
|
|
req->complete = usbg_data_write_cmpl;
|
|
req->length = se_cmd->data_length;
|
|
req->context = cmd;
|
|
+
|
|
+ cmd->state = UASP_SEND_STATUS;
|
|
return 0;
|
|
}
|
|
|
|
@@ -1037,9 +1199,25 @@ static int usbg_send_read_response(struct se_cmd *se_cmd)
|
|
return uasp_send_read_response(cmd);
|
|
}
|
|
|
|
-static void usbg_cmd_work(struct work_struct *work)
|
|
+static void usbg_aborted_task(struct se_cmd *se_cmd);
|
|
+
|
|
+static void usbg_submit_tmr(struct usbg_cmd *cmd)
|
|
+{
|
|
+ struct se_session *se_sess;
|
|
+ struct se_cmd *se_cmd;
|
|
+ int flags = TARGET_SCF_ACK_KREF;
|
|
+
|
|
+ se_cmd = &cmd->se_cmd;
|
|
+ se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
|
|
+
|
|
+ target_submit_tmr(se_cmd, se_sess,
|
|
+ cmd->response_iu.add_response_info,
|
|
+ cmd->unpacked_lun, NULL, cmd->tmr_func,
|
|
+ GFP_ATOMIC, cmd->tag, flags);
|
|
+}
|
|
+
|
|
+static void usbg_submit_cmd(struct usbg_cmd *cmd)
|
|
{
|
|
- struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
|
|
struct se_cmd *se_cmd;
|
|
struct tcm_usbg_nexus *tv_nexus;
|
|
struct usbg_tpg *tpg;
|
|
@@ -1065,8 +1243,75 @@ static void usbg_cmd_work(struct work_struct *work)
|
|
|
|
out:
|
|
transport_send_check_condition_and_sense(se_cmd,
|
|
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
|
|
- transport_generic_free_cmd(&cmd->se_cmd, 0);
|
|
+ TCM_UNSUPPORTED_SCSI_OPCODE, 0);
|
|
+}
|
|
+
|
|
+static void usbg_cmd_work(struct work_struct *work)
|
|
+{
|
|
+ struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
|
|
+
|
|
+ /*
|
|
+ * Failure is detected by f_tcm here. Skip submitting the command to the
|
|
+ * target core if we already know the failing response and send the usb
|
|
+ * response to the host directly.
|
|
+ */
|
|
+ if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
|
|
+ goto skip;
|
|
+
|
|
+ if (cmd->tmr_func)
|
|
+ usbg_submit_tmr(cmd);
|
|
+ else
|
|
+ usbg_submit_cmd(cmd);
|
|
+
|
|
+ return;
|
|
+
|
|
+skip:
|
|
+ if (cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
|
|
+ struct se_session *se_sess;
|
|
+ struct uas_stream *stream;
|
|
+
|
|
+ se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
|
|
+ stream = uasp_get_stream_by_tag(cmd->fu, cmd->tag);
|
|
+
|
|
+ /*
|
|
+ * There's no guarantee of a matching completion order between
|
|
+ * different endpoints. i.e. The device may receive a new (CDB)
|
|
+ * command request completion of the command endpoint before it
|
|
+ * gets notified of the previous command status completion from
|
|
+ * a status endpoint. The driver still needs to detect
|
|
+ * misbehaving host and respond with an overlap command tag. To
|
|
+ * prevent false overlapped tag failure, give the active and
|
|
+ * matching stream id a short time (1ms) to complete before
|
|
+ * respond with overlapped command failure.
|
|
+ */
|
|
+ msleep(1);
|
|
+
|
|
+ /* If the stream is completed, retry the command. */
|
|
+ if (!stream->cmd) {
|
|
+ usbg_submit_command(cmd->fu, cmd->req);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * The command isn't submitted to the target core, so we're safe
|
|
+ * to remove the bitmap index from the session tag pool.
|
|
+ */
|
|
+ sbitmap_queue_clear(&se_sess->sess_tag_pool,
|
|
+ cmd->se_cmd.map_tag,
|
|
+ cmd->se_cmd.map_cpu);
|
|
+
|
|
+ /*
|
|
+ * Overlap command tag detected. Cancel any pending transfer of
|
|
+ * the command submitted to target core.
|
|
+ */
|
|
+ stream->cmd->tmr_rsp = RC_OVERLAPPED_TAG;
|
|
+ usbg_aborted_task(&stream->cmd->se_cmd);
|
|
+
|
|
+ /* Send the response after the transfer is aborted. */
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ uasp_send_tm_response(cmd);
|
|
}
|
|
|
|
static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
|
|
@@ -1084,6 +1329,11 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
|
|
memset(cmd, 0, sizeof(*cmd));
|
|
cmd->se_cmd.map_tag = tag;
|
|
cmd->se_cmd.map_cpu = cpu;
|
|
+ /*
|
|
+ * CPU LIO will execute the cmd on. Defaults to the CPU the cmd is
|
|
+ * initialized on. Drivers can override.
|
|
+ */
|
|
+ cmd->se_cmd.cpuid = cpu;
|
|
cmd->se_cmd.tag = cmd->tag = scsi_tag;
|
|
cmd->fu = fu;
|
|
|
|
@@ -1092,50 +1342,72 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
|
|
|
|
static void usbg_release_cmd(struct se_cmd *);
|
|
|
|
-static int usbg_submit_command(struct f_uas *fu,
|
|
- void *cmdbuf, unsigned int len)
|
|
+static int usbg_submit_command(struct f_uas *fu, struct usb_request *req)
|
|
{
|
|
- struct command_iu *cmd_iu = cmdbuf;
|
|
+ struct iu *iu = req->buf;
|
|
struct usbg_cmd *cmd;
|
|
struct usbg_tpg *tpg = fu->tpg;
|
|
struct tcm_usbg_nexus *tv_nexus;
|
|
+ struct uas_stream *stream;
|
|
+ struct command_iu *cmd_iu;
|
|
u32 cmd_len;
|
|
u16 scsi_tag;
|
|
|
|
- if (cmd_iu->iu_id != IU_ID_COMMAND) {
|
|
- pr_err("Unsupported type %d\n", cmd_iu->iu_id);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
tv_nexus = tpg->tpg_nexus;
|
|
if (!tv_nexus) {
|
|
pr_err("Missing nexus, ignoring command\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
- cmd_len = (cmd_iu->len & ~0x3) + 16;
|
|
- if (cmd_len > USBG_MAX_CMD)
|
|
- return -EINVAL;
|
|
-
|
|
- scsi_tag = be16_to_cpup(&cmd_iu->tag);
|
|
+ scsi_tag = be16_to_cpup(&iu->tag);
|
|
cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
|
|
if (IS_ERR(cmd)) {
|
|
pr_err("usbg_get_cmd failed\n");
|
|
return -ENOMEM;
|
|
}
|
|
- memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
|
|
+ cmd->req = req;
|
|
+ cmd->fu = fu;
|
|
+ cmd->tag = scsi_tag;
|
|
+ cmd->se_cmd.tag = scsi_tag;
|
|
+ cmd->tmr_func = 0;
|
|
+ cmd->tmr_rsp = RC_RESPONSE_UNKNOWN;
|
|
|
|
- if (fu->flags & USBG_USE_STREAMS) {
|
|
- if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
|
|
- goto err;
|
|
- if (!cmd->tag)
|
|
- cmd->stream = &fu->stream[0];
|
|
- else
|
|
- cmd->stream = &fu->stream[cmd->tag - 1];
|
|
- } else {
|
|
- cmd->stream = &fu->stream[0];
|
|
+ cmd_iu = (struct command_iu *)iu;
|
|
+
|
|
+ /* Command and Task Management IUs share the same LUN offset */
|
|
+ cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
|
|
+
|
|
+ if (iu->iu_id != IU_ID_COMMAND && iu->iu_id != IU_ID_TASK_MGMT) {
|
|
+ cmd->tmr_rsp = RC_INVALID_INFO_UNIT;
|
|
+ goto skip;
|
|
}
|
|
|
|
+ stream = uasp_get_stream_by_tag(fu, scsi_tag);
|
|
+ if (stream->cmd) {
|
|
+ pr_err("Command tag %d overlapped\n", scsi_tag);
|
|
+ cmd->tmr_rsp = RC_OVERLAPPED_TAG;
|
|
+ goto skip;
|
|
+ }
|
|
+ stream->cmd = cmd;
|
|
+
|
|
+ if (iu->iu_id == IU_ID_TASK_MGMT) {
|
|
+ struct task_mgmt_iu *tm_iu;
|
|
+
|
|
+ tm_iu = (struct task_mgmt_iu *)iu;
|
|
+ cmd->tmr_func = uasp_to_tcm_func(tm_iu->function);
|
|
+ goto skip;
|
|
+ }
|
|
+
|
|
+ cmd_len = (cmd_iu->len & ~0x3) + 16;
|
|
+ if (cmd_len > USBG_MAX_CMD) {
|
|
+ pr_err("invalid len %d\n", cmd_len);
|
|
+ target_free_tag(tv_nexus->tvn_se_sess, &cmd->se_cmd);
|
|
+ stream->cmd = NULL;
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
|
|
+
|
|
+
|
|
switch (cmd_iu->prio_attr & 0x7) {
|
|
case UAS_HEAD_TAG:
|
|
cmd->prio_attr = TCM_HEAD_TAG;
|
|
@@ -1155,15 +1427,11 @@ static int usbg_submit_command(struct f_uas *fu,
|
|
break;
|
|
}
|
|
|
|
- cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
|
|
-
|
|
+skip:
|
|
INIT_WORK(&cmd->work, usbg_cmd_work);
|
|
queue_work(tpg->workqueue, &cmd->work);
|
|
|
|
return 0;
|
|
-err:
|
|
- usbg_release_cmd(&cmd->se_cmd);
|
|
- return -EINVAL;
|
|
}
|
|
|
|
static void bot_cmd_work(struct work_struct *work)
|
|
@@ -1172,7 +1440,7 @@ static void bot_cmd_work(struct work_struct *work)
|
|
struct se_cmd *se_cmd;
|
|
struct tcm_usbg_nexus *tv_nexus;
|
|
struct usbg_tpg *tpg;
|
|
- int dir;
|
|
+ int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
|
|
|
|
se_cmd = &cmd->se_cmd;
|
|
tpg = cmd->fu->tpg;
|
|
@@ -1189,12 +1457,12 @@ static void bot_cmd_work(struct work_struct *work)
|
|
|
|
target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
|
|
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
|
|
- cmd->data_len, cmd->prio_attr, dir, 0);
|
|
+ cmd->data_len, cmd->prio_attr, dir, flags);
|
|
return;
|
|
|
|
out:
|
|
transport_send_check_condition_and_sense(se_cmd,
|
|
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
|
|
+ TCM_UNSUPPORTED_SCSI_OPCODE, 0);
|
|
transport_generic_free_cmd(&cmd->se_cmd, 0);
|
|
}
|
|
|
|
@@ -1305,10 +1573,29 @@ static int usbg_get_cmd_state(struct se_cmd *se_cmd)
|
|
|
|
static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
|
|
{
|
|
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
|
|
+
|
|
+ uasp_send_tm_response(cmd);
|
|
}
|
|
|
|
static void usbg_aborted_task(struct se_cmd *se_cmd)
|
|
{
|
|
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
|
|
+ struct f_uas *fu = cmd->fu;
|
|
+ struct uas_stream *stream = uasp_get_stream_by_tag(fu, cmd->tag);
|
|
+ int ret = 0;
|
|
+
|
|
+ if (stream->req_out->status == -EINPROGRESS)
|
|
+ ret = usb_ep_dequeue(fu->ep_out, stream->req_out);
|
|
+ else if (stream->req_in->status == -EINPROGRESS)
|
|
+ ret = usb_ep_dequeue(fu->ep_in, stream->req_in);
|
|
+ else if (stream->req_status->status == -EINPROGRESS)
|
|
+ ret = usb_ep_dequeue(fu->ep_status, stream->req_status);
|
|
+
|
|
+ if (ret)
|
|
+ pr_err("Unable to dequeue se_cmd out %p\n", se_cmd);
|
|
+
|
|
+ cmd->state = UASP_QUEUE_COMMAND;
|
|
}
|
|
|
|
static const char *usbg_check_wwn(const char *name)
|
|
@@ -1334,7 +1621,7 @@ static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
|
|
}
|
|
|
|
static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
|
|
- const char *name)
|
|
+ const char *name)
|
|
{
|
|
struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
|
|
tport_wwn);
|
|
@@ -1379,7 +1666,8 @@ static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
|
|
goto unref_dep;
|
|
mutex_init(&tpg->tpg_mutex);
|
|
atomic_set(&tpg->tpg_port_count, 0);
|
|
- tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
|
|
+ tpg->workqueue = alloc_workqueue("tcm_usb_gadget",
|
|
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
|
|
if (!tpg->workqueue)
|
|
goto free_tpg;
|
|
|
|
@@ -1536,7 +1824,7 @@ static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
|
|
}
|
|
|
|
static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
|
|
- struct se_session *se_sess, void *p)
|
|
+ struct se_session *se_sess, void *p)
|
|
{
|
|
struct usbg_tpg *tpg = container_of(se_tpg,
|
|
struct usbg_tpg, se_tpg);
|
|
@@ -1564,10 +1852,10 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
|
|
}
|
|
|
|
tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
|
|
- USB_G_DEFAULT_SESSION_TAGS,
|
|
- sizeof(struct usbg_cmd),
|
|
- TARGET_PROT_NORMAL, name,
|
|
- tv_nexus, usbg_alloc_sess_cb);
|
|
+ USB_G_DEFAULT_SESSION_TAGS,
|
|
+ sizeof(struct usbg_cmd),
|
|
+ TARGET_PROT_NORMAL, name,
|
|
+ tv_nexus, usbg_alloc_sess_cb);
|
|
if (IS_ERR(tv_nexus->tvn_se_sess)) {
|
|
#define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
|
|
pr_debug(MAKE_NEXUS_MSG, name);
|
|
@@ -1774,7 +2062,7 @@ static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
|
|
static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
|
|
.bLength = sizeof(uasp_bi_ep_comp_desc),
|
|
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
|
|
- .bMaxBurst = 0,
|
|
+ .bMaxBurst = 15,
|
|
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
|
|
.wBytesPerInterval = 0,
|
|
};
|
|
@@ -1782,7 +2070,7 @@ static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
|
|
static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
|
|
.bLength = sizeof(bot_bi_ep_comp_desc),
|
|
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
|
|
- .bMaxBurst = 0,
|
|
+ .bMaxBurst = 15,
|
|
};
|
|
|
|
static struct usb_endpoint_descriptor uasp_bo_desc = {
|
|
@@ -1817,12 +2105,14 @@ static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
|
|
static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
|
|
.bLength = sizeof(uasp_bo_ep_comp_desc),
|
|
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
|
|
+ .bMaxBurst = 15,
|
|
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
|
|
};
|
|
|
|
static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
|
|
.bLength = sizeof(bot_bo_ep_comp_desc),
|
|
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
|
|
+ .bMaxBurst = 15,
|
|
};
|
|
|
|
static struct usb_endpoint_descriptor uasp_status_desc = {
|
|
@@ -1858,6 +2148,7 @@ static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
|
|
.bLength = sizeof(uasp_status_in_ep_comp_desc),
|
|
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
|
|
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
|
|
+ .bMaxBurst = 15,
|
|
};
|
|
|
|
static struct usb_endpoint_descriptor uasp_cmd_desc = {
|
|
@@ -2309,7 +2600,7 @@ DECLARE_USB_FUNCTION(tcm, tcm_alloc_inst, tcm_alloc);
|
|
static int __init tcm_init(void)
|
|
{
|
|
int ret;
|
|
-
|
|
+ pr_info("f_tcm: UAS support multiple cmds\n");
|
|
ret = usb_function_register(&tcmusb_func);
|
|
if (ret)
|
|
return ret;
|
|
diff --git a/drivers/usb/gadget/function/tcm.h b/drivers/usb/gadget/function/tcm.h
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/gadget/function/tcm.h
|
|
+++ b/drivers/usb/gadget/function/tcm.h
|
|
@@ -16,6 +16,8 @@
|
|
#define UASP_SS_EP_COMP_LOG_STREAMS 4
|
|
#define UASP_SS_EP_COMP_NUM_STREAMS (1 << UASP_SS_EP_COMP_LOG_STREAMS)
|
|
|
|
+#define USBG_NUM_CMDS (UASP_SS_EP_COMP_NUM_STREAMS + 1)
|
|
+
|
|
enum {
|
|
USB_G_STR_INT_UAS = 0,
|
|
USB_G_STR_INT_BBB,
|
|
@@ -24,7 +26,8 @@ enum {
|
|
#define USB_G_ALT_INT_BBB 0
|
|
#define USB_G_ALT_INT_UAS 1
|
|
|
|
-#define USB_G_DEFAULT_SESSION_TAGS 128
|
|
+#define USB_G_DEFAULT_SESSION_TAGS USBG_NUM_CMDS
|
|
+
|
|
|
|
struct tcm_usbg_nexus {
|
|
struct se_session *tvn_se_sess;
|
|
@@ -75,12 +78,17 @@ struct usbg_cmd {
|
|
struct completion write_complete;
|
|
struct kref ref;
|
|
|
|
+ struct usb_request *req;
|
|
+
|
|
/* UAS only */
|
|
u16 tag;
|
|
u16 prio_attr;
|
|
struct sense_iu sense_iu;
|
|
+ struct response_iu response_iu;
|
|
enum uas_state state;
|
|
- struct uas_stream *stream;
|
|
+ int tmr_func;
|
|
+ int tmr_rsp;
|
|
+#define RC_RESPONSE_UNKNOWN 0xff
|
|
|
|
/* BOT only */
|
|
__le32 bot_tag;
|
|
@@ -93,6 +101,8 @@ struct uas_stream {
|
|
struct usb_request *req_in;
|
|
struct usb_request *req_out;
|
|
struct usb_request *req_status;
|
|
+
|
|
+ struct usbg_cmd *cmd;
|
|
};
|
|
|
|
struct usbg_cdb {
|
|
@@ -109,6 +119,7 @@ struct f_uas {
|
|
struct usbg_tpg *tpg;
|
|
struct usb_function function;
|
|
u16 iface;
|
|
+ u16 num_cmds;
|
|
|
|
u32 flags;
|
|
#define USBG_ENABLED (1 << 0)
|
|
@@ -117,14 +128,14 @@ struct f_uas {
|
|
#define USBG_IS_BOT (1 << 3)
|
|
#define USBG_BOT_CMD_PEND (1 << 4)
|
|
|
|
- struct usbg_cdb cmd;
|
|
+ struct usbg_cdb cmd[USBG_NUM_CMDS];
|
|
struct usb_ep *ep_in;
|
|
struct usb_ep *ep_out;
|
|
|
|
/* UAS */
|
|
struct usb_ep *ep_status;
|
|
struct usb_ep *ep_cmd;
|
|
- struct uas_stream stream[UASP_SS_EP_COMP_NUM_STREAMS];
|
|
+ struct uas_stream stream[USBG_NUM_CMDS];
|
|
|
|
/* BOT */
|
|
struct bot_status bot_status;
|
|
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/gadget/udc/Kconfig
|
|
+++ b/drivers/usb/gadget/udc/Kconfig
|
|
@@ -251,6 +251,13 @@ config USB_MV_UDC
|
|
USB2.0 OTG controller, which can be configured as high speed or
|
|
full speed USB peripheral.
|
|
|
|
+config USB_K1X_UDC
|
|
+ tristate "Spacemit K1X USB2.0 Device Controller"
|
|
+ depends on HAS_DMA
|
|
+ help
|
|
+ Include a high speed USB2.0 OTG controller, which can be configured
|
|
+ as high speed or full speed USB peripheral.
|
|
+
|
|
config USB_MV_U3D
|
|
depends on HAS_DMA
|
|
tristate "MARVELL PXA2128 USB 3.0 controller"
|
|
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/gadget/udc/Makefile
|
|
+++ b/drivers/usb/gadget/udc/Makefile
|
|
@@ -43,3 +43,4 @@ obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub/
|
|
obj-$(CONFIG_USB_ASPEED_UDC) += aspeed_udc.o
|
|
obj-$(CONFIG_USB_BDC_UDC) += bdc/
|
|
obj-$(CONFIG_USB_MAX3420_UDC) += max3420_udc.o
|
|
+obj-$(CONFIG_USB_K1X_UDC) += k1x_udc_core.o
|
|
diff --git a/drivers/usb/gadget/udc/k1x_ci_udc.h b/drivers/usb/gadget/udc/k1x_ci_udc.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/gadget/udc/k1x_ci_udc.h
|
|
@@ -0,0 +1,351 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-or-later
|
|
+
|
|
+#ifndef __MV_UDC_H
|
|
+#define __MV_UDC_H
|
|
+
|
|
+#include <linux/power_supply.h>
|
|
+#include <linux/extcon-provider.h>
|
|
+
|
|
+#define VUSBHS_MAX_PORTS 8
|
|
+
|
|
+#define DQH_ALIGNMENT 2048
|
|
+#define DTD_ALIGNMENT 64
|
|
+#define DMA_BOUNDARY 4096
|
|
+
|
|
+#define EP_DIR_IN 1
|
|
+#define EP_DIR_OUT 0
|
|
+
|
|
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
|
|
+
|
|
+#define EP0_MAX_PKT_SIZE 64
|
|
+/* ep0 transfer state */
|
|
+#define WAIT_FOR_SETUP 0
|
|
+#define DATA_STATE_XMIT 1
|
|
+#define DATA_STATE_NEED_ZLP 2
|
|
+#define WAIT_FOR_OUT_STATUS 3
|
|
+#define DATA_STATE_RECV 4
|
|
+
|
|
+#define CAPLENGTH_MASK (0xff)
|
|
+#define DCCPARAMS_DEN_MASK (0x1f)
|
|
+
|
|
+#define HCSPARAMS_PPC (0x10)
|
|
+
|
|
+/* Frame Index Register Bit Masks */
|
|
+#define USB_FRINDEX_MASKS 0x3fff
|
|
+
|
|
+/* Command Register Bit Masks */
|
|
+#define USBCMD_RUN_STOP (0x00000001)
|
|
+#define USBCMD_CTRL_RESET (0x00000002)
|
|
+#define USBCMD_SETUP_TRIPWIRE_SET (0x00002000)
|
|
+#define USBCMD_SETUP_TRIPWIRE_CLEAR (~USBCMD_SETUP_TRIPWIRE_SET)
|
|
+
|
|
+#define USBCMD_ATDTW_TRIPWIRE_SET (0x00004000)
|
|
+#define USBCMD_ATDTW_TRIPWIRE_CLEAR (~USBCMD_ATDTW_TRIPWIRE_SET)
|
|
+
|
|
+/* bit 15,3,2 are for frame list size */
|
|
+#define USBCMD_FRAME_SIZE_1024 (0x00000000) /* 000 */
|
|
+#define USBCMD_FRAME_SIZE_512 (0x00000004) /* 001 */
|
|
+#define USBCMD_FRAME_SIZE_256 (0x00000008) /* 010 */
|
|
+#define USBCMD_FRAME_SIZE_128 (0x0000000C) /* 011 */
|
|
+#define USBCMD_FRAME_SIZE_64 (0x00008000) /* 100 */
|
|
+#define USBCMD_FRAME_SIZE_32 (0x00008004) /* 101 */
|
|
+#define USBCMD_FRAME_SIZE_16 (0x00008008) /* 110 */
|
|
+#define USBCMD_FRAME_SIZE_8 (0x0000800C) /* 111 */
|
|
+
|
|
+#define USBCMD_INT_THREAD_CTRL8 (0x00080000)
|
|
+
|
|
+#define EPCTRL_TX_ALL_MASK (0xFFFF0000)
|
|
+#define EPCTRL_RX_ALL_MASK (0x0000FFFF)
|
|
+
|
|
+#define EPCTRL_TX_DATA_TOGGLE_RST (0x00400000)
|
|
+#define EPCTRL_TX_EP_STALL (0x00010000)
|
|
+#define EPCTRL_RX_EP_STALL (0x00000001)
|
|
+#define EPCTRL_RX_DATA_TOGGLE_RST (0x00000040)
|
|
+#define EPCTRL_RX_ENABLE (0x00000080)
|
|
+#define EPCTRL_TX_ENABLE (0x00800000)
|
|
+#define EPCTRL_CONTROL (0x00000000)
|
|
+#define EPCTRL_ISOCHRONOUS (0x00040000)
|
|
+#define EPCTRL_BULK (0x00080000)
|
|
+#define EPCTRL_INT (0x000C0000)
|
|
+#define EPCTRL_TX_TYPE (0x000C0000)
|
|
+#define EPCTRL_RX_TYPE (0x0000000C)
|
|
+#define EPCTRL_DATA_TOGGLE_INHIBIT (0x00000020)
|
|
+#define EPCTRL_TX_EP_TYPE_SHIFT (18)
|
|
+#define EPCTRL_RX_EP_TYPE_SHIFT (2)
|
|
+
|
|
+#define EPCOMPLETE_MAX_ENDPOINTS (16)
|
|
+
|
|
+/* endpoint list address bit masks */
|
|
+#define USB_EP_LIST_ADDRESS_MASK 0xfffff800
|
|
+
|
|
+#define PORTSCX_W1C_BITS 0x2a
|
|
+#define PORTSCX_PORT_DM (1 << 10)
+#define PORTSCX_PORT_DP (1 << 11)
+#define PORTSCX_PORT_RESET 0x00000100
+#define PORTSCX_PORT_POWER 0x00001000
+#define PORTSCX_FORCE_FULL_SPEED_CONNECT 0x01000000
+#define PORTSCX_PAR_XCVR_SELECT 0xC0000000
+#define PORTSCX_PORT_FORCE_RESUME 0x00000040
+#define PORTSCX_PORT_SUSPEND 0x00000080
+#define PORTSCX_PORT_SPEED_FULL 0x00000000
+#define PORTSCX_PORT_SPEED_LOW 0x04000000
+#define PORTSCX_PORT_SPEED_HIGH 0x08000000
+#define PORTSCX_PORT_SPEED_MASK 0x0C000000
+#define PORTSCX_LINE_STATUS_MASK 0x00000C00
+
+/* USB MODE Register Bit Masks */
+#define USBMODE_CTRL_MODE_IDLE 0x00000000
+#define USBMODE_CTRL_MODE_DEVICE 0x00000002
+#define USBMODE_CTRL_MODE_HOST 0x00000003
+#define USBMODE_CTRL_MODE_RSV 0x00000001
+#define USBMODE_SETUP_LOCK_OFF 0x00000008
+#define USBMODE_STREAM_DISABLE 0x00000010
+
+/* USB STS Register Bit Masks */
+#define USBSTS_INT 0x00000001
+#define USBSTS_ERR 0x00000002
+#define USBSTS_PORT_CHANGE 0x00000004
+#define USBSTS_FRM_LST_ROLL 0x00000008
+#define USBSTS_SYS_ERR 0x00000010
+#define USBSTS_IAA 0x00000020
+#define USBSTS_RESET 0x00000040
+#define USBSTS_SOF 0x00000080
+#define USBSTS_SUSPEND 0x00000100
+#define USBSTS_HC_HALTED 0x00001000
+#define USBSTS_RCL 0x00002000
+#define USBSTS_PERIODIC_SCHEDULE 0x00004000
+#define USBSTS_ASYNC_SCHEDULE 0x00008000
+
+
+/* Interrupt Enable Register Bit Masks */
+#define USBINTR_INT_EN (0x00000001)
+#define USBINTR_ERR_INT_EN (0x00000002)
+#define USBINTR_PORT_CHANGE_DETECT_EN (0x00000004)
+#define USBINTR_SYS_ERR (0x00000010)
+
+#define USBINTR_ASYNC_ADV_AAE (0x00000020)
+#define USBINTR_ASYNC_ADV_AAE_ENABLE (0x00000020)
+#define USBINTR_ASYNC_ADV_AAE_DISABLE (0xFFFFFFDF)
+
+#define USBINTR_RESET_EN (0x00000040)
+#define USBINTR_SOF_UFRAME_EN (0x00000080)
+#define USBINTR_DEVICE_SUSPEND (0x00000100)
+
+#define USB_DEVICE_ADDRESS_MASK (0xfe000000)
+#define USB_DEVICE_ADDRESS_BIT_SHIFT (25)
|
|
+#define USB_DEVICE_ADDRESS_USBADR (0x1 << 24)
|
|
+
|
|
+struct mv_cap_regs {
|
|
+ u32 caplength_hciversion;
|
|
+ u32 hcsparams; /* HC structural parameters */
|
|
+ u32 hccparams; /* HC Capability Parameters*/
|
|
+ u32 reserved[5];
|
|
+ u32 dciversion; /* DC version number and reserved 16 bits */
|
|
+ u32 dccparams; /* DC Capability Parameters */
|
|
+};
|
|
+
|
|
+struct mv_op_regs {
|
|
+ u32 usbcmd; /* Command register */
|
|
+ u32 usbsts; /* Status register */
|
|
+ u32 usbintr; /* Interrupt enable */
|
|
+ u32 frindex; /* Frame index */
|
|
+ u32 reserved1[1];
|
|
+ u32 deviceaddr; /* Device Address */
|
|
+ u32 eplistaddr; /* Endpoint List Address */
|
|
+ u32 ttctrl; /* HOST TT status and control */
|
|
+ u32 burstsize; /* Programmable Burst Size */
|
|
+ u32 txfilltuning; /* Host Transmit Pre-Buffer Packet Tuning */
|
|
+ u32 reserved[4];
|
|
+ u32 epnak; /* Endpoint NAK */
|
|
+ u32 epnaken; /* Endpoint NAK Enable */
|
|
+ u32 configflag; /* Configured Flag register */
|
|
+ u32 portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
|
|
+ u32 otgsc;
|
|
+ u32 usbmode; /* USB Host/Device mode */
|
|
+ u32 epsetupstat; /* Endpoint Setup Status */
|
|
+ u32 epprime; /* Endpoint Initialize */
|
|
+ u32 epflush; /* Endpoint De-initialize */
|
|
+ u32 epstatus; /* Endpoint Status */
|
|
+ u32 epcomplete; /* Endpoint Interrupt On Complete */
|
|
+ u32 epctrlx[16]; /* Endpoint Control, where x = 0.. 15 */
|
|
+};
|
|
+
|
|
+struct mv_udc {
|
|
+ struct usb_gadget gadget;
|
|
+ struct usb_gadget_driver *driver;
|
|
+ spinlock_t lock;
|
|
+ struct completion *done;
|
|
+ struct platform_device *dev;
|
|
+ int irq;
|
|
+
|
|
+ struct mv_cap_regs __iomem *cap_regs;
|
|
+ struct mv_op_regs __iomem *op_regs;
|
|
+ unsigned int max_eps;
|
|
+ struct mv_dqh *ep_dqh;
|
|
+ size_t ep_dqh_size;
|
|
+ dma_addr_t ep_dqh_dma;
|
|
+
|
|
+ struct dma_pool *dtd_pool;
|
|
+ struct mv_ep *eps;
|
|
+
|
|
+ struct mv_dtd *dtd_head;
|
|
+ struct mv_dtd *dtd_tail;
|
|
+ unsigned int dtd_entries;
|
|
+
|
|
+ struct mv_req *status_req;
|
|
+ struct usb_ctrlrequest local_setup_buff;
|
|
+
|
|
+ unsigned int resume_state; /* USB state to resume */
|
|
+ unsigned int usb_state; /* USB current state */
|
|
+ unsigned int ep0_state; /* Endpoint zero state */
|
|
+ unsigned int ep0_dir;
|
|
+
|
|
+ unsigned int dev_addr;
|
|
+ unsigned int test_mode;
|
|
+
|
|
+ int errors;
|
|
+
|
|
+ unsigned int softconnect;
|
|
+ unsigned int vbus_active;
|
|
+ unsigned int remote_wakeup;
|
|
+ unsigned int selfpowered;
|
|
+ unsigned int softconnected;
|
|
+ unsigned int force_fs;
|
|
+ unsigned int clock_gating;
|
|
+ unsigned int active;
|
|
+ unsigned int stopped; /* stop bit is set */
|
|
+
|
|
+ struct work_struct vbus_work;
|
|
+ struct workqueue_struct *qwork;
|
|
+
|
|
+ unsigned int power;
|
|
+ unsigned int charger_type;
|
|
+ struct delayed_work delayed_charger_work;
|
|
+
|
|
+ struct work_struct event_work;
|
|
+
|
|
+ struct usb_phy *phy;
|
|
+ struct usb_phy *transceiver;
|
|
+
|
|
+ struct mv_usb_platform_data *pdata;
|
|
+
|
|
+ struct notifier_block notifier;
|
|
+
|
|
+ /* some SoCs have multiple clock sources for USB */
|
|
+ struct clk *clk;
|
|
+
|
|
+ /* reset control for USB */
|
|
+ struct reset_control *reset;
|
|
+
|
|
+ /* power supply used to detect charger type */
|
|
+ struct power_supply udc_psy;
|
|
+
|
|
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
|
|
+ struct mv_udc_stats stats;
|
|
+#endif
|
|
+
|
|
+ /* for vbus detection */
|
|
+ struct extcon_specific_cable_nb vbus_dev;
|
|
+ struct extcon_dev *extcon;
|
|
+};
|
|
+
|
|
+/* endpoint data structure */
|
|
+struct mv_ep {
|
|
+ struct usb_ep ep;
|
|
+ struct mv_udc *udc;
|
|
+ struct list_head queue;
|
|
+ struct mv_dqh *dqh;
|
|
+ u32 direction;
|
|
+ char name[14];
|
|
+ unsigned stopped:1,
|
|
+ wedge:1,
|
|
+ ep_type:2,
|
|
+ ep_num:8;
|
|
+};
|
|
+
|
|
+/* request data structure */
|
|
+struct mv_req {
|
|
+ struct usb_request req;
|
|
+ struct mv_dtd *dtd, *head, *tail;
|
|
+ struct mv_ep *ep;
|
|
+ struct list_head queue;
|
|
+ unsigned int test_mode;
|
|
+ unsigned dtd_count;
|
|
+ unsigned mapped:1;
|
|
+};
|
|
+
|
|
+#define EP_QUEUE_HEAD_MULT_POS 30
|
|
+#define EP_QUEUE_HEAD_ZLT_SEL 0x20000000
|
|
+#define EP_QUEUE_HEAD_MAX_PKT_LEN_POS 16
|
|
+#define EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info) (((ep_info)>>16)&0x07ff)
|
|
+#define EP_QUEUE_HEAD_IOS 0x00008000
|
|
+#define EP_QUEUE_HEAD_NEXT_TERMINATE 0x00000001
|
|
+#define EP_QUEUE_HEAD_IOC 0x00008000
|
|
+#define EP_QUEUE_HEAD_MULTO 0x00000C00
|
|
+#define EP_QUEUE_HEAD_STATUS_HALT 0x00000040
|
|
+#define EP_QUEUE_HEAD_STATUS_ACTIVE 0x00000080
|
|
+#define EP_QUEUE_CURRENT_OFFSET_MASK 0x00000FFF
|
|
+#define EP_QUEUE_HEAD_NEXT_POINTER_MASK 0xFFFFFFE0
|
|
+#define EP_QUEUE_FRINDEX_MASK 0x000007FF
|
|
+#define EP_MAX_LENGTH_TRANSFER 0x4000
|
|
+
|
|
+struct mv_dqh {
|
|
+ /* Bits 16..26: max packet length; Bit 15: Interrupt On Setup */
|
|
+ u32 max_packet_length;
|
|
+ u32 curr_dtd_ptr; /* Current dTD Pointer */
|
|
+ u32 next_dtd_ptr; /* Next dTD Pointer */
|
|
+ /* Total bytes (16..30), IOC (15), INT (8), STS (0-7) */
|
|
+ u32 size_ioc_int_sts;
|
|
+ u32 buff_ptr0; /* Buffer pointer Page 0 (12-31) */
|
|
+ u32 buff_ptr1; /* Buffer pointer Page 1 (12-31) */
|
|
+ u32 buff_ptr2; /* Buffer pointer Page 2 (12-31) */
|
|
+ u32 buff_ptr3; /* Buffer pointer Page 3 (12-31) */
|
|
+ u32 buff_ptr4; /* Buffer pointer Page 4 (12-31) */
|
|
+ u32 reserved1;
|
|
+ /* 8 bytes of setup data that follows the Setup PID */
|
|
+ u8 setup_buffer[8];
|
|
+ u32 reserved2[4];
|
|
+};
|
|
+
|
|
+
|
|
+#define DTD_NEXT_TERMINATE (0x00000001)
|
|
+#define DTD_IOC (0x00008000)
|
|
+#define DTD_STATUS_ACTIVE (0x00000080)
|
|
+#define DTD_STATUS_HALTED (0x00000040)
|
|
+#define DTD_STATUS_DATA_BUFF_ERR (0x00000020)
|
|
+#define DTD_STATUS_TRANSACTION_ERR (0x00000008)
|
|
+#define DTD_RESERVED_FIELDS (0x00007F00)
|
|
+#define DTD_ERROR_MASK (0x68)
|
|
+#define DTD_ADDR_MASK (0xFFFFFFE0)
|
|
+#define DTD_PACKET_SIZE 0x7FFF0000
|
|
+#define DTD_LENGTH_BIT_POS (16)
|
|
+
|
|
+struct mv_dtd {
|
|
+ u32 dtd_next;
|
|
+ u32 size_ioc_sts;
|
|
+ u32 buff_ptr0; /* Buffer pointer Page 0 */
|
|
+ u32 buff_ptr1; /* Buffer pointer Page 1 */
|
|
+ u32 buff_ptr2; /* Buffer pointer Page 2 */
|
|
+ u32 buff_ptr3; /* Buffer pointer Page 3 */
|
|
+ u32 buff_ptr4; /* Buffer pointer Page 4 */
|
|
+ u32 scratch_ptr;
|
|
+ /* 32 bytes */
|
|
+ dma_addr_t td_dma; /* dma address for this td */
|
|
+ struct mv_dtd *next_dtd_virt;
|
|
+};
|
|
+
|
|
+static inline uint32_t UDC_READ_REG32(uint32_t volatile *reg)
|
|
+{
|
|
+ return readl(reg);
|
|
+}
|
|
+
|
|
+static inline void DWC_WRITE_REG32(uint32_t volatile *reg, uint32_t value)
|
|
+{
|
|
+ writel(value, reg);
|
|
+}
|
|
+
|
|
+static inline void UDC_MODIFY_REG32(uint32_t volatile *reg, uint32_t clear_mask, uint32_t set_mask)
|
|
+{
|
|
+ writel((readl(reg) & ~clear_mask) | set_mask, reg);
|
|
+}
|
|
+#endif
|
|
diff --git a/drivers/usb/gadget/udc/k1x_udc_core.c b/drivers/usb/gadget/udc/k1x_udc_core.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/gadget/udc/k1x_udc_core.c
|
|
@@ -0,0 +1,2690 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-or-later
|
|
+/*
|
|
+ * UDC support for Spacemit k1x SoCs
|
|
+ *
|
|
+ * Copyright (c) 2023 Spacemit Inc.
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/pci.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/dmapool.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/ioport.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/timer.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/moduleparam.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/usb/ch9.h>
|
|
+#include <linux/usb/gadget.h>
|
|
+#include <linux/usb/otg.h>
|
|
+#include <linux/pm.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/irq.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/platform_data/k1x_ci_usb.h>
|
|
+#include <asm/unaligned.h>
|
|
+#include <dt-bindings/usb/k1x_ci_usb.h>
|
|
+#include <linux/power_supply.h>
|
|
+#include <linux/reset.h>
|
|
+#include <linux/extcon.h>
|
|
+#include <linux/extcon-provider.h>
|
|
+
|
|
+#include "k1x_ci_udc.h"
|
|
+
|
|
+#define DRIVER_DESC "K1x USB Device Controller driver"
|
|
+
|
|
+#define ep_dir(ep) (((ep)->ep_num == 0) ? \
|
|
+ ((ep)->udc->ep0_dir) : ((ep)->direction))
|
|
+
|
|
+/* timeout value -- usec */
|
|
+#define RESET_TIMEOUT 10000
|
|
+#define FLUSH_TIMEOUT 10000
|
|
+#define EPSTATUS_TIMEOUT 10000
|
|
+#define PRIME_TIMEOUT 10000
|
|
+#define READSAFE_TIMEOUT 1000
|
|
+#define MAX_EPPRIME_TIMES 100000
|
|
+
|
|
+#define LOOPS_USEC_SHIFT 1
|
|
+#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
|
|
+#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
|
|
+#define ENUMERATION_DELAY (2 * HZ)
|
|
+
|
|
+static DECLARE_COMPLETION(release_done);
|
|
+
|
|
+static const char driver_name[] = "mv_udc";
|
|
+static const char driver_desc[] = DRIVER_DESC;
|
|
+
|
|
+/* controller device global variable */
|
|
+static struct mv_udc *the_controller;
|
|
+
|
|
+static int mv_udc_enable(struct mv_udc *udc);
|
|
+static void mv_udc_disable(struct mv_udc *udc);
|
|
+
|
|
+static void nuke(struct mv_ep *ep, int status);
|
|
+static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
|
|
+
|
|
+/* for endpoint 0 operations */
|
|
+static const struct usb_endpoint_descriptor mv_ep0_desc = {
|
|
+ .bLength = USB_DT_ENDPOINT_SIZE,
|
|
+ .bDescriptorType = USB_DT_ENDPOINT,
|
|
+ .bEndpointAddress = 0,
|
|
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
|
|
+ .wMaxPacketSize = EP0_MAX_PKT_SIZE,
|
|
+};
|
|
+
|
|
+static void ep0_reset(struct mv_udc *udc)
|
|
+{
|
|
+ struct mv_ep *ep;
|
|
+ u32 epctrlx;
|
|
+ int i = 0;
|
|
+
|
|
+ /* ep0 in and out */
|
|
+ for (i = 0; i < 2; i++) {
|
|
+ ep = &udc->eps[i];
|
|
+ ep->udc = udc;
|
|
+
|
|
+ /* ep0 dQH */
|
|
+ ep->dqh = &udc->ep_dqh[i];
|
|
+
|
|
+ /* configure ep0 endpoint capabilities in dQH */
|
|
+ ep->dqh->max_packet_length =
|
|
+ (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
|
|
+ | EP_QUEUE_HEAD_IOS | EP_QUEUE_HEAD_ZLT_SEL;
|
|
+
|
|
+ ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
|
|
+
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[0]);
|
|
+ if (i) { /* TX */
|
|
+ epctrlx |= EPCTRL_TX_ENABLE
|
|
+ | (USB_ENDPOINT_XFER_CONTROL
|
|
+ << EPCTRL_TX_EP_TYPE_SHIFT);
|
|
+
|
|
+ } else { /* RX */
|
|
+ epctrlx |= EPCTRL_RX_ENABLE
|
|
+ | (USB_ENDPOINT_XFER_CONTROL
|
|
+ << EPCTRL_RX_EP_TYPE_SHIFT);
|
|
+ }
|
|
+
|
|
+ writel(epctrlx, &udc->op_regs->epctrlx[0]);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* protocol ep0 stall, will automatically be cleared on new transaction */
|
|
+static void ep0_stall(struct mv_udc *udc)
|
|
+{
|
|
+ u32 epctrlx;
|
|
+
|
|
+ /* set TX and RX to stall */
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[0]);
|
|
+ epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
|
|
+ writel(epctrlx, &udc->op_regs->epctrlx[0]);
|
|
+
|
|
+ /* update ep0 state */
|
|
+ udc->ep0_state = WAIT_FOR_SETUP;
|
|
+ udc->ep0_dir = EP_DIR_OUT;
|
|
+}
|
|
+
|
|
+static int hw_ep_prime(struct mv_udc *udc, u32 bit_pos)
|
|
+{
|
|
+ u32 prime_times = 0;
|
|
+
|
|
+ writel(bit_pos, &udc->op_regs->epprime);
|
|
+
|
|
+ while (readl(&udc->op_regs->epprime) & bit_pos) {
|
|
+ cpu_relax();
|
|
+ prime_times++;
|
|
+ if (prime_times > MAX_EPPRIME_TIMES) {
|
|
+ dev_err(&udc->dev->dev, "epprime out of time\n");
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int process_ep_req(struct mv_udc *udc, int index,
|
|
+ struct mv_req *curr_req)
|
|
+{
|
|
+ struct mv_dtd *curr_dtd;
|
|
+ struct mv_dqh *curr_dqh;
|
|
+ int actual, remaining_length;
|
|
+ int i, direction;
|
|
+ int retval = 0;
|
|
+ u32 errors;
|
|
+ u32 bit_pos;
|
|
+
|
|
+ curr_dqh = &udc->ep_dqh[index];
|
|
+ direction = index % 2;
|
|
+
|
|
+ curr_dtd = curr_req->head;
|
|
+ actual = curr_req->req.length;
|
|
+
|
|
+ for (i = 0; i < curr_req->dtd_count; i++) {
|
|
+ if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
|
|
+ dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
|
|
+ udc->eps[index].name);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
|
|
+ if (!errors) {
|
|
+ remaining_length =
|
|
+ (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
|
|
+ >> DTD_LENGTH_BIT_POS;
|
|
+ actual -= remaining_length;
|
|
+
|
|
+ if (remaining_length) {
|
|
+ if (direction) {
|
|
+ dev_dbg(&udc->dev->dev,
|
|
+ "TX dTD remains data\n");
|
|
+ retval = -EPROTO;
|
|
+ break;
|
|
+ } else
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+ dev_info(&udc->dev->dev,
|
|
+ "complete_tr error: ep=%d %s: error = 0x%x\n",
|
|
+ index >> 1, direction ? "SEND" : "RECV",
|
|
+ errors);
|
|
+ if (errors & DTD_STATUS_HALTED) {
|
|
+ /* Clear the errors and Halt condition */
|
|
+ curr_dqh->size_ioc_int_sts &= ~errors;
|
|
+ retval = -EPIPE;
|
|
+ } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
|
|
+ retval = -EPROTO;
|
|
+ } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
|
|
+ retval = -EILSEQ;
|
|
+ }
|
|
+ }
|
|
+ if (i != curr_req->dtd_count - 1)
|
|
+ curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
|
|
+ }
|
|
+ if (retval)
|
|
+ return retval;
|
|
+
|
|
+ if (direction == EP_DIR_OUT)
|
|
+ bit_pos = 1 << curr_req->ep->ep_num;
|
|
+ else
|
|
+ bit_pos = 1 << (16 + curr_req->ep->ep_num);
|
|
+
|
|
+ while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
|
|
+ if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
|
|
+ while (readl(&udc->op_regs->epstatus) & bit_pos)
|
|
+ udelay(1);
|
|
+ break;
|
|
+ } else {
|
|
+ if (!(readl(&udc->op_regs->epstatus) & bit_pos)) {
|
|
+ /* The DMA engine thinks there is no more dTD */
|
|
+ curr_dqh->next_dtd_ptr = curr_dtd->dtd_next
|
|
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
|
|
+
|
|
+ /* clear active and halt bit */
|
|
+ curr_dqh->size_ioc_int_sts &=
|
|
+ ~(DTD_STATUS_ACTIVE
|
|
+ | DTD_STATUS_HALTED);
|
|
+
|
|
+ /* Do prime again */
|
|
+ wmb();
|
|
+
|
|
+ hw_ep_prime(udc, bit_pos);
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ udelay(1);
|
|
+ }
|
|
+
|
|
+ curr_req->req.actual = actual;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * done() - retire a request; caller blocked irqs
|
|
+ * @status : request status to be set, only works when
|
|
+ * request is still in progress.
|
|
+ */
|
|
+static int done(struct mv_ep *ep, struct mv_req *req, int status)
|
|
+ __releases(&ep->udc->lock)
|
|
+ __acquires(&ep->udc->lock)
|
|
+{
|
|
+ struct mv_udc *udc = NULL;
|
|
+ unsigned char stopped = ep->stopped;
|
|
+ struct mv_dtd *curr_td, *next_td;
|
|
+ int j;
|
|
+
|
|
+ udc = (struct mv_udc *)ep->udc;
|
|
+
|
|
+ /* Removed the req from mv_ep->queue */
|
|
+ list_del_init(&req->queue);
|
|
+
|
|
+ if (req->req.dma == DMA_ADDR_INVALID && req->mapped == 0) {
|
|
+ dev_info(&udc->dev->dev, "%s request %p already unmapped",
|
|
+ ep->name, req);
|
|
+ WARN_ON_ONCE(1);
|
|
+ return -ESHUTDOWN;
|
|
+ }
|
|
+
|
|
+ /* req.status should be set as -EINPROGRESS in ep_queue() */
|
|
+ if (req->req.status == -EINPROGRESS)
|
|
+ req->req.status = status;
|
|
+ else
|
|
+ status = req->req.status;
|
|
+
|
|
+ /* Free dtd for the request */
|
|
+ next_td = req->head;
|
|
+ for (j = 0; j < req->dtd_count; j++) {
|
|
+ curr_td = next_td;
|
|
+ if (j != req->dtd_count - 1)
|
|
+ next_td = curr_td->next_dtd_virt;
|
|
+ dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
|
|
+ }
|
|
+
|
|
+ usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
|
|
+ req->req.dma = DMA_ADDR_INVALID;
|
|
+ req->mapped = 0;
|
|
+
|
|
+ if (status && (status != -ESHUTDOWN))
|
|
+ dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
|
|
+ ep->ep.name, &req->req, status,
|
|
+ req->req.actual, req->req.length);
|
|
+
|
|
+ ep->stopped = 1;
|
|
+
|
|
+ spin_unlock(&ep->udc->lock);
|
|
+
|
|
+ if (!(list_empty(&ep->queue))) {
|
|
+ struct mv_req *curr_req, *temp_req;
|
|
+ u32 bit_pos, direction;
|
|
+ struct mv_dqh *dqh;
|
|
+
|
|
+ direction = ep_dir(ep);
|
|
+ dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
|
|
+ bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
|
|
+
|
|
+ if ((readl(&udc->op_regs->epstatus) & bit_pos) || (readl(&udc->op_regs->epprime) & bit_pos))
|
|
+ goto skip_prime_again;
|
|
+
|
|
+ list_for_each_entry_safe(curr_req, temp_req, &ep->queue, queue)
|
|
+ if (curr_req->head->size_ioc_sts & DTD_STATUS_ACTIVE) {
|
|
+ dqh->next_dtd_ptr = curr_req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
|
|
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
|
|
+ wmb();
|
|
+ hw_ep_prime(udc, bit_pos);
|
|
+ pr_debug("done: prime ep again: ENDPTSTAT = 0x%x\n", readl(&udc->op_regs->epstatus) & bit_pos);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+skip_prime_again:
|
|
+ /*
|
|
+ * complete() is from gadget layer,
|
|
+ * eg fsg->bulk_in_complete()
|
|
+ */
|
|
+ if (req->req.complete)
|
|
+ req->req.complete(&ep->ep, &req->req);
|
|
+
|
|
+ spin_lock(&ep->udc->lock);
|
|
+ ep->stopped = stopped;
|
|
+
|
|
+ if (udc->active)
|
|
+ return 0;
|
|
+ else
|
|
+ return -ESHUTDOWN;
|
|
+}
|
|
+
|
|
+static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ struct mv_dqh *dqh;
|
|
+ struct mv_req *curr_req, *temp_req;
|
|
+ u32 find_missing_dtd = 0;
|
|
+ u32 bit_pos, direction;
|
|
+ u32 epstatus;
|
|
+ int retval = 0;
|
|
+
|
|
+ udc = ep->udc;
|
|
+ direction = ep_dir(ep);
|
|
+ dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
|
|
+ bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
|
|
+
|
|
+ /* check if the pipe is empty */
|
|
+ if (!(list_empty(&ep->queue))) {
|
|
+ struct mv_req *lastreq;
|
|
+ lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
|
|
+ lastreq->tail->dtd_next =
|
|
+ req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
|
|
+
|
|
+ wmb();
|
|
+
|
|
+ if (readl(&udc->op_regs->epprime) & bit_pos)
|
|
+ goto done;
|
|
+
|
|
+ epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
|
|
+ if (epstatus)
|
|
+ goto done;
|
|
+
|
|
+ /* Check if there are missing dTDs in the queue that were not primed */
|
|
+ list_for_each_entry_safe(curr_req, temp_req, &ep->queue, queue)
|
|
+ if (curr_req->head->size_ioc_sts & DTD_STATUS_ACTIVE) {
|
|
+ pr_info("There are missing dTD need to be primed!\n");
|
|
+ find_missing_dtd = 1;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Write dQH next pointer and terminate bit to 0 */
|
|
+ if (unlikely(find_missing_dtd))
|
|
+ dqh->next_dtd_ptr = curr_req->head->td_dma
|
|
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
|
|
+ else
|
|
+ dqh->next_dtd_ptr = req->head->td_dma
|
|
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
|
|
+
|
|
+ /* clear active and halt bit, in case set from a previous error */
|
|
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
|
|
+
|
|
+ /* Ensure that updates to the QH will occur before priming. */
|
|
+ wmb();
|
|
+
|
|
+ /* Prime the Endpoint */
|
|
+ hw_ep_prime(udc, bit_pos);
|
|
+done:
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
|
|
+ dma_addr_t *dma, int *is_last)
|
|
+{
|
|
+ struct mv_dtd *dtd;
|
|
+ struct mv_udc *udc;
|
|
+ struct mv_dqh *dqh;
|
|
+ u32 temp, mult = 0;
|
|
+
|
|
+ /* how big will this transfer be? */
|
|
+ if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
|
|
+ dqh = req->ep->dqh;
|
|
+ mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
|
|
+ & 0x3;
|
|
+ *length = min(req->req.length - req->req.actual,
|
|
+ (unsigned)(mult * req->ep->ep.maxpacket));
|
|
+ } else
|
|
+ *length = min(req->req.length - req->req.actual,
|
|
+ (unsigned)EP_MAX_LENGTH_TRANSFER);
|
|
+
|
|
+ udc = req->ep->udc;
|
|
+
|
|
+ /*
|
|
+ * Be careful that no _GFP_HIGHMEM is set,
|
|
+ * or we cannot use dma_to_virt
|
|
+ */
|
|
+ dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
|
|
+ if (dtd == NULL)
|
|
+ return dtd;
|
|
+
|
|
+ dtd->td_dma = *dma;
|
|
+ /* initialize buffer page pointers */
|
|
+ temp = (u32)(req->req.dma + req->req.actual);
|
|
+ dtd->buff_ptr0 = cpu_to_le32(temp);
|
|
+ temp &= ~0xFFF;
|
|
+ dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
|
|
+ dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
|
|
+ dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
|
|
+ dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
|
|
+
|
|
+ req->req.actual += *length;
|
|
+
|
|
+ /* zlp is needed if req->req.zero is set */
|
|
+ if (req->req.zero) {
|
|
+ if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
|
|
+ *is_last = 1;
|
|
+ else
|
|
+ *is_last = 0;
|
|
+ } else if (req->req.length == req->req.actual)
|
|
+ *is_last = 1;
|
|
+ else
|
|
+ *is_last = 0;
|
|
+
|
|
+ /* Fill in the transfer size; set active bit */
|
|
+ temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
|
|
+
|
|
+ /* Enable interrupt for the last dtd of a request */
|
|
+ if (*is_last && !req->req.no_interrupt)
|
|
+ temp |= DTD_IOC;
|
|
+
|
|
+ temp |= mult << 10;
|
|
+
|
|
+ dtd->size_ioc_sts = temp;
|
|
+
|
|
+ mb();
|
|
+
|
|
+ return dtd;
|
|
+}
|
|
+
|
|
+/* generate dTD linked list for a request */
|
|
+static int req_to_dtd(struct mv_req *req)
|
|
+{
|
|
+ unsigned count;
|
|
+ int is_last, is_first = 1;
|
|
+ struct mv_dtd *dtd, *last_dtd = NULL;
|
|
+ dma_addr_t dma;
|
|
+
|
|
+ do {
|
|
+ dtd = build_dtd(req, &count, &dma, &is_last);
|
|
+ if (dtd == NULL)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ if (is_first) {
|
|
+ is_first = 0;
|
|
+ req->head = dtd;
|
|
+ } else {
|
|
+ last_dtd->dtd_next = dma;
|
|
+ last_dtd->next_dtd_virt = dtd;
|
|
+ }
|
|
+ last_dtd = dtd;
|
|
+ req->dtd_count++;
|
|
+ } while (!is_last);
|
|
+
|
|
+ /* set terminate bit to 1 for the last dTD */
|
|
+ dtd->dtd_next = DTD_NEXT_TERMINATE;
|
|
+
|
|
+ req->tail = dtd;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_ep_enable(struct usb_ep *_ep,
|
|
+ const struct usb_endpoint_descriptor *desc)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ struct mv_ep *ep;
|
|
+ struct mv_dqh *dqh;
|
|
+ u16 max = 0;
|
|
+ u32 bit_pos, epctrlx, direction;
|
|
+ const unsigned char zlt = 1;
|
|
+ unsigned char ios, mult;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ ep = container_of(_ep, struct mv_ep, ep);
|
|
+ udc = ep->udc;
|
|
+
|
|
+ if (!_ep || !desc
|
|
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
|
|
+ return -ESHUTDOWN;
|
|
+
|
|
+ direction = ep_dir(ep);
|
|
+ max = usb_endpoint_maxp(desc);
|
|
+ pr_debug("mv_ep_enable: %d MPS= 0x%x \n", ep->ep_num, max);
|
|
+
|
|
+ /*
|
|
+ * disable HW zero length termination select
|
|
+ * driver handles zero length packet through req->req.zero
|
|
+ */
|
|
+ bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
|
|
+
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+
|
|
+ if (!udc->active) {
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ return -ESHUTDOWN;
|
|
+ }
|
|
+
|
|
+ /* Check if the Endpoint is Primed */
|
|
+ if ((readl(&udc->op_regs->epprime) & bit_pos)
|
|
+ || (readl(&udc->op_regs->epstatus) & bit_pos)) {
|
|
+ dev_info(&udc->dev->dev,
|
|
+ "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
|
|
+ " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
|
|
+ (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
|
|
+ (unsigned)readl(&udc->op_regs->epprime),
|
|
+ (unsigned)readl(&udc->op_regs->epstatus),
|
|
+ (unsigned)bit_pos);
|
|
+ goto en_done;
|
|
+ }
|
|
+ /* Set the max packet length, interrupt on Setup and Mult fields */
|
|
+ ios = 0;
|
|
+ mult = 0;
|
|
+ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
|
|
+ case USB_ENDPOINT_XFER_BULK:
|
|
+ case USB_ENDPOINT_XFER_INT:
|
|
+ break;
|
|
+ case USB_ENDPOINT_XFER_CONTROL:
|
|
+ ios = 1;
|
|
+ break;
|
|
+ case USB_ENDPOINT_XFER_ISOC:
|
|
+ /* Calculate transactions needed for high bandwidth iso */
|
|
+ mult = (unsigned char)(1 + ((max >> 11) & 0x03));
|
|
+ max = max & 0x7ff; /* bit 0~10 */
|
|
+ /* 3 transactions at most */
|
|
+ if (mult > 3)
|
|
+ goto en_done;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* Get the endpoint queue head address */
|
|
+ dqh = ep->dqh;
|
|
+ dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
|
|
+ | (mult << EP_QUEUE_HEAD_MULT_POS)
|
|
+ | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
|
|
+ | (ios ? EP_QUEUE_HEAD_IOS : 0);
|
|
+ dqh->next_dtd_ptr = 1;
|
|
+ dqh->size_ioc_int_sts = 0;
|
|
+
|
|
+ ep->ep.maxpacket = max;
|
|
+ ep->ep.desc = desc;
|
|
+ ep->stopped = 0;
|
|
+
|
|
+ /* Enable the endpoint for Rx or Tx and set the endpoint type */
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
|
|
+ if (direction == EP_DIR_IN) {
|
|
+ epctrlx &= ~EPCTRL_TX_ALL_MASK;
|
|
+ epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
|
|
+ | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
|
|
+ << EPCTRL_TX_EP_TYPE_SHIFT);
|
|
+ } else {
|
|
+ epctrlx &= ~EPCTRL_RX_ALL_MASK;
|
|
+ epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
|
|
+ | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
|
|
+ << EPCTRL_RX_EP_TYPE_SHIFT);
|
|
+ }
|
|
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
|
|
+
|
|
+ /*
|
|
+ * Implement Guideline (GL# USB-7) The unused endpoint type must
|
|
+ * be programmed to bulk.
|
|
+ */
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
|
|
+ if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
|
|
+ epctrlx |= (USB_ENDPOINT_XFER_BULK
|
|
+ << EPCTRL_RX_EP_TYPE_SHIFT);
|
|
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
|
|
+ }
|
|
+
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
|
|
+ if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
|
|
+ epctrlx |= (USB_ENDPOINT_XFER_BULK
|
|
+ << EPCTRL_TX_EP_TYPE_SHIFT);
|
|
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
|
|
+ }
|
|
+
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+
|
|
+ return 0;
|
|
+en_done:
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+static int mv_ep_disable(struct usb_ep *_ep)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ struct mv_ep *ep;
|
|
+ struct mv_dqh *dqh;
|
|
+ u32 epctrlx, direction;
|
|
+ unsigned long flags;
|
|
+ u32 active;
|
|
+
|
|
+ ep = container_of(_ep, struct mv_ep, ep);
|
|
+ if ((_ep == NULL) || !ep->ep.desc)
|
|
+ return -EINVAL;
|
|
+
|
|
+ udc = ep->udc;
|
|
+
|
|
+#if 0
|
|
+ if (!udc->vbus_active) {
|
|
+ dev_dbg(&udc->dev->dev,
|
|
+ "usb already plug out!\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ /* Get the endpoint queue head address */
|
|
+ dqh = ep->dqh;
|
|
+
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+
|
|
+ active = udc->active;
|
|
+ if (!active)
|
|
+ mv_udc_enable(udc);
|
|
+
|
|
+ direction = ep_dir(ep);
|
|
+
|
|
+ /* Reset the max packet length and the interrupt on Setup */
|
|
+ dqh->max_packet_length = 0;
|
|
+
|
|
+ /* Disable the endpoint for Rx or Tx and reset the endpoint type */
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
|
|
+ epctrlx &= ~((direction == EP_DIR_IN)
|
|
+ ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
|
|
+ : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
|
|
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
|
|
+
|
|
+ pr_debug("mv_ep_disable: %d \n", ep->ep_num);
|
|
+
|
|
+ /* nuke all pending requests (does flush) */
|
|
+ nuke(ep, -ESHUTDOWN);
|
|
+
|
|
+ ep->ep.desc = NULL;
|
|
+ ep->stopped = 1;
|
|
+
|
|
+ if (!active)
|
|
+ mv_udc_disable(udc);
|
|
+
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct usb_request *
|
|
+mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
|
|
+{
|
|
+ struct mv_req *req = NULL;
|
|
+
|
|
+ req = kzalloc(sizeof *req, gfp_flags);
|
|
+ if (!req)
|
|
+ return NULL;
|
|
+
|
|
+ req->req.dma = DMA_ADDR_INVALID;
|
|
+ INIT_LIST_HEAD(&req->queue);
|
|
+
|
|
+ return &req->req;
|
|
+}
|
|
+
|
|
+static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
|
|
+{
|
|
+ struct mv_req *req = NULL;
|
|
+
|
|
+ req = container_of(_req, struct mv_req, req);
|
|
+
|
|
+ if (_req)
|
|
+ kfree(req);
|
|
+}
|
|
+
|
|
+static void mv_ep_fifo_flush(struct usb_ep *_ep)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ u32 bit_pos, direction;
|
|
+ struct mv_ep *ep;
|
|
+ unsigned int loops;
|
|
+
|
|
+ if (!_ep)
|
|
+ return;
|
|
+
|
|
+ ep = container_of(_ep, struct mv_ep, ep);
|
|
+ if (!ep->ep.desc)
|
|
+ return;
|
|
+
|
|
+ udc = ep->udc;
|
|
+ if (!udc->active)
|
|
+ return;
|
|
+
|
|
+ direction = ep_dir(ep);
|
|
+
|
|
+ if (ep->ep_num == 0)
|
|
+ bit_pos = (1 << 16) | 1;
|
|
+ else if (direction == EP_DIR_OUT)
|
|
+ bit_pos = 1 << ep->ep_num;
|
|
+ else
|
|
+ bit_pos = 1 << (16 + ep->ep_num);
|
|
+
|
|
+ loops = LOOPS(EPSTATUS_TIMEOUT);
|
|
+ do {
|
|
+ unsigned int inter_loops;
|
|
+
|
|
+ if (loops == 0) {
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
|
|
+ (unsigned)readl(&udc->op_regs->epstatus),
|
|
+ (unsigned)bit_pos);
|
|
+ return;
|
|
+ }
|
|
+ /* Write 1 to the Flush register */
|
|
+ writel(bit_pos, &udc->op_regs->epflush);
|
|
+
|
|
+ /* Wait until flushing completed */
|
|
+ inter_loops = LOOPS(FLUSH_TIMEOUT);
|
|
+ while (readl(&udc->op_regs->epflush)) {
|
|
+ /*
|
|
+ * ENDPTFLUSH bit should be cleared to indicate this
|
|
+ * operation is complete
|
|
+ */
|
|
+ if (inter_loops == 0) {
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "TIMEOUT for ENDPTFLUSH=0x%x,"
|
|
+ "bit_pos=0x%x\n",
|
|
+ (unsigned)readl(&udc->op_regs->epflush),
|
|
+ (unsigned)bit_pos);
|
|
+ return;
|
|
+ }
|
|
+ inter_loops--;
|
|
+ udelay(LOOPS_USEC);
|
|
+ }
|
|
+ loops--;
|
|
+ } while (readl(&udc->op_regs->epstatus) & bit_pos);
|
|
+
|
|
+ writel(bit_pos, &udc->op_regs->epcomplete);
|
|
+}
|
|
+
|
|
+/* queues (submits) an I/O request to an endpoint */
|
|
+static int
|
|
+mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
|
|
+{
|
|
+ struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
|
|
+ struct mv_req *req = container_of(_req, struct mv_req, req);
|
|
+ struct mv_udc *udc = ep->udc;
|
|
+ unsigned long flags;
|
|
+ int retval;
|
|
+
|
|
+ /* catch various bogus parameters */
|
|
+ if (!_req || !req->req.complete || !req->req.buf
|
|
+ || !list_empty(&req->queue)) {
|
|
+ dev_err(&udc->dev->dev, "%s, bad params", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (unlikely(!_ep || !ep->ep.desc)) {
|
|
+ dev_err(&udc->dev->dev, "%s, bad ep", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ udc = ep->udc;
|
|
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
|
|
+ return -ESHUTDOWN;
|
|
+
|
|
+ req->ep = ep;
|
|
+
|
|
+ /* map virtual address to hardware */
|
|
+ retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
|
|
+ if (retval)
|
|
+ return retval;
|
|
+ req->req.dma = _req->dma;
|
|
+ req->mapped = 1;
|
|
+
|
|
+ req->req.status = -EINPROGRESS;
|
|
+ req->req.actual = 0;
|
|
+ req->dtd_count = 0;
|
|
+
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+
|
|
+ if (udc->stopped || !udc->active || !ep->ep.desc) {
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ dev_info(&udc->dev->dev,
|
|
+ "udc or %s is already disabled!\n", ep->name);
|
|
+ retval = -EINVAL;
|
|
+ goto err_unmap_dma;
|
|
+ }
|
|
+
|
|
+ /* build dtds and push them to device queue */
|
|
+ if (!req_to_dtd(req)) {
|
|
+ retval = queue_dtd(ep, req);
|
|
+ if (retval) {
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ dev_err(&udc->dev->dev, "Failed to queue dtd\n");
|
|
+ goto err_unmap_dma;
|
|
+ }
|
|
+ } else {
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
|
|
+ retval = -ENOMEM;
|
|
+ goto err_unmap_dma;
|
|
+ }
|
|
+
|
|
+ /* Update ep0 state */
|
|
+ if (ep->ep_num == 0)
|
|
+ udc->ep0_state = DATA_STATE_XMIT;
|
|
+
|
|
+ /* irq handler advances the queue */
|
|
+ list_add_tail(&req->queue, &ep->queue);
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_unmap_dma:
|
|
+ usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
|
|
+ req->req.dma = DMA_ADDR_INVALID;
|
|
+ req->mapped = 0;
|
|
+
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
|
|
+{
|
|
+ struct mv_dqh *dqh = ep->dqh;
|
|
+ u32 bit_pos;
|
|
+
|
|
+ /* Write dQH next pointer and terminate bit to 0 */
|
|
+ dqh->next_dtd_ptr = req->head->td_dma
|
|
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
|
|
+
|
|
+ /* clear active and halt bit, in case set from a previous error */
|
|
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
|
|
+
|
|
+ /* Ensure that updates to the QH will occur before priming. */
|
|
+ wmb();
|
|
+
|
|
+ bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
|
|
+
|
|
+ /* Prime the Endpoint */
|
|
+ hw_ep_prime(ep->udc, bit_pos);
|
|
+}
|
|
+
|
|
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
|
|
+static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
|
|
+{
|
|
+ struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
|
|
+ struct mv_req *req;
|
|
+ struct mv_udc *udc = ep->udc;
|
|
+ unsigned long flags;
|
|
+ int stopped, ret = 0;
|
|
+ u32 epctrlx;
|
|
+
|
|
+ if (!_ep || !_req)
|
|
+ return -EINVAL;
|
|
+
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+ if (!udc->active) {
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* make sure it's actually queued on this endpoint */
|
|
+ list_for_each_entry(req, &ep->queue, queue) {
|
|
+ if (&req->req == _req)
|
|
+ break;
|
|
+ }
|
|
+ if (&req->req != _req) {
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ stopped = ep->stopped;
|
|
+
|
|
+ /* Stop the ep before we deal with the queue */
|
|
+ ep->stopped = 1;
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
|
|
+ if (ep_dir(ep) == EP_DIR_IN)
|
|
+ epctrlx &= ~EPCTRL_TX_ENABLE;
|
|
+ else
|
|
+ epctrlx &= ~EPCTRL_RX_ENABLE;
|
|
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
|
|
+
|
|
+ /* The request is in progress, or completed but not dequeued */
|
|
+ if (ep->queue.next == &req->queue) {
|
|
+ _req->status = -ECONNRESET;
|
|
+ mv_ep_fifo_flush(_ep); /* flush current transfer */
|
|
+
|
|
+ /* The request isn't the last request in this ep queue */
|
|
+ if (req->queue.next != &ep->queue) {
|
|
+ struct mv_req *next_req;
|
|
+
|
|
+ next_req = list_entry(req->queue.next,
|
|
+ struct mv_req, queue);
|
|
+
|
|
+ /* Point the QH to the first TD of next request */
|
|
+ mv_prime_ep(ep, next_req);
|
|
+ } else {
|
|
+ struct mv_dqh *qh;
|
|
+
|
|
+ qh = ep->dqh;
|
|
+ qh->next_dtd_ptr = 1;
|
|
+ qh->size_ioc_int_sts = 0;
|
|
+ }
|
|
+
|
|
+ /* The request hasn't been processed, patch up the TD chain */
|
|
+ } else {
|
|
+ struct mv_req *prev_req;
|
|
+
|
|
+ prev_req = list_entry(req->queue.prev, struct mv_req, queue);
|
|
+ writel(readl(&req->tail->dtd_next),
|
|
+ &prev_req->tail->dtd_next);
|
|
+
|
|
+ }
|
|
+
|
|
+ ret = done(ep, req, -ECONNRESET);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
+ /* Enable EP */
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
|
|
+ if (ep_dir(ep) == EP_DIR_IN)
|
|
+ epctrlx |= EPCTRL_TX_ENABLE;
|
|
+ else
|
|
+ epctrlx |= EPCTRL_RX_ENABLE;
|
|
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
|
|
+ ep->stopped = stopped;
|
|
+
|
|
+out:
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
|
|
+{
|
|
+ u32 epctrlx;
|
|
+
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
|
|
+
|
|
+ if (stall) {
|
|
+ if (direction == EP_DIR_IN)
|
|
+ epctrlx |= EPCTRL_TX_EP_STALL;
|
|
+ else
|
|
+ epctrlx |= EPCTRL_RX_EP_STALL;
|
|
+ } else {
|
|
+ if (direction == EP_DIR_IN) {
|
|
+ epctrlx &= ~EPCTRL_TX_EP_STALL;
|
|
+ epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
|
|
+ } else {
|
|
+ epctrlx &= ~EPCTRL_RX_EP_STALL;
|
|
+ epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
|
|
+ }
|
|
+ }
|
|
+ writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
|
|
+}
|
|
+
|
|
+static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
|
|
+{
|
|
+ u32 epctrlx;
|
|
+
|
|
+ epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
|
|
+
|
|
+ if (direction == EP_DIR_OUT)
|
|
+ return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
|
|
+ else
|
|
+ return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
|
|
+}
|
|
+
|
|
+static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
|
|
+{
|
|
+ struct mv_ep *ep;
|
|
+ unsigned long flags = 0;
|
|
+ int status = 0;
|
|
+ struct mv_udc *udc;
|
|
+
|
|
+ ep = container_of(_ep, struct mv_ep, ep);
|
|
+ udc = ep->udc;
|
|
+ if (!_ep || !ep->ep.desc) {
|
|
+ status = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
|
|
+ status = -EOPNOTSUPP;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Attempt to halt IN ep will fail if any transfer requests
|
|
+ * are still queued
|
|
+ */
|
|
+ if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
|
|
+ status = -EAGAIN;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&ep->udc->lock, flags);
|
|
+ ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
|
|
+ if (halt && wedge)
|
|
+ ep->wedge = 1;
|
|
+ else if (!halt)
|
|
+ ep->wedge = 0;
|
|
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
|
|
+
|
|
+ if (ep->ep_num == 0) {
|
|
+ udc->ep0_state = WAIT_FOR_SETUP;
|
|
+ udc->ep0_dir = EP_DIR_OUT;
|
|
+ }
|
|
+out:
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
|
|
+{
|
|
+ return mv_ep_set_halt_wedge(_ep, halt, 0);
|
|
+}
|
|
+
|
|
+static int mv_ep_set_wedge(struct usb_ep *_ep)
|
|
+{
|
|
+ return mv_ep_set_halt_wedge(_ep, 1, 1);
|
|
+}
|
|
+
|
|
+static struct usb_ep_ops mv_ep_ops = {
|
|
+ .enable = mv_ep_enable,
|
|
+ .disable = mv_ep_disable,
|
|
+
|
|
+ .alloc_request = mv_alloc_request,
|
|
+ .free_request = mv_free_request,
|
|
+
|
|
+ .queue = mv_ep_queue,
|
|
+ .dequeue = mv_ep_dequeue,
|
|
+
|
|
+ .set_wedge = mv_ep_set_wedge,
|
|
+ .set_halt = mv_ep_set_halt,
|
|
+ .fifo_flush = mv_ep_fifo_flush, /* flush fifo */
|
|
+};
|
|
+
|
|
+static int udc_clock_enable(struct mv_udc *udc)
|
|
+{
|
|
+ return clk_enable(udc->clk);
|
|
+}
|
|
+
|
|
+static void udc_clock_disable(struct mv_udc *udc)
|
|
+{
|
|
+ clk_disable(udc->clk);
|
|
+}
|
|
+
|
|
+static void udc_stop(struct mv_udc *udc)
|
|
+{
|
|
+ u32 tmp;
|
|
+
|
|
+ pr_info("udc_stop ...\n");
|
|
+ /* Disable interrupts */
|
|
+ tmp = readl(&udc->op_regs->usbintr);
|
|
+ tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
|
|
+ USBINTR_PORT_CHANGE_DETECT_EN |
|
|
+ USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND);
|
|
+ writel(tmp, &udc->op_regs->usbintr);
|
|
+
|
|
+ udc->stopped = 1;
|
|
+
|
|
+ /* Reset the Run bit in the command register to stop VUSB */
|
|
+ tmp = readl(&udc->op_regs->usbcmd);
|
|
+ tmp &= ~USBCMD_RUN_STOP;
|
|
+ writel(tmp, &udc->op_regs->usbcmd);
|
|
+}
|
|
+
|
|
+static void udc_start(struct mv_udc *udc)
|
|
+{
|
|
+ u32 usbintr;
|
|
+
|
|
+ pr_info("udc_start ...\n");
|
|
+ usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN | USBINTR_SYS_ERR
|
|
+ | USBINTR_PORT_CHANGE_DETECT_EN
|
|
+ | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
|
|
+ /* Enable interrupts */
|
|
+ writel(usbintr, &udc->op_regs->usbintr);
|
|
+
|
|
+ writel(0xffffffff, &udc->op_regs->usbsts);
|
|
+ udc->stopped = 0;
|
|
+
|
|
+ /* Set the Run bit in the command register */
|
|
+ writel(USBCMD_RUN_STOP | USBCMD_INT_THREAD_CTRL8, &udc->op_regs->usbcmd);
|
|
+}
|
|
+
|
|
+static int udc_reset(struct mv_udc *udc)
|
|
+{
|
|
+ unsigned int loops;
|
|
+ u32 tmp;
|
|
+
|
|
+ pr_info("udc_reset ...\n");
|
|
+
|
|
+ /* Stop the controller */
|
|
+ tmp = readl(&udc->op_regs->usbcmd);
|
|
+ tmp &= ~USBCMD_RUN_STOP;
|
|
+ writel(tmp, &udc->op_regs->usbcmd);
|
|
+
|
|
+ /* Reset the controller to get default values */
|
|
+ writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
|
|
+
|
|
+ /* wait for reset to complete */
|
|
+ loops = LOOPS(RESET_TIMEOUT);
|
|
+ while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
|
|
+ if (loops == 0) {
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "Wait for RESET completed TIMEOUT\n");
|
|
+ return -ETIMEDOUT;
|
|
+ }
|
|
+ loops--;
|
|
+ udelay(LOOPS_USEC);
|
|
+ }
|
|
+
|
|
+ /* set controller to device mode, turn setup lockout off */
|
|
+ tmp = readl(&udc->op_regs->usbmode);
|
|
+ tmp |= USBMODE_CTRL_MODE_DEVICE;
|
|
+
|
|
+ tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;
|
|
+ writel(tmp, &udc->op_regs->usbmode);
|
|
+
|
|
+ writel(0xffff, &udc->op_regs->epsetupstat);
|
|
+
|
|
+ /* Configure the Endpoint List Address */
|
|
+ writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK, &udc->op_regs->eplistaddr);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_udc_enable_internal(struct mv_udc *udc)
|
|
+{
|
|
+ int retval;
|
|
+
|
|
+ pr_debug("mv_udc_enable_internal: udc->active= %d \n", udc->active);
|
|
+ if (udc->active)
|
|
+ return 0;
|
|
+
|
|
+ retval = udc_clock_enable(udc);
|
|
+ if (retval)
|
|
+ return retval;
|
|
+
|
|
+ retval = reset_control_deassert(udc->reset);
|
|
+ if (retval) {
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "deassert reset error %d\n", retval);
|
|
+ return retval;
|
|
+ }
|
|
+
|
|
+ retval = usb_phy_init(udc->phy);
|
|
+ if (retval) {
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "init phy error %d\n", retval);
|
|
+ udc_clock_disable(udc);
|
|
+ return retval;
|
|
+ }
|
|
+
|
|
+ udc->active = 1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_udc_enable(struct mv_udc *udc)
|
|
+{
|
|
+ if (udc->clock_gating)
|
|
+ return mv_udc_enable_internal(udc);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void mv_udc_disable_internal(struct mv_udc *udc)
|
|
+{
|
|
+ pr_debug("mv_udc_disable_internal... \n");
|
|
+ if (udc->active) {
|
|
+ dev_dbg(&udc->dev->dev, "disable udc\n");
|
|
+ usb_phy_shutdown(udc->phy);
|
|
+ reset_control_assert(udc->reset);
|
|
+ udc_clock_disable(udc);
|
|
+ udc->active = 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void mv_udc_disable(struct mv_udc *udc)
|
|
+{
|
|
+ if (udc->clock_gating)
|
|
+ mv_udc_disable_internal(udc);
|
|
+}
|
|
+
|
|
+static int mv_udc_get_frame(struct usb_gadget *gadget)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ u16 retval;
|
|
+
|
|
+ if (!gadget)
|
|
+ return -ENODEV;
|
|
+
|
|
+ udc = container_of(gadget, struct mv_udc, gadget);
|
|
+
|
|
+ retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
|
|
+
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+/* Tries to wake up the host connected to this gadget */
|
|
+static int mv_udc_wakeup(struct usb_gadget *gadget)
|
|
+{
|
|
+ struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
|
|
+ u32 portsc;
|
|
+
|
|
+ /* Remote wakeup feature not enabled by host */
|
|
+ if (!udc->remote_wakeup)
|
|
+ return -ENOTSUPP;
|
|
+
|
|
+ portsc = readl(&udc->op_regs->portsc);
|
|
+ /* not suspended? */
|
|
+ if (!(portsc & PORTSCX_PORT_SUSPEND))
|
|
+ return 0;
|
|
+ /* trigger force resume */
|
|
+ portsc |= PORTSCX_PORT_FORCE_RESUME;
|
|
+ writel(portsc, &udc->op_regs->portsc[0]);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ unsigned long flags;
|
|
+ int retval = 0;
|
|
+
|
|
+ udc = container_of(gadget, struct mv_udc, gadget);
|
|
+
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+
|
|
+ udc->vbus_active = (is_active != 0);
|
|
+
|
|
+ dev_info(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
|
|
+ __func__, udc->softconnect, udc->vbus_active);
|
|
+
|
|
+ if (udc->driver && udc->softconnect && udc->vbus_active) {
|
|
+ /* Clock is disabled, need re-init registers */
|
|
+ retval = mv_udc_enable(udc);
|
|
+ if (retval == 0) {
|
|
+ udc_reset(udc);
|
|
+ ep0_reset(udc);
|
|
+ udc_start(udc);
|
|
+ }
|
|
+ } else if (udc->driver && udc->softconnect) {
|
|
+ if (!udc->active)
|
|
+ goto out;
|
|
+
|
|
+ /* stop all the transfers in the queue */
|
|
+ stop_activity(udc, udc->driver);
|
|
+ udc_stop(udc);
|
|
+ mv_udc_disable(udc);
|
|
+ }
|
|
+
|
|
+out:
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+/* constrain controller's VBUS power usage */
|
|
+static int mv_udc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+
|
|
+ udc = container_of(gadget, struct mv_udc, gadget);
|
|
+ udc->power = mA;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ unsigned long flags;
|
|
+ int retval = 0;
|
|
+
|
|
+ udc = container_of(gadget, struct mv_udc, gadget);
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+
|
|
+ if (udc->softconnect == is_on)
|
|
+ goto out;
|
|
+
|
|
+ udc->softconnect = (is_on != 0);
|
|
+
|
|
+ dev_info(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
|
|
+ __func__, udc->softconnect, udc->vbus_active);
|
|
+
|
|
+ if (udc->driver && udc->softconnect && udc->vbus_active) {
|
|
+ retval = mv_udc_enable(udc);
|
|
+ if (retval == 0) {
|
|
+ /* Clock is disabled, need re-init registers */
|
|
+ udc_reset(udc);
|
|
+ ep0_reset(udc);
|
|
+ udc_start(udc);
|
|
+ }
|
|
+ } else if (udc->driver && udc->vbus_active) {
|
|
+ /* stop all the transfers in the queue */
|
|
+ udc_stop(udc);
|
|
+ stop_activity(udc, udc->driver);
|
|
+ mv_udc_disable(udc);
|
|
+ }
|
|
+out:
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+static int mv_set_selfpowered(struct usb_gadget *gadget, int is_on)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ unsigned long flags;
|
|
+
|
|
+ udc = container_of(gadget, struct mv_udc, gadget);
|
|
+
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+ udc->selfpowered = (is_on != 0);
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
|
|
+static int mv_udc_stop(struct usb_gadget *);
|
|
+/* device controller usb_gadget_ops structure */
|
|
+static const struct usb_gadget_ops mv_ops = {
|
|
+
|
|
+ /* returns the current frame number */
|
|
+ .get_frame = mv_udc_get_frame,
|
|
+
|
|
+ /* tries to wake up the host connected to this gadget */
|
|
+ .wakeup = mv_udc_wakeup,
|
|
+
|
|
+ /* notify controller that VBUS is powered or not */
|
|
+ .vbus_session = mv_udc_vbus_session,
|
|
+
|
|
+ /* constrain controller's VBUS power usage */
|
|
+ .vbus_draw = mv_udc_vbus_draw,
|
|
+
|
|
+ /* D+ pullup, software-controlled connect/disconnect to USB host */
|
|
+ .pullup = mv_udc_pullup,
|
|
+
|
|
+ .set_selfpowered = mv_set_selfpowered,
|
|
+
|
|
+ .udc_start = mv_udc_start,
|
|
+ .udc_stop = mv_udc_stop,
|
|
+};
|
|
+
|
|
+static int eps_init(struct mv_udc *udc)
|
|
+{
|
|
+ struct mv_ep *ep;
|
|
+ char name[12];
|
|
+ int i;
|
|
+
|
|
+ /* initialize ep0 */
|
|
+ ep = &udc->eps[0];
|
|
+ ep->udc = udc;
|
|
+ strncpy(ep->name, "ep0", sizeof(ep->name));
|
|
+ ep->ep.name = ep->name;
|
|
+ ep->ep.ops = &mv_ep_ops;
|
|
+ ep->wedge = 0;
|
|
+ ep->stopped = 0;
|
|
+ usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
|
|
+ ep->ep.caps.type_control = true;
|
|
+ ep->ep.caps.dir_in = true;
|
|
+ ep->ep.caps.dir_out = true;
|
|
+ ep->ep_num = 0;
|
|
+ ep->ep.desc = &mv_ep0_desc;
|
|
+ INIT_LIST_HEAD(&ep->queue);
|
|
+
|
|
+ ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
|
|
+
|
|
+ /* initialize other endpoints */
|
|
+ for (i = 2; i < udc->max_eps * 2; i++) {
|
|
+ ep = &udc->eps[i];
|
|
+ if (i % 2) {
|
|
+ snprintf(name, sizeof(name), "ep%din", i / 2);
|
|
+ ep->direction = EP_DIR_IN;
|
|
+ ep->ep.caps.dir_in = true;
|
|
+ } else {
|
|
+ snprintf(name, sizeof(name), "ep%dout", i / 2);
|
|
+ ep->direction = EP_DIR_OUT;
|
|
+ ep->ep.caps.dir_out = true;
|
|
+ }
|
|
+ ep->udc = udc;
|
|
+ strncpy(ep->name, name, sizeof(ep->name));
|
|
+ ep->ep.name = ep->name;
|
|
+
|
|
+ ep->ep.caps.type_iso = true;
|
|
+ ep->ep.caps.type_bulk = true;
|
|
+ ep->ep.caps.type_int = true;
|
|
+
|
|
+ ep->ep.ops = &mv_ep_ops;
|
|
+ ep->stopped = 0;
|
|
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
|
|
+ ep->ep_num = i / 2;
|
|
+
|
|
+ INIT_LIST_HEAD(&ep->queue);
|
|
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
|
|
+
|
|
+ ep->dqh = &udc->ep_dqh[i];
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* delete all endpoint requests, called with spinlock held */
|
|
+static void nuke(struct mv_ep *ep, int status)
|
|
+{
|
|
+ /* called with spinlock held */
|
|
+ ep->stopped = 1;
|
|
+
|
|
+ /* endpoint fifo flush */
|
|
+ mv_ep_fifo_flush(&ep->ep);
|
|
+
|
|
+ while (!list_empty(&ep->queue)) {
|
|
+ struct mv_req *req = NULL;
|
|
+ req = list_entry(ep->queue.next, struct mv_req, queue);
|
|
+ done(ep, req, status);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* stop all USB activities */
|
|
+static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
|
|
+{
|
|
+ struct mv_ep *ep;
|
|
+
|
|
+ nuke(&udc->eps[0], -ESHUTDOWN);
|
|
+
|
|
+ list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
|
|
+ nuke(ep, -ESHUTDOWN);
|
|
+ }
|
|
+
|
|
+ /* report disconnect; the driver is already quiesced */
|
|
+ if (driver) {
|
|
+ spin_unlock(&udc->lock);
|
|
+ driver->disconnect(&udc->gadget);
|
|
+ spin_lock(&udc->lock);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int mv_udc_start(struct usb_gadget *gadget,
|
|
+ struct usb_gadget_driver *driver)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ int retval = 0;
|
|
+ unsigned long flags;
|
|
+
|
|
+ pr_info("mv_udc_start ... \n");
|
|
+ udc = container_of(gadget, struct mv_udc, gadget);
|
|
+
|
|
+ if (udc->driver)
|
|
+ return -EBUSY;
|
|
+
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+
|
|
+ /* hook up the driver ... */
|
|
+ udc->driver = driver;
|
|
+
|
|
+ udc->usb_state = USB_STATE_ATTACHED;
|
|
+ udc->ep0_state = WAIT_FOR_SETUP;
|
|
+ udc->ep0_dir = EP_DIR_OUT;
|
|
+ udc->selfpowered = 0;
|
|
+
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+
|
|
+ if (udc->transceiver) {
|
|
+ retval = otg_set_peripheral(udc->transceiver->otg,
|
|
+ &udc->gadget);
|
|
+ if (retval) {
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "unable to register peripheral to otg\n");
|
|
+ udc->driver = NULL;
|
|
+ return retval;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* When booting with the cable attached, there will be no vbus irq */
|
|
+ if (udc->qwork)
|
|
+ queue_work(udc->qwork, &udc->vbus_work);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_udc_stop(struct usb_gadget *gadget)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ unsigned long flags;
|
|
+
|
|
+ pr_info("mv_udc_stop ... \n");
|
|
+ udc = container_of(gadget, struct mv_udc, gadget);
|
|
+
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+
|
|
+ mv_udc_enable(udc);
|
|
+ udc_stop(udc);
|
|
+
|
|
+ /* stop all usb activities */
|
|
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
|
|
+ stop_activity(udc, NULL);
|
|
+ mv_udc_disable(udc);
|
|
+
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+
|
|
+ /* unbind gadget driver */
|
|
+ udc->driver = NULL;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void mv_set_ptc(struct mv_udc *udc, u32 mode)
|
|
+{
|
|
+ u32 portsc;
|
|
+
|
|
+ portsc = readl(&udc->op_regs->portsc[0]);
|
|
+ portsc |= mode << 16;
|
|
+ writel(portsc, &udc->op_regs->portsc[0]);
|
|
+}
|
|
+
|
|
+static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
|
|
+{
|
|
+ struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
|
|
+ struct mv_req *req = container_of(_req, struct mv_req, req);
|
|
+ struct mv_udc *udc;
|
|
+ unsigned long flags;
|
|
+
|
|
+ udc = mvep->udc;
|
|
+
|
|
+ dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
|
|
+
|
|
+ spin_lock_irqsave(&udc->lock, flags);
|
|
+ if (req->test_mode) {
|
|
+ mv_set_ptc(udc, req->test_mode);
|
|
+ req->test_mode = 0;
|
|
+ }
|
|
+ spin_unlock_irqrestore(&udc->lock, flags);
|
|
+}
|
|
+
|
|
+static int
|
|
+udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
|
|
+{
|
|
+ int retval = 0;
|
|
+ struct mv_req *req;
|
|
+ struct mv_ep *ep;
|
|
+
|
|
+ ep = &udc->eps[0];
|
|
+ udc->ep0_dir = direction;
|
|
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
|
|
+
|
|
+ req = udc->status_req;
|
|
+
|
|
+ /* fill in the request structure */
|
|
+ if (empty == false) {
|
|
+ *((u16 *) req->req.buf) = cpu_to_le16(status);
|
|
+ req->req.length = 2;
|
|
+ } else
|
|
+ req->req.length = 0;
|
|
+
|
|
+ req->ep = ep;
|
|
+ req->req.status = -EINPROGRESS;
|
|
+ req->req.actual = 0;
|
|
+ if (udc->test_mode) {
|
|
+ req->req.complete = prime_status_complete;
|
|
+ req->test_mode = udc->test_mode;
|
|
+ udc->test_mode = 0;
|
|
+ } else
|
|
+ req->req.complete = NULL;
|
|
+ req->dtd_count = 0;
|
|
+
|
|
+ if (req->req.dma == DMA_ADDR_INVALID) {
|
|
+ if (req->req.length > 0)
|
|
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
|
|
+ req->req.buf, req->req.length,
|
|
+ ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
|
|
+ req->mapped = 1;
|
|
+ }
|
|
+
|
|
+ /* prime the data phase */
|
|
+ if (!req_to_dtd(req)) {
|
|
+ retval = queue_dtd(ep, req);
|
|
+ if (retval) {
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "Failed to queue dtd when prime status\n");
|
|
+ goto out;
|
|
+ }
|
|
+ } else { /* no mem */
|
|
+ retval = -ENOMEM;
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "Failed to dma_pool_alloc when prime status\n");
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ list_add_tail(&req->queue, &ep->queue);
|
|
+
|
|
+ return 0;
|
|
+out:
|
|
+ usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
|
|
+ req->req.dma = DMA_ADDR_INVALID;
|
|
+ req->mapped = 0;
|
|
+
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+static void mv_udc_testmode(struct mv_udc *udc, u16 index)
|
|
+{
|
|
+ if (index <= USB_TEST_FORCE_ENABLE) {
|
|
+ udc->test_mode = index;
|
|
+ if (udc_prime_status(udc, EP_DIR_IN, 0, true))
|
|
+ ep0_stall(udc);
|
|
+ } else
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "This test mode(%d) is not supported\n", index);
|
|
+}
|
|
+
|
|
+static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
|
|
+{
|
|
+ udc->dev_addr = (u8)setup->wValue;
|
|
+
|
|
+ /* update usb state */
|
|
+ udc->usb_state = USB_STATE_ADDRESS;
|
|
+
|
|
+ if (udc_prime_status(udc, EP_DIR_IN, 0, true))
|
|
+ ep0_stall(udc);
|
|
+}
|
|
+
|
|
+static void ch9getstatus(struct mv_udc *udc, u8 __maybe_unused ep_num,
|
|
+ struct usb_ctrlrequest *setup)
|
|
+{
|
|
+ u16 status = 0;
|
|
+ int retval;
|
|
+
|
|
+ if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
|
|
+ != (USB_DIR_IN | USB_TYPE_STANDARD))
|
|
+ return;
|
|
+
|
|
+ if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
|
|
+ status = udc->selfpowered << USB_DEVICE_SELF_POWERED;
|
|
+ status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
|
|
+ } else if ((setup->bRequestType & USB_RECIP_MASK)
|
|
+ == USB_RECIP_INTERFACE) {
|
|
+ /* get interface status */
|
|
+ status = 0;
|
|
+ } else if ((setup->bRequestType & USB_RECIP_MASK)
|
|
+ == USB_RECIP_ENDPOINT) {
|
|
+ u8 ep_index, direction;
|
|
+
|
|
+ ep_index = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
|
|
+ direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
|
|
+ ? EP_DIR_IN : EP_DIR_OUT;
|
|
+ status = ep_is_stall(udc, ep_index, direction)
|
|
+ << USB_ENDPOINT_HALT;
|
|
+ }
|
|
+
|
|
+ retval = udc_prime_status(udc, EP_DIR_IN, status, false);
|
|
+ if (retval)
|
|
+ ep0_stall(udc);
|
|
+ else
|
|
+ udc->ep0_state = DATA_STATE_XMIT;
|
|
+}
|
|
+
|
|
+static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
|
|
+{
|
|
+ u8 ep_num;
|
|
+ u8 direction;
|
|
+ struct mv_ep *ep;
|
|
+
|
|
+ if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
|
|
+ == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
|
|
+ switch (setup->wValue) {
|
|
+ case USB_DEVICE_REMOTE_WAKEUP:
|
|
+ udc->remote_wakeup = 0;
|
|
+ break;
|
|
+ default:
|
|
+ goto out;
|
|
+ }
|
|
+ } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
|
|
+ == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
|
|
+ switch (setup->wValue) {
|
|
+ case USB_ENDPOINT_HALT:
|
|
+ ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
|
|
+ direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
|
|
+ ? EP_DIR_IN : EP_DIR_OUT;
|
|
+ if (setup->wValue != 0 || setup->wLength != 0
|
|
+ || ep_num > udc->max_eps)
|
|
+ goto out;
|
|
+ ep = &udc->eps[ep_num * 2 + direction];
|
|
+ if (ep->wedge == 1)
|
|
+ break;
|
|
+ spin_unlock(&udc->lock);
|
|
+ ep_set_stall(udc, ep_num, direction, 0);
|
|
+ spin_lock(&udc->lock);
|
|
+ break;
|
|
+ default:
|
|
+ goto out;
|
|
+ }
|
|
+ } else
|
|
+ goto out;
|
|
+
|
|
+ if (udc_prime_status(udc, EP_DIR_IN, 0, true))
|
|
+ ep0_stall(udc);
|
|
+out:
|
|
+ return;
|
|
+}
|
|
+
|
|
+static const char *reqname(unsigned bRequest)
|
|
+{
|
|
+ switch (bRequest) {
|
|
+ case USB_REQ_GET_STATUS: return "GET_STATUS";
|
|
+ case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
|
|
+ case USB_REQ_SET_FEATURE: return "SET_FEATURE";
|
|
+ case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
|
|
+ case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
|
|
+ case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
|
|
+ case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
|
|
+ case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
|
|
+ case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
|
|
+ case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
|
|
+ default: return "*UNKNOWN*";
|
|
+ }
|
|
+}
|
|
+
|
|
+static const char *desc_type(unsigned type)
|
|
+{
|
|
+ switch (type) {
|
|
+ case USB_DT_DEVICE: return "USB_DT_DEVICE";
|
|
+ case USB_DT_CONFIG: return "USB_DT_CONFIG";
|
|
+ case USB_DT_STRING: return "USB_DT_STRING";
|
|
+ case USB_DT_INTERFACE: return "USB_DT_INTERFACE";
|
|
+ case USB_DT_ENDPOINT: return "USB_DT_ENDPOINT";
|
|
+ case USB_DT_DEVICE_QUALIFIER: return "USB_DT_DEVICE_QUALIFIER";
|
|
+ case USB_DT_OTHER_SPEED_CONFIG: return "USB_DT_OTHER_SPEED_CONFIG";
|
|
+ case USB_DT_INTERFACE_POWER: return "USB_DT_INTERFACE_POWER";
|
|
+ default: return "*UNKNOWN*";
|
|
+ }
|
|
+}
|
|
+
|
|
+static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
|
|
+{
|
|
+ u8 ep_num;
|
|
+ u8 direction;
|
|
+
|
|
+ if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
|
|
+ == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
|
|
+ switch (setup->wValue) {
|
|
+ case USB_DEVICE_REMOTE_WAKEUP:
|
|
+ udc->remote_wakeup = 1;
|
|
+ break;
|
|
+ case USB_DEVICE_TEST_MODE:
|
|
+ if (setup->wIndex & 0xFF
|
|
+ || udc->gadget.speed != USB_SPEED_HIGH)
|
|
+ ep0_stall(udc);
|
|
+
|
|
+ if (udc->usb_state != USB_STATE_CONFIGURED
|
|
+ && udc->usb_state != USB_STATE_ADDRESS
|
|
+ && udc->usb_state != USB_STATE_DEFAULT)
|
|
+ ep0_stall(udc);
|
|
+
|
|
+ mv_udc_testmode(udc, (setup->wIndex >> 8));
|
|
+ goto out;
|
|
+ default:
|
|
+ goto out;
|
|
+ }
|
|
+ } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
|
|
+ == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
|
|
+ switch (setup->wValue) {
|
|
+ case USB_ENDPOINT_HALT:
|
|
+ ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
|
|
+ direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
|
|
+ ? EP_DIR_IN : EP_DIR_OUT;
|
|
+ if (setup->wValue != 0 || setup->wLength != 0
|
|
+ || ep_num > udc->max_eps)
|
|
+ goto out;
|
|
+ spin_unlock(&udc->lock);
|
|
+ ep_set_stall(udc, ep_num, direction, 1);
|
|
+ spin_lock(&udc->lock);
|
|
+ break;
|
|
+ default:
|
|
+ goto out;
|
|
+ }
|
|
+ } else
|
|
+ goto out;
|
|
+
|
|
+ if (udc_prime_status(udc, EP_DIR_IN, 0, true))
|
|
+ ep0_stall(udc);
|
|
+out:
|
|
+ return;
|
|
+}
|
|
+
|
|
+static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
|
|
+ struct usb_ctrlrequest *setup)
|
|
+ __releases(&ep->udc->lock)
|
|
+ __acquires(&ep->udc->lock)
|
|
+{
|
|
+ bool delegate = false;
|
|
+
|
|
+ nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
|
|
+
|
|
+ dev_dbg(&udc->dev->dev, "%s, \t%s, \t0x%x\n", reqname(setup->bRequest),
|
|
+ (setup->bRequest == USB_REQ_GET_DESCRIPTOR)
|
|
+		? desc_type(setup->wValue >> 8) : "",
|
|
+ setup->wIndex);
|
|
+
|
|
+	/* We process some standard setup requests here */
|
|
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
|
|
+ switch (setup->bRequest) {
|
|
+ case USB_REQ_GET_STATUS:
|
|
+ ch9getstatus(udc, ep_num, setup);
|
|
+ break;
|
|
+
|
|
+ case USB_REQ_SET_ADDRESS:
|
|
+ ch9setaddress(udc, setup);
|
|
+ break;
|
|
+
|
|
+ case USB_REQ_CLEAR_FEATURE:
|
|
+ ch9clearfeature(udc, setup);
|
|
+ break;
|
|
+
|
|
+ case USB_REQ_SET_FEATURE:
|
|
+ ch9setfeature(udc, setup);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ delegate = true;
|
|
+ }
|
|
+ } else
|
|
+ delegate = true;
|
|
+
|
|
+ /* delegate USB standard requests to the gadget driver */
|
|
+ if (delegate == true) {
|
|
+ /* USB requests handled by gadget */
|
|
+ if (setup->wLength) {
|
|
+ /* DATA phase from gadget, STATUS phase from udc */
|
|
+ udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
|
|
+ ? EP_DIR_IN : EP_DIR_OUT;
|
|
+ spin_unlock(&udc->lock);
|
|
+ if (udc->driver->setup(&udc->gadget,
|
|
+ &udc->local_setup_buff) < 0)
|
|
+ ep0_stall(udc);
|
|
+
|
|
+ spin_lock(&udc->lock);
|
|
+ udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
|
|
+ ? DATA_STATE_XMIT : DATA_STATE_RECV;
|
|
+ } else {
|
|
+ /* no DATA phase, IN STATUS phase from gadget */
|
|
+ udc->ep0_dir = EP_DIR_IN;
|
|
+ spin_unlock(&udc->lock);
|
|
+ if (udc->driver->setup(&udc->gadget,
|
|
+ &udc->local_setup_buff) < 0)
|
|
+ ep0_stall(udc);
|
|
+
|
|
+ spin_lock(&udc->lock);
|
|
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/* complete DATA or STATUS phase of ep0, prime status phase if needed */
|
|
+static int ep0_req_complete(struct mv_udc *udc,
|
|
+ struct mv_ep *ep0, struct mv_req *req)
|
|
+{
|
|
+ u32 new_addr;
|
|
+ int ret;
|
|
+
|
|
+ if (udc->usb_state == USB_STATE_ADDRESS) {
|
|
+ /* set the new address */
|
|
+ new_addr = (u32)udc->dev_addr;
|
|
+ writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
|
|
+ &udc->op_regs->deviceaddr);
|
|
+ }
|
|
+
|
|
+ ret = done(ep0, req, 0);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ switch (udc->ep0_state) {
|
|
+ case DATA_STATE_XMIT:
|
|
+ /* receive status phase */
|
|
+ if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
|
|
+ ep0_stall(udc);
|
|
+ break;
|
|
+ case DATA_STATE_RECV:
|
|
+ /* send status phase */
|
|
+		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
|
|
+ ep0_stall(udc);
|
|
+ break;
|
|
+ case WAIT_FOR_OUT_STATUS:
|
|
+ udc->ep0_state = WAIT_FOR_SETUP;
|
|
+ break;
|
|
+ case WAIT_FOR_SETUP:
|
|
+		dev_err(&udc->dev->dev, "unexpected ep0 packets\n");
|
|
+ break;
|
|
+ default:
|
|
+ ep0_stall(udc);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
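To make the state handling above easier to follow, here is the ep0 sequencing implied by handle_setup_packet() and ep0_req_complete(), written out as a sketch of this code (not quoted from any header):

/*
 * Control-IN  (wLength > 0): WAIT_FOR_SETUP -> DATA_STATE_XMIT
 *                            -> WAIT_FOR_OUT_STATUS -> WAIT_FOR_SETUP
 * Control-OUT (wLength > 0): WAIT_FOR_SETUP -> DATA_STATE_RECV
 *                            -> WAIT_FOR_OUT_STATUS -> WAIT_FOR_SETUP
 * No-data request:           WAIT_FOR_SETUP -> WAIT_FOR_OUT_STATUS
 *                            -> WAIT_FOR_SETUP
 */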
|
|
+
|
|
+static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
|
|
+{
|
|
+ struct mv_dqh *dqh;
|
|
+
|
|
+ dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
|
|
+
|
|
+ /* Clear bit in ENDPTSETUPSTAT */
|
|
+ writel((1 << ep_num), &udc->op_regs->epsetupstat);
|
|
+
|
|
+ memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
|
|
+}
|
|
+
|
|
+static void irq_process_tr_complete(struct mv_udc *udc)
|
|
+{
|
|
+ u32 tmp, bit_pos;
|
|
+ int i, ep_num = 0, direction = 0;
|
|
+ struct mv_ep *curr_ep;
|
|
+ struct mv_req *curr_req, *temp_req;
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
|
|
+ * because the setup packets are to be read ASAP
|
|
+ */
|
|
+
|
|
+ /* Process all Setup packet received interrupts */
|
|
+ tmp = readl(&udc->op_regs->epsetupstat);
|
|
+
|
|
+ if (tmp) {
|
|
+ for (i = 0; i < udc->max_eps; i++) {
|
|
+ if (tmp & (1 << i)) {
|
|
+ get_setup_data(udc, i,
|
|
+ (u8 *)(&udc->local_setup_buff));
|
|
+ handle_setup_packet(udc, i,
|
|
+ &udc->local_setup_buff);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!udc->active)
|
|
+ return;
|
|
+
|
|
+ /* Don't clear the endpoint setup status register here.
|
|
+ * It is cleared as a setup packet is read out of the buffer
|
|
+ */
|
|
+
|
|
+ /* Process non-setup transaction complete interrupts */
|
|
+ tmp = readl(&udc->op_regs->epcomplete);
|
|
+
|
|
+ if (!tmp)
|
|
+ return;
|
|
+
|
|
+ writel(tmp, &udc->op_regs->epcomplete);
|
|
+
|
|
+ for (i = 0; i < udc->max_eps * 2; i++) {
|
|
+ ep_num = i >> 1;
|
|
+ direction = i % 2;
|
|
+
|
|
+ bit_pos = 1 << (ep_num + 16 * direction);
|
|
+
|
|
+ if (!(bit_pos & tmp))
|
|
+ continue;
|
|
+
|
|
+ if (i == 1)
|
|
+ curr_ep = &udc->eps[0];
|
|
+ else
|
|
+ curr_ep = &udc->eps[i];
|
|
+		/* process the req queue until an incomplete request */
|
|
+ list_for_each_entry_safe(curr_req, temp_req,
|
|
+ &curr_ep->queue, queue) {
|
|
+ status = process_ep_req(udc, i, curr_req);
|
|
+ if (status)
|
|
+ break;
|
|
+
|
|
+ /* write back status to req */
|
|
+ curr_req->req.status = status;
|
|
+
|
|
+ /* ep0 request completion */
|
|
+ if (ep_num == 0) {
|
|
+ ep0_req_complete(udc, curr_ep, curr_req);
|
|
+ break;
|
|
+ } else {
|
|
+ done(curr_ep, curr_req, status);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static void irq_process_reset(struct mv_udc *udc)
|
|
+{
|
|
+ u32 tmp;
|
|
+ unsigned int loops;
|
|
+
|
|
+ udc->ep0_dir = EP_DIR_OUT;
|
|
+ udc->ep0_state = WAIT_FOR_SETUP;
|
|
+ udc->remote_wakeup = 0; /* default to 0 on reset */
|
|
+
|
|
+ /* The address bits are past bit 25-31. Set the address */
|
|
+ tmp = readl(&udc->op_regs->deviceaddr);
|
|
+ tmp &= ~(USB_DEVICE_ADDRESS_MASK);
|
|
+ writel(tmp, &udc->op_regs->deviceaddr);
|
|
+
|
|
+ /* Clear all the setup token semaphores */
|
|
+ tmp = readl(&udc->op_regs->epsetupstat);
|
|
+ writel(tmp, &udc->op_regs->epsetupstat);
|
|
+
|
|
+ /* Clear all the endpoint complete status bits */
|
|
+ tmp = readl(&udc->op_regs->epcomplete);
|
|
+ writel(tmp, &udc->op_regs->epcomplete);
|
|
+
|
|
+ /* wait until all endptprime bits cleared */
|
|
+ loops = LOOPS(PRIME_TIMEOUT);
|
|
+ while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
|
|
+ if (loops == 0) {
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "Timeout for ENDPTPRIME = 0x%x\n",
|
|
+ readl(&udc->op_regs->epprime));
|
|
+ break;
|
|
+ }
|
|
+ loops--;
|
|
+ udelay(LOOPS_USEC);
|
|
+ }
|
|
+
|
|
+ /* Write 1s to the Flush register */
|
|
+ writel((u32)~0, &udc->op_regs->epflush);
|
|
+
|
|
+ if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
|
|
+ dev_info(&udc->dev->dev, "usb bus reset\n");
|
|
+ udc->usb_state = USB_STATE_DEFAULT;
|
|
+ /* reset all the queues, stop all USB activities */
|
|
+ stop_activity(udc, udc->driver);
|
|
+ } else {
|
|
+ dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
|
|
+ readl(&udc->op_regs->portsc));
|
|
+
|
|
+ /*
|
|
+ * re-initialize
|
|
+ * controller reset
|
|
+ */
|
|
+// udc_reset(udc);
|
|
+
|
|
+ /* reset all the queues, stop all USB activities */
|
|
+ stop_activity(udc, udc->driver);
|
|
+
|
|
+ /* reset ep0 dQH and endptctrl */
|
|
+ ep0_reset(udc);
|
|
+
|
|
+ /* enable interrupt and set controller to run state */
|
|
+// udc_start(udc);
|
|
+
|
|
+ udc->usb_state = USB_STATE_ATTACHED;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void handle_bus_resume(struct mv_udc *udc)
|
|
+{
|
|
+ udc->usb_state = udc->resume_state;
|
|
+ udc->resume_state = 0;
|
|
+
|
|
+ /* report resume to the driver */
|
|
+ if (udc->driver) {
|
|
+ if (udc->driver->resume) {
|
|
+ spin_unlock(&udc->lock);
|
|
+ udc->driver->resume(&udc->gadget);
|
|
+ spin_lock(&udc->lock);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static void irq_process_suspend(struct mv_udc *udc)
|
|
+{
|
|
+ udc->resume_state = udc->usb_state;
|
|
+ udc->usb_state = USB_STATE_SUSPENDED;
|
|
+
|
|
+ if (udc->driver->suspend) {
|
|
+ spin_unlock(&udc->lock);
|
|
+ udc->driver->suspend(&udc->gadget);
|
|
+ spin_lock(&udc->lock);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void irq_process_port_change(struct mv_udc *udc)
|
|
+{
|
|
+ u32 portsc;
|
|
+
|
|
+ portsc = readl(&udc->op_regs->portsc[0]);
|
|
+ if (!(portsc & PORTSCX_PORT_RESET)) {
|
|
+ /* Get the speed */
|
|
+ u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
|
|
+ switch (speed) {
|
|
+ case PORTSCX_PORT_SPEED_HIGH:
|
|
+ udc->gadget.speed = USB_SPEED_HIGH;
|
|
+ break;
|
|
+ case PORTSCX_PORT_SPEED_FULL:
|
|
+ udc->gadget.speed = USB_SPEED_FULL;
|
|
+ break;
|
|
+ case PORTSCX_PORT_SPEED_LOW:
|
|
+ udc->gadget.speed = USB_SPEED_LOW;
|
|
+ break;
|
|
+ default:
|
|
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (portsc & PORTSCX_PORT_SUSPEND) {
|
|
+ udc->resume_state = udc->usb_state;
|
|
+ udc->usb_state = USB_STATE_SUSPENDED;
|
|
+ if (udc->driver->suspend) {
|
|
+ spin_unlock(&udc->lock);
|
|
+ udc->driver->suspend(&udc->gadget);
|
|
+ spin_lock(&udc->lock);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!(portsc & PORTSCX_PORT_SUSPEND)
|
|
+ && udc->usb_state == USB_STATE_SUSPENDED) {
|
|
+ handle_bus_resume(udc);
|
|
+ }
|
|
+
|
|
+ if (!udc->resume_state)
|
|
+ udc->usb_state = USB_STATE_DEFAULT;
|
|
+}
|
|
+
|
|
+static void irq_process_error(struct mv_udc *udc)
|
|
+{
|
|
+ /* Increment the error count */
|
|
+ udc->errors++;
|
|
+}
|
|
+
|
|
+static irqreturn_t mv_udc_irq(int irq, void *dev)
|
|
+{
|
|
+ struct mv_udc *udc = (struct mv_udc *)dev;
|
|
+ u32 status, intr;
|
|
+
|
|
+ spin_lock(&udc->lock);
|
|
+
|
|
+ /* Disable ISR when stopped bit is set */
|
|
+ if (udc->stopped) {
|
|
+ spin_unlock(&udc->lock);
|
|
+ return IRQ_NONE;
|
|
+ }
|
|
+
|
|
+ status = readl(&udc->op_regs->usbsts);
|
|
+ intr = readl(&udc->op_regs->usbintr);
|
|
+ status &= intr;
|
|
+
|
|
+ if (status == 0) {
|
|
+ spin_unlock(&udc->lock);
|
|
+ return IRQ_NONE;
|
|
+ }
|
|
+
|
|
+ /* Clear all the interrupts occurred */
|
|
+ writel(status, &udc->op_regs->usbsts);
|
|
+
|
|
+ if (status & USBSTS_INT) {
|
|
+ irq_process_tr_complete(udc);
|
|
+ }
|
|
+
|
|
+ if (status & USBSTS_ERR) {
|
|
+ pr_info("usb ctrl error ... \n");
|
|
+ irq_process_error(udc);
|
|
+ }
|
|
+
|
|
+ if (status & USBSTS_RESET) {
|
|
+ pr_info("usb reset ... \n");
|
|
+ irq_process_reset(udc);
|
|
+ }
|
|
+
|
|
+ if (status & USBSTS_PORT_CHANGE) {
|
|
+ pr_info("usb port change ... \n");
|
|
+ irq_process_port_change(udc);
|
|
+ }
|
|
+
|
|
+ if (status & USBSTS_SUSPEND) {
|
|
+ pr_info("usb suspend ... \n");
|
|
+ irq_process_suspend(udc);
|
|
+ }
|
|
+
|
|
+ if (status & USBSTS_SYS_ERR)
|
|
+ pr_info("system error ... \n");
|
|
+
|
|
+ spin_unlock(&udc->lock);
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static BLOCKING_NOTIFIER_HEAD(mv_udc_notifier_list);
|
|
+
|
|
+/* For any user that cares about USB UDC events, for example the charger */
|
|
+int mv_udc_register_client(struct notifier_block *nb)
|
|
+{
|
|
+ struct mv_udc *udc = the_controller;
|
|
+ int ret = 0;
|
|
+
|
|
+ ret = blocking_notifier_chain_register(&mv_udc_notifier_list, nb);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (!udc)
|
|
+ return -ENODEV;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(mv_udc_register_client);
|
|
+
|
|
+int mv_udc_unregister_client(struct notifier_block *nb)
|
|
+{
|
|
+ return blocking_notifier_chain_unregister(&mv_udc_notifier_list, nb);
|
|
+}
|
|
+EXPORT_SYMBOL(mv_udc_unregister_client);
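A minimal consumer sketch for the two exports above, assuming only <linux/notifier.h>; the charger client name and the meaning of the event code are hypothetical, since the values posted on the chain are defined elsewhere in the driver:

/* hypothetical client, not part of this patch */
static int charger_udc_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	pr_info("mv_udc event %lu\n", event);
	return NOTIFY_OK;
}

static struct notifier_block charger_udc_nb = {
	.notifier_call = charger_udc_event,
};

/* probe:  mv_udc_register_client(&charger_udc_nb);   */
/* remove: mv_udc_unregister_client(&charger_udc_nb); */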
|
|
+
|
|
+static int mv_udc_vbus_notifier_call(struct notifier_block *nb,
|
|
+ unsigned long val, void *v)
|
|
+{
|
|
+ struct mv_udc *udc = container_of(nb, struct mv_udc, notifier);
|
|
+
|
|
+ pr_info("mv_udc_vbus_notifier_call : udc->vbus_work\n");
|
|
+	/* polling VBUS and initializing the phy may take too much time */
|
|
+ if (udc->qwork)
|
|
+ queue_work(udc->qwork, &udc->vbus_work);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+static void mv_udc_vbus_work(struct work_struct *work)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ unsigned int vbus = 0;
|
|
+
|
|
+ udc = container_of(work, struct mv_udc, vbus_work);
|
|
+
|
|
+ vbus = extcon_get_state(udc->extcon, EXTCON_USB);
|
|
+ dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
|
|
+
|
|
+ mv_udc_vbus_session(&udc->gadget, vbus);
|
|
+}
|
|
+
|
|
+/* release device structure */
|
|
+static void gadget_release(struct device *_dev)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+
|
|
+ udc = dev_get_drvdata(_dev);
|
|
+
|
|
+ complete(udc->done);
|
|
+}
|
|
+
|
|
+static int mv_udc_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+
|
|
+ device_init_wakeup(&pdev->dev, 0);
|
|
+ udc = platform_get_drvdata(pdev);
|
|
+
|
|
+ usb_del_gadget_udc(&udc->gadget);
|
|
+
|
|
+ if (udc->qwork) {
|
|
+ flush_workqueue(udc->qwork);
|
|
+ destroy_workqueue(udc->qwork);
|
|
+ }
|
|
+
|
|
+ /* free memory allocated in probe */
|
|
+ if (udc->dtd_pool)
|
|
+ dma_pool_destroy(udc->dtd_pool);
|
|
+
|
|
+ if (udc->ep_dqh)
|
|
+ dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
|
|
+ udc->ep_dqh, udc->ep_dqh_dma);
|
|
+
|
|
+ mv_udc_disable(udc);
|
|
+ reset_control_assert(udc->reset);
|
|
+ clk_unprepare(udc->clk);
|
|
+
|
|
+ /* free dev, wait for the release() finished */
|
|
+ wait_for_completion(udc->done);
|
|
+
|
|
+ the_controller = NULL;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_udc_dt_parse(struct platform_device *pdev,
|
|
+ struct mv_usb_platform_data *pdata)
|
|
+{
|
|
+ struct device_node *np = pdev->dev.of_node;
|
|
+
|
|
+ if (of_property_read_string(np, "spacemit,udc-name",
|
|
+ &((pdev->dev).init_name)))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (of_property_read_u32(np, "spacemit,udc-mode", &(pdata->mode)))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (of_property_read_u32(np, "spacemit,dev-id", &(pdata->id)))
|
|
+ pdata->id = PXA_USB_DEV_OTG;
|
|
+
|
|
+ of_property_read_u32(np, "spacemit,extern-attr", &(pdata->extern_attr));
|
|
+ pdata->otg_force_a_bus_req = of_property_read_bool(np,
|
|
+ "spacemit,otg-force-a-bus-req");
|
|
+ pdata->disable_otg_clock_gating = of_property_read_bool(np,
|
|
+ "spacemit,disable-otg-clock-gating");
|
|
+
|
|
+ return 0;
|
|
+}
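For quick reference, the device-tree properties consumed by the parser above (names and defaults taken directly from this function; the binding document itself is not part of this hunk):

/*
 * spacemit,udc-name                  string, required -> dev init_name
 * spacemit,udc-mode                  u32,    required -> pdata->mode
 * spacemit,dev-id                    u32,    optional, default PXA_USB_DEV_OTG
 * spacemit,extern-attr               u32,    optional
 * spacemit,otg-force-a-bus-req       bool
 * spacemit,disable-otg-clock-gating  bool
 */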
|
|
+
|
|
+static int mv_udc_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct mv_usb_platform_data *pdata;
|
|
+ struct mv_udc *udc;
|
|
+ int retval = 0;
|
|
+ struct resource *r;
|
|
+ size_t size;
|
|
+ struct device_node *np = pdev->dev.of_node;
|
|
+// const __be32 *prop;
|
|
+// unsigned int proplen;
|
|
+
|
|
+ pr_info("K1X_UDC: mv_udc_probe enter ...\n");
|
|
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
|
|
+ if (pdata == NULL) {
|
|
+ dev_err(&pdev->dev, "failed to allocate memory for platform_data\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ mv_udc_dt_parse(pdev, pdata);
|
|
+ udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
|
|
+ if (udc == NULL) {
|
|
+ dev_err(&pdev->dev, "failed to allocate memory for udc\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ the_controller = udc;
|
|
+
|
|
+ udc->done = &release_done;
|
|
+ udc->pdata = pdata;
|
|
+ spin_lock_init(&udc->lock);
|
|
+
|
|
+ udc->dev = pdev;
|
|
+
|
|
+ if (pdata->mode == MV_USB_MODE_OTG) {
|
|
+ udc->transceiver = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-otg", 0);
|
|
+ /* try again */
|
|
+ if (IS_ERR_OR_NULL(udc->transceiver)) {
|
|
+ dev_err(&pdev->dev, "failed to get usb-otg transceiver\n");
|
|
+ return -EPROBE_DEFER;
|
|
+ }
|
|
+ }
|
|
+
|
|
+	/* the udc has only one sysclk. */
|
|
+ udc->clk = devm_clk_get(&pdev->dev, NULL);
|
|
+ if (IS_ERR(udc->clk))
|
|
+ return PTR_ERR(udc->clk);
|
|
+ clk_prepare(udc->clk);
|
|
+
|
|
+ udc->reset = devm_reset_control_array_get_optional_shared(&pdev->dev);
|
|
+ if (IS_ERR(udc->reset)) {
|
|
+ dev_err(&pdev->dev, "failed to get reset control\n");
|
|
+ retval = PTR_ERR(udc->reset);
|
|
+ goto err_disable_internal;
|
|
+ }
|
|
+
|
|
+ r = platform_get_resource(udc->dev, IORESOURCE_MEM, 0);
|
|
+ if (r == NULL) {
|
|
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ udc->cap_regs = (struct mv_cap_regs __iomem *)
|
|
+ devm_ioremap(&pdev->dev, r->start, resource_size(r));
|
|
+ if (udc->cap_regs == NULL) {
|
|
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
|
|
+ return -EBUSY;
|
|
+ }
|
|
+
|
|
+ udc->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
|
|
+ if (IS_ERR_OR_NULL(udc->phy)) {
|
|
+ pr_info("K1X_UDC: mv_udc_probe exit: no usb-phy found ...\n");
|
|
+ return -EPROBE_DEFER;
|
|
+ }
|
|
+
|
|
+	/* we will access the controller registers, so enable the clk */
|
|
+ retval = mv_udc_enable_internal(udc);
|
|
+ if (retval) {
|
|
+ return retval;
|
|
+ }
|
|
+
|
|
+ udc->op_regs =
|
|
+ (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
|
|
+ + (readl(&udc->cap_regs->caplength_hciversion)
|
|
+ & CAPLENGTH_MASK));
|
|
+ udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
|
|
+
|
|
+ /*
|
|
+	 * some platforms use usb to download the boot image and may not
|
|
+	 * disconnect the usb gadget before loading the kernel, so stop the udc here first.
|
|
+ */
|
|
+ udc_stop(udc);
|
|
+ writel(0xFFFFFFFF, &udc->op_regs->usbsts);
|
|
+
|
|
+	dev_info(&pdev->dev, "use 32-bit DMA mask\n");
|
|
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
|
|
+
|
|
+	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
|
|
+ size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
|
|
+ udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
|
|
+ &udc->ep_dqh_dma, GFP_KERNEL);
|
|
+ if (udc->ep_dqh == NULL) {
|
|
+ dev_err(&pdev->dev, "allocate dQH memory failed\n");
|
|
+ retval = -ENOMEM;
|
|
+ goto err_disable_internal;
|
|
+ }
|
|
+ udc->ep_dqh_size = size;
|
|
+ pr_err("mv_udc: dqh size = 0x%zx udc->ep_dqh_dma = 0x%llx\n", size, udc->ep_dqh_dma);
|
|
+
|
|
+ /* create dTD dma_pool resource */
|
|
+ udc->dtd_pool = dma_pool_create("mv_dtd",
|
|
+ &pdev->dev,
|
|
+ sizeof(struct mv_dtd),
|
|
+ DTD_ALIGNMENT,
|
|
+ DMA_BOUNDARY);
|
|
+
|
|
+ if (!udc->dtd_pool) {
|
|
+ retval = -ENOMEM;
|
|
+ goto err_free_dma;
|
|
+ }
|
|
+
|
|
+	size = udc->max_eps * sizeof(struct mv_ep) * 2;
|
|
+ udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
|
|
+ if (udc->eps == NULL) {
|
|
+ dev_err(&pdev->dev, "allocate ep memory failed\n");
|
|
+ retval = -ENOMEM;
|
|
+ goto err_destroy_dma;
|
|
+ }
|
|
+
|
|
+ /* initialize ep0 status request structure */
|
|
+ udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
|
|
+ GFP_KERNEL);
|
|
+ if (!udc->status_req) {
|
|
+ dev_err(&pdev->dev, "allocate status_req memory failed\n");
|
|
+ retval = -ENOMEM;
|
|
+ goto err_destroy_dma;
|
|
+ }
|
|
+ INIT_LIST_HEAD(&udc->status_req->queue);
|
|
+
|
|
+ /* allocate a small amount of memory to get valid address */
|
|
+ udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
|
|
+ udc->status_req->req.dma = DMA_ADDR_INVALID;
|
|
+
|
|
+ udc->resume_state = USB_STATE_NOTATTACHED;
|
|
+ udc->usb_state = USB_STATE_POWERED;
|
|
+ udc->ep0_dir = EP_DIR_OUT;
|
|
+ udc->remote_wakeup = 0;
|
|
+
|
|
+ udc->irq = platform_get_irq(pdev, 0);
|
|
+ if (udc->irq < 0) {
|
|
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
|
|
+ retval = -ENODEV;
|
|
+ goto err_destroy_dma;
|
|
+ }
|
|
+
|
|
+ if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
|
|
+ IRQF_SHARED, driver_name, udc)) {
|
|
+ dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
|
|
+ udc->irq);
|
|
+ retval = -ENODEV;
|
|
+ goto err_destroy_dma;
|
|
+ }
|
|
+
|
|
+ /* initialize gadget structure */
|
|
+ udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
|
|
+ udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
|
|
+ INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
|
|
+ udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
|
|
+ udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
|
|
+
|
|
+ /* the "gadget" abstracts/virtualizes the controller */
|
|
+ udc->gadget.name = driver_name; /* gadget name */
|
|
+
|
|
+ eps_init(udc);
|
|
+
|
|
+ /*--------------------handle vbus-----------------------------*/
|
|
+ /* TODO: use device tree to parse extcon device name */
|
|
+ if (pdata->extern_attr & MV_USB_HAS_VBUS_DETECTION) {
|
|
+ if (of_property_read_bool(np, "extcon")) {
|
|
+ udc->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
|
|
+ if (IS_ERR(udc->extcon)) {
|
|
+ dev_err(&pdev->dev, "couldn't get extcon device\n");
|
|
+ return -EPROBE_DEFER;
|
|
+ }
|
|
+			dev_dbg(&pdev->dev, "extcon_dev name: %s\n", extcon_get_edev_name(udc->extcon));
|
|
+ } else {
|
|
+			dev_err(&pdev->dev, "usb extcon cable does not exist\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if ((pdata->extern_attr & MV_USB_HAS_VBUS_DETECTION)
|
|
+ || udc->transceiver)
|
|
+ udc->clock_gating = 1;
|
|
+
|
|
+ if ((pdata->extern_attr & MV_USB_HAS_VBUS_DETECTION)
|
|
+ && udc->transceiver == NULL) {
|
|
+
|
|
+ //pr_info("udc: MV_USB_HAS_VBUS_DETECTION \n");
|
|
+ udc->notifier.notifier_call = mv_udc_vbus_notifier_call;
|
|
+ retval = devm_extcon_register_notifier(&pdev->dev, udc->extcon, EXTCON_USB, &udc->notifier);
|
|
+ if (retval)
|
|
+ return retval;
|
|
+
|
|
+ udc->vbus_active = extcon_get_state(udc->extcon, EXTCON_USB);
|
|
+ udc->qwork = create_singlethread_workqueue("mv_udc_queue");
|
|
+ if (!udc->qwork) {
|
|
+ dev_err(&pdev->dev, "cannot create workqueue\n");
|
|
+ retval = -ENOMEM;
|
|
+ goto err_create_workqueue;
|
|
+ }
|
|
+
|
|
+ INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+	 * When clock gating is supported, we can disable the clk and phy.
|
|
+	 * If not, VBUS detection is not supported and we have to keep
|
|
+	 * vbus active all the time to let the controller work.
|
|
+ */
|
|
+ if (udc->clock_gating)
|
|
+ mv_udc_disable_internal(udc);
|
|
+ else
|
|
+ udc->vbus_active = 1;
|
|
+
|
|
+ retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
|
|
+ gadget_release);
|
|
+ if (retval)
|
|
+ goto err_create_workqueue;
|
|
+
|
|
+ platform_set_drvdata(pdev, udc);
|
|
+ device_init_wakeup(&pdev->dev, 1);
|
|
+
|
|
+ if (udc->transceiver) {
|
|
+ retval = otg_set_peripheral(udc->transceiver->otg,
|
|
+ &udc->gadget);
|
|
+ if (retval) {
|
|
+ dev_err(&udc->dev->dev,
|
|
+ "unable to register peripheral to otg\n");
|
|
+ return retval;
|
|
+ }
|
|
+ }
|
|
+
|
|
+	dev_info(&pdev->dev, "successfully probed UDC device %s clock gating\n",
|
|
+ udc->clock_gating ? "with" : "without");
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_create_workqueue:
|
|
+ if (udc->qwork) {
|
|
+ flush_workqueue(udc->qwork);
|
|
+ destroy_workqueue(udc->qwork);
|
|
+ }
|
|
+err_destroy_dma:
|
|
+ dma_pool_destroy(udc->dtd_pool);
|
|
+err_free_dma:
|
|
+ dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
|
|
+ udc->ep_dqh, udc->ep_dqh_dma);
|
|
+err_disable_internal:
|
|
+ mv_udc_disable_internal(udc);
|
|
+ the_controller = NULL;
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+static int mv_udc_suspend(struct device *dev)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+
|
|
+ udc = dev_get_drvdata(dev);
|
|
+
|
|
+ /* if OTG is enabled, the following will be done in OTG driver*/
|
|
+ if (udc->transceiver)
|
|
+ return 0;
|
|
+
|
|
+ if (!udc->clock_gating) {
|
|
+ spin_lock_irq(&udc->lock);
|
|
+ /* stop all usb activities */
|
|
+ udc_stop(udc);
|
|
+ stop_activity(udc, udc->driver);
|
|
+ spin_unlock_irq(&udc->lock);
|
|
+
|
|
+ mv_udc_disable_internal(udc);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_udc_resume(struct device *dev)
|
|
+{
|
|
+ struct mv_udc *udc;
|
|
+ int retval;
|
|
+
|
|
+ udc = dev_get_drvdata(dev);
|
|
+
|
|
+ /* if OTG is enabled, the following will be done in OTG driver*/
|
|
+ if (udc->transceiver)
|
|
+ return 0;
|
|
+
|
|
+ if (!udc->clock_gating) {
|
|
+ retval = mv_udc_enable_internal(udc);
|
|
+ if (retval)
|
|
+ return retval;
|
|
+
|
|
+ if (udc->driver && udc->softconnect) {
|
|
+ udc_reset(udc);
|
|
+ ep0_reset(udc);
|
|
+ udc_start(udc);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct dev_pm_ops mv_udc_pm_ops = {
|
|
+ .suspend = mv_udc_suspend,
|
|
+ .resume = mv_udc_resume,
|
|
+};
|
|
+#endif
|
|
+
|
|
+static void mv_udc_shutdown(struct platform_device *pdev)
|
|
+{
|
|
+ struct mv_udc *udc = the_controller;
|
|
+
|
|
+ if (!udc)
|
|
+ return;
|
|
+ mv_udc_pullup(&udc->gadget, 0);
|
|
+}
|
|
+
|
|
+static const struct of_device_id mv_udc_dt_match[] = {
|
|
+ { .compatible = "spacemit,mv-udc" },
|
|
+ {},
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, mv_udc_dt_match);
|
|
+
|
|
+static struct platform_driver udc_driver = {
|
|
+ .probe = mv_udc_probe,
|
|
+ .remove = mv_udc_remove,
|
|
+ .shutdown = mv_udc_shutdown,
|
|
+ .driver = {
|
|
+ .of_match_table = of_match_ptr(mv_udc_dt_match),
|
|
+ .name = "mv-udc",
|
|
+#ifdef CONFIG_PM
|
|
+ .pm = &mv_udc_pm_ops,
|
|
+#endif
|
|
+ },
|
|
+};
|
|
+
|
|
+module_platform_driver(udc_driver);
|
|
+MODULE_LICENSE("GPL v2");
|
|
+MODULE_DESCRIPTION("SPACEMIT K1x UDC Controller");
|
|
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/host/Kconfig
|
|
+++ b/drivers/usb/host/Kconfig
|
|
@@ -306,6 +306,12 @@ config USB_EHCI_MV
|
|
Dova, Armada 370 and Armada XP. See "Support for Marvell EBU
|
|
on-chip EHCI USB controller" for those.
|
|
|
|
+config USB_EHCI_K1X
|
|
+ bool "EHCI support for Spacemit k1x USB controller"
|
|
+ select USB_EHCI_ROOT_HUB_TT
|
|
+ help
|
|
+ Enable support for Spacemit k1x SoC's on-chip EHCI controller.
|
|
+
|
|
config USB_OCTEON_HCD
|
|
tristate "Cavium Networks Octeon USB support"
|
|
depends on CAVIUM_OCTEON_SOC && USB
|
|
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/host/ehci-hcd.c
|
|
+++ b/drivers/usb/host/ehci-hcd.c
|
|
@@ -1333,6 +1333,10 @@ MODULE_LICENSE ("GPL");
|
|
#include "ehci-grlib.c"
|
|
#endif
|
|
|
|
+#ifdef CONFIG_USB_EHCI_K1X
|
|
+#include "ehci-k1x-ci.c"
|
|
+#endif
|
|
+
|
|
static struct platform_driver * const platform_drivers[] = {
|
|
#ifdef CONFIG_USB_EHCI_SH
|
|
&ehci_hcd_sh_driver,
|
|
@@ -1346,6 +1350,9 @@ static struct platform_driver * const platform_drivers[] = {
|
|
#ifdef CONFIG_SPARC_LEON
|
|
&ehci_grlib_driver,
|
|
#endif
|
|
+#ifdef CONFIG_USB_EHCI_K1X
|
|
+ &ehci_k1x_driver,
|
|
+#endif
|
|
};
|
|
|
|
static int __init ehci_hcd_init(void)
|
|
diff --git a/drivers/usb/host/ehci-k1x-ci.c b/drivers/usb/host/ehci-k1x-ci.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/host/ehci-k1x-ci.c
|
|
@@ -0,0 +1,497 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-or-later
|
|
+/*
|
|
+ * EHCI Driver for Spacemit k1x SoCs
|
|
+ *
|
|
+ * Copyright (c) 2023 Spacemit Inc.
|
|
+ */
|
|
+
|
|
+#include "linux/reset.h"
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/usb/otg.h>
|
|
+#include <linux/usb.h>
|
|
+#include <linux/usb/ch9.h>
|
|
+#include <linux/usb/otg.h>
|
|
+#include <linux/usb/gadget.h>
|
|
+#include <linux/usb/hcd.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/platform_data/k1x_ci_usb.h>
|
|
+#include <dt-bindings/usb/k1x_ci_usb.h>
|
|
+#include <linux/regulator/consumer.h>
|
|
+
|
|
+#define CAPLENGTH_MASK (0xff)
|
|
+
|
|
+#define PMU_SD_ROT_WAKE_CLR 0x7C
|
|
+#define PMU_SD_ROT_WAKE_CLR_VBUS_DRV (0x1 << 21)
|
|
+
|
|
+struct ehci_hcd_mv {
|
|
+ struct usb_hcd *hcd;
|
|
+ struct usb_phy *phy;
|
|
+
|
|
+	/* Which mode is this ehci running in, OTG or Host? */
|
|
+ int mode;
|
|
+
|
|
+ void __iomem *cap_regs;
|
|
+ void __iomem *op_regs;
|
|
+ void __iomem *apmu_base;
|
|
+
|
|
+ struct usb_phy *otg;
|
|
+
|
|
+ struct mv_usb_platform_data *pdata;
|
|
+
|
|
+ struct clk *clk;
|
|
+ struct reset_control *reset;
|
|
+ struct regulator *vbus_otg;
|
|
+
|
|
+ bool reset_on_resume;
|
|
+};
|
|
+
|
|
+static int ehci_otg_enable(struct device *dev, struct ehci_hcd_mv *ehci_mv, bool enable)
|
|
+{
|
|
+ uint32_t temp;
|
|
+
|
|
+ temp = readl(ehci_mv->apmu_base + PMU_SD_ROT_WAKE_CLR);
|
|
+ if (enable)
|
|
+ writel(PMU_SD_ROT_WAKE_CLR_VBUS_DRV | temp, ehci_mv->apmu_base + PMU_SD_ROT_WAKE_CLR);
|
|
+ else
|
|
+ writel(temp & ~PMU_SD_ROT_WAKE_CLR_VBUS_DRV , ehci_mv->apmu_base + PMU_SD_ROT_WAKE_CLR);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
|
|
+{
|
|
+ clk_enable(ehci_mv->clk);
|
|
+}
|
|
+
|
|
+static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
|
|
+{
|
|
+ clk_disable(ehci_mv->clk);
|
|
+}
|
|
+
|
|
+static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
|
|
+{
|
|
+ usb_phy_shutdown(ehci_mv->phy);
|
|
+ reset_control_assert(ehci_mv->reset);
|
|
+ ehci_clock_disable(ehci_mv);
|
|
+}
|
|
+
|
|
+static int mv_ehci_reset(struct usb_hcd *hcd)
|
|
+{
|
|
+ struct device *dev = hcd->self.controller;
|
|
+ struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
|
|
+ int retval;
|
|
+
|
|
+ if (ehci_mv == NULL) {
|
|
+		dev_err(dev, "Cannot find private ehci data\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ hcd->has_tt = 1;
|
|
+
|
|
+ retval = ehci_setup(hcd);
|
|
+ if (retval)
|
|
+ dev_err(dev, "ehci_setup failed %d\n", retval);
|
|
+
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+static const struct hc_driver mv_ehci_hc_driver = {
|
|
+ .description = hcd_name,
|
|
+ .product_desc = "Spacemit EHCI",
|
|
+ .hcd_priv_size = sizeof(struct ehci_hcd),
|
|
+
|
|
+ /*
|
|
+ * generic hardware linkage
|
|
+ */
|
|
+ .irq = ehci_irq,
|
|
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH | HCD_DMA,
|
|
+
|
|
+ /*
|
|
+ * basic lifecycle operations
|
|
+ */
|
|
+ .reset = mv_ehci_reset,
|
|
+ .start = ehci_run,
|
|
+ .stop = ehci_stop,
|
|
+ .shutdown = ehci_shutdown,
|
|
+
|
|
+ /*
|
|
+ * managing i/o requests and associated device resources
|
|
+ */
|
|
+ .urb_enqueue = ehci_urb_enqueue,
|
|
+ .urb_dequeue = ehci_urb_dequeue,
|
|
+ .endpoint_disable = ehci_endpoint_disable,
|
|
+ .endpoint_reset = ehci_endpoint_reset,
|
|
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
|
|
+
|
|
+ /*
|
|
+ * scheduling support
|
|
+ */
|
|
+ .get_frame_number = ehci_get_frame,
|
|
+
|
|
+ /*
|
|
+ * root hub support
|
|
+ */
|
|
+ .hub_status_data = ehci_hub_status_data,
|
|
+ .hub_control = ehci_hub_control,
|
|
+ .bus_suspend = ehci_bus_suspend,
|
|
+ .bus_resume = ehci_bus_resume,
|
|
+ .relinquish_port = ehci_relinquish_port,
|
|
+ .port_handed_over = ehci_port_handed_over,
|
|
+ .get_resuming_ports = ehci_get_resuming_ports,
|
|
+
|
|
+ /*
|
|
+ * device support
|
|
+ */
|
|
+ .free_dev = ehci_remove_device,
|
|
+};
|
|
+
|
|
+static int mv_ehci_dt_parse(struct platform_device *pdev,
|
|
+ struct mv_usb_platform_data *pdata)
|
|
+{
|
|
+ struct device_node *np = pdev->dev.of_node;
|
|
+
|
|
+ if (of_property_read_string(np,
|
|
+ "spacemit,ehci-name", &((pdev->dev).init_name)))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (of_property_read_u32(np, "spacemit,udc-mode", &(pdata->mode)))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (of_property_read_u32(np, "spacemit,dev-id", &(pdata->id)))
|
|
+ pdata->id = PXA_USB_DEV_OTG;
|
|
+
|
|
+ of_property_read_u32(np, "spacemit,extern-attr", &(pdata->extern_attr));
|
|
+ pdata->otg_force_a_bus_req = of_property_read_bool(np,
|
|
+ "spacemit,otg-force-a-bus-req");
|
|
+ pdata->disable_otg_clock_gating = of_property_read_bool(np,
|
|
+ "spacemit,disable-otg-clock-gating");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_ehci_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct mv_usb_platform_data *pdata;
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct device_node *node;
|
|
+ struct usb_hcd *hcd;
|
|
+ struct ehci_hcd *ehci;
|
|
+ struct ehci_hcd_mv *ehci_mv;
|
|
+ struct resource *r;
|
|
+ int retval = -ENODEV;
|
|
+ u32 offset;
|
|
+
|
|
+ dev_dbg(&pdev->dev, "mv_ehci_probe: Enter ... \n");
|
|
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
|
|
+ if (!pdata) {
|
|
+ dev_err(&pdev->dev, "failed to allocate memory for platform_data\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
|
|
+ mv_ehci_dt_parse(pdev, pdata);
|
|
+ /*
|
|
+ * Right now device-tree probed devices don't get dma_mask set.
|
|
+ * Since shared usb code relies on it, set it here for now.
|
|
+ * Once we have dma capability bindings this can go away.
|
|
+ */
|
|
+ if (!dev->dma_mask)
|
|
+ dev->dma_mask = &dev->coherent_dma_mask;
|
|
+ if (!dev->coherent_dma_mask)
|
|
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
|
|
+
|
|
+ if (usb_disabled())
|
|
+ return -ENODEV;
|
|
+
|
|
+ hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
|
|
+ if (!hcd)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ ehci_mv = devm_kzalloc(&pdev->dev, sizeof(*ehci_mv), GFP_KERNEL);
|
|
+ if (ehci_mv == NULL) {
|
|
+ dev_err(&pdev->dev, "cannot allocate ehci_hcd_mv\n");
|
|
+ retval = -ENOMEM;
|
|
+ goto err_put_hcd;
|
|
+ }
|
|
+
|
|
+ platform_set_drvdata(pdev, ehci_mv);
|
|
+ ehci_mv->pdata = pdata;
|
|
+ ehci_mv->hcd = hcd;
|
|
+ ehci_mv->reset_on_resume = of_property_read_bool(pdev->dev.of_node,
|
|
+ "spacemit,reset-on-resume");
|
|
+
|
|
+ ehci_mv->clk = devm_clk_get(&pdev->dev, NULL);
|
|
+ if (IS_ERR(ehci_mv->clk)) {
|
|
+ dev_err(&pdev->dev, "error getting clock\n");
|
|
+ retval = PTR_ERR(ehci_mv->clk);
|
|
+ goto err_clear_drvdata;
|
|
+ }
|
|
+ clk_prepare(ehci_mv->clk);
|
|
+
|
|
+ ehci_mv->reset = devm_reset_control_array_get_optional_shared(&pdev->dev);
|
|
+ if (IS_ERR(ehci_mv->reset)) {
|
|
+ dev_err(&pdev->dev, "failed to get reset control\n");
|
|
+ retval = PTR_ERR(ehci_mv->reset);
|
|
+ goto err_clear_drvdata;
|
|
+ }
|
|
+
|
|
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+ if (!r) {
|
|
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
|
|
+ retval = -ENODEV;
|
|
+ goto err_clear_drvdata;
|
|
+ }
|
|
+
|
|
+ ehci_mv->cap_regs = devm_ioremap(&pdev->dev, r->start,
|
|
+ resource_size(r));
|
|
+ if (ehci_mv->cap_regs == NULL) {
|
|
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
|
|
+ retval = -EFAULT;
|
|
+ goto err_clear_drvdata;
|
|
+ }
|
|
+
|
|
+ ehci_mv->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
|
|
+ if (IS_ERR_OR_NULL(ehci_mv->phy)) {
|
|
+ retval = PTR_ERR(ehci_mv->phy);
|
|
+ if (retval != -EPROBE_DEFER && retval != -ENODEV)
|
|
+ dev_err(&pdev->dev, "failed to get the outer phy\n");
|
|
+ else {
|
|
+ kfree(hcd->bandwidth_mutex);
|
|
+ kfree(hcd);
|
|
+
|
|
+ return -EPROBE_DEFER;
|
|
+ }
|
|
+ goto err_clear_drvdata;
|
|
+ }
|
|
+
|
|
+ ehci_clock_enable(ehci_mv);
|
|
+
|
|
+ retval = reset_control_deassert(ehci_mv->reset);
|
|
+ if (retval) {
|
|
+ dev_err(&pdev->dev, "reset error %d\n", retval);
|
|
+ goto err_disable_clk_rst;
|
|
+ }
|
|
+
|
|
+ retval = usb_phy_init(ehci_mv->phy);
|
|
+ if (retval) {
|
|
+ dev_err(&pdev->dev, "init phy error %d\n", retval);
|
|
+ goto err_disable_clk_rst;
|
|
+ }
|
|
+
|
|
+ offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
|
|
+ ehci_mv->op_regs =
|
|
+ (void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
|
|
+ hcd->rsrc_start = r->start;
|
|
+ hcd->rsrc_len = resource_size(r);
|
|
+ hcd->regs = ehci_mv->op_regs;
|
|
+
|
|
+ hcd->irq = platform_get_irq(pdev, 0);
|
|
+ if (!hcd->irq) {
|
|
+ dev_err(&pdev->dev, "Cannot get irq.");
|
|
+ retval = -ENODEV;
|
|
+ goto err_disable_clk_rst;
|
|
+ }
|
|
+
|
|
+ ehci = hcd_to_ehci(hcd);
|
|
+ ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
|
|
+
|
|
+ ehci_mv->mode = pdata->mode;
|
|
+
|
|
+ node = of_find_compatible_node(NULL, NULL, "spacemit,spacemit-apmu");
|
|
+ BUG_ON(!node);
|
|
+ ehci_mv->apmu_base = of_iomap(node, 0);
|
|
+ if (ehci_mv->apmu_base == NULL) {
|
|
+ dev_err(&pdev->dev, "failed to map apmu base memory\n");
|
|
+ goto err_disable_clk_rst;
|
|
+ }
|
|
+
|
|
+ if (ehci_mv->mode == MV_USB_MODE_OTG) {
|
|
+ pr_info("ehci_mv MV_USB_MODE_OTG ... \n");
|
|
+ ehci_mv->otg = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-otg", 0);
|
|
+ if (IS_ERR(ehci_mv->otg)) {
|
|
+ retval = PTR_ERR(ehci_mv->otg);
|
|
+
|
|
+ if (retval == -ENXIO)
|
|
+ dev_info(&pdev->dev, "MV_USB_MODE_OTG "
|
|
+ "must have CONFIG_USB_PHY enabled\n");
|
|
+ else if (retval != -EPROBE_DEFER)
|
|
+ dev_err(&pdev->dev,
|
|
+ "unable to find transceiver\n");
|
|
+ goto err_disable_clk_rst;
|
|
+ }
|
|
+
|
|
+ retval = otg_set_host(ehci_mv->otg->otg, &hcd->self);
|
|
+ if (retval < 0) {
|
|
+ dev_err(&pdev->dev,
|
|
+ "unable to register with transceiver\n");
|
|
+ retval = -ENODEV;
|
|
+ goto err_disable_clk_rst;
|
|
+ }
|
|
+ /* otg will enable clock before use as host */
|
|
+ mv_ehci_disable(ehci_mv);
|
|
+ } else {
|
|
+ retval = ehci_otg_enable(dev, ehci_mv, 1);
|
|
+ if (retval)
|
|
+ goto err_disable_clk_rst;
|
|
+
|
|
+ retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
|
|
+ if (retval) {
|
|
+ dev_err(&pdev->dev,
|
|
+ "failed to add hcd with err %d\n", retval);
|
|
+ goto err_set_vbus;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ pm_runtime_set_active(dev);
|
|
+ pm_runtime_enable(dev);
|
|
+ pm_suspend_ignore_children(dev, false);
|
|
+ pm_runtime_get_sync(dev);
|
|
+
|
|
+ dev_dbg(&pdev->dev,
|
|
+		 "successfully found EHCI device with regs 0x%p irq %d"
|
|
+ " working in %s mode\n", hcd->regs, hcd->irq,
|
|
+ ehci_mv->mode == MV_USB_MODE_OTG ? "OTG" : "Host");
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_set_vbus:
|
|
+ ehci_otg_enable(dev, ehci_mv, 0);
|
|
+err_disable_clk_rst:
|
|
+ mv_ehci_disable(ehci_mv);
|
|
+err_clear_drvdata:
|
|
+ platform_set_drvdata(pdev, NULL);
|
|
+err_put_hcd:
|
|
+ usb_put_hcd(hcd);
|
|
+
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+static int mv_ehci_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
|
|
+ struct usb_hcd *hcd = ehci_mv->hcd;
|
|
+
|
|
+ if (hcd->rh_registered)
|
|
+ usb_remove_hcd(hcd);
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(ehci_mv->otg))
|
|
+ otg_set_host(ehci_mv->otg->otg, NULL);
|
|
+
|
|
+ if (ehci_mv->mode == MV_USB_MODE_HOST) {
|
|
+ ehci_otg_enable(&pdev->dev, ehci_mv, 0);
|
|
+ mv_ehci_disable(ehci_mv);
|
|
+ clk_unprepare(ehci_mv->clk);
|
|
+ }
|
|
+
|
|
+ usb_put_hcd(hcd);
|
|
+
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+ pm_runtime_put_noidle(&pdev->dev);
|
|
+ pm_runtime_set_suspended(&pdev->dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+MODULE_ALIAS("mv-ehci");
|
|
+
|
|
+static const struct of_device_id mv_ehci_dt_match[] = {
|
|
+ {.compatible = "spacemit,mv-ehci"},
|
|
+ {},
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, mv_ehci_dt_match);
|
|
+
|
|
+static void mv_ehci_shutdown(struct platform_device *pdev)
|
|
+{
|
|
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
|
|
+ struct usb_hcd *hcd = ehci_mv->hcd;
|
|
+
|
|
+ if (!hcd->rh_registered)
|
|
+ return;
|
|
+
|
|
+ if (hcd->driver->shutdown)
|
|
+ hcd->driver->shutdown(hcd);
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+static int mv_ehci_suspend(struct device *dev)
|
|
+{
|
|
+ struct platform_device *pdev = to_platform_device(dev);
|
|
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
|
|
+ struct usb_hcd *hcd = ehci_mv->hcd;
|
|
+ bool do_wakeup = device_may_wakeup(dev);
|
|
+ int ret;
|
|
+
|
|
+ ret = ehci_suspend(hcd, do_wakeup);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ usb_phy_shutdown(ehci_mv->phy);
|
|
+
|
|
+ if (ehci_mv->reset_on_resume) {
|
|
+ ret = reset_control_assert(ehci_mv->reset);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ dev_info(dev, "Will reset controller and phy on resume\n");
|
|
+ }
|
|
+
|
|
+ clk_disable_unprepare(ehci_mv->clk);
|
|
+ dev_dbg(dev, "pm suspend: disable clks and phy\n");
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int mv_ehci_resume(struct device *dev)
|
|
+{
|
|
+ struct platform_device *pdev = to_platform_device(dev);
|
|
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
|
|
+ struct usb_hcd *hcd = ehci_mv->hcd;
|
|
+ int ret;
|
|
+
|
|
+ ret = clk_prepare_enable(ehci_mv->clk);
|
|
+	if (ret) {
|
|
+ dev_err(dev, "Failed to enable clock");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (ehci_mv->reset_on_resume) {
|
|
+ dev_info(dev, "Resetting controller and phy\n");
|
|
+ ret = reset_control_deassert(ehci_mv->reset);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = usb_phy_init(ehci_mv->phy);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "Failed to init phy\n");
|
|
+ ehci_clock_disable(ehci_mv);
|
|
+ return ret;
|
|
+ }
|
|
+ dev_dbg(dev, "pm resume: do EHCI resume\n");
|
|
+ ehci_resume(hcd, ehci_mv->reset_on_resume);
|
|
+ return 0;
|
|
+}
|
|
+#else
|
|
+#define mv_ehci_suspend NULL
|
|
+#define mv_ehci_resume NULL
|
|
+#endif /* CONFIG_PM_SLEEP */
|
|
+
|
|
+static const struct dev_pm_ops mv_ehci_pm_ops = {
|
|
+ .suspend = mv_ehci_suspend,
|
|
+ .resume = mv_ehci_resume,
|
|
+};
|
|
+
|
|
+static struct platform_driver ehci_k1x_driver = {
|
|
+ .probe = mv_ehci_probe,
|
|
+ .remove = mv_ehci_remove,
|
|
+ .shutdown = mv_ehci_shutdown,
|
|
+ .driver = {
|
|
+ .name = "mv-ehci",
|
|
+ .of_match_table = of_match_ptr(mv_ehci_dt_match),
|
|
+ .bus = &platform_bus_type,
|
|
+#ifdef CONFIG_PM
|
|
+ .pm = &mv_ehci_pm_ops,
|
|
+#endif
|
|
+ },
|
|
+};
|
|
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/host/xhci-hub.c
|
|
+++ b/drivers/usb/host/xhci-hub.c
|
|
@@ -12,6 +12,7 @@
|
|
#include <linux/slab.h>
|
|
#include <asm/unaligned.h>
|
|
#include <linux/bitfield.h>
|
|
+#include <linux/usb/phy.h>
|
|
|
|
#include "xhci.h"
|
|
#include "xhci-trace.h"
|
|
@@ -587,6 +588,10 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
|
|
hcd->self.busnum, wIndex + 1, port_status);
|
|
}
|
|
|
|
+#if defined(CONFIG_SOC_SPACEMIT_K1X)
|
|
+extern void dwc3_spacemit_clear_disconnect(struct device *dev);
|
|
+#endif
|
|
+
|
|
static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
|
|
u16 wIndex, __le32 __iomem *addr, u32 port_status)
|
|
{
|
|
@@ -604,6 +609,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
|
|
break;
|
|
case USB_PORT_FEAT_C_CONNECTION:
|
|
status = PORT_CSC;
|
|
+#if defined(CONFIG_SOC_SPACEMIT_K1X)
|
|
+ dwc3_spacemit_clear_disconnect(xhci_to_hcd(xhci)->
|
|
+ self.controller->parent->parent);
|
|
+#endif
|
|
port_change_bit = "connect";
|
|
break;
|
|
case USB_PORT_FEAT_C_OVER_CURRENT:
|
|
--
|
|
Armbian
|
|
|
|
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
|
|
From: Patrick Yavitz <pyavitz@xxxxx.com>
|
|
Date: Sat, 22 Jun 2024 08:11:52 -0400
|
|
Subject: drivers: usb: host: xhci-plat.c
|
|
|
|
Signed-off-by: Patrick Yavitz <pyavitz@xxxxx.com>
|
|
---
|
|
drivers/usb/host/xhci-plat.c | 4 +
|
|
drivers/usb/host/xhci.c | 10 +
|
|
drivers/usb/misc/Kconfig | 8 +
|
|
drivers/usb/misc/Makefile | 1 +
|
|
drivers/usb/misc/spacemit_onboard_hub.c | 229 ++
|
|
drivers/usb/misc/spacemit_onboard_hub.h | 186 ++
|
|
drivers/usb/phy/Kconfig | 17 +
|
|
drivers/usb/phy/Makefile | 2 +
|
|
drivers/usb/phy/phy-k1x-ci-otg.c | 1075 ++++++++++
|
|
drivers/usb/phy/phy-k1x-ci-otg.h | 181 ++
|
|
drivers/usb/phy/phy-k1x-ci-usb2.c | 167 ++
|
|
drivers/usb/phy/phy-k1x-ci-usb2.h | 43 +
|
|
12 files changed, 1923 insertions(+)
|
|
|
|
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/host/xhci-plat.c
|
|
+++ b/drivers/usb/host/xhci-plat.c
|
|
@@ -304,6 +304,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
|
|
if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk"))
|
|
xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
|
|
|
|
+#if defined(CONFIG_SOC_SPACEMIT_K1X)
|
|
+ if (device_property_read_bool(tmpdev, "reset-on-resume"))
|
|
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
|
|
+#endif
|
|
device_property_read_u32(tmpdev, "imod-interval-ns",
|
|
&xhci->imod_interval);
|
|
}
|
|
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/host/xhci.c
|
|
+++ b/drivers/usb/host/xhci.c
|
|
@@ -5391,6 +5391,10 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
|
|
!dma_set_mask(dev, DMA_BIT_MASK(64))) {
|
|
xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
|
|
dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
|
|
+ } else if (IS_ENABLED(CONFIG_SOC_SPACEMIT_K1PRO) &&
|
|
+ !dma_set_mask(dev, DMA_BIT_MASK(40))) {
|
|
+ xhci_dbg(xhci, "Force Enabling 40-bit DMA addresses.\n");
|
|
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(40));
|
|
} else {
|
|
/*
|
|
* This is to avoid error in cases where a 32-bit USB
|
|
@@ -5403,6 +5407,12 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
|
|
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
|
|
}
|
|
|
|
+ if (IS_ENABLED(CONFIG_SOC_SPACEMIT_K1X) &&
|
|
+ !dma_set_mask(dev, DMA_BIT_MASK(32))) {
|
|
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
|
|
+ xhci_dbg(xhci, "Force Enabling 32-bit DMA addresses.\n");
|
|
+ }
|
|
+
|
|
xhci_dbg(xhci, "Calling HCD init\n");
|
|
/* Initialize HCD and host controller data structures. */
|
|
retval = xhci_init(hcd);
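As a side note on the masks negotiated in this hunk: the kernel's DMA_BIT_MASK(n) is ((1ULL << n) - 1) for n < 64, so the 40-bit and 32-bit limits above correspond to the values printed by this small standalone check (plain C, nothing driver-specific):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* mirrors DMA_BIT_MASK(n) from <linux/dma-mapping.h> for n < 64 */
	uint64_t mask40 = (1ULL << 40) - 1;	/* 0xFFFFFFFFFF -> 1 TiB of addressable space */
	uint64_t mask32 = (1ULL << 32) - 1;	/* 0xFFFFFFFF   -> 4 GiB of addressable space */

	printf("40-bit mask: %#llx\n", (unsigned long long)mask40);
	printf("32-bit mask: %#llx\n", (unsigned long long)mask32);
	return 0;
}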
|
|
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/misc/Kconfig
|
|
+++ b/drivers/usb/misc/Kconfig
|
|
@@ -311,3 +311,11 @@ config USB_ONBOARD_HUB
|
|
this config will enable the driver and it will automatically
|
|
match the state of the USB subsystem. If this driver is a
|
|
module it will be called onboard_usb_hub.
|
|
+
|
|
+config SPACEMIT_ONBOARD_USB_HUB
|
|
+ tristate "Spacemit onboard USB hub support"
|
|
+ depends on ARCH_SPACEMIT || COMPILE_TEST
|
|
+ default SOC_SPACEMIT_K1X && USB_DWC3_SPACEMIT
|
|
+ help
|
|
+	  Say Y here if you want to support onboard USB hubs on the Spacemit
|
|
+	  platform. If unsure, say Y when compiling for the Spacemit platform.
|
|
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/misc/Makefile
|
|
+++ b/drivers/usb/misc/Makefile
|
|
@@ -34,3 +34,4 @@ obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
|
|
obj-$(CONFIG_USB_LINK_LAYER_TEST) += lvstest.o
|
|
obj-$(CONFIG_BRCM_USB_PINMAP) += brcmstb-usb-pinmap.o
|
|
obj-$(CONFIG_USB_ONBOARD_HUB) += onboard_usb_hub.o
|
|
+obj-$(CONFIG_SPACEMIT_ONBOARD_USB_HUB) += spacemit_onboard_hub.o
|
|
diff --git a/drivers/usb/misc/spacemit_onboard_hub.c b/drivers/usb/misc/spacemit_onboard_hub.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/misc/spacemit_onboard_hub.c
|
|
@@ -0,0 +1,229 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-or-later
|
|
+/*
|
|
+ * Onboard USB Hub support for Spacemit platform
|
|
+ *
|
|
+ * Copyright (c) 2023 Spacemit Inc.
|
|
+ */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/resource.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/gpio/consumer.h>
|
|
+
|
|
+#include "spacemit_onboard_hub.h"
|
|
+
|
|
+#define DRIVER_VERSION "v1.0.2"
|
|
+
|
|
+static void spacemit_hub_enable(struct spacemit_hub_priv *spacemit, bool on)
|
|
+{
|
|
+ unsigned i;
|
|
+ int active_val = spacemit->hub_gpio_active_low ? 0 : 1;
|
|
+
|
|
+ if (!spacemit->hub_gpios)
|
|
+ return;
|
|
+
|
|
+ dev_dbg(spacemit->dev, "do hub enable %s\n", on ? "on" : "off");
|
|
+
|
|
+ if (on) {
|
|
+ for (i = 0; i < spacemit->hub_gpios->ndescs; i++) {
|
|
+ gpiod_set_value(spacemit->hub_gpios->desc[i],
|
|
+ active_val);
|
|
+ if (spacemit->hub_inter_delay_ms) {
|
|
+ msleep(spacemit->hub_inter_delay_ms);
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ for (i = spacemit->hub_gpios->ndescs; i > 0; --i) {
|
|
+ gpiod_set_value(spacemit->hub_gpios->desc[i - 1],
|
|
+ !active_val);
|
|
+ if (spacemit->hub_inter_delay_ms) {
|
|
+ msleep(spacemit->hub_inter_delay_ms);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ spacemit->is_hub_on = on;
|
|
+}
|
|
+
|
|
+static void spacemit_hub_vbus_enable(struct spacemit_hub_priv *spacemit,
|
|
+ bool on)
|
|
+{
|
|
+ unsigned i;
|
|
+ int active_val = spacemit->vbus_gpio_active_low ? 0 : 1;
|
|
+
|
|
+ if (!spacemit->vbus_gpios)
|
|
+ return;
|
|
+
|
|
+	dev_dbg(spacemit->dev, "do hub vbus enable %s\n", on ? "on" : "off");
|
|
+ if (on) {
|
|
+ for (i = 0; i < spacemit->vbus_gpios->ndescs; i++) {
|
|
+ gpiod_set_value(spacemit->vbus_gpios->desc[i],
|
|
+ active_val);
|
|
+ if (spacemit->vbus_inter_delay_ms) {
|
|
+ msleep(spacemit->vbus_inter_delay_ms);
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ for (i = spacemit->vbus_gpios->ndescs; i > 0; --i) {
|
|
+ gpiod_set_value(spacemit->vbus_gpios->desc[i - 1],
|
|
+ !active_val);
|
|
+ if (spacemit->vbus_inter_delay_ms) {
|
|
+ msleep(spacemit->vbus_inter_delay_ms);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ spacemit->is_vbus_on = on;
|
|
+}
|
|
+
|
|
+static void spacemit_hub_configure(struct spacemit_hub_priv *spacemit, bool on)
|
|
+{
|
|
+ dev_dbg(spacemit->dev, "do hub configure %s\n", on ? "on" : "off");
|
|
+ if (on) {
|
|
+ spacemit_hub_enable(spacemit, true);
|
|
+ if (spacemit->vbus_delay_ms && spacemit->vbus_gpios) {
|
|
+ msleep(spacemit->vbus_delay_ms);
|
|
+ }
|
|
+ spacemit_hub_vbus_enable(spacemit, true);
|
|
+ } else {
|
|
+ spacemit_hub_vbus_enable(spacemit, false);
|
|
+ if (spacemit->vbus_delay_ms && spacemit->vbus_gpios) {
|
|
+ msleep(spacemit->vbus_delay_ms);
|
|
+ }
|
|
+ spacemit_hub_enable(spacemit, false);
|
|
+ }
|
|
+}
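Summarizing the sequencing implemented by the three helpers above (a restatement of this code, not of any external documentation):

/*
 * power on : assert hub GPIOs in order (hub_inter_delay_ms apart),
 *            wait vbus_delay_ms, then assert VBUS GPIOs in order
 *            (vbus_inter_delay_ms apart).
 * power off: mirror image - release VBUS GPIOs in reverse order,
 *            wait vbus_delay_ms, then release hub GPIOs in reverse order.
 */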
|
|
+
|
|
+static void spacemit_read_u32_prop(struct device *dev, const char *name,
|
|
+ u32 init_val, u32 *pval)
|
|
+{
|
|
+ if (device_property_read_u32(dev, name, pval))
|
|
+ *pval = init_val;
|
|
+ dev_dbg(dev, "hub %s, delay: %u ms\n", name, *pval);
|
|
+}
|
|
+
|
|
+static int spacemit_hub_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct spacemit_hub_priv *spacemit;
|
|
+ struct device *dev = &pdev->dev;
|
|
+
|
|
+ dev_info(&pdev->dev, "%s\n", DRIVER_VERSION);
|
|
+
|
|
+ spacemit = devm_kzalloc(&pdev->dev, sizeof(*spacemit), GFP_KERNEL);
|
|
+ if (!spacemit)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ spacemit_read_u32_prop(dev, "hub_inter_delay_ms", 0,
|
|
+ &spacemit->hub_inter_delay_ms);
|
|
+ spacemit_read_u32_prop(dev, "vbus_inter_delay_ms", 0,
|
|
+ &spacemit->vbus_inter_delay_ms);
|
|
+ spacemit_read_u32_prop(dev, "vbus_delay_ms", 10,
|
|
+ &spacemit->vbus_delay_ms);
|
|
+
|
|
+ spacemit->hub_gpio_active_low =
|
|
+ device_property_read_bool(dev, "hub_gpio_active_low");
|
|
+ spacemit->vbus_gpio_active_low =
|
|
+ device_property_read_bool(dev, "vbus_gpio_active_low");
|
|
+ spacemit->suspend_power_on =
|
|
+ device_property_read_bool(dev, "suspend_power_on");
|
|
+
|
|
+ spacemit->hub_gpios = devm_gpiod_get_array_optional(
|
|
+ &pdev->dev, "hub",
|
|
+ spacemit->hub_gpio_active_low ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
|
|
+ if (IS_ERR(spacemit->hub_gpios)) {
|
|
+ dev_err(&pdev->dev, "failed to retrieve hub-gpios from dts\n");
|
|
+ return PTR_ERR(spacemit->hub_gpios);
|
|
+ }
|
|
+
|
|
+ spacemit->vbus_gpios = devm_gpiod_get_array_optional(
|
|
+ &pdev->dev, "vbus",
|
|
+ spacemit->vbus_gpio_active_low ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
|
|
+ if (IS_ERR(spacemit->vbus_gpios)) {
|
|
+ dev_err(&pdev->dev, "failed to retrieve vbus-gpios from dts\n");
|
|
+ return PTR_ERR(spacemit->vbus_gpios);
|
|
+ }
|
|
+
|
|
+ platform_set_drvdata(pdev, spacemit);
|
|
+ spacemit->dev = &pdev->dev;
|
|
+ mutex_init(&spacemit->hub_mutex);
|
|
+
|
|
+ spacemit_hub_configure(spacemit, true);
|
|
+
|
|
+ dev_info(&pdev->dev, "onboard usb hub driver probed, hub configured\n");
|
|
+
|
|
+ spacemit_hub_debugfs_init(spacemit);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int spacemit_hub_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct spacemit_hub_priv *spacemit = platform_get_drvdata(pdev);
|
|
+
|
|
+ debugfs_remove(debugfs_lookup(dev_name(&pdev->dev), usb_debug_root));
|
|
+ spacemit_hub_configure(spacemit, false);
|
|
+ mutex_destroy(&spacemit->hub_mutex);
|
|
+ dev_info(&pdev->dev, "onboard usb hub driver exit, disable hub\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id spacemit_hub_dt_match[] = {
|
|
+ { .compatible = "spacemit,usb3-hub",},
|
|
+ {},
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, spacemit_hub_dt_match);
|
|
+
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+static int spacemit_hub_suspend(struct device *dev)
|
|
+{
|
|
+ struct platform_device *pdev = to_platform_device(dev);
|
|
+ struct spacemit_hub_priv *spacemit = platform_get_drvdata(pdev);
|
|
+ mutex_lock(&spacemit->hub_mutex);
|
|
+ if (!spacemit->suspend_power_on) {
|
|
+ spacemit_hub_configure(spacemit, false);
|
|
+ dev_info(dev, "turn off hub power supply\n");
|
|
+ }
|
|
+ mutex_unlock(&spacemit->hub_mutex);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int spacemit_hub_resume(struct device *dev)
|
|
+{
|
|
+ struct platform_device *pdev = to_platform_device(dev);
|
|
+ struct spacemit_hub_priv *spacemit = platform_get_drvdata(pdev);
|
|
+ mutex_lock(&spacemit->hub_mutex);
|
|
+ if (!spacemit->suspend_power_on) {
|
|
+ spacemit_hub_configure(spacemit, true);
|
|
+ dev_info(dev, "resume hub power supply\n");
|
|
+ }
|
|
+ mutex_unlock(&spacemit->hub_mutex);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct dev_pm_ops spacemit_onboard_hub_pm_ops = {
|
|
+ SET_SYSTEM_SLEEP_PM_OPS(spacemit_hub_suspend, spacemit_hub_resume)
|
|
+};
|
|
+#define DEV_PM_OPS (&spacemit_onboard_hub_pm_ops)
|
|
+#else
|
|
+#define DEV_PM_OPS NULL
|
|
+#endif /* CONFIG_PM_SLEEP */
|
|
+
|
|
+static struct platform_driver spacemit_hub_driver = {
|
|
+ .probe = spacemit_hub_probe,
|
|
+ .remove = spacemit_hub_remove,
|
|
+ .driver = {
|
|
+ .name = "spacemit-usb3-hub",
|
|
+ .owner = THIS_MODULE,
|
|
+ .of_match_table = of_match_ptr(spacemit_hub_dt_match),
|
|
+ .pm = DEV_PM_OPS,
|
|
+ },
|
|
+};
|
|
+
|
|
+module_platform_driver(spacemit_hub_driver);
|
|
+MODULE_DESCRIPTION("Spacemit Onboard USB Hub driver");
|
|
+MODULE_LICENSE("GPL v2");
|
|
diff --git a/drivers/usb/misc/spacemit_onboard_hub.h b/drivers/usb/misc/spacemit_onboard_hub.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/misc/spacemit_onboard_hub.h
|
|
@@ -0,0 +1,186 @@
|
|
+#include <linux/property.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/usb.h>
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/seq_file.h>
|
|
+#include <linux/mutex.h>
|
|
+
|
|
+
|
|
+struct spacemit_hub_priv {
|
|
+ struct device *dev;
|
|
+ bool is_hub_on;
|
|
+ bool is_vbus_on;
|
|
+
|
|
+ struct gpio_descs *hub_gpios;
|
|
+ struct gpio_descs *vbus_gpios;
|
|
+ bool hub_gpio_active_low;
|
|
+ bool vbus_gpio_active_low;
|
|
+
|
|
+ u32 hub_inter_delay_ms;
|
|
+ u32 vbus_delay_ms;
|
|
+ u32 vbus_inter_delay_ms;
|
|
+
|
|
+ bool suspend_power_on;
|
|
+
|
|
+ struct mutex hub_mutex;
|
|
+};
|
|
+
|
|
+static void spacemit_hub_enable(struct spacemit_hub_priv *spacemit, bool on);
|
|
+
|
|
+static void spacemit_hub_vbus_enable(struct spacemit_hub_priv *spacemit,
|
|
+ bool on);
|
|
+
|
|
+static int spacemit_hub_enable_show(struct seq_file *s, void *unused)
|
|
+{
|
|
+ struct spacemit_hub_priv *spacemit = s->private;
|
|
+ mutex_lock(&spacemit->hub_mutex);
|
|
+ seq_puts(s, spacemit->is_hub_on ? "true\n" : "false\n");
|
|
+ mutex_unlock(&spacemit->hub_mutex);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static ssize_t spacemit_hub_enable_write(struct file *file,
|
|
+ const char __user *ubuf, size_t count,
|
|
+ loff_t *ppos)
|
|
+{
|
|
+ struct seq_file *s = file->private_data;
|
|
+ struct spacemit_hub_priv *spacemit = s->private;
|
|
+ bool on = false;
|
|
+ char buf[32];
|
|
+
|
|
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ if ((!strncmp(buf, "true", 4)) || (!strncmp(buf, "1", 1)))
|
|
+ on = true;
|
|
+ if ((!strncmp(buf, "false", 5)) || !strncmp(buf, "0", 1))
|
|
+ on = false;
|
|
+
|
|
+ mutex_lock(&spacemit->hub_mutex);
|
|
+ if (on != spacemit->is_hub_on) {
|
|
+ spacemit_hub_enable(spacemit, on);
|
|
+ }
|
|
+ mutex_unlock(&spacemit->hub_mutex);
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static int spacemit_hub_enable_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, spacemit_hub_enable_show, inode->i_private);
|
|
+}
|
|
+
|
|
+static const struct file_operations spacemit_hub_enable_fops = {
|
|
+ .open = spacemit_hub_enable_open,
|
|
+ .write = spacemit_hub_enable_write,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release,
|
|
+};
|
|
+
|
|
+static int spacemit_hub_vbus_show(struct seq_file *s, void *unused)
|
|
+{
|
|
+ struct spacemit_hub_priv *spacemit = s->private;
|
|
+ mutex_lock(&spacemit->hub_mutex);
|
|
+ seq_puts(s, spacemit->is_vbus_on ? "true\n" : "false\n");
|
|
+ mutex_unlock(&spacemit->hub_mutex);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static ssize_t spacemit_hub_vbus_write(struct file *file,
|
|
+ const char __user *ubuf, size_t count,
|
|
+ loff_t *ppos)
|
|
+{
|
|
+ struct seq_file *s = file->private_data;
|
|
+ struct spacemit_hub_priv *spacemit = s->private;
|
|
+ bool on = false;
|
|
+ char buf[32];
|
|
+
|
|
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ if ((!strncmp(buf, "true", 4)) || (!strncmp(buf, "1", 1)))
|
|
+ on = true;
|
|
+ if ((!strncmp(buf, "false", 5)) || !strncmp(buf, "0", 1))
|
|
+ on = false;
|
|
+
|
|
+ mutex_lock(&spacemit->hub_mutex);
|
|
+ if (on != spacemit->is_vbus_on) {
|
|
+ spacemit_hub_vbus_enable(spacemit, on);
|
|
+ }
|
|
+ mutex_unlock(&spacemit->hub_mutex);
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static int spacemit_hub_vbus_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, spacemit_hub_vbus_show, inode->i_private);
|
|
+}
|
|
+
|
|
+static const struct file_operations spacemit_hub_vbus_fops = {
|
|
+ .open = spacemit_hub_vbus_open,
|
|
+ .write = spacemit_hub_vbus_write,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release,
|
|
+};
|
|
+
|
|
+static int spacemit_hub_suspend_show(struct seq_file *s, void *unused)
|
|
+{
|
|
+ struct spacemit_hub_priv *spacemit = s->private;
|
|
+ mutex_lock(&spacemit->hub_mutex);
|
|
+ seq_puts(s, spacemit->suspend_power_on ? "true\n" : "false\n");
|
|
+ mutex_unlock(&spacemit->hub_mutex);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static ssize_t spacemit_hub_suspend_write(struct file *file,
|
|
+ const char __user *ubuf, size_t count,
|
|
+ loff_t *ppos)
|
|
+{
|
|
+ struct seq_file *s = file->private_data;
|
|
+ struct spacemit_hub_priv *spacemit = s->private;
|
|
+ bool on = false;
|
|
+ char buf[32];
|
|
+
|
|
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ if ((!strncmp(buf, "true", 4)) || (!strncmp(buf, "1", 1)))
|
|
+ on = true;
|
|
+ if ((!strncmp(buf, "false", 5)) || !strncmp(buf, "0", 1))
|
|
+ on = false;
|
|
+
|
|
+ mutex_lock(&spacemit->hub_mutex);
|
|
+ spacemit->suspend_power_on = on;
|
|
+ mutex_unlock(&spacemit->hub_mutex);
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static int spacemit_hub_suspend_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, spacemit_hub_suspend_show, inode->i_private);
|
|
+}
|
|
+
|
|
+static const struct file_operations spacemit_hub_suspend_fops = {
|
|
+ .open = spacemit_hub_suspend_open,
|
|
+ .write = spacemit_hub_suspend_write,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release,
|
|
+};
|
|
+
|
|
+static void spacemit_hub_debugfs_init(struct spacemit_hub_priv *spacemit)
|
|
+{
|
|
+ struct dentry *root;
|
|
+
|
|
+ root = debugfs_create_dir(dev_name(spacemit->dev), usb_debug_root);
|
|
+ debugfs_create_file("vbus_on", 0644, root, spacemit,
|
|
+ &spacemit_hub_vbus_fops);
|
|
+ debugfs_create_file("hub_on", 0644, root, spacemit,
|
|
+ &spacemit_hub_enable_fops);
|
|
+ debugfs_create_file("suspend_power_on", 0644, root, spacemit,
|
|
+ &spacemit_hub_suspend_fops);
|
|
+}
|
|
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/phy/Kconfig
|
|
+++ b/drivers/usb/phy/Kconfig
|
|
@@ -151,6 +151,23 @@ config USB_MV_OTG
|
|
|
|
To compile this driver as a module, choose M here.
|
|
|
|
+config USB_K1XCI_OTG
|
|
+ tristate "Spacemit K1-x USB OTG support"
|
|
+ depends on USB_EHCI_K1X && USB_K1X_UDC && USB_OTG
|
|
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
|
|
+ select USB_PHY
|
|
+ help
|
|
+ Say Y here if you want to build the Spacemit K1-x USB OTG transceiver driver.
+ This driver implements role switching between the EHCI host driver and the gadget driver.
|
|
+
|
|
+config K1XCI_USB2_PHY
|
|
+ tristate "K1x ci USB 2.0 PHY Driver"
|
|
+ depends on USB || USB_GADGET
|
|
+ select USB_PHY
|
|
+ help
|
|
+ Enable this to support the Spacemit K1-x USB 2.0 PHY driver. This driver handles
+ PHY initialization and shutdown and is used by the K1x udc/ehci/otg drivers.
|
|
+
|
|
config USB_MXS_PHY
|
|
tristate "Freescale MXS USB PHY support"
|
|
depends on ARCH_MXC || ARCH_MXS
|
|
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/usb/phy/Makefile
|
|
+++ b/drivers/usb/phy/Makefile
|
|
@@ -25,3 +25,5 @@ obj-$(CONFIG_USB_ULPI) += phy-ulpi.o
|
|
obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o
|
|
obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o
|
|
obj-$(CONFIG_JZ4770_PHY) += phy-jz4770.o
|
|
+obj-$(CONFIG_USB_K1XCI_OTG) += phy-k1x-ci-otg.o
|
|
+obj-$(CONFIG_K1XCI_USB2_PHY) += phy-k1x-ci-usb2.o
|
|
diff --git a/drivers/usb/phy/phy-k1x-ci-otg.c b/drivers/usb/phy/phy-k1x-ci-otg.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/phy/phy-k1x-ci-otg.c
|
|
@@ -0,0 +1,1075 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-or-later
|
|
+/*
|
|
+ * OTG support for Spacemit k1x SoCs
|
|
+ *
|
|
+ * Copyright (c) 2023 Spacemit Inc.
|
|
+ */
|
|
+#include <linux/irqreturn.h>
|
|
+#include <linux/reset.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/uaccess.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/proc_fs.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/workqueue.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/usb.h>
|
|
+#include <linux/usb/ch9.h>
|
|
+#include <linux/usb/otg.h>
|
|
+#include <linux/usb/gadget.h>
|
|
+#include <linux/usb/hcd.h>
|
|
+#include <linux/platform_data/k1x_ci_usb.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <dt-bindings/usb/k1x_ci_usb.h>
|
|
+#include <linux/pm_qos.h>
|
|
+#include <linux/regulator/consumer.h>
|
|
+#include <linux/gpio/consumer.h>
|
|
+#include <linux/extcon.h>
|
|
+#include <linux/extcon-provider.h>
|
|
+#include "phy-k1x-ci-otg.h"
|
|
+
|
|
+#define MAX_RETRY_TIMES 60
|
|
+#define RETRY_SLEEP_MS 1000
|
|
+
|
|
+#define APMU_SD_ROT_WAKE_CLR 0x7C
|
|
+#define USB_OTG_ID_WAKEUP_EN (1 << 8)
|
|
+#define USB_OTG_ID_WAKEUP_CLR (1 << 18)
|
|
+
|
|
+#define PMU_SD_ROT_WAKE_CLR_VBUS_DRV (0x1 << 21)
|
|
+
|
|
+static const char driver_name[] = "k1x-ci-otg";
|
|
+
|
|
+static int otg_force_host_mode;
|
|
+static int otg_state;
|
|
+
|
|
+static char *state_string[] = {
|
|
+ "undefined", "b_idle", "b_srp_init", "b_peripheral",
|
|
+ "b_wait_acon", "b_host", "a_idle", "a_wait_vrise",
|
|
+ "a_wait_bcon", "a_host", "a_suspend", "a_peripheral",
|
|
+ "a_wait_vfall", "a_vbus_err",
|
|
+};
|
|
+
|
|
+static int mv_otg_set_vbus(struct usb_otg *otg, bool on)
|
|
+{
|
|
+ struct mv_otg *mvotg = container_of(otg->usb_phy, struct mv_otg, phy);
|
|
+ uint32_t temp;
|
|
+
|
|
+ dev_dbg(&mvotg->pdev->dev, "%s on= %d ... \n", __func__, on);
|
|
+ /* set constraint before turning on vbus */
|
|
+ if (on) {
|
|
+ pm_stay_awake(&mvotg->pdev->dev);
|
|
+ }
|
|
+
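+ /* set or clear the VBUS drive bit (PMU_SD_ROT_WAKE_CLR_VBUS_DRV) in the APMU wake-clear register */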
|
|
+ if (on) {
|
|
+ temp = readl(mvotg->apmu_base + APMU_SD_ROT_WAKE_CLR);
|
|
+ writel(PMU_SD_ROT_WAKE_CLR_VBUS_DRV | temp,
|
|
+ mvotg->apmu_base + APMU_SD_ROT_WAKE_CLR);
|
|
+ } else {
|
|
+ temp = readl(mvotg->apmu_base + APMU_SD_ROT_WAKE_CLR);
|
|
+ temp &= ~PMU_SD_ROT_WAKE_CLR_VBUS_DRV;
|
|
+ writel(temp, mvotg->apmu_base + APMU_SD_ROT_WAKE_CLR);
|
|
+ }
|
|
+
|
|
+ gpiod_set_value(mvotg->vbus_gpio, on);
|
|
+
|
|
+ /* release constraint after turning off vbus */
|
|
+ if (!on) {
|
|
+ pm_relax(&mvotg->pdev->dev);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
|
|
+{
|
|
+ pr_debug("%s ... \n", __func__);
|
|
+ otg->host = host;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_otg_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
|
|
+{
|
|
+ pr_debug("%s ... \n", __func__);
|
|
+ otg->gadget = gadget;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void mv_otg_run_state_machine(struct mv_otg *mvotg, unsigned long delay)
|
|
+{
|
|
+ dev_dbg(&mvotg->pdev->dev, "mv_otg_run_state_machine ... \n");
|
|
+ dev_dbg(&mvotg->pdev->dev, "transceiver is updated\n");
|
|
+ if (!mvotg->qwork)
|
|
+ return;
|
|
+
|
|
+ queue_delayed_work(mvotg->qwork, &mvotg->work, delay);
|
|
+}
|
|
+
|
|
+static void mv_otg_timer_await_bcon(struct timer_list *t)
|
|
+{
|
|
+ struct mv_otg *mvotg =
|
|
+ from_timer(mvotg, t, otg_ctrl.timer[A_WAIT_BCON_TIMER]);
|
|
+
|
|
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 1;
|
|
+
|
|
+ dev_info(&mvotg->pdev->dev, "B Device No Response!\n");
|
|
+
|
|
+ if (spin_trylock(&mvotg->wq_lock)) {
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+ spin_unlock(&mvotg->wq_lock);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id)
|
|
+{
|
|
+ struct timer_list *timer;
|
|
+
|
|
+ if (id >= OTG_TIMER_NUM)
|
|
+ return -EINVAL;
|
|
+
|
|
+ timer = &mvotg->otg_ctrl.timer[id];
|
|
+
|
|
+ if (timer_pending(timer))
|
|
+ del_timer(timer);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
|
|
+ unsigned long interval)
|
|
+{
|
|
+ struct timer_list *timer;
|
|
+
|
|
+ if (id >= OTG_TIMER_NUM)
|
|
+ return -EINVAL;
|
|
+
|
|
+ timer = &mvotg->otg_ctrl.timer[id];
|
|
+ if (timer_pending(timer)) {
|
|
+ dev_err(&mvotg->pdev->dev, "Timer%d is already running\n", id);
|
|
+ return -EBUSY;
|
|
+ }
|
|
+
|
|
+ timer->expires = jiffies + interval;
|
|
+ add_timer(timer);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_otg_reset(struct mv_otg *mvotg)
|
|
+{
|
|
+ unsigned int loops;
|
|
+ u32 tmp;
|
|
+
|
|
+ dev_dbg(&mvotg->pdev->dev, "mv_otg_reset \n");
|
|
+ /* Stop the controller */
|
|
+ tmp = readl(&mvotg->op_regs->usbcmd);
|
|
+ tmp &= ~USBCMD_RUN_STOP;
|
|
+ writel(tmp, &mvotg->op_regs->usbcmd);
|
|
+
|
|
+ /* Reset the controller to get default values */
|
|
+ writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd);
|
|
+
|
|
+ loops = 500;
|
|
+ while (readl(&mvotg->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
|
|
+ if (loops == 0) {
|
|
+ dev_err(&mvotg->pdev->dev,
|
|
+ "Wait for RESET completed TIMEOUT\n");
|
|
+ return -ETIMEDOUT;
|
|
+ }
|
|
+ loops--;
|
|
+ udelay(20);
|
|
+ }
|
|
+
|
|
+ writel(0x0, &mvotg->op_regs->usbintr);
|
|
+ tmp = readl(&mvotg->op_regs->usbsts);
|
|
+ writel(tmp, &mvotg->op_regs->usbsts);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void mv_otg_init_irq(struct mv_otg *mvotg)
|
|
+{
|
|
+ u32 otgsc;
|
|
+
|
|
+ mvotg->irq_en = OTGSC_INTR_A_SESSION_VALID | OTGSC_INTR_A_VBUS_VALID;
|
|
+ mvotg->irq_status = OTGSC_INTSTS_A_SESSION_VALID |
|
|
+ OTGSC_INTSTS_A_VBUS_VALID;
|
|
+
|
|
+ if ((mvotg->pdata->extern_attr & MV_USB_HAS_VBUS_DETECTION) == 0) {
|
|
+ mvotg->irq_en |= OTGSC_INTR_B_SESSION_VALID |
|
|
+ OTGSC_INTR_B_SESSION_END;
|
|
+ mvotg->irq_status |= OTGSC_INTSTS_B_SESSION_VALID |
|
|
+ OTGSC_INTSTS_B_SESSION_END;
|
|
+ }
|
|
+
|
|
+ if ((mvotg->pdata->extern_attr & MV_USB_HAS_IDPIN_DETECTION) == 0) {
|
|
+ mvotg->irq_en |= OTGSC_INTR_USB_ID;
|
|
+ mvotg->irq_status |= OTGSC_INTSTS_USB_ID;
|
|
+ }
|
|
+
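+ /* enable the selected interrupt sources in the OTGSC register */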
|
|
+ otgsc = readl(&mvotg->op_regs->otgsc);
|
|
+ otgsc |= mvotg->irq_en;
|
|
+ writel(otgsc, &mvotg->op_regs->otgsc);
|
|
+}
|
|
+
|
|
+static void mv_otg_start_host(struct mv_otg *mvotg, int on)
|
|
+{
|
|
+ struct usb_otg *otg = mvotg->phy.otg;
|
|
+ struct usb_hcd *hcd;
|
|
+
|
|
+ dev_dbg(&mvotg->pdev->dev, "%s ...\n", __func__);
|
|
+ if (!otg->host) {
|
|
+ int retry = 0;
|
|
+ while (retry < MAX_RETRY_TIMES) {
|
|
+ retry++;
|
|
+ msleep(RETRY_SLEEP_MS);
|
|
+ if (otg->host)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (!otg->host) {
|
|
+ dev_err(&mvotg->pdev->dev, "otg->host is not set!\n");
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dev_info(&mvotg->pdev->dev, "%s host\n", on ? "start" : "stop");
|
|
+
|
|
+ hcd = bus_to_hcd(otg->host);
|
|
+
|
|
+ if (on) {
|
|
+ usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
|
|
+ device_wakeup_enable(hcd->self.controller);
|
|
+ } else {
|
|
+ usb_remove_hcd(hcd);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void mv_otg_start_peripheral(struct mv_otg *mvotg, int on)
|
|
+{
|
|
+ struct usb_otg *otg = mvotg->phy.otg;
|
|
+
|
|
+ dev_dbg(&mvotg->pdev->dev, "%s ...\n", __func__);
|
|
+ if (!otg->gadget) {
|
|
+ int retry = 0;
|
|
+ while (retry < MAX_RETRY_TIMES) {
|
|
+ retry++;
|
|
+ msleep(RETRY_SLEEP_MS);
|
|
+ if (otg->gadget)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (!otg->gadget) {
|
|
+ dev_err(&mvotg->pdev->dev, "otg->gadget is not set!\n");
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dev_info(&mvotg->pdev->dev, "gadget %s\n", on ? "on" : "off");
|
|
+
|
|
+ if (on)
|
|
+ usb_gadget_vbus_connect(otg->gadget);
|
|
+ else
|
|
+ usb_gadget_vbus_disconnect(otg->gadget);
|
|
+}
|
|
+
|
|
+static void otg_clock_enable(struct mv_otg *mvotg)
|
|
+{
|
|
+ clk_enable(mvotg->clk);
|
|
+}
|
|
+
|
|
+static void otg_reset_assert(struct mv_otg *mvotg)
|
|
+{
|
|
+ reset_control_assert(mvotg->reset);
|
|
+}
|
|
+
|
|
+static void otg_reset_deassert(struct mv_otg *mvotg)
|
|
+{
|
|
+ reset_control_deassert(mvotg->reset);
|
|
+}
|
|
+
|
|
+static void otg_clock_disable(struct mv_otg *mvotg)
|
|
+{
|
|
+ clk_disable(mvotg->clk);
|
|
+}
|
|
+
|
|
+static int mv_otg_enable_internal(struct mv_otg *mvotg)
|
|
+{
|
|
+ int retval = 0;
|
|
+
|
|
+ dev_dbg(&mvotg->pdev->dev,
|
|
+ "mv_otg_enable_internal: mvotg->active= %d \n", mvotg->active);
|
|
+ if (mvotg->active)
|
|
+ return 0;
|
|
+
|
|
+ dev_dbg(&mvotg->pdev->dev,
|
|
+ "otg enabled, will enable clk, release rst\n");
|
|
+
|
|
+ otg_clock_enable(mvotg);
|
|
+ otg_reset_assert(mvotg);
|
|
+ otg_reset_deassert(mvotg);
|
|
+ retval = usb_phy_init(mvotg->outer_phy);
|
|
+ if (retval) {
|
|
+ dev_err(&mvotg->pdev->dev, "failed to initialize phy %d\n",
|
|
+ retval);
|
|
+ otg_clock_disable(mvotg);
|
|
+ return retval;
|
|
+ }
|
|
+
|
|
+ mvotg->active = 1;
|
|
+ otg_state = 1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_otg_enable(struct mv_otg *mvotg)
|
|
+{
|
|
+ if (mvotg->clock_gating)
|
|
+ return mv_otg_enable_internal(mvotg);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void mv_otg_disable_internal(struct mv_otg *mvotg)
|
|
+{
|
|
+ dev_dbg(&mvotg->pdev->dev,
|
|
+ "mv_otg_disable_internal: mvotg->active= %d ... \n",
|
|
+ mvotg->active);
|
|
+ if (mvotg->active) {
|
|
+ dev_dbg(&mvotg->pdev->dev, "otg disabled\n");
|
|
+ usb_phy_shutdown(mvotg->outer_phy);
|
|
+ otg_reset_assert(mvotg);
|
|
+ otg_clock_disable(mvotg);
|
|
+ mvotg->active = 0;
|
|
+ otg_state = 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void mv_otg_disable(struct mv_otg *mvotg)
|
|
+{
|
|
+ if (mvotg->clock_gating)
|
|
+ mv_otg_disable_internal(mvotg);
|
|
+}
|
|
+
|
|
+static void mv_otg_update_inputs(struct mv_otg *mvotg)
|
|
+{
|
|
+ struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
|
|
+ u32 otgsc;
|
|
+
|
|
+ otgsc = readl(&mvotg->op_regs->otgsc);
|
|
+
|
|
+ if (mvotg->pdata->extern_attr & MV_USB_HAS_VBUS_DETECTION) {
|
|
+ unsigned int vbus;
|
|
+ vbus = extcon_get_state(mvotg->extcon, EXTCON_USB);
|
|
+ dev_dbg(&mvotg->pdev->dev, "-->%s: vbus = %d\n", __func__,
|
|
+ vbus);
|
|
+
|
|
+ if (vbus == VBUS_HIGH)
|
|
+ otg_ctrl->b_sess_vld = 1;
|
|
+ else
|
|
+ otg_ctrl->b_sess_vld = 0;
|
|
+ } else {
|
|
+ dev_err(&mvotg->pdev->dev, "vbus detect was not supported ...");
|
|
+ }
|
|
+
|
|
+ if (mvotg->pdata->extern_attr & MV_USB_HAS_IDPIN_DETECTION) {
|
|
+ unsigned int id;
|
|
+ /* id = 0 means the otg cable is absent. */
|
|
+ id = extcon_get_state(mvotg->extcon, EXTCON_USB_HOST);
|
|
+ dev_dbg(&mvotg->pdev->dev, "-->%s: id = %d\n", __func__, id);
|
|
+
|
|
+ otg_ctrl->id = !id;
|
|
+ otg_ctrl->a_vbus_vld = !!id;
|
|
+ } else {
|
|
+ dev_err(&mvotg->pdev->dev,
+ "id pin detection is not supported\n");
|
|
+ }
|
|
+
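+ /* the "otg_mode" sysfs attribute can force host mode regardless of the extcon state */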
|
|
+ if (otg_force_host_mode) {
|
|
+ otg_ctrl->id = 0;
|
|
+ otg_ctrl->a_vbus_vld = 1;
|
|
+ }
|
|
+
|
|
+ dev_dbg(&mvotg->pdev->dev, "id %d\n", otg_ctrl->id);
|
|
+ dev_dbg(&mvotg->pdev->dev, "b_sess_vld %d\n", otg_ctrl->b_sess_vld);
|
|
+ dev_dbg(&mvotg->pdev->dev, "a_vbus_vld %d\n", otg_ctrl->a_vbus_vld);
|
|
+}
|
|
+
|
|
+static void mv_otg_update_state(struct mv_otg *mvotg)
|
|
+{
|
|
+ struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
|
|
+ int old_state = mvotg->phy.otg->state;
|
|
+
|
|
+ switch (old_state) {
|
|
+ case OTG_STATE_UNDEFINED:
|
|
+ mvotg->phy.otg->state = OTG_STATE_B_IDLE;
|
|
+ fallthrough;
|
|
+ case OTG_STATE_B_IDLE:
|
|
+ if (otg_ctrl->id == 0)
|
|
+ mvotg->phy.otg->state = OTG_STATE_A_IDLE;
|
|
+ else if (otg_ctrl->b_sess_vld)
|
|
+ mvotg->phy.otg->state = OTG_STATE_B_PERIPHERAL;
|
|
+ break;
|
|
+ case OTG_STATE_B_PERIPHERAL:
|
|
+ if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0)
|
|
+ mvotg->phy.otg->state = OTG_STATE_B_IDLE;
|
|
+ break;
|
|
+ case OTG_STATE_A_IDLE:
|
|
+ if (otg_ctrl->id)
|
|
+ mvotg->phy.otg->state = OTG_STATE_B_IDLE;
|
|
+ else
|
|
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_VRISE;
|
|
+ break;
|
|
+ case OTG_STATE_A_WAIT_VRISE:
|
|
+ if (otg_ctrl->a_vbus_vld)
|
|
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON;
|
|
+ break;
|
|
+ case OTG_STATE_A_WAIT_BCON:
|
|
+ if (otg_ctrl->id || otg_ctrl->a_wait_bcon_timeout) {
|
|
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
|
|
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
|
|
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
|
|
+ } else if (otg_ctrl->b_conn) {
|
|
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
|
|
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
|
|
+ mvotg->phy.otg->state = OTG_STATE_A_HOST;
|
|
+ }
|
|
+ break;
|
|
+ case OTG_STATE_A_HOST:
|
|
+ if (otg_ctrl->id || !otg_ctrl->b_conn)
|
|
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON;
|
|
+ else if (!otg_ctrl->a_vbus_vld)
|
|
+ mvotg->phy.otg->state = OTG_STATE_A_VBUS_ERR;
|
|
+ break;
|
|
+ case OTG_STATE_A_WAIT_VFALL:
|
|
+ if (otg_ctrl->id || (!otg_ctrl->b_conn))
|
|
+ mvotg->phy.otg->state = OTG_STATE_A_IDLE;
|
|
+ break;
|
|
+ case OTG_STATE_A_VBUS_ERR:
|
|
+ if (otg_ctrl->id) {
|
|
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void mv_otg_work(struct work_struct *work)
|
|
+{
|
|
+ struct mv_otg *mvotg;
|
|
+ struct usb_phy *phy;
|
|
+ struct usb_otg *otg;
|
|
+ int old_state;
|
|
+
|
|
+ mvotg = container_of(to_delayed_work(work), struct mv_otg, work);
|
|
+
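+ /* re-evaluate the OTG inputs and walk the state machine until the state stops changing */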
|
|
+run:
|
|
+ /* work queue is single thread, or we need spin_lock to protect */
|
|
+ phy = &mvotg->phy;
|
|
+ otg = mvotg->phy.otg;
|
|
+ old_state = otg->state;
|
|
+
|
|
+ if (!mvotg->active)
|
|
+ return;
|
|
+
|
|
+ mv_otg_update_inputs(mvotg);
|
|
+ mv_otg_update_state(mvotg);
|
|
+ if (old_state != mvotg->phy.otg->state) {
|
|
+ dev_dbg(&mvotg->pdev->dev, "change from state %s to %s\n",
|
|
+ state_string[old_state],
|
|
+ state_string[mvotg->phy.otg->state]);
|
|
+
|
|
+ switch (mvotg->phy.otg->state) {
|
|
+ case OTG_STATE_B_IDLE:
|
|
+ otg->default_a = 0;
|
|
+ if (old_state == OTG_STATE_B_PERIPHERAL ||
|
|
+ old_state == OTG_STATE_UNDEFINED)
|
|
+ mv_otg_start_peripheral(mvotg, 0);
|
|
+ mv_otg_reset(mvotg);
|
|
+ mv_otg_disable(mvotg);
|
|
+ break;
|
|
+ case OTG_STATE_B_PERIPHERAL:
|
|
+ mv_otg_enable(mvotg);
|
|
+ mv_otg_start_peripheral(mvotg, 1);
|
|
+ break;
|
|
+ case OTG_STATE_A_IDLE:
|
|
+ otg->default_a = 1;
|
|
+ mv_otg_enable(mvotg);
|
|
+ if (old_state == OTG_STATE_A_WAIT_VFALL)
|
|
+ mv_otg_start_host(mvotg, 0);
|
|
+ mv_otg_reset(mvotg);
|
|
+ break;
|
|
+ case OTG_STATE_A_WAIT_VRISE:
|
|
+ mv_otg_set_vbus(otg, 1);
|
|
+ break;
|
|
+ case OTG_STATE_A_WAIT_BCON:
|
|
+ if (old_state != OTG_STATE_A_HOST)
|
|
+ mv_otg_start_host(mvotg, 1);
|
|
+ mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER,
|
|
+ T_A_WAIT_BCON);
|
|
+ /*
+ * Now we directly enter A_HOST, so set b_conn = 1
+ * here. In fact, the host driver should notify us.
+ */
|
|
+ mvotg->otg_ctrl.b_conn = 1;
|
|
+ break;
|
|
+ case OTG_STATE_A_HOST:
|
|
+ break;
|
|
+ case OTG_STATE_A_WAIT_VFALL:
|
|
+ /*
+ * Now we have exited A_HOST, so set b_conn = 0
+ * here. In fact, the host driver should notify us.
+ */
|
|
+ mvotg->otg_ctrl.b_conn = 0;
|
|
+ mv_otg_set_vbus(otg, 0);
|
|
+ break;
|
|
+ case OTG_STATE_A_VBUS_ERR:
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+ goto run;
|
|
+ } else {
|
|
+ dev_dbg(&mvotg->pdev->dev,
|
|
+ "state no change: last_state: %s, current_state: %s\n",
|
|
+ state_string[old_state],
|
|
+ state_string[mvotg->phy.otg->state]);
|
|
+ }
|
|
+}
|
|
+
|
|
+static irqreturn_t mv_otg_irq(int irq, void *dev)
|
|
+{
|
|
+ struct mv_otg *mvotg = dev;
|
|
+ u32 otgsc;
|
|
+
|
|
+ /* if the otg clock is not enabled, the otgsc readout will be 0 */
|
|
+ if (!mvotg->active)
|
|
+ mv_otg_enable(mvotg);
|
|
+
|
|
+ otgsc = readl(&mvotg->op_regs->otgsc);
|
|
+ writel(otgsc | mvotg->irq_en, &mvotg->op_regs->otgsc);
|
|
+
|
|
+ if (!(mvotg->pdata->extern_attr & MV_USB_HAS_IDPIN_DETECTION)) {
|
|
+ if (mvotg->otg_ctrl.id != (!!(otgsc & OTGSC_STS_USB_ID))) {
|
|
+ dev_dbg(&mvotg->pdev->dev, "%s: ID detect\n", __func__);
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+ return IRQ_HANDLED;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * if we have vbus, then the vbus detection for B-device
|
|
+ * will be done by mv_otg_inputs_irq().
|
|
+ * currently mv_otg_inputs_irq() has been removed
|
|
+ */
|
|
+ if (mvotg->pdata->extern_attr & MV_USB_HAS_VBUS_DETECTION)
|
|
+ if ((otgsc & OTGSC_STS_USB_ID) &&
|
|
+ !(otgsc & OTGSC_INTSTS_USB_ID))
|
|
+ return IRQ_NONE;
|
|
+
|
|
+ if ((otgsc & mvotg->irq_status) == 0)
|
|
+ return IRQ_NONE;
|
|
+
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static int mv_otg_vbus_notifier_callback(struct notifier_block *nb,
|
|
+ unsigned long val, void *v)
|
|
+{
|
|
+ struct usb_phy *mvotg_phy = container_of(nb, struct usb_phy, vbus_nb);
|
|
+ struct mv_otg *mvotg = container_of(mvotg_phy, struct mv_otg, phy);
|
|
+
|
|
+ /* The clock may be disabled at this time */
|
|
+ if (!mvotg->active) {
|
|
+ mv_otg_enable(mvotg);
|
|
+ mv_otg_init_irq(mvotg);
|
|
+ }
|
|
+
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_otg_id_notifier_callback(struct notifier_block *nb,
|
|
+ unsigned long val, void *v)
|
|
+{
|
|
+ struct usb_phy *mvotg_phy = container_of(nb, struct usb_phy, id_nb);
|
|
+ struct mv_otg *mvotg = container_of(mvotg_phy, struct mv_otg, phy);
|
|
+
|
|
+ /* The clock may be disabled at this time */
|
|
+ if (!mvotg->active) {
|
|
+ mv_otg_enable(mvotg);
|
|
+ mv_otg_init_irq(mvotg);
|
|
+ }
|
|
+
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static ssize_t get_a_bus_req(struct device *dev, struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
|
|
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mvotg->otg_ctrl.a_bus_req);
|
|
+}
|
|
+
|
|
+static ssize_t set_a_bus_req(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
|
|
+
|
|
+ if (count > 2)
|
|
+ return -1;
|
|
+
|
|
+ /* We will use this interface to change to A device */
|
|
+ if (mvotg->phy.otg->state != OTG_STATE_B_IDLE &&
|
|
+ mvotg->phy.otg->state != OTG_STATE_A_IDLE)
|
|
+ return -1;
|
|
+
|
|
+ /* The clock may be disabled and we need to set the irq for ID detection */
|
|
+ mv_otg_enable(mvotg);
|
|
+ mv_otg_init_irq(mvotg);
|
|
+
|
|
+ if (buf[0] == '1') {
|
|
+ mvotg->otg_ctrl.a_bus_req = 1;
|
|
+ mvotg->otg_ctrl.a_bus_drop = 0;
|
|
+ dev_dbg(&mvotg->pdev->dev, "User request: a_bus_req = 1\n");
|
|
+
|
|
+ if (spin_trylock(&mvotg->wq_lock)) {
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+ spin_unlock(&mvotg->wq_lock);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req);
|
|
+
|
|
+static ssize_t set_a_clr_err(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
|
|
+ if (!mvotg->phy.otg->default_a)
|
|
+ return -1;
|
|
+
|
|
+ if (count > 2)
|
|
+ return -1;
|
|
+
|
|
+ if (buf[0] == '1') {
|
|
+ mvotg->otg_ctrl.a_clr_err = 1;
|
|
+ dev_dbg(&mvotg->pdev->dev, "User request: a_clr_err = 1\n");
|
|
+ }
|
|
+
|
|
+ if (spin_trylock(&mvotg->wq_lock)) {
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+ spin_unlock(&mvotg->wq_lock);
|
|
+ }
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
|
|
+
|
|
+static ssize_t get_a_bus_drop(struct device *dev, struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
|
|
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mvotg->otg_ctrl.a_bus_drop);
|
|
+}
|
|
+
|
|
+static ssize_t set_a_bus_drop(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
|
|
+ if (!mvotg->phy.otg->default_a)
|
|
+ return -1;
|
|
+
|
|
+ if (count > 2)
|
|
+ return -1;
|
|
+
|
|
+ if (buf[0] == '0') {
|
|
+ mvotg->otg_ctrl.a_bus_drop = 0;
|
|
+ dev_info(&mvotg->pdev->dev, "User request: a_bus_drop = 0\n");
|
|
+ } else if (buf[0] == '1') {
|
|
+ mvotg->otg_ctrl.a_bus_drop = 1;
|
|
+ mvotg->otg_ctrl.a_bus_req = 0;
|
|
+ dev_info(&mvotg->pdev->dev, "User request: a_bus_drop = 1\n");
|
|
+ dev_info(&mvotg->pdev->dev,
|
|
+ "User request: and a_bus_req = 0\n");
|
|
+ }
|
|
+
|
|
+ if (spin_trylock(&mvotg->wq_lock)) {
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+ spin_unlock(&mvotg->wq_lock);
|
|
+ }
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop,
|
|
+ set_a_bus_drop);
|
|
+
|
|
+static ssize_t get_otg_mode(struct device *dev, struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ char *state = otg_force_host_mode ? "host" : "client";
|
|
+ return sprintf(buf, "OTG mode: %s\n", state);
|
|
+}
|
|
+
|
|
+static ssize_t set_otg_mode(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
|
|
+ char *usage = "Usage: $echo host/client to switch otg mode";
|
|
+ char buff[16], *b;
|
|
+
|
|
+ strscpy(buff, buf, sizeof(buff));
|
|
+ b = strim(buff);
|
|
+ dev_info(dev, "OTG state is %s\n", state_string[mvotg->phy.otg->state]);
|
|
+ if (!strcmp(b, "host")) {
|
|
+ if (mvotg->phy.otg->state == OTG_STATE_B_PERIPHERAL) {
|
|
+ pr_err("Failed to swich mode, pls don't connect to PC!\n");
|
|
+ return count;
|
|
+ }
|
|
+ otg_force_host_mode = 1;
|
|
+ } else if (!strcmp(b, "client")) {
|
|
+ otg_force_host_mode = 0;
|
|
+ } else {
|
|
+ pr_err("%s\n", usage);
|
|
+ return count;
|
|
+ }
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+
|
|
+ return count;
|
|
+}
|
|
+static DEVICE_ATTR(otg_mode, S_IRUGO | S_IWUSR, get_otg_mode, set_otg_mode);
|
|
+
|
|
+static struct attribute *inputs_attrs[] = {
|
|
+ &dev_attr_a_bus_req.attr,
|
|
+ &dev_attr_a_clr_err.attr,
|
|
+ &dev_attr_a_bus_drop.attr,
|
|
+ &dev_attr_otg_mode.attr,
|
|
+ NULL,
|
|
+};
|
|
+
|
|
+static struct attribute_group inputs_attr_group = {
|
|
+ .name = "inputs",
|
|
+ .attrs = inputs_attrs,
|
|
+};
|
|
+
|
|
+static int mv_otg_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
|
|
+
|
|
+ device_init_wakeup(&pdev->dev, 0);
|
|
+ sysfs_remove_group(&mvotg->pdev->dev.kobj, &inputs_attr_group);
|
|
+
|
|
+ if (mvotg->qwork) {
|
|
+ flush_workqueue(mvotg->qwork);
|
|
+ destroy_workqueue(mvotg->qwork);
|
|
+ }
|
|
+
|
|
+ mv_otg_disable(mvotg);
|
|
+
|
|
+ clk_unprepare(mvotg->clk);
|
|
+
|
|
+ usb_remove_phy(&mvotg->phy);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_otg_dt_parse(struct platform_device *pdev,
|
|
+ struct mv_usb_platform_data *pdata)
|
|
+{
|
|
+ struct device_node *np = pdev->dev.of_node;
|
|
+
|
|
+ if (of_property_read_string(np, "spacemit,otg-name",
|
|
+ &((pdev->dev).init_name)))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (of_property_read_u32(np, "spacemit,udc-mode", &(pdata->mode)))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (of_property_read_u32(np, "spacemit,dev-id", &(pdata->id)))
|
|
+ pdata->id = PXA_USB_DEV_OTG;
|
|
+
|
|
+ of_property_read_u32(np, "spacemit,extern-attr", &(pdata->extern_attr));
|
|
+ pdata->otg_force_a_bus_req =
|
|
+ of_property_read_bool(np, "spacemit,otg-force-a-bus-req");
|
|
+ pdata->disable_otg_clock_gating =
|
|
+ of_property_read_bool(np, "spacemit,disable-otg-clock-gating");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_otg_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct mv_usb_platform_data *pdata;
|
|
+ struct mv_otg *mvotg;
|
|
+ struct usb_otg *otg;
|
|
+ struct resource *r;
|
|
+ int retval = 0, i;
|
|
+ struct device_node *np = pdev->dev.of_node;
|
|
+
|
|
+ dev_info(&pdev->dev, "k1x otg probe enter ...\n");
|
|
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
|
|
+ if (pdata == NULL) {
|
|
+ dev_err(&pdev->dev, "failed to allocate platform data\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ mv_otg_dt_parse(pdev, pdata);
|
|
+ mvotg = devm_kzalloc(&pdev->dev, sizeof(*mvotg), GFP_KERNEL);
|
|
+ if (!mvotg) {
|
|
+ dev_err(&pdev->dev, "failed to allocate memory!\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
|
|
+ if (!otg)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ platform_set_drvdata(pdev, mvotg);
|
|
+
|
|
+ mvotg->pdev = pdev;
|
|
+ mvotg->pdata = pdata;
|
|
+
|
|
+ mvotg->clk = devm_clk_get(&pdev->dev, NULL);
|
|
+ if (IS_ERR(mvotg->clk))
|
|
+ return PTR_ERR(mvotg->clk);
|
|
+ clk_prepare(mvotg->clk);
|
|
+
|
|
+ mvotg->reset = devm_reset_control_get_shared(&pdev->dev, NULL);
|
|
+ if (IS_ERR(mvotg->reset))
|
|
+ return PTR_ERR(mvotg->reset);
|
|
+
|
|
+ mvotg->qwork = create_singlethread_workqueue("mv_otg_queue");
|
|
+ if (!mvotg->qwork) {
|
|
+ dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ INIT_DELAYED_WORK(&mvotg->work, mv_otg_work);
|
|
+
|
|
+ /* OTG common part */
|
|
+ mvotg->pdev = pdev;
|
|
+ mvotg->phy.dev = &pdev->dev;
|
|
+ mvotg->phy.type = USB_PHY_TYPE_USB2;
|
|
+ mvotg->phy.otg = otg;
|
|
+ mvotg->phy.label = driver_name;
|
|
+
|
|
+ otg->usb_phy = &mvotg->phy;
|
|
+ otg->state = OTG_STATE_UNDEFINED;
|
|
+ otg->set_host = mv_otg_set_host;
|
|
+ otg->set_peripheral = mv_otg_set_peripheral;
|
|
+ otg->set_vbus = mv_otg_set_vbus;
|
|
+
|
|
+ for (i = 0; i < OTG_TIMER_NUM; i++)
|
|
+ timer_setup(&mvotg->otg_ctrl.timer[i], mv_otg_timer_await_bcon,
|
|
+ 0);
|
|
+
|
|
+ r = platform_get_resource(mvotg->pdev, IORESOURCE_MEM, 0);
|
|
+ if (r == NULL) {
|
|
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
|
|
+ retval = -ENODEV;
|
|
+ goto err_destroy_workqueue;
|
|
+ }
|
|
+
|
|
+ mvotg->cap_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
|
|
+ if (mvotg->cap_regs == NULL) {
|
|
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
|
|
+ retval = -EFAULT;
|
|
+ goto err_destroy_workqueue;
|
|
+ }
|
|
+
|
|
+ mvotg->outer_phy =
|
|
+ devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
|
|
+ if (IS_ERR_OR_NULL(mvotg->outer_phy)) {
|
|
+ retval = PTR_ERR(mvotg->outer_phy);
|
|
+ if (retval != -EPROBE_DEFER)
|
|
+ dev_err(&pdev->dev, "can not find outer phy\n");
|
|
+ goto err_destroy_workqueue;
|
|
+ }
|
|
+
|
|
+ mvotg->vbus_gpio = devm_gpiod_get(&pdev->dev, "vbus", GPIOD_OUT_LOW);
|
|
+ if (IS_ERR_OR_NULL(mvotg->vbus_gpio)) {
+ dev_err(&pdev->dev, "cannot find vbus gpio\n");
+ retval = mvotg->vbus_gpio ? PTR_ERR(mvotg->vbus_gpio) : -ENODEV;
+ goto err_destroy_workqueue;
+ } else {
+ dev_dbg(&pdev->dev, "Use GPIO to control vbus\n");
+ }
|
|
+
|
|
+ /* we will access the controller registers, so enable the udc controller */
|
|
+ retval = mv_otg_enable_internal(mvotg);
|
|
+ if (retval) {
|
|
+ dev_err(&pdev->dev, "mv otg enable error %d\n", retval);
|
|
+ goto err_destroy_workqueue;
|
|
+ }
|
|
+
|
|
+ mvotg->op_regs =
|
|
+ (struct mv_otg_regs __iomem *)((unsigned long)mvotg->cap_regs +
|
|
+ (readl(mvotg->cap_regs) &
|
|
+ CAPLENGTH_MASK));
|
|
+
|
|
+ if (pdata->extern_attr &
|
|
+ (MV_USB_HAS_VBUS_DETECTION | MV_USB_HAS_IDPIN_DETECTION)) {
|
|
+ dev_info(&mvotg->pdev->dev, "%s : support VBUS/ID detect ...\n",
|
|
+ __func__);
|
|
+ /* TODO: use device tree to parse extcon device name */
|
|
+ if (of_property_read_bool(np, "extcon")) {
|
|
+ mvotg->extcon =
|
|
+ extcon_get_edev_by_phandle(&pdev->dev, 0);
|
|
+ if (IS_ERR(mvotg->extcon)) {
|
|
+ dev_err(&pdev->dev,
|
|
+ "couldn't get extcon device\n");
|
|
+ mv_otg_disable_internal(mvotg);
|
|
+ retval = -EPROBE_DEFER;
|
|
+ goto err_destroy_workqueue;
|
|
+ }
|
|
+ dev_info(&pdev->dev, "extcon_dev name: %s \n",
|
|
+ extcon_get_edev_name(mvotg->extcon));
|
|
+ } else {
|
|
+ dev_err(&pdev->dev, "usb extcon cable is not exist\n");
|
|
+ }
|
|
+
|
|
+ if (pdata->extern_attr & MV_USB_HAS_VBUS_DETECTION)
|
|
+ mvotg->clock_gating = 1;
|
|
+
|
|
+ /* extcon notifier registration will be completed in usb_add_phy_dev */
|
|
+ mvotg->phy.vbus_nb.notifier_call =
|
|
+ mv_otg_vbus_notifier_callback;
|
|
+ mvotg->phy.id_nb.notifier_call = mv_otg_id_notifier_callback;
|
|
+ }
|
|
+
|
|
+ if (pdata->disable_otg_clock_gating)
|
|
+ mvotg->clock_gating = 0;
|
|
+
|
|
+ mv_otg_reset(mvotg);
|
|
+ mv_otg_init_irq(mvotg);
|
|
+
|
|
+ // r = platform_get_resource(mvotg->pdev, IORESOURCE_IRQ, 0);
|
|
+ mvotg->irq = platform_get_irq(pdev, 0);
|
|
+ if (mvotg->irq < 0) {
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
+ retval = mvotg->irq;
|
|
+ goto err_disable_clk;
|
|
+ }
|
|
+
|
|
+ // mvotg->irq = r->start;
|
|
+ if (devm_request_irq(&pdev->dev, mvotg->irq, mv_otg_irq, IRQF_SHARED,
|
|
+ driver_name, mvotg)) {
|
|
+ dev_err(&pdev->dev, "Request irq %d for OTG failed\n",
|
|
+ mvotg->irq);
|
|
+ mvotg->irq = 0;
|
|
+ retval = -ENODEV;
|
|
+ goto err_disable_clk;
|
|
+ }
|
|
+
|
|
+ retval = usb_add_phy_dev(&mvotg->phy);
|
|
+ if (retval < 0) {
|
|
+ dev_err(&pdev->dev, "can't register transceiver, %d\n", retval);
|
|
+ goto err_disable_clk;
|
|
+ }
|
|
+
|
|
+ np = of_find_compatible_node(NULL, NULL, "spacemit,spacemit-apmu");
|
|
+ BUG_ON(!np);
|
|
+ mvotg->apmu_base = of_iomap(np, 0);
|
|
+ if (mvotg->apmu_base == NULL) {
|
|
+ dev_err(&pdev->dev, "failed to map apmu base memory\n");
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ retval = sysfs_create_group(&pdev->dev.kobj, &inputs_attr_group);
|
|
+ if (retval < 0) {
|
|
+ dev_dbg(&pdev->dev, "Can't register sysfs attr group: %d\n",
|
|
+ retval);
|
|
+ goto err_remove_otg_phy;
|
|
+ }
|
|
+
|
|
+ spin_lock_init(&mvotg->wq_lock);
|
|
+ if (spin_trylock(&mvotg->wq_lock)) {
|
|
+ mv_otg_run_state_machine(mvotg, 2 * HZ);
|
|
+ spin_unlock(&mvotg->wq_lock);
|
|
+ }
|
|
+
|
|
+ dev_info(&pdev->dev, "successful probe OTG device %s clock gating.\n",
|
|
+ mvotg->clock_gating ? "with" : "without");
|
|
+
|
|
+ device_init_wakeup(&pdev->dev, 1);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_remove_otg_phy:
|
|
+ usb_remove_phy(&mvotg->phy);
|
|
+err_disable_clk:
|
|
+ mv_otg_disable_internal(mvotg);
|
|
+err_destroy_workqueue:
|
|
+ flush_workqueue(mvotg->qwork);
|
|
+ destroy_workqueue(mvotg->qwork);
|
|
+
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
|
|
+{
|
|
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
|
|
+
|
|
+ if (!mvotg->clock_gating)
|
|
+ mv_otg_disable_internal(mvotg);
|
|
+
|
|
+ mvotg->phy.otg->state = OTG_STATE_UNDEFINED;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_otg_resume(struct platform_device *pdev)
|
|
+{
|
|
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
|
|
+ u32 otgsc;
|
|
+
|
|
+ mv_otg_enable_internal(mvotg);
|
|
+
|
|
+ otgsc = readl(&mvotg->op_regs->otgsc);
|
|
+ otgsc |= mvotg->irq_en;
|
|
+ writel(otgsc, &mvotg->op_regs->otgsc);
|
|
+
|
|
+ if (spin_trylock(&mvotg->wq_lock)) {
|
|
+ mv_otg_run_state_machine(mvotg, 0);
|
|
+ spin_unlock(&mvotg->wq_lock);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static const struct of_device_id mv_otg_dt_match[] = {
|
|
+ { .compatible = "spacemit,mv-otg" },
|
|
+ {},
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, mv_otg_dt_match);
|
|
+
|
|
+static struct platform_driver mv_otg_driver = {
|
|
+ .probe = mv_otg_probe,
|
|
+ .remove = mv_otg_remove,
|
|
+ .driver = {
|
|
+ .of_match_table = of_match_ptr(mv_otg_dt_match),
|
|
+ .name = driver_name,
|
|
+ },
|
|
+#ifdef CONFIG_PM
|
|
+ .suspend = mv_otg_suspend,
|
|
+ .resume = mv_otg_resume,
|
|
+#endif
|
|
+};
|
|
+module_platform_driver(mv_otg_driver);
|
|
+
|
|
+MODULE_DESCRIPTION("Spacemit K1-x OTG driver");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/usb/phy/phy-k1x-ci-otg.h b/drivers/usb/phy/phy-k1x-ci-otg.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/phy/phy-k1x-ci-otg.h
|
|
@@ -0,0 +1,181 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-or-later
|
|
+/*
|
|
+ * OTG support for Spacemit k1x SoCs
|
|
+ *
|
|
+ * Copyright (c) 2023 Spacemit Inc.
|
|
+ */
|
|
+
|
|
+#ifndef __MV_USB_OTG_CONTROLLER__
|
|
+#define __MV_USB_OTG_CONTROLLER__
|
|
+
|
|
+#include <linux/gpio.h>
|
|
+#include <linux/reset.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/extcon.h>
|
|
+
|
|
+/* Command Register Bit Masks */
|
|
+#define USBCMD_RUN_STOP (0x00000001)
|
|
+#define USBCMD_CTRL_RESET (0x00000002)
|
|
+
|
|
+/* otgsc Register Bit Masks */
|
|
+#define OTGSC_CTRL_VUSB_DISCHARGE 0x00000001
|
|
+#define OTGSC_CTRL_VUSB_CHARGE 0x00000002
|
|
+#define OTGSC_CTRL_OTG_TERM 0x00000008
|
|
+#define OTGSC_CTRL_DATA_PULSING 0x00000010
|
|
+#define OTGSC_STS_USB_ID 0x00000100
|
|
+#define OTGSC_STS_A_VBUS_VALID 0x00000200
|
|
+#define OTGSC_STS_A_SESSION_VALID 0x00000400
|
|
+#define OTGSC_STS_B_SESSION_VALID 0x00000800
|
|
+#define OTGSC_STS_B_SESSION_END 0x00001000
|
|
+#define OTGSC_STS_1MS_TOGGLE 0x00002000
|
|
+#define OTGSC_STS_DATA_PULSING 0x00004000
|
|
+#define OTGSC_INTSTS_USB_ID 0x00010000
|
|
+#define PORTSCX_PORT_PHCD 0x00800000
|
|
+#define OTGSC_INTSTS_A_VBUS_VALID 0x00020000
|
|
+#define OTGSC_INTSTS_A_SESSION_VALID 0x00040000
|
|
+#define OTGSC_INTSTS_B_SESSION_VALID 0x00080000
|
|
+#define OTGSC_INTSTS_B_SESSION_END 0x00100000
|
|
+#define OTGSC_INTSTS_1MS 0x00200000
|
|
+#define OTGSC_INTSTS_DATA_PULSING 0x00400000
|
|
+#define OTGSC_INTR_USB_ID 0x01000000
|
|
+#define OTGSC_INTR_A_VBUS_VALID 0x02000000
|
|
+#define OTGSC_INTR_A_SESSION_VALID 0x04000000
|
|
+#define OTGSC_INTR_B_SESSION_VALID 0x08000000
|
|
+#define OTGSC_INTR_B_SESSION_END 0x10000000
|
|
+#define OTGSC_INTR_1MS_TIMER 0x20000000
|
|
+#define OTGSC_INTR_DATA_PULSING 0x40000000
|
|
+
|
|
+#define CAPLENGTH_MASK (0xff)
|
|
+
|
|
+/* Timer's interval, unit 10ms */
|
|
+#define T_A_WAIT_VRISE 100
|
|
+#define T_A_WAIT_BCON 2000
|
|
+#define T_A_AIDL_BDIS 100
|
|
+#define T_A_BIDL_ADIS 20
|
|
+#define T_B_ASE0_BRST 400
|
|
+#define T_B_SE0_SRP 300
|
|
+#define T_B_SRP_FAIL 2000
|
|
+#define T_B_DATA_PLS 10
|
|
+#define T_B_SRP_INIT 100
|
|
+#define T_A_SRP_RSPNS 10
|
|
+#define T_A_DRV_RSM 5
|
|
+
|
|
+enum otg_function {
|
|
+ OTG_B_DEVICE = 0,
|
|
+ OTG_A_DEVICE
|
|
+};
|
|
+
|
|
+enum mv_otg_timer {
|
|
+ A_WAIT_BCON_TIMER = 0,
|
|
+ OTG_TIMER_NUM
|
|
+};
|
|
+
|
|
+/* PXA OTG state machine */
|
|
+struct mv_otg_ctrl {
|
|
+ /* internal variables */
|
|
+ u8 a_set_b_hnp_en; /* A-Device set b_hnp_en */
|
|
+ u8 b_srp_done;
|
|
+ u8 b_hnp_en;
|
|
+
|
|
+ /* OTG inputs */
|
|
+ u8 a_bus_drop;
|
|
+ u8 a_bus_req;
|
|
+ u8 a_clr_err;
|
|
+ u8 a_bus_resume;
|
|
+ u8 a_bus_suspend;
|
|
+ u8 a_conn;
|
|
+ u8 a_sess_vld;
|
|
+ u8 a_srp_det;
|
|
+ u8 a_vbus_vld;
|
|
+ u8 b_bus_req; /* B-Device Require Bus */
|
|
+ u8 b_bus_resume;
|
|
+ u8 b_bus_suspend;
|
|
+ u8 b_conn;
|
|
+ u8 b_se0_srp;
|
|
+ u8 b_sess_end;
|
|
+ u8 b_sess_vld;
|
|
+ u8 id;
|
|
+ u8 a_suspend_req;
|
|
+
|
|
+ /*Timer event */
|
|
+ u8 a_aidl_bdis_timeout;
|
|
+ u8 b_ase0_brst_timeout;
|
|
+ u8 a_bidl_adis_timeout;
|
|
+ u8 a_wait_bcon_timeout;
|
|
+
|
|
+ struct timer_list timer[OTG_TIMER_NUM];
|
|
+};
|
|
+
|
|
+#define VUSBHS_MAX_PORTS 8
|
|
+
|
|
+struct mv_otg_regs {
|
|
+ u32 usbcmd; /* Command register */
|
|
+ u32 usbsts; /* Status register */
|
|
+ u32 usbintr; /* Interrupt enable */
|
|
+ u32 frindex; /* Frame index */
|
|
+ u32 reserved1[1];
|
|
+ u32 deviceaddr; /* Device Address */
|
|
+ u32 eplistaddr; /* Endpoint List Address */
|
|
+ u32 ttctrl; /* HOST TT status and control */
|
|
+ u32 burstsize; /* Programmable Burst Size */
|
|
+ u32 txfilltuning; /* Host Transmit Pre-Buffer Packet Tuning */
|
|
+ u32 reserved[4];
|
|
+ u32 epnak; /* Endpoint NAK */
|
|
+ u32 epnaken; /* Endpoint NAK Enable */
|
|
+ u32 configflag; /* Configured Flag register */
|
|
+ u32 portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
|
|
+ u32 otgsc;
|
|
+ u32 usbmode; /* USB Host/Device mode */
|
|
+ u32 epsetupstat; /* Endpoint Setup Status */
|
|
+ u32 epprime; /* Endpoint Initialize */
|
|
+ u32 epflush; /* Endpoint De-initialize */
|
|
+ u32 epstatus; /* Endpoint Status */
|
|
+ u32 epcomplete; /* Endpoint Interrupt On Complete */
|
|
+ u32 epctrlx[16]; /* Endpoint Control, where x = 0.. 15 */
|
|
+};
|
|
+
|
|
+struct mv_otg {
|
|
+ struct usb_phy phy;
|
|
+ struct usb_phy *outer_phy;
|
|
+ struct mv_otg_ctrl otg_ctrl;
|
|
+
|
|
+ /* base address */
|
|
+ void __iomem *cap_regs;
|
|
+ void __iomem *apmu_base;
|
|
+ struct mv_otg_regs __iomem *op_regs;
|
|
+
|
|
+ struct platform_device *pdev;
|
|
+ int irq;
|
|
+ u32 irq_status;
|
|
+ u32 irq_en;
|
|
+
|
|
+ struct delayed_work work;
|
|
+ struct workqueue_struct *qwork;
|
|
+
|
|
+ spinlock_t wq_lock;
|
|
+
|
|
+ struct mv_usb_platform_data *pdata;
|
|
+ struct notifier_block notifier;
|
|
+ struct notifier_block notifier_charger;
|
|
+
|
|
+ struct pm_qos_request qos_idle;
|
|
+ s32 lpm_qos;
|
|
+
|
|
+ unsigned int active;
|
|
+ unsigned int clock_gating;
|
|
+ struct clk *clk;
|
|
+ struct reset_control *reset;
|
|
+ struct gpio_desc *vbus_gpio;
|
|
+ unsigned int charger_type;
|
|
+
|
|
+
|
|
+ /* for vbus detection */
|
|
+ struct extcon_specific_cable_nb vbus_dev;
|
|
+ /* for id detection */
|
|
+ struct extcon_specific_cable_nb id_dev;
|
|
+ struct extcon_dev *extcon;
|
|
+
|
|
+ struct regulator *vbus_otg;
|
|
+};
|
|
+
|
|
+#endif
|
|
diff --git a/drivers/usb/phy/phy-k1x-ci-usb2.c b/drivers/usb/phy/phy-k1x-ci-usb2.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/phy/phy-k1x-ci-usb2.c
|
|
@@ -0,0 +1,167 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-or-later
|
|
+/*
|
|
+ * UDC Phy support for Spacemit k1x SoCs
|
|
+ *
|
|
+ * Copyright (c) 2023 Spacemit Inc.
|
|
+ */
|
|
+
|
|
+#include <linux/resource.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/export.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/platform_data/k1x_ci_usb.h>
|
|
+#include <linux/of_address.h>
|
|
+#include "phy-k1x-ci-usb2.h"
|
|
+
|
|
+static int mv_usb2_phy_init(struct usb_phy *phy)
|
|
+{
|
|
+ struct mv_usb2_phy *mv_phy = container_of(phy, struct mv_usb2_phy, phy);
|
|
+ void __iomem *base = mv_phy->base;
|
|
+ uint32_t loops, temp;
|
|
+
|
|
+ clk_enable(mv_phy->clk);
|
|
+
|
|
+ // make sure the usb controller is not in the middle of a reset before any configuration
|
|
+ udelay(150);
|
|
+
|
|
+ loops = USB2D_CTRL_RESET_TIME_MS * 1000;
|
|
+
|
|
+ // wait for the usb2 phy PLL to become ready
|
|
+ do {
|
|
+ temp = readl(base + USB2_PHY_REG01);
|
|
+ if (temp & USB2_PHY_REG01_PLL_IS_READY)
|
|
+ break;
|
|
+ udelay(50);
|
|
+ } while(--loops);
|
|
+
|
|
+ if (loops == 0) {
|
|
+ pr_err("Wait PHY_REG01[PLLREADY] timeout\n");
|
|
+ return -ETIMEDOUT;
|
|
+ }
|
|
+
|
|
+ // release usb2 phy internal reset and enable clock gating
|
|
+ writel(0x60ef, base + USB2_PHY_REG01);
|
|
+ writel(0x1c, base + USB2_PHY_REG0D);
|
|
+
|
|
+ temp = readl(base + USB2_ANALOG_REG14_13);
|
|
+ temp &= ~(USB2_ANALOG_HSDAC_ISEL_MASK);
|
|
+ temp |= USB2_ANALOG_HSDAC_ISEL_15_INC | USB2_ANALOG_HSDAC_IREG_EN;
|
|
+ writel(temp, base + USB2_ANALOG_REG14_13);
|
|
+
|
|
+ /* auto clear host disconnect */
|
|
+ temp = readl(base + USB2_PHY_REG04);
|
|
+ temp |= USB2_PHY_REG04_AUTO_CLEAR_DIS;
|
|
+ writel(temp, base + USB2_PHY_REG04);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void mv_usb2_phy_shutdown(struct usb_phy *phy)
|
|
+{
|
|
+ struct mv_usb2_phy *mv_phy = container_of(phy, struct mv_usb2_phy, phy);
|
|
+
|
|
+ clk_disable(mv_phy->clk);
|
|
+}
|
|
+
|
|
+static int mv_usb2_phy_connect_change(struct usb_phy *phy,
|
|
+ enum usb_device_speed speed)
|
|
+{
|
|
+ struct mv_usb2_phy *mv_phy = container_of(phy, struct mv_usb2_phy, phy);
|
|
+ uint32_t reg;
|
|
+ if (!mv_phy->handle_connect_change)
|
|
+ return 0;
|
|
+ reg = readl(mv_phy->base + USB2_PHY_REG40);
|
|
+ reg |= USB2_PHY_REG40_CLR_DISC;
|
|
+ writel(reg, mv_phy->base + USB2_PHY_REG40);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_usb2_phy_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct mv_usb2_phy *mv_phy;
|
|
+ struct resource *r;
|
|
+
|
|
+ dev_dbg(&pdev->dev, "k1x-ci-usb-phy-probe: Enter...\n");
|
|
+ mv_phy = devm_kzalloc(&pdev->dev, sizeof(*mv_phy), GFP_KERNEL);
|
|
+ if (mv_phy == NULL) {
|
|
+ dev_err(&pdev->dev, "failed to allocate memory\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ mv_phy->pdev = pdev;
|
|
+
|
|
+ mv_phy->clk = devm_clk_get(&pdev->dev, NULL);
|
|
+ if (IS_ERR(mv_phy->clk)) {
|
|
+ dev_err(&pdev->dev, "failed to get clock.\n");
|
|
+ return PTR_ERR(mv_phy->clk);
|
|
+ }
|
|
+ clk_prepare(mv_phy->clk);
|
|
+
|
|
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+ if (r == NULL) {
|
|
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ mv_phy->base = devm_ioremap_resource(&pdev->dev, r);
|
|
+ if (IS_ERR(mv_phy->base)) {
+ dev_err(&pdev->dev, "failed to map register base\n");
+ return PTR_ERR(mv_phy->base);
+ }
|
|
+
|
|
+ mv_phy->handle_connect_change = device_property_read_bool(&pdev->dev,
|
|
+ "spacemit,handle_connect_change");
|
|
+
|
|
+ mv_phy->phy.dev = &pdev->dev;
|
|
+ mv_phy->phy.label = "mv-usb2";
|
|
+ mv_phy->phy.type = USB_PHY_TYPE_USB2;
|
|
+ mv_phy->phy.init = mv_usb2_phy_init;
|
|
+ mv_phy->phy.shutdown = mv_usb2_phy_shutdown;
|
|
+ mv_phy->phy.notify_disconnect = mv_usb2_phy_connect_change;
|
|
+ mv_phy->phy.notify_connect = mv_usb2_phy_connect_change;
|
|
+
|
|
+ usb_add_phy_dev(&mv_phy->phy);
|
|
+
|
|
+ platform_set_drvdata(pdev, mv_phy);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mv_usb2_phy_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct mv_usb2_phy *mv_phy = platform_get_drvdata(pdev);
|
|
+
|
|
+ usb_remove_phy(&mv_phy->phy);
|
|
+
|
|
+ clk_unprepare(mv_phy->clk);
|
|
+
|
|
+ platform_set_drvdata(pdev, NULL);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id mv_usbphy_dt_match[] = {
|
|
+ { .compatible = "spacemit,usb2-phy",},
|
|
+ {},
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, mv_usbphy_dt_match);
|
|
+
|
|
+static struct platform_driver mv_usb2_phy_driver = {
|
|
+ .probe = mv_usb2_phy_probe,
|
|
+ .remove = mv_usb2_phy_remove,
|
|
+ .driver = {
|
|
+ .name = "mv-usb2-phy",
|
|
+ .owner = THIS_MODULE,
|
|
+ .of_match_table = of_match_ptr(mv_usbphy_dt_match),
|
|
+ },
|
|
+};
|
|
+
|
|
+module_platform_driver(mv_usb2_phy_driver);
|
|
+MODULE_DESCRIPTION("Spacemit USB2 phy driver");
|
|
+MODULE_LICENSE("GPL v2");
|
|
diff --git a/drivers/usb/phy/phy-k1x-ci-usb2.h b/drivers/usb/phy/phy-k1x-ci-usb2.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/usb/phy/phy-k1x-ci-usb2.h
|
|
@@ -0,0 +1,43 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-or-later
|
|
+
|
|
+#ifndef __MV_USB2_H
|
|
+#define __MV_USB2_H
|
|
+#include <linux/usb/phy.h>
|
|
+
|
|
+/* phy regs */
|
|
+#define USB2_PHY_REG01 0x4
|
|
+#define USB2_PHY_REG01_PLL_IS_READY (0x1 << 0)
|
|
+#define USB2_PHY_REG04 0x10
|
|
+#define USB2_PHY_REG04_EN_HSTSOF (0x1 << 0)
|
|
+#define USB2_PHY_REG04_AUTO_CLEAR_DIS (0x1 << 2)
|
|
+#define USB2_PHY_REG08 0x20
|
|
+#define USB2_PHY_REG08_DISCON_DET (0x1 << 9)
|
|
+#define USB2_PHY_REG0D 0x34
|
|
+#define USB2_PHY_REG40 0x40
|
|
+#define USB2_PHY_REG40_CLR_DISC (0x1 << 0)
|
|
+#define USB2_PHY_REG26 0x98
|
|
+#define USB2_PHY_REG22 0x88
|
|
+#define USB2_CFG_FORCE_CDRCLK (0x1 << 6)
|
|
+#define USB2_PHY_REG06 0x18
|
|
+#define USB2_CFG_HS_SRC_SEL (0x1 << 0)
|
|
+
|
|
+#define USB2_ANALOG_REG14_13 0xa4
|
|
+#define USB2_ANALOG_HSDAC_IREG_EN (0x1 << 4)
|
|
+#define USB2_ANALOG_HSDAC_ISEL_MASK (0xf)
|
|
+#define USB2_ANALOG_HSDAC_ISEL_11_INC (0xb)
|
|
+#define USB2_ANALOG_HSDAC_ISEL_25_INC (0xf)
|
|
+#define USB2_ANALOG_HSDAC_ISEL_15_INC (0xc)
|
|
+#define USB2_ANALOG_HSDAC_ISEL_17_INC (0xd)
|
|
+#define USB2_ANALOG_HSDAC_ISEL_22_INC (0xe)
|
|
+
|
|
+#define USB2D_CTRL_RESET_TIME_MS 50
|
|
+
|
|
+struct mv_usb2_phy {
|
|
+ struct usb_phy phy;
|
|
+ struct platform_device *pdev;
|
|
+ void __iomem *base;
|
|
+ struct clk *clk;
|
|
+ bool handle_connect_change;
|
|
+};
|
|
+
|
|
+#endif
|
|
--
|
|
Armbian
|
|
|