Merge branch '2024-03-04-assorted-TI-K3-updates' into next

- Merge assorted TI K3 platform / SoC updates
This commit is contained in:
Tom Rini 2024-03-04 12:07:21 -05:00
commit 4ce29a9a4a
16 changed files with 419 additions and 340 deletions

View File

@ -116,13 +116,6 @@ config K3_EARLY_CONS_IDX
Use this option to set the index of the serial device to be used Use this option to set the index of the serial device to be used
for the early console during SPL execution. for the early console during SPL execution.
config SYS_K3_SPL_ATF
bool "Start Cortex-A from SPL"
depends on CPU_V7R
help
Enabling this will try to start Cortex-A (typically with ATF)
after SPL from R5.
config K3_ATF_LOAD_ADDR config K3_ATF_LOAD_ADDR
hex "Load address of ATF image" hex "Load address of ATF image"
default 0x70000000 default 0x70000000

View File

@ -3,9 +3,8 @@
# Copyright (C) 2017-2018 Texas Instruments Incorporated - https://www.ti.com/ # Copyright (C) 2017-2018 Texas Instruments Incorporated - https://www.ti.com/
# Lokesh Vutla <lokeshvutla@ti.com> # Lokesh Vutla <lokeshvutla@ti.com>
obj-$(CONFIG_ARM64) += arm64/
obj-$(CONFIG_CPU_V7R) += r5/ obj-$(CONFIG_CPU_V7R) += r5/
obj-$(CONFIG_ARM64) += arm64-mmu.o
obj-$(CONFIG_ARM64) += cache.o
obj-$(CONFIG_OF_LIBFDT) += common_fdt.o obj-$(CONFIG_OF_LIBFDT) += common_fdt.o
ifeq ($(CONFIG_OF_LIBFDT)$(CONFIG_OF_SYSTEM_SETUP),yy) ifeq ($(CONFIG_OF_LIBFDT)$(CONFIG_OF_SYSTEM_SETUP),yy)
obj-$(CONFIG_SOC_K3_AM654) += am654_fdt.o obj-$(CONFIG_SOC_K3_AM654) += am654_fdt.o

View File

@ -38,11 +38,48 @@ static void fdt_fixup_pru_node_am625(void *blob, int has_pru)
fdt_del_node_path(blob, "/bus@f0000/pruss@30040000"); fdt_del_node_path(blob, "/bus@f0000/pruss@30040000");
} }
/*
 * Set the "temperature" property of every critical trip point below the
 * given thermal zone to maxc, converted to millidegrees Celsius.
 *
 * Returns 0 on success, -1 if the zone has no "trips" subnode or a
 * property update fails.
 */
static int fdt_fixup_trips_node(void *blob, int zoneoffset, int maxc)
{
	int trips_off;
	int trip_off;

	trips_off = fdt_subnode_offset(blob, zoneoffset, "trips");
	if (trips_off < 0)
		return -1;

	fdt_for_each_subnode(trip_off, blob, trips_off) {
		const char *trip_type = fdt_getprop(blob, trip_off, "type", NULL);

		if (!trip_type)
			continue;
		if (strncmp(trip_type, "critical", 8))
			continue;
		if (fdt_setprop_u32(blob, trip_off, "temperature",
				    1000 * maxc) < 0)
			return -1;
	}

	return 0;
}
/*
 * Walk every zone under /thermal-zones and rewrite its critical trip
 * temperature to the device's maximum rated value (degrees Celsius).
 * A missing /thermal-zones node is ignored; per-zone failures are only
 * reported.
 */
static void fdt_fixup_thermal_zone_nodes_am625(void *blob, int maxc)
{
	int zones_off;
	int zone_off;

	zones_off = fdt_path_offset(blob, "/thermal-zones");
	if (zones_off < 0)
		return;

	fdt_for_each_subnode(zone_off, blob, zones_off) {
		if (fdt_fixup_trips_node(blob, zone_off, maxc) >= 0)
			continue;
		printf("Failed to set temperature in %s critical trips\n",
		       fdt_get_name(blob, zone_off, NULL));
	}
}
int ft_system_setup(void *blob, struct bd_info *bd) int ft_system_setup(void *blob, struct bd_info *bd)
{ {
fdt_fixup_cores_nodes_am625(blob, k3_get_core_nr()); fdt_fixup_cores_nodes_am625(blob, k3_get_core_nr());
fdt_fixup_gpu_nodes_am625(blob, k3_has_gpu()); fdt_fixup_gpu_nodes_am625(blob, k3_has_gpu());
fdt_fixup_pru_node_am625(blob, k3_has_pru()); fdt_fixup_pru_node_am625(blob, k3_has_pru());
fdt_fixup_thermal_zone_nodes_am625(blob, k3_get_max_temp());
return 0; return 0;
} }

View File

@ -142,6 +142,9 @@ void board_init_f(ulong dummy)
panic("ROM has not loaded TIFS firmware\n"); panic("ROM has not loaded TIFS firmware\n");
k3_sysfw_loader(true, NULL, NULL); k3_sysfw_loader(true, NULL, NULL);
/* Disable ROM configured firewalls right after loading sysfw */
remove_fwl_configs(cbass_main_fwls, ARRAY_SIZE(cbass_main_fwls));
#endif #endif
#if defined(CONFIG_CPU_V7R) #if defined(CONFIG_CPU_V7R)
@ -170,9 +173,6 @@ void board_init_f(ulong dummy)
/* Output System Firmware version info */ /* Output System Firmware version info */
k3_sysfw_print_ver(); k3_sysfw_print_ver();
/* Disable ROM configured firewalls right after loading sysfw */
remove_fwl_configs(cbass_main_fwls, ARRAY_SIZE(cbass_main_fwls));
#if defined(CONFIG_K3_AM62A_DDRSS) #if defined(CONFIG_K3_AM62A_DDRSS)
ret = uclass_get_device(UCLASS_RAM, 0, &dev); ret = uclass_get_device(UCLASS_RAM, 0, &dev);
if (ret) if (ret)

View File

@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
obj-y += arm64-mmu.o
obj-y += cache.o

View File

@ -28,27 +28,6 @@
#include <elf.h> #include <elf.h>
#include <soc.h> #include <soc.h>
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
enum {
IMAGE_ID_ATF,
IMAGE_ID_OPTEE,
IMAGE_ID_SPL,
IMAGE_ID_DM_FW,
IMAGE_AMT,
};
#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
static const char *image_os_match[IMAGE_AMT] = {
"arm-trusted-firmware",
"tee",
"U-Boot",
"DM",
};
#endif
static struct image_info fit_image_info[IMAGE_AMT];
#endif
struct ti_sci_handle *get_ti_sci_handle(void) struct ti_sci_handle *get_ti_sci_handle(void)
{ {
struct udevice *dev; struct udevice *dev;
@ -128,234 +107,13 @@ int early_console_init(void)
} }
#endif #endif
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF) #if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS) && !IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
char *part;
env_init();
env_relocate();
switch (spl_boot_device()) {
case BOOT_DEVICE_MMC2:
part = env_get("bootpart");
env_set("storage_interface", "mmc");
env_set("fw_dev_part", part);
break;
case BOOT_DEVICE_SPI:
env_set("storage_interface", "ubi");
env_set("fw_ubi_mtdpart", "UBI");
env_set("fw_ubi_volume", "UBI0");
break;
default:
printf("%s from device %u not supported!\n",
__func__, spl_boot_device());
return;
}
#endif
}
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
struct udevice *fsdev;
char *name = NULL;
int size = 0;
if (!IS_ENABLED(CONFIG_FS_LOADER))
return 0;
*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
switch (spl_boot_device()) {
case BOOT_DEVICE_MMC2:
name = env_get(name_fw);
*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
break;
default:
printf("Loading rproc fw image from device %u not supported!\n",
spl_boot_device());
return 0;
}
#endif
if (!*loadaddr)
return 0;
if (!get_fs_loader(&fsdev)) {
size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
0, 0);
}
return size;
}
void release_resources_for_core_shutdown(void)
{
struct ti_sci_handle *ti_sci = get_ti_sci_handle();
struct ti_sci_dev_ops *dev_ops = &ti_sci->ops.dev_ops;
struct ti_sci_proc_ops *proc_ops = &ti_sci->ops.proc_ops;
int ret;
u32 i;
/* Iterate through list of devices to put (shutdown) */
for (i = 0; i < ARRAY_SIZE(put_device_ids); i++) {
u32 id = put_device_ids[i];
ret = dev_ops->put_device(ti_sci, id);
if (ret)
panic("Failed to put device %u (%d)\n", id, ret);
}
/* Iterate through list of cores to put (shutdown) */
for (i = 0; i < ARRAY_SIZE(put_core_ids); i++) {
u32 id = put_core_ids[i];
/*
* Queue up the core shutdown request. Note that this call
* needs to be followed up by an actual invocation of an WFE
* or WFI CPU instruction.
*/
ret = proc_ops->proc_shutdown_no_wait(ti_sci, id);
if (ret)
panic("Failed sending core %u shutdown message (%d)\n",
id, ret);
}
}
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
typedef void __noreturn (*image_entry_noargs_t)(void);
struct ti_sci_handle *ti_sci = get_ti_sci_handle();
u32 loadaddr = 0;
int ret, size = 0, shut_cpu = 0;
/* Release all the exclusive devices held by SPL before starting ATF */
ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);
ret = rproc_init();
if (ret)
panic("rproc failed to be initialized (%d)\n", ret);
init_env();
if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
&loadaddr);
}
/*
* It is assumed that remoteproc device 1 is the corresponding
* Cortex-A core which runs ATF. Make sure DT reflects the same.
*/
if (!fit_image_info[IMAGE_ID_ATF].image_start)
fit_image_info[IMAGE_ID_ATF].image_start =
spl_image->entry_point;
ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
if (ret)
panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);
#if (CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS) && IS_ENABLED(CONFIG_SYS_K3_SPL_ATF))
/* Authenticate ATF */
void *image_addr = (void *)fit_image_info[IMAGE_ID_ATF].image_start;
debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
fit_image_info[IMAGE_ID_ATF].image_start,
fit_image_info[IMAGE_ID_ATF].image_len,
image_os_match[IMAGE_ID_ATF]);
ti_secure_image_post_process(&image_addr,
(size_t *)&fit_image_info[IMAGE_ID_ATF].image_len);
/* Authenticate OPTEE */
image_addr = (void *)fit_image_info[IMAGE_ID_OPTEE].image_start;
debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
fit_image_info[IMAGE_ID_OPTEE].image_start,
fit_image_info[IMAGE_ID_OPTEE].image_len,
image_os_match[IMAGE_ID_OPTEE]);
ti_secure_image_post_process(&image_addr,
(size_t *)&fit_image_info[IMAGE_ID_OPTEE].image_len);
#endif
if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
!(size > 0 && valid_elf_image(loadaddr))) {
shut_cpu = 1;
goto start_arm64;
}
if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
loadaddr = load_elf_image_phdr(loadaddr);
} else {
loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
if (valid_elf_image(loadaddr))
loadaddr = load_elf_image_phdr(loadaddr);
}
debug("%s: jumping to address %x\n", __func__, loadaddr);
start_arm64:
/* Add an extra newline to differentiate the ATF logs from SPL */
printf("Starting ATF on ARM64 core...\n\n");
ret = rproc_start(1);
if (ret)
panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);
if (shut_cpu) {
debug("Shutting down...\n");
release_resources_for_core_shutdown();
while (1)
asm volatile("wfe");
}
image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;
image_entry();
}
#endif
#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
void board_fit_image_post_process(const void *fit, int node, void **p_image, void board_fit_image_post_process(const void *fit, int node, void **p_image,
size_t *p_size) size_t *p_size)
{
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
int len;
int i;
const char *os;
u32 addr;
os = fdt_getprop(fit, node, "os", &len);
addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);
debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
addr, *p_size, os);
for (i = 0; i < IMAGE_AMT; i++) {
if (!strcmp(os, image_os_match[i])) {
fit_image_info[i].image_start = addr;
fit_image_info[i].image_len = *p_size;
debug("%s: matched image for ID %d\n", __func__, i);
break;
}
}
/*
* Only DM and the DTBs are being authenticated here,
* rest will be authenticated when A72 cluster is up
*/
if ((i != IMAGE_ID_ATF) && (i != IMAGE_ID_OPTEE))
#endif
{ {
ti_secure_image_check_binary(p_image, p_size); ti_secure_image_check_binary(p_image, p_size);
ti_secure_image_post_process(p_image, p_size); ti_secure_image_post_process(p_image, p_size);
} }
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
else
ti_secure_image_check_binary(p_image, p_size);
#endif
}
#endif #endif
#ifndef CONFIG_SYSRESET #ifndef CONFIG_SYSRESET
@ -453,75 +211,6 @@ void board_prep_linux(struct bootm_headers *images)
} }
#endif #endif
#ifdef CONFIG_CPU_V7R
void disable_linefill_optimization(void)
{
u32 actlr;
/*
* On K3 devices there are 2 conditions where R5F can deadlock:
* 1.When software is performing series of store operations to
* cacheable write back/write allocate memory region and later
* on software execute barrier operation (DSB or DMB). R5F may
* hang at the barrier instruction.
* 2.When software is performing a mix of load and store operations
* within a tight loop and store operations are all writing to
* cacheable write back/write allocates memory regions, R5F may
* hang at one of the load instruction.
*
* To avoid the above two conditions disable linefill optimization
* inside Cortex R5F.
*/
asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
actlr |= (1 << 13); /* Set DLFO bit */
asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif
static void remove_fwl_regions(struct fwl_data fwl_data, size_t num_regions,
enum k3_firewall_region_type fwl_type)
{
struct ti_sci_fwl_ops *fwl_ops;
struct ti_sci_handle *ti_sci;
struct ti_sci_msg_fwl_region region;
size_t j;
ti_sci = get_ti_sci_handle();
fwl_ops = &ti_sci->ops.fwl_ops;
for (j = 0; j < fwl_data.regions; j++) {
region.fwl_id = fwl_data.fwl_id;
region.region = j;
region.n_permission_regs = 3;
fwl_ops->get_fwl_region(ti_sci, &region);
/* Don't disable the background regions */
if (region.control != 0 &&
((region.control >> K3_FIREWALL_BACKGROUND_BIT) & 1) == fwl_type) {
pr_debug("Attempting to disable firewall %5d (%25s)\n",
region.fwl_id, fwl_data.name);
region.control = 0;
if (fwl_ops->set_fwl_region(ti_sci, &region))
pr_err("Could not disable firewall %5d (%25s)\n",
region.fwl_id, fwl_data.name);
}
}
}
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
size_t i;
for (i = 0; i < fwl_data_size; i++) {
remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
K3_FIREWALL_REGION_FOREGROUND);
remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
K3_FIREWALL_REGION_BACKGROUND);
}
}
void spl_enable_cache(void) void spl_enable_cache(void)
{ {
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF)) #if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))

View File

@ -42,6 +42,10 @@
#define JTAG_DEV_FEATURE_NO_PRU 0x4 #define JTAG_DEV_FEATURE_NO_PRU 0x4
#define JTAG_DEV_TEMP_COMMERCIAL 0x3
#define JTAG_DEV_TEMP_INDUSTRIAL 0x4
#define JTAG_DEV_TEMP_AUTOMOTIVE 0x5
#define CTRLMMR_MAIN_DEVSTAT (WKUP_CTRL_MMR0_BASE + 0x30) #define CTRLMMR_MAIN_DEVSTAT (WKUP_CTRL_MMR0_BASE + 0x30)
#define MAIN_DEVSTAT_PRIMARY_BOOTMODE_MASK GENMASK(6, 3) #define MAIN_DEVSTAT_PRIMARY_BOOTMODE_MASK GENMASK(6, 3)
#define MAIN_DEVSTAT_PRIMARY_BOOTMODE_SHIFT 3 #define MAIN_DEVSTAT_PRIMARY_BOOTMODE_SHIFT 3
@ -105,6 +109,19 @@ static inline int k3_get_temp_grade(void)
return (full_devid & JTAG_DEV_TEMP_MASK) >> JTAG_DEV_TEMP_SHIFT; return (full_devid & JTAG_DEV_TEMP_MASK) >> JTAG_DEV_TEMP_SHIFT;
} }
/*
 * Map the JTAG-reported temperature grade of this device to its maximum
 * rated junction temperature in degrees Celsius.  Unknown grades fall
 * back to the commercial rating.
 */
static inline int k3_get_max_temp(void)
{
	int grade = k3_get_temp_grade();

	if (grade == JTAG_DEV_TEMP_AUTOMOTIVE)
		return 125;
	if (grade == JTAG_DEV_TEMP_INDUSTRIAL)
		return 105;
	/* JTAG_DEV_TEMP_COMMERCIAL and anything unrecognized */
	return 95;
}
static inline int k3_has_pru(void) static inline int k3_has_pru(void)
{ {
u32 full_devid = readl(CTRLMMR_WKUP_JTAG_DEVICE_ID); u32 full_devid = readl(CTRLMMR_WKUP_JTAG_DEVICE_ID);

View File

@ -43,3 +43,9 @@ config K3_SYSFW_IMAGE_SPI_OFFS
help help
Offset of the combined System Firmware and configuration image tree Offset of the combined System Firmware and configuration image tree
blob to be loaded when booting from a SPI flash memory. blob to be loaded when booting from a SPI flash memory.
config SYS_K3_SPL_ATF
bool "Start Cortex-A from SPL"
help
Enabling this will try to start Cortex-A (typically with ATF)
after SPL from R5.

View File

@ -10,6 +10,7 @@ obj-$(CONFIG_SOC_K3_AM625) += am62x/
obj-$(CONFIG_SOC_K3_AM62A7) += am62ax/ obj-$(CONFIG_SOC_K3_AM62A7) += am62ax/
obj-$(CONFIG_SOC_K3_J784S4) += j784s4/ obj-$(CONFIG_SOC_K3_J784S4) += j784s4/
obj-y += common.o
obj-y += lowlevel_init.o obj-y += lowlevel_init.o
obj-y += r5_mpu.o obj-y += r5_mpu.o

View File

@ -0,0 +1,328 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* K3: R5 Common Architecture initialization
*
* Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/printk.h>
#include <linux/types.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <image.h>
#include <fs_loader.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <spl.h>
#include <remoteproc.h>
#include <elf.h>
#include "../common.h"
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
enum {
IMAGE_ID_ATF,
IMAGE_ID_OPTEE,
IMAGE_ID_SPL,
IMAGE_ID_DM_FW,
IMAGE_AMT,
};
#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
static const char *image_os_match[IMAGE_AMT] = {
"arm-trusted-firmware",
"tee",
"U-Boot",
"DM",
};
#endif
static struct image_info fit_image_info[IMAGE_AMT];
/*
 * Load the U-Boot environment and derive the firmware-storage settings
 * from the current boot device: sets "storage_interface" plus the
 * device/partition variables later used to locate remote-core firmware.
 *
 * Compiles to a no-op unless CONFIG_SPL_ENV_SUPPORT is enabled.  Boot
 * devices other than MMC2 and SPI are reported and left unconfigured.
 */
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		/* Reuse the boot partition chosen by the board environment */
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}
/*
 * Load a remote-core firmware image via the FS loader.
 *
 * @name_fw:       name of the environment variable holding the firmware
 *                 file name
 * @name_loadaddr: name of the environment variable holding the hex load
 *                 address
 * @loadaddr:      out: address the image was loaded to (0 if unset)
 *
 * Returns the number of bytes loaded, 0 when loading is not possible
 * (FS loader disabled, unsupported boot device, or environment
 * variables unset), or a negative error from the loader.
 */
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	if (!IS_ENABLED(CONFIG_FS_LOADER))
		return 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	/*
	 * Bail out if either env variable was unset: a NULL name must not
	 * reach request_firmware_into_buf().
	 */
	if (!name || !*loadaddr)
		return 0;

	if (!get_fs_loader(&fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}
/*
 * Hand back this core's resources via TI-SCI ahead of shutting the boot
 * core down: power down every device in put_device_ids[], then queue a
 * shutdown request for each core in put_core_ids[].
 *
 * The queued core shutdowns only take effect once the caller executes a
 * WFE/WFI instruction.  Any TI-SCI failure panics, since continuing
 * with resources still held would wedge the handoff.
 */
void release_resources_for_core_shutdown(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	struct ti_sci_dev_ops *dev_ops = &ti_sci->ops.dev_ops;
	struct ti_sci_proc_ops *proc_ops = &ti_sci->ops.proc_ops;
	int ret;
	u32 i;

	/* Iterate through list of devices to put (shutdown) */
	for (i = 0; i < ARRAY_SIZE(put_device_ids); i++) {
		u32 id = put_device_ids[i];

		ret = dev_ops->put_device(ti_sci, id);
		if (ret)
			panic("Failed to put device %u (%d)\n", id, ret);
	}

	/* Iterate through list of cores to put (shutdown) */
	for (i = 0; i < ARRAY_SIZE(put_core_ids); i++) {
		u32 id = put_core_ids[i];

		/*
		 * Queue up the core shutdown request. Note that this call
		 * needs to be followed up by an actual invocation of a WFE
		 * or WFI CPU instruction.
		 */
		ret = proc_ops->proc_shutdown_no_wait(ti_sci, id);
		if (ret)
			panic("Failed sending core %u shutdown message (%d)\n",
			      id, ret);
	}
}
/*
 * R5 SPL handoff: load and start ATF on the Cortex-A cluster (remoteproc
 * device 1), optionally hand the DM firmware image to this R5 core, then
 * either jump into the DM firmware or shut this core down.
 *
 * Never returns; any load/start failure panics.
 */
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size = 0, shut_cpu = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();

	/* DM firmware not in the FIT: try loading it from storage instead */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
				     &loadaddr);
	}

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
	/* Authenticate ATF */
	void *image_addr = (void *)fit_image_info[IMAGE_ID_ATF].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_ATF].image_start,
	      fit_image_info[IMAGE_ID_ATF].image_len,
	      image_os_match[IMAGE_ID_ATF]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_ATF].image_len);

	/* Authenticate OPTEE */
	image_addr = (void *)fit_image_info[IMAGE_ID_OPTEE].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_OPTEE].image_start,
	      fit_image_info[IMAGE_ID_OPTEE].image_len,
	      image_os_match[IMAGE_ID_OPTEE]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_OPTEE].image_len);
#endif

	/* No usable DM firmware anywhere: start ATF, then shut this core down */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
		shut_cpu = 1;
		goto start_arm64;
	}

	/* Resolve the DM firmware entry point (ELF images get relocated) */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
	} else {
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);
	}

	debug("%s: jumping to address %x\n", __func__, loadaddr);

start_arm64:
	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (shut_cpu) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		/* Parked until the queued shutdown request takes effect */
		while (1)
			asm volatile("wfe");
	}
	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;

	image_entry();
}
#endif
/*
 * Work around two known Cortex-R5F deadlock scenarios on K3 devices by
 * setting the DLFO (disable linefill optimization) bit in ACTLR:
 *
 *  1. A series of stores to write-back/write-allocate memory followed by
 *     a barrier (DSB or DMB) may hang at the barrier instruction.
 *  2. A tight loop mixing loads with stores to write-back/write-allocate
 *     memory may hang at one of the load instructions.
 */
void disable_linefill_optimization(void)
{
	u32 aux_ctrl;

	/* Read-modify-write the Auxiliary Control Register */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_ctrl));
	aux_ctrl |= 1 << 13;	/* DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (aux_ctrl));
}
/*
 * Disable all configured firewall regions of one class (foreground or
 * background) for a single firewall instance.
 *
 * @fwl_data:    firewall descriptor (id, name, region count)
 * @num_regions: number of regions to scan in this firewall
 * @fwl_type:    which region class (K3_FIREWALL_REGION_*) to disable
 *
 * Regions of the other class are left intact, so a background
 * ("allow all") region is never removed while clearing foreground ones.
 */
static void remove_fwl_regions(struct fwl_data fwl_data, size_t num_regions,
			       enum k3_firewall_region_type fwl_type)
{
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	struct ti_sci_msg_fwl_region region;
	size_t j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;

	/*
	 * Honour the caller-supplied region count; it was previously
	 * ignored in favour of fwl_data.regions (callers pass the same
	 * value, so behavior is unchanged).
	 */
	for (j = 0; j < num_regions; j++) {
		region.fwl_id = fwl_data.fwl_id;
		region.region = j;
		region.n_permission_regs = 3;

		fwl_ops->get_fwl_region(ti_sci, &region);

		/* Don't disable the background regions */
		if (region.control != 0 &&
		    ((region.control >> K3_FIREWALL_BACKGROUND_BIT) & 1) == fwl_type) {
			pr_debug("Attempting to disable firewall %5d (%25s)\n",
				 region.fwl_id, fwl_data.name);
			region.control = 0;

			if (fwl_ops->set_fwl_region(ti_sci, &region))
				pr_err("Could not disable firewall %5d (%25s)\n",
				       region.fwl_id, fwl_data.name);
		}
	}
}
/*
 * Disable all foreground regions of every firewall in the given table,
 * then all background regions, so no firewall is left with only a
 * background region removed while its foreground regions still block
 * access.
 */
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	size_t idx;

	for (idx = 0; idx < fwl_data_size; idx++) {
		remove_fwl_regions(fwl_data[idx], fwl_data[idx].regions,
				   K3_FIREWALL_REGION_FOREGROUND);
		remove_fwl_regions(fwl_data[idx], fwl_data[idx].regions,
				   K3_FIREWALL_REGION_BACKGROUND);
	}
}
#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
/*
 * Record the entry address and size of recognized FIT images (ATF,
 * OP-TEE, SPL, DM firmware) in fit_image_info[], then authenticate the
 * image.  ATF and OP-TEE are only signature-checked here; their full
 * post-processing happens later, once the A-core cluster is up.
 */
void board_fit_image_post_process(const void *fit, int node, void **p_image,
				  size_t *p_size)
{
	int len;
	int i;
	const char *os;
	u32 addr;

	os = fdt_getprop(fit, node, "os", &len);
	addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);

	debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
	      addr, *p_size, os);

	for (i = 0; i < IMAGE_AMT; i++) {
		/*
		 * Guard against a missing "os" property: fdt_getprop()
		 * returns NULL and strcmp(NULL, ...) is undefined behavior.
		 */
		if (os && !strcmp(os, image_os_match[i])) {
			fit_image_info[i].image_start = addr;
			fit_image_info[i].image_len = *p_size;
			debug("%s: matched image for ID %d\n", __func__, i);
			break;
		}
	}

	/*
	 * Only DM and the DTBs are being authenticated here,
	 * rest will be authenticated when A72 cluster is up
	 */
	if ((i != IMAGE_ID_ATF) && (i != IMAGE_ID_OPTEE)) {
		ti_secure_image_check_binary(p_image, p_size);
		ti_secure_image_post_process(p_image, p_size);
	} else {
		ti_secure_image_check_binary(p_image, p_size);
	}
}
#endif

View File

@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+ # SPDX-License-Identifier: GPL-2.0+
# Copyright (C) 2022-2023 Texas Instruments Incorporated - https://www.ti.com/ # Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
# #
# Resource management configuration for AM62A # Resource management configuration for AM62A
# #
@ -519,13 +519,13 @@ rm-cfg:
reserved: 0 reserved: 0
- -
start_resource: 44 start_resource: 44
num_resource: 36 num_resource: 35
type: 1802 type: 1802
host_id: 35 host_id: 35
reserved: 0 reserved: 0
- -
start_resource: 44 start_resource: 44
num_resource: 36 num_resource: 35
type: 1802 type: 1802
host_id: 36 host_id: 36
reserved: 0 reserved: 0
@ -567,7 +567,7 @@ rm-cfg:
reserved: 0 reserved: 0
- -
start_resource: 1038 start_resource: 1038
num_resource: 498 num_resource: 497
type: 1805 type: 1805
host_id: 128 host_id: 128
reserved: 0 reserved: 0

View File

@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+ # SPDX-License-Identifier: GPL-2.0+
# Copyright (C) 2022-2023 Texas Instruments Incorporated - https://www.ti.com/ # Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
# #
# Resource management configuration for AM62X # Resource management configuration for AM62X
# #
@ -513,13 +513,13 @@ rm-cfg:
reserved: 0 reserved: 0
- -
start_resource: 44 start_resource: 44
num_resource: 36 num_resource: 35
type: 1802 type: 1802
host_id: 35 host_id: 35
reserved: 0 reserved: 0
- -
start_resource: 44 start_resource: 44
num_resource: 36 num_resource: 35
type: 1802 type: 1802
host_id: 36 host_id: 36
reserved: 0 reserved: 0
@ -555,7 +555,7 @@ rm-cfg:
reserved: 0 reserved: 0
- -
start_resource: 909 start_resource: 909
num_resource: 627 num_resource: 626
type: 1805 type: 1805
host_id: 128 host_id: 128
reserved: 0 reserved: 0

View File

@ -42,6 +42,7 @@ CONFIG_SPL_SYS_REPORT_STACK_F_USAGE=y
CONFIG_SPL_BOARD_INIT=y CONFIG_SPL_BOARD_INIT=y
CONFIG_SPL_SYS_MALLOC_SIMPLE=y CONFIG_SPL_SYS_MALLOC_SIMPLE=y
CONFIG_SPL_STACK_R=y CONFIG_SPL_STACK_R=y
CONFIG_SPL_STACK_R_MALLOC_SIMPLE_LEN=0x400000
CONFIG_SPL_SEPARATE_BSS=y CONFIG_SPL_SEPARATE_BSS=y
CONFIG_SPL_SYS_MALLOC=y CONFIG_SPL_SYS_MALLOC=y
CONFIG_SPL_HAS_CUSTOM_MALLOC_START=y CONFIG_SPL_HAS_CUSTOM_MALLOC_START=y

View File

@ -884,10 +884,10 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
return ret; return ret;
tchan = uc->tchan; tchan = uc->tchan;
if (tchan->tflow_id >= 0) if (tchan->tflow_id > 0)
ring_idx = tchan->tflow_id; ring_idx = tchan->tflow_id;
else else
ring_idx = ud->bchan_cnt + tchan->id; ring_idx = tchan->id;
ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1, ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
&uc->tchan->t_ring, &uc->tchan->t_ring,
@ -1770,9 +1770,11 @@ static int udma_probe(struct udevice *dev)
return PTR_ERR(ud->ringacc); return PTR_ERR(ud->ringacc);
ud->dev = dev; ud->dev = dev;
ud->ch_count = setup_resources(ud); ret = setup_resources(ud);
if (ud->ch_count <= 0) if (ret < 0)
return ud->ch_count; return ret;
ud->ch_count = ret;
for (i = 0; i < ud->bchan_cnt; i++) { for (i = 0; i < ud->bchan_cnt; i++) {
struct udma_bchan *bchan = &ud->bchans[i]; struct udma_bchan *bchan = &ud->bchans[i];
@ -1831,7 +1833,7 @@ static int udma_probe(struct udevice *dev)
uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV; uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
return ret; return 0;
} }
static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem) static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)