drivers: clk: add fu740 support

Add fu740 support. An abstraction layer is added to support
multiple chips such as fu540 and fu740.

Signed-off-by: Green Wan <green.wan@sifive.com>
Green Wan 2021-05-27 06:52:08 -07:00 committed by Leo Yu-Chi Liang
parent a74e9d899d
commit d56d79ed27
10 changed files with 1288 additions and 755 deletions
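The new layout moves the common PRCI logic into sifive-prci.c, while each chip only provides a table of its clocks; the common driver selects the right table through the .data field of its compatible match entries. As a minimal sketch of how a further SoC would plug into this abstraction (the "fuxxx" names below are purely illustrative and not part of this patch; struct prci_clk_desc, struct __prci_clock and the of_match wiring are taken from the code added here):

/* hypothetical fuxxx-prci.h, modeled on the fu540/fu740 headers in this commit */
#ifndef __SIFIVE_CLK_FUXXX_PRCI_H
#define __SIFIVE_CLK_FUXXX_PRCI_H

#include "sifive-prci.h"

#define NUM_CLOCK_FUXXX 4

/* per-SoC clock table, filled in by a matching fuxxx-prci.c */
extern struct __prci_clock __prci_init_clocks_fuxxx[NUM_CLOCK_FUXXX];

static const struct prci_clk_desc prci_clk_fuxxx = {
        .clks = __prci_init_clocks_fuxxx,
        .num_clks = ARRAY_SIZE(__prci_init_clocks_fuxxx),
};

#endif /* __SIFIVE_CLK_FUXXX_PRCI_H */

/* in sifive-prci.c, each descriptor is attached to its compatible string */
static const struct udevice_id sifive_prci_ids[] = {
        { .compatible = "sifive,fu540-c000-prci", .data = (ulong)&prci_clk_fu540 },
        { .compatible = "sifive,fu740-c000-prci", .data = (ulong)&prci_clk_fu740 },
        /* a new chip would add a single line here, e.g.
         * { .compatible = "sifive,fuxxx-prci", .data = (ulong)&prci_clk_fuxxx },
         */
        { }
};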


@@ -18,7 +18,7 @@ config SIFIVE_FU540
 	imply SPL_LOAD_FIT
 	imply SMP
 	imply CLK_SIFIVE
-	imply CLK_SIFIVE_FU540_PRCI
+	imply CLK_SIFIVE_PRCI
 	imply SIFIVE_SERIAL
 	imply MACB
 	imply MII


@@ -6,11 +6,11 @@ config CLK_SIFIVE
 	help
 	  SoC drivers for SiFive Linux-capable SoCs.
 
-config CLK_SIFIVE_FU540_PRCI
-	bool "PRCI driver for SiFive FU540 SoCs"
+config CLK_SIFIVE_PRCI
+	bool "PRCI driver for SiFive SoCs"
 	depends on CLK_SIFIVE
 	select CLK_ANALOGBITS_WRPLL_CLN28HPC
 	help
 	  Supports the Power Reset Clock interface (PRCI) IP block found in
-	  FU540 SoCs. If this kernel is meant to run on a SiFive FU540 SoC,
-	  enable this driver.
+	  FU540/FU740 SoCs. If this kernel is meant to run on a SiFive
+	  FU540/FU740 SoC, enable this driver.


@@ -1,3 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0+
 
-obj-$(CONFIG_CLK_SIFIVE_FU540_PRCI) += fu540-prci.o
+obj-y += sifive-prci.o
+obj-$(CONFIG_CLK_SIFIVE_PRCI) += fu540-prci.o fu740-prci.o


@@ -5,6 +5,8 @@
  * Copyright (C) 2018 SiFive, Inc.
  * Wesley Terpstra
  * Paul Walmsley
+ * Zong Li
+ * Pragnesh Patel
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -15,632 +17,48 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* The FU540 PRCI implements clock and reset control for the SiFive
* FU540-C000 chip. This driver assumes that it has sole control
* over all PRCI resources.
*
* This driver is based on the PRCI driver written by Wesley Terpstra.
*
* Refer, commit 999529edf517ed75b56659d456d221b2ee56bb60 of:
* https://github.com/riscv/riscv-linux
*
* References:
* - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
*/
#include <common.h>
#include <clk-uclass.h>
#include <clk.h>
#include <div64.h>
#include <dm.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/uclass.h>
#include <dt-bindings/clock/sifive-fu540-prci.h>
#include <dt-bindings/reset/sifive-fu540-prci.h>
#include <errno.h>
#include <reset-uclass.h>
#include <asm/io.h>
#include <asm/arch/reset.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
/*
* EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
* hfclk and rtcclk
*/
#define EXPECTED_CLK_PARENT_COUNT 2
/*
* Register offsets and bitmasks
*/
/* COREPLLCFG0 */
#define PRCI_COREPLLCFG0_OFFSET 0x4
#define PRCI_COREPLLCFG0_DIVR_SHIFT 0
#define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
#define PRCI_COREPLLCFG0_DIVF_SHIFT 6
#define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
#define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
#define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
#define PRCI_COREPLLCFG0_RANGE_SHIFT 18
#define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
#define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
#define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
#define PRCI_COREPLLCFG0_FSE_SHIFT 25
#define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
#define PRCI_COREPLLCFG0_LOCK_SHIFT 31
#define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
/* COREPLLCFG1 */
#define PRCI_COREPLLCFG1_OFFSET 0x8
#define PRCI_COREPLLCFG1_CKE_SHIFT 31
#define PRCI_COREPLLCFG1_CKE_MASK (0x1 << PRCI_COREPLLCFG1_CKE_SHIFT)
/* DDRPLLCFG0 */
#define PRCI_DDRPLLCFG0_OFFSET 0xc
#define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
#define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
#define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
#define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
#define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
#define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
#define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
#define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
#define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
#define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
#define PRCI_DDRPLLCFG0_FSE_SHIFT 25
#define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
#define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
#define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
/* DDRPLLCFG1 */
#define PRCI_DDRPLLCFG1_OFFSET 0x10
#define PRCI_DDRPLLCFG1_CKE_SHIFT 31
#define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
/* GEMGXLPLLCFG0 */
#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
#define PRCI_GEMGXLPLLCFG0_DIVR_MASK \
(0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
#define PRCI_GEMGXLPLLCFG0_DIVF_MASK \
(0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
#define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
#define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
#define PRCI_GEMGXLPLLCFG0_RANGE_MASK \
(0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
#define PRCI_GEMGXLPLLCFG0_BYPASS_MASK \
(0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
#define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
#define PRCI_GEMGXLPLLCFG0_FSE_MASK \
(0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
#define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
/* GEMGXLPLLCFG1 */
#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
#define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 31
#define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
/* CORECLKSEL */
#define PRCI_CORECLKSEL_OFFSET 0x24
#define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
#define PRCI_CORECLKSEL_CORECLKSEL_MASK \
(0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
/* DEVICESRESETREG */
#define PRCI_DEVICESRESETREG_OFFSET 0x28
#define PRCI_DEVICERESETCNT 5
#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK \
(0x1 << PRCI_RST_DDR_CTRL_N)
#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK \
(0x1 << PRCI_RST_DDR_AXI_N)
#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK \
(0x1 << PRCI_RST_DDR_AHB_N)
#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK \
(0x1 << PRCI_RST_DDR_PHY_N)
#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK \
(0x1 << PRCI_RST_GEMGXL_N)
/* CLKMUXSTATUSREG */
#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK \
(0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
/* PROCMONCFG */
#define PRCI_PROCMONCFG_OFFSET 0xF0
#define PRCI_PROCMONCFG_CORE_CLOCK_SHIFT 24
#define PRCI_PROCMONCFG_CORE_CLOCK_MASK \
(0x1 << PRCI_PROCMONCFG_CORE_CLOCK_SHIFT)
/*
* Private structures
*/
/**
* struct __prci_data - per-device-instance data
* @va: base virtual address of the PRCI IP block
* @parent: parent clk instance
*
* PRCI per-device instance data
*/
struct __prci_data {
void *va;
struct clk parent_hfclk;
struct clk parent_rtcclk;
};
/**
* struct __prci_wrpll_data - WRPLL configuration and integration data
* @c: WRPLL current configuration record
* @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
* @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
* @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
* @cfg1_offs: WRPLL CFG1 register offset (in bytes) from the PRCI base address
* @release_reset: fn ptr to code to release clock reset
*
* @enable_bypass and @disable_bypass are used for WRPLL instances
* that contain a separate external glitchless clock mux downstream
* from the PLL. The WRPLL internal bypass mux is not glitchless.
*/
struct __prci_wrpll_data {
struct wrpll_cfg c;
void (*enable_bypass)(struct __prci_data *pd);
void (*disable_bypass)(struct __prci_data *pd);
u8 cfg0_offs;
u8 cfg1_offs;
void (*release_reset)(struct __prci_data *pd);
};
struct __prci_clock;
/* struct __prci_clock_ops - clock operations */
struct __prci_clock_ops {
int (*set_rate)(struct __prci_clock *pc,
unsigned long rate,
unsigned long parent_rate);
unsigned long (*round_rate)(struct __prci_clock *pc,
unsigned long rate,
unsigned long *parent_rate);
unsigned long (*recalc_rate)(struct __prci_clock *pc,
unsigned long parent_rate);
int (*enable_clk)(struct __prci_clock *pc, bool enable);
};
/**
* struct __prci_clock - describes a clock device managed by PRCI
* @name: user-readable clock name string - should match the manual
* @parent_name: parent name for this clock
* @ops: struct __prci_clock_ops for control
* @pwd: WRPLL-specific data, associated with this clock (if not NULL)
* @pd: PRCI-specific data associated with this clock (if not NULL)
*
* PRCI clock data. Used by the PRCI driver to register PRCI-provided
* clocks to the Linux clock infrastructure.
*/
struct __prci_clock {
const char *name;
const char *parent_name;
const struct __prci_clock_ops *ops;
struct __prci_wrpll_data *pwd;
struct __prci_data *pd;
};
/*
* Private functions
*/
/**
* __prci_readl() - read from a PRCI register
* @pd: PRCI context
* @offs: register offset to read from (in bytes, from PRCI base address)
*
* Read the register located at offset @offs from the base virtual
* address of the PRCI register target described by @pd, and return
* the value to the caller.
*
* Context: Any context.
*
* Return: the contents of the register described by @pd and @offs.
*/
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
return readl(pd->va + offs);
}
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
writel(v, pd->va + offs);
}
/* WRPLL-related private functions */
/**
* __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
* @c: ptr to a struct wrpll_cfg record to write config into
* @r: value read from the PRCI PLL configuration register
*
* Given a value @r read from an FU540 PRCI PLL configuration register,
* split it into fields and populate it into the WRPLL configuration record
* pointed to by @c.
*
* The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
* have the same register layout.
*
* Context: Any context.
*/
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
{
u32 v;
v = r & PRCI_COREPLLCFG0_DIVR_MASK;
v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
c->divr = v;
v = r & PRCI_COREPLLCFG0_DIVF_MASK;
v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
c->divf = v;
v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
c->divq = v;
v = r & PRCI_COREPLLCFG0_RANGE_MASK;
v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
c->range = v;
c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
WRPLL_FLAGS_EXT_FEEDBACK_MASK);
/* external feedback mode not supported */
c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
}
/**
* __prci_wrpll_pack() - pack PLL configuration parameters into a register value
* @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
*
* Using a set of WRPLL configuration values pointed to by @c,
* assemble a PRCI PLL configuration register value, and return it to
* the caller.
*
* Context: Any context. Caller must ensure that the contents of the
* record pointed to by @c do not change during the execution
* of this function.
*
* Returns: a value suitable for writing into a PRCI PLL configuration
* register
*/
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
{
u32 r = 0;
r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
/* external feedback mode not supported */
r |= PRCI_COREPLLCFG0_FSE_MASK;
return r;
}
/**
* __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
* @pd: PRCI context
* @pwd: PRCI WRPLL metadata
*
* Read the current configuration of the PLL identified by @pwd from
* the PRCI identified by @pd, and store it into the local configuration
* cache in @pwd.
*
* Context: Any context. Caller must prevent the records pointed to by
* @pd and @pwd from changing during execution.
*/
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
struct __prci_wrpll_data *pwd)
{
__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}
/**
* __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
* @pd: PRCI context
* @pwd: PRCI WRPLL metadata
* @c: WRPLL configuration record to write
*
* Write the WRPLL configuration described by @c into the WRPLL
* configuration register identified by @pwd in the PRCI instance
* described by @c. Make a cached copy of the WRPLL's current
* configuration so it can be used by other code.
*
* Context: Any context. Caller must prevent the records pointed to by
* @pd and @pwd from changing during execution.
*/
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
struct __prci_wrpll_data *pwd,
struct wrpll_cfg *c)
{
__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
memcpy(&pwd->c, c, sizeof(*c));
}
/**
* __prci_wrpll_write_cfg1() - write Clock enable/disable configuration
* into the PRCI
* @pd: PRCI context
* @pwd: PRCI WRPLL metadata
* @enable: Clock enable or disable value
*/
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
struct __prci_wrpll_data *pwd,
u32 enable)
{
__prci_writel(enable, pwd->cfg1_offs, pd);
}
/* Core clock mux control */
/**
* __prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
*
* Switch the CORECLK mux to the HFCLK input source; return once complete.
*
* Context: Any context. Caller must prevent concurrent changes to the
* PRCI_CORECLKSEL_OFFSET register.
*/
static void __prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
u32 r;
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}
/**
* __prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
*
* Switch the CORECLK mux to the PLL output clock; return once complete.
*
* Context: Any context. Caller must prevent concurrent changes to the
* PRCI_CORECLKSEL_OFFSET register.
*/
static void __prci_coreclksel_use_corepll(struct __prci_data *pd)
{
u32 r;
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}
static unsigned long sifive_fu540_prci_wrpll_recalc_rate(
struct __prci_clock *pc,
unsigned long parent_rate)
{
struct __prci_wrpll_data *pwd = pc->pwd;
return wrpll_calc_output_rate(&pwd->c, parent_rate);
}
static unsigned long sifive_fu540_prci_wrpll_round_rate(
struct __prci_clock *pc,
unsigned long rate,
unsigned long *parent_rate)
{
struct __prci_wrpll_data *pwd = pc->pwd;
struct wrpll_cfg c;
memcpy(&c, &pwd->c, sizeof(c));
wrpll_configure_for_rate(&c, rate, *parent_rate);
return wrpll_calc_output_rate(&c, *parent_rate);
}
static int sifive_fu540_prci_wrpll_set_rate(struct __prci_clock *pc,
unsigned long rate,
unsigned long parent_rate)
{
struct __prci_wrpll_data *pwd = pc->pwd;
struct __prci_data *pd = pc->pd;
int r;
r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
if (r)
return r;
if (pwd->enable_bypass)
pwd->enable_bypass(pd);
__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);
udelay(wrpll_calc_max_lock_us(&pwd->c));
if (pwd->disable_bypass)
pwd->disable_bypass(pd);
return 0;
}
static int sifive_fu540_prci_clock_enable(struct __prci_clock *pc, bool enable)
{
struct __prci_wrpll_data *pwd = pc->pwd;
struct __prci_data *pd = pc->pd;
if (enable) {
__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);
if (pwd->release_reset)
pwd->release_reset(pd);
} else {
u32 r;
r = __prci_readl(pd, pwd->cfg1_offs);
r &= ~PRCI_COREPLLCFG1_CKE_MASK;
__prci_wrpll_write_cfg1(pd, pwd, r);
}
return 0;
}
static const struct __prci_clock_ops sifive_fu540_prci_wrpll_clk_ops = {
.set_rate = sifive_fu540_prci_wrpll_set_rate,
.round_rate = sifive_fu540_prci_wrpll_round_rate,
.recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
.enable_clk = sifive_fu540_prci_clock_enable,
};
/* TLCLKSEL clock integration */
static unsigned long sifive_fu540_prci_tlclksel_recalc_rate(
struct __prci_clock *pc,
unsigned long parent_rate)
{
struct __prci_data *pd = pc->pd;
u32 v;
u8 div;
v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
div = v ? 1 : 2;
return div_u64(parent_rate, div);
}
static const struct __prci_clock_ops sifive_fu540_prci_tlclksel_clk_ops = {
.recalc_rate = sifive_fu540_prci_tlclksel_recalc_rate,
};
static int __prci_consumer_reset(const char *rst_name, bool trigger)
{
struct udevice *dev;
struct reset_ctl rst_sig;
int ret;
ret = uclass_get_device_by_driver(UCLASS_RESET,
DM_DRIVER_GET(sifive_reset),
&dev);
if (ret) {
dev_err(dev, "Reset driver not found: %d\n", ret);
return ret;
}
ret = reset_get_by_name(dev, rst_name, &rst_sig);
if (ret) {
dev_err(dev, "failed to get %s reset\n", rst_name);
return ret;
}
if (reset_valid(&rst_sig)) {
if (trigger)
ret = reset_deassert(&rst_sig);
else
ret = reset_assert(&rst_sig);
if (ret) {
dev_err(dev, "failed to trigger reset id = %ld\n",
rst_sig.id);
return ret;
}
}
return ret;
}
/**
* __prci_ddr_release_reset() - Release DDR reset
* @pd: struct __prci_data * for the PRCI containing the DDRCLK mux reg
*
*/
static void __prci_ddr_release_reset(struct __prci_data *pd)
{
/* Release DDR ctrl reset */
__prci_consumer_reset("ddr_ctrl", true);
/* HACK to get the '1 full controller clock cycle'. */
asm volatile ("fence");
/* Release DDR AXI reset */
__prci_consumer_reset("ddr_axi", true);
/* Release DDR AHB reset */
__prci_consumer_reset("ddr_ahb", true);
/* Release DDR PHY reset */
__prci_consumer_reset("ddr_phy", true);
/* HACK to get the '1 full controller clock cycle'. */
asm volatile ("fence");
/*
* These take like 16 cycles to actually propagate. We can't go sending
* stuff before they come out of reset. So wait.
*/
for (int i = 0; i < 256; i++)
asm volatile ("nop");
}
/**
* __prci_ethernet_release_reset() - Release ethernet reset
* @pd: struct __prci_data * for the PRCI containing the Ethernet CLK mux reg
*
*/
static void __prci_ethernet_release_reset(struct __prci_data *pd)
{
/* Release GEMGXL reset */
__prci_consumer_reset("gemgxl_reset", true);
/* Procmon => core clock */
__prci_writel(PRCI_PROCMONCFG_CORE_CLOCK_MASK, PRCI_PROCMONCFG_OFFSET,
pd);
}
/*
* PRCI integration data for each WRPLL instance
*/
#include "sifive-prci.h"
/* PRCI integration data for each WRPLL instance */
static struct __prci_wrpll_data __prci_corepll_data = {
.cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
.cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
.enable_bypass = __prci_coreclksel_use_hfclk,
.disable_bypass = __prci_coreclksel_use_corepll,
.enable_bypass = sifive_prci_coreclksel_use_hfclk,
.disable_bypass = sifive_prci_coreclksel_use_corepll,
};
static struct __prci_wrpll_data __prci_ddrpll_data = {
.cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
.cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
.release_reset = __prci_ddr_release_reset,
.release_reset = sifive_prci_ddr_release_reset,
};
static struct __prci_wrpll_data __prci_gemgxlpll_data = {
.cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
.cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
.release_reset = __prci_ethernet_release_reset,
.release_reset = sifive_prci_ethernet_release_reset,
};
/*
* List of clock controls provided by the PRCI
*/
/* Linux clock framework integration */
static const struct __prci_clock_ops sifive_fu540_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
.round_rate = sifive_prci_wrpll_round_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable_clk = sifive_prci_clock_enable,
};
static struct __prci_clock __prci_init_clocks[] = {
static const struct __prci_clock_ops sifive_fu540_prci_tlclksel_clk_ops = {
.recalc_rate = sifive_prci_tlclksel_recalc_rate,
};
/* List of clock controls provided by the PRCI */
struct __prci_clock __prci_init_clocks_fu540[] = {
[PRCI_CLK_COREPLL] = {
.name = "corepll",
.parent_name = "hfclk",
@@ -665,148 +83,3 @@ static struct __prci_clock __prci_init_clocks[] = {
.ops = &sifive_fu540_prci_tlclksel_clk_ops,
},
};
static ulong sifive_fu540_prci_parent_rate(struct __prci_clock *pc)
{
ulong parent_rate;
struct __prci_clock *p;
if (strcmp(pc->parent_name, "corepll") == 0) {
p = &__prci_init_clocks[PRCI_CLK_COREPLL];
if (!p->pd || !p->ops->recalc_rate)
return -ENXIO;
return p->ops->recalc_rate(p, sifive_fu540_prci_parent_rate(p));
}
if (strcmp(pc->parent_name, "rtcclk") == 0)
parent_rate = clk_get_rate(&pc->pd->parent_rtcclk);
else
parent_rate = clk_get_rate(&pc->pd->parent_hfclk);
return parent_rate;
}
static ulong sifive_fu540_prci_get_rate(struct clk *clk)
{
struct __prci_clock *pc;
if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
return -ENXIO;
pc = &__prci_init_clocks[clk->id];
if (!pc->pd || !pc->ops->recalc_rate)
return -ENXIO;
return pc->ops->recalc_rate(pc, sifive_fu540_prci_parent_rate(pc));
}
static ulong sifive_fu540_prci_set_rate(struct clk *clk, ulong rate)
{
int err;
struct __prci_clock *pc;
if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
return -ENXIO;
pc = &__prci_init_clocks[clk->id];
if (!pc->pd || !pc->ops->set_rate)
return -ENXIO;
err = pc->ops->set_rate(pc, rate, sifive_fu540_prci_parent_rate(pc));
if (err)
return err;
return rate;
}
static int sifive_fu540_prci_enable(struct clk *clk)
{
struct __prci_clock *pc;
int ret = 0;
if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
return -ENXIO;
pc = &__prci_init_clocks[clk->id];
if (!pc->pd)
return -ENXIO;
if (pc->ops->enable_clk)
ret = pc->ops->enable_clk(pc, 1);
return ret;
}
static int sifive_fu540_prci_disable(struct clk *clk)
{
struct __prci_clock *pc;
int ret = 0;
if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
return -ENXIO;
pc = &__prci_init_clocks[clk->id];
if (!pc->pd)
return -ENXIO;
if (pc->ops->enable_clk)
ret = pc->ops->enable_clk(pc, 0);
return ret;
}
static int sifive_fu540_prci_probe(struct udevice *dev)
{
int i, err;
struct __prci_clock *pc;
struct __prci_data *pd = dev_get_priv(dev);
pd->va = (void *)dev_read_addr(dev);
if (IS_ERR(pd->va))
return PTR_ERR(pd->va);
err = clk_get_by_index(dev, 0, &pd->parent_hfclk);
if (err)
return err;
err = clk_get_by_index(dev, 1, &pd->parent_rtcclk);
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(__prci_init_clocks); ++i) {
pc = &__prci_init_clocks[i];
pc->pd = pd;
if (pc->pwd)
__prci_wrpll_read_cfg0(pd, pc->pwd);
}
return 0;
}
static struct clk_ops sifive_fu540_prci_ops = {
.set_rate = sifive_fu540_prci_set_rate,
.get_rate = sifive_fu540_prci_get_rate,
.enable = sifive_fu540_prci_enable,
.disable = sifive_fu540_prci_disable,
};
static int sifive_fu540_clk_bind(struct udevice *dev)
{
return sifive_reset_bind(dev, PRCI_DEVICERESETCNT);
}
static const struct udevice_id sifive_fu540_prci_ids[] = {
{ .compatible = "sifive,fu540-c000-prci" },
{ }
};
U_BOOT_DRIVER(sifive_fu540_prci) = {
.name = "sifive-fu540-prci",
.id = UCLASS_CLK,
.of_match = sifive_fu540_prci_ids,
.probe = sifive_fu540_prci_probe,
.ops = &sifive_fu540_prci_ops,
.priv_auto = sizeof(struct __prci_data),
.bind = sifive_fu540_clk_bind,
};


@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 SiFive, Inc.
* Zong Li
* Pragnesh Patel
*/
#ifndef __SIFIVE_CLK_FU540_PRCI_H
#define __SIFIVE_CLK_FU540_PRCI_H
#include "sifive-prci.h"
#define NUM_CLOCK_FU540 4
extern struct __prci_clock __prci_init_clocks_fu540[NUM_CLOCK_FU540];
static const struct prci_clk_desc prci_clk_fu540 = {
.clks = __prci_init_clocks_fu540,
.num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
};
#endif /* __SIFIVE_CLK_FU540_PRCI_H */


@@ -0,0 +1,158 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2021 SiFive, Inc.
* Wesley Terpstra
* Paul Walmsley
* Zong Li
* Pragnesh Patel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <dt-bindings/clock/sifive-fu740-prci.h>
#include "sifive-prci.h"
#include <asm/io.h>
int sifive_prci_fu740_pciauxclk_enable(struct __prci_clock *pc, bool enable)
{
struct __prci_wrpll_data *pwd = pc->pwd;
struct __prci_data *pd = pc->pd;
u32 v;
if (pwd->cfg1_offs != PRCI_PCIEAUXCFG1_OFFSET)
return -EINVAL;
v = readl(pd->va + pwd->cfg1_offs);
v = enable ? (v | PRCI_PCIEAUXCFG1_MASK) : (v & ~PRCI_PCIEAUXCFG1_MASK);
writel(v, pd->va + pwd->cfg1_offs);
return 0;
}
/* PRCI integration data for each WRPLL instance */
static struct __prci_wrpll_data __prci_corepll_data = {
.cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
.cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
.enable_bypass = sifive_prci_coreclksel_use_hfclk,
.disable_bypass = sifive_prci_coreclksel_use_final_corepll,
};
static struct __prci_wrpll_data __prci_ddrpll_data = {
.cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
.cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
.release_reset = sifive_prci_ddr_release_reset,
};
static struct __prci_wrpll_data __prci_gemgxlpll_data = {
.cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
.cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
.release_reset = sifive_prci_ethernet_release_reset,
};
static struct __prci_wrpll_data __prci_dvfscorepll_data = {
.cfg0_offs = PRCI_DVFSCOREPLLCFG0_OFFSET,
.cfg1_offs = PRCI_DVFSCOREPLLCFG1_OFFSET,
.enable_bypass = sifive_prci_corepllsel_use_corepll,
.disable_bypass = sifive_prci_corepllsel_use_dvfscorepll,
};
static struct __prci_wrpll_data __prci_hfpclkpll_data = {
.cfg0_offs = PRCI_HFPCLKPLLCFG0_OFFSET,
.cfg1_offs = PRCI_HFPCLKPLLCFG1_OFFSET,
.enable_bypass = sifive_prci_hfpclkpllsel_use_hfclk,
.disable_bypass = sifive_prci_hfpclkpllsel_use_hfpclkpll,
};
static struct __prci_wrpll_data __prci_cltxpll_data = {
.cfg0_offs = PRCI_CLTXPLLCFG0_OFFSET,
.cfg1_offs = PRCI_CLTXPLLCFG1_OFFSET,
.release_reset = sifive_prci_cltx_release_reset,
};
static struct __prci_wrpll_data __prci_pcieaux_data = {
.cfg1_offs = PRCI_PCIEAUXCFG1_OFFSET,
};
/* Linux clock framework integration */
static const struct __prci_clock_ops sifive_fu740_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
.round_rate = sifive_prci_wrpll_round_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable_clk = sifive_prci_clock_enable,
};
static const struct __prci_clock_ops sifive_fu740_prci_tlclksel_clk_ops = {
.recalc_rate = sifive_prci_tlclksel_recalc_rate,
};
static const struct __prci_clock_ops sifive_fu740_prci_hfpclkplldiv_clk_ops = {
.recalc_rate = sifive_prci_hfpclkplldiv_recalc_rate,
};
static const struct __prci_clock_ops sifive_fu740_prci_pcieaux_clk_ops = {
.enable_clk = sifive_prci_fu740_pciauxclk_enable,
};
/* List of clock controls provided by the PRCI */
struct __prci_clock __prci_init_clocks_fu740[] = {
[PRCI_CLK_COREPLL] = {
.name = "corepll",
.parent_name = "hfclk",
.ops = &sifive_fu740_prci_wrpll_clk_ops,
.pwd = &__prci_corepll_data,
},
[PRCI_CLK_DDRPLL] = {
.name = "ddrpll",
.parent_name = "hfclk",
.ops = &sifive_fu740_prci_wrpll_clk_ops,
.pwd = &__prci_ddrpll_data,
},
[PRCI_CLK_GEMGXLPLL] = {
.name = "gemgxlpll",
.parent_name = "hfclk",
.ops = &sifive_fu740_prci_wrpll_clk_ops,
.pwd = &__prci_gemgxlpll_data,
},
[PRCI_CLK_DVFSCOREPLL] = {
.name = "dvfscorepll",
.parent_name = "hfclk",
.ops = &sifive_fu740_prci_wrpll_clk_ops,
.pwd = &__prci_dvfscorepll_data,
},
[PRCI_CLK_HFPCLKPLL] = {
.name = "hfpclkpll",
.parent_name = "hfclk",
.ops = &sifive_fu740_prci_wrpll_clk_ops,
.pwd = &__prci_hfpclkpll_data,
},
[PRCI_CLK_CLTXPLL] = {
.name = "cltxpll",
.parent_name = "hfclk",
.ops = &sifive_fu740_prci_wrpll_clk_ops,
.pwd = &__prci_cltxpll_data,
},
[PRCI_CLK_TLCLK] = {
.name = "tlclk",
.parent_name = "corepll",
.ops = &sifive_fu740_prci_tlclksel_clk_ops,
},
[PRCI_CLK_PCLK] = {
.name = "pclk",
.parent_name = "hfpclkpll",
.ops = &sifive_fu740_prci_hfpclkplldiv_clk_ops,
},
[PRCI_CLK_PCIEAUX] {
.name = "pciaux",
.parent_name = "",
.ops = &sifive_fu740_prci_pcieaux_clk_ops,
.pwd = &__prci_pcieaux_data,
}
};


@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 SiFive, Inc.
* Zong Li
* Pragnesh Patel
*/
#ifndef __SIFIVE_CLK_FU740_PRCI_H
#define __SIFIVE_CLK_FU740_PRCI_H
#include "sifive-prci.h"
#define NUM_CLOCK_FU740 9
extern struct __prci_clock __prci_init_clocks_fu740[NUM_CLOCK_FU740];
static const struct prci_clk_desc prci_clk_fu740 = {
.clks = __prci_init_clocks_fu740,
.num_clks = ARRAY_SIZE(__prci_init_clocks_fu740),
};
#endif /* __SIFIVE_CLK_FU740_PRCI_H */


@@ -0,0 +1,733 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2021 SiFive, Inc.
* Wesley Terpstra
* Paul Walmsley
* Zong Li
* Pragnesh Patel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* The PRCI implements clock and reset control for the SiFive chip.
* This driver assumes that it has sole control over all PRCI resources.
*
* This driver is based on the PRCI driver written by Wesley Terpstra:
* https://github.com/riscv/riscv-linux/commit/999529edf517ed75b56659d456d221b2ee56bb60
*/
#include <common.h>
#include <clk-uclass.h>
#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <reset.h>
#include <asm/io.h>
#include <asm/arch/reset.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <dt-bindings/clock/sifive-fu740-prci.h>
#include "fu540-prci.h"
#include "fu740-prci.h"
/*
* Private functions
*/
/**
* __prci_readl() - read from a PRCI register
* @pd: PRCI context
* @offs: register offset to read from (in bytes, from PRCI base address)
*
* Read the register located at offset @offs from the base virtual
* address of the PRCI register target described by @pd, and return
* the value to the caller.
*
* Context: Any context.
*
* Return: the contents of the register described by @pd and @offs.
*/
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
return readl(pd->va + offs);
}
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
writel(v, pd->va + offs);
}
/* WRPLL-related private functions */
/**
* __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
* @c: ptr to a struct wrpll_cfg record to write config into
* @r: value read from the PRCI PLL configuration register
*
* Given a value @r read from an FU540 PRCI PLL configuration register,
* split it into fields and populate it into the WRPLL configuration record
* pointed to by @c.
*
* The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
* have the same register layout.
*
* Context: Any context.
*/
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
{
u32 v;
v = r & PRCI_COREPLLCFG0_DIVR_MASK;
v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
c->divr = v;
v = r & PRCI_COREPLLCFG0_DIVF_MASK;
v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
c->divf = v;
v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
c->divq = v;
v = r & PRCI_COREPLLCFG0_RANGE_MASK;
v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
c->range = v;
c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
WRPLL_FLAGS_EXT_FEEDBACK_MASK);
/* external feedback mode not supported */
c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
}
/**
* __prci_wrpll_pack() - pack PLL configuration parameters into a register value
* @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
*
* Using a set of WRPLL configuration values pointed to by @c,
* assemble a PRCI PLL configuration register value, and return it to
* the caller.
*
* Context: Any context. Caller must ensure that the contents of the
* record pointed to by @c do not change during the execution
* of this function.
*
* Returns: a value suitable for writing into a PRCI PLL configuration
* register
*/
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
{
u32 r = 0;
r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
/* external feedback mode not supported */
r |= PRCI_COREPLLCFG0_FSE_MASK;
return r;
}
/**
* __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
* @pd: PRCI context
* @pwd: PRCI WRPLL metadata
*
* Read the current configuration of the PLL identified by @pwd from
* the PRCI identified by @pd, and store it into the local configuration
* cache in @pwd.
*
* Context: Any context. Caller must prevent the records pointed to by
* @pd and @pwd from changing during execution.
*/
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
struct __prci_wrpll_data *pwd)
{
__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}
/**
* __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
* @pd: PRCI context
* @pwd: PRCI WRPLL metadata
* @c: WRPLL configuration record to write
*
* Write the WRPLL configuration described by @c into the WRPLL
* configuration register identified by @pwd in the PRCI instance
* described by @c. Make a cached copy of the WRPLL's current
* configuration so it can be used by other code.
*
* Context: Any context. Caller must prevent the records pointed to by
* @pd and @pwd from changing during execution.
*/
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
struct __prci_wrpll_data *pwd,
struct wrpll_cfg *c)
{
__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
memcpy(&pwd->c, c, sizeof(*c));
}
/**
* __prci_wrpll_write_cfg1() - write Clock enable/disable configuration
* into the PRCI
* @pd: PRCI context
* @pwd: PRCI WRPLL metadata
* @enable: Clock enable or disable value
*/
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
struct __prci_wrpll_data *pwd,
u32 enable)
{
__prci_writel(enable, pwd->cfg1_offs, pd);
}
unsigned long sifive_prci_wrpll_recalc_rate(struct __prci_clock *pc,
unsigned long parent_rate)
{
struct __prci_wrpll_data *pwd = pc->pwd;
return wrpll_calc_output_rate(&pwd->c, parent_rate);
}
unsigned long sifive_prci_wrpll_round_rate(struct __prci_clock *pc,
unsigned long rate,
unsigned long *parent_rate)
{
struct __prci_wrpll_data *pwd = pc->pwd;
struct wrpll_cfg c;
memcpy(&c, &pwd->c, sizeof(c));
wrpll_configure_for_rate(&c, rate, *parent_rate);
return wrpll_calc_output_rate(&c, *parent_rate);
}
int sifive_prci_wrpll_set_rate(struct __prci_clock *pc,
unsigned long rate,
unsigned long parent_rate)
{
struct __prci_wrpll_data *pwd = pc->pwd;
struct __prci_data *pd = pc->pd;
int r;
r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
if (r)
return r;
if (pwd->enable_bypass)
pwd->enable_bypass(pd);
__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);
udelay(wrpll_calc_max_lock_us(&pwd->c));
return 0;
}
int sifive_prci_clock_enable(struct __prci_clock *pc, bool enable)
{
struct __prci_wrpll_data *pwd = pc->pwd;
struct __prci_data *pd = pc->pd;
if (enable) {
__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);
if (pwd->disable_bypass)
pwd->disable_bypass(pd);
if (pwd->release_reset)
pwd->release_reset(pd);
} else {
u32 r;
if (pwd->enable_bypass)
pwd->enable_bypass(pd);
r = __prci_readl(pd, pwd->cfg1_offs);
r &= ~PRCI_COREPLLCFG1_CKE_MASK;
__prci_wrpll_write_cfg1(pd, pwd, r);
}
return 0;
}
/* TLCLKSEL clock integration */
unsigned long sifive_prci_tlclksel_recalc_rate(struct __prci_clock *pc,
unsigned long parent_rate)
{
struct __prci_data *pd = pc->pd;
u32 v;
u8 div;
v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
div = v ? 1 : 2;
return div_u64(parent_rate, div);
}
/* HFPCLK clock integration */
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct __prci_clock *pc,
unsigned long parent_rate)
{
struct __prci_data *pd = pc->pd;
u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);
return div_u64(parent_rate, div + 2);
}
/**
* sifive_prci_coreclksel_use_final_corepll() - switch the CORECLK mux to output
* FINAL_COREPLL
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
*
* Switch the CORECLK mux to the final COREPLL output clock; return once
* complete.
*
* Context: Any context. Caller must prevent concurrent changes to the
* PRCI_CORECLKSEL_OFFSET register.
*/
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
{
u32 r;
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}
/**
* sifive_prci_corepllsel_use_dvfscorepll() - switch the COREPLL mux to
* output DVFS_COREPLL
* @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
*
* Switch the COREPLL mux to the DVFSCOREPLL output clock; return once complete.
*
* Context: Any context. Caller must prevent concurrent changes to the
* PRCI_COREPLLSEL_OFFSET register.
*/
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
{
u32 r;
r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}
/**
* sifive_prci_corepllsel_use_corepll() - switch the COREPLL mux to
* output COREPLL
* @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
*
* Switch the COREPLL mux to the COREPLL output clock; return once complete.
*
* Context: Any context. Caller must prevent concurrent changes to the
* PRCI_COREPLLSEL_OFFSET register.
*/
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
{
u32 r;
r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}
/**
* sifive_prci_hfpclkpllsel_use_hfclk() - switch the HFPCLKPLL mux to
* output HFCLK
* @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
*
* Switch the HFPCLKPLL mux to the HFCLK input source; return once complete.
*
* Context: Any context. Caller must prevent concurrent changes to the
* PRCI_HFPCLKPLLSEL_OFFSET register.
*/
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
{
u32 r;
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}
/**
* sifive_prci_hfpclkpllsel_use_hfpclkpll() - switch the HFPCLKPLL mux to
* output HFPCLKPLL
* @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
*
* Switch the HFPCLKPLL mux to the HFPCLKPLL output clock; return once complete.
*
* Context: Any context. Caller must prevent concurrent changes to the
* PRCI_HFPCLKPLLSEL_OFFSET register.
*/
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
{
u32 r;
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}
static int __prci_consumer_reset(const char *rst_name, bool trigger)
{
struct udevice *dev;
struct reset_ctl rst_sig;
int ret;
ret = uclass_get_device_by_driver(UCLASS_RESET,
DM_DRIVER_GET(sifive_reset),
&dev);
if (ret) {
dev_err(dev, "Reset driver not found: %d\n", ret);
return ret;
}
ret = reset_get_by_name(dev, rst_name, &rst_sig);
if (ret) {
dev_err(dev, "failed to get %s reset\n", rst_name);
return ret;
}
if (reset_valid(&rst_sig)) {
if (trigger)
ret = reset_deassert(&rst_sig);
else
ret = reset_assert(&rst_sig);
if (ret) {
dev_err(dev, "failed to trigger reset id = %ld\n",
rst_sig.id);
return ret;
}
}
return ret;
}
/**
* sifive_prci_ddr_release_reset() - Release DDR reset
* @pd: struct __prci_data * for the PRCI containing the DDRCLK mux reg
*
*/
void sifive_prci_ddr_release_reset(struct __prci_data *pd)
{
/* Release DDR ctrl reset */
__prci_consumer_reset("ddr_ctrl", true);
/* HACK to get the '1 full controller clock cycle'. */
asm volatile ("fence");
/* Release DDR AXI reset */
__prci_consumer_reset("ddr_axi", true);
/* Release DDR AHB reset */
__prci_consumer_reset("ddr_ahb", true);
/* Release DDR PHY reset */
__prci_consumer_reset("ddr_phy", true);
/* HACK to get the '1 full controller clock cycle'. */
asm volatile ("fence");
/*
* These take like 16 cycles to actually propagate. We can't go sending
* stuff before they come out of reset. So wait.
*/
for (int i = 0; i < 256; i++)
asm volatile ("nop");
}
/**
* sifive_prci_ethernet_release_reset() - Release ethernet reset
* @pd: struct __prci_data * for the PRCI containing the Ethernet CLK mux reg
*
*/
void sifive_prci_ethernet_release_reset(struct __prci_data *pd)
{
/* Release GEMGXL reset */
__prci_consumer_reset("gemgxl_reset", true);
/* Procmon => core clock */
__prci_writel(PRCI_PROCMONCFG_CORE_CLOCK_MASK, PRCI_PROCMONCFG_OFFSET,
pd);
/* Release Chiplink reset */
__prci_consumer_reset("cltx_reset", true);
}
/**
* sifive_prci_cltx_release_reset() - Release cltx reset
* @pd: struct __prci_data * for the PRCI containing the Ethernet CLK mux reg
*
*/
void sifive_prci_cltx_release_reset(struct __prci_data *pd)
{
/* Release CLTX reset */
__prci_consumer_reset("cltx_reset", true);
}
/* Core clock mux control */
/**
* sifive_prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
*
* Switch the CORECLK mux to the HFCLK input source; return once complete.
*
* Context: Any context. Caller must prevent concurrent changes to the
* PRCI_CORECLKSEL_OFFSET register.
*/
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
u32 r;
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}
/**
* sifive_prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
*
* Switch the CORECLK mux to the PLL output clock; return once complete.
*
* Context: Any context. Caller must prevent concurrent changes to the
* PRCI_CORECLKSEL_OFFSET register.
*/
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
{
u32 r;
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}
static ulong sifive_prci_parent_rate(struct __prci_clock *pc, struct prci_clk_desc *data)
{
ulong parent_rate;
ulong i;
struct __prci_clock *p;
if (strcmp(pc->parent_name, "corepll") == 0 ||
strcmp(pc->parent_name, "hfpclkpll") == 0) {
for (i = 0; i < data->num_clks; i++) {
if (strcmp(pc->parent_name, data->clks[i].name) == 0)
break;
}
if (i >= data->num_clks)
return -ENXIO;
p = &data->clks[i];
if (!p->pd || !p->ops->recalc_rate)
return -ENXIO;
return p->ops->recalc_rate(p, sifive_prci_parent_rate(p, data));
}
if (strcmp(pc->parent_name, "rtcclk") == 0)
parent_rate = clk_get_rate(&pc->pd->parent_rtcclk);
else
parent_rate = clk_get_rate(&pc->pd->parent_hfclk);
return parent_rate;
}
static ulong sifive_prci_get_rate(struct clk *clk)
{
struct __prci_clock *pc;
struct prci_clk_desc *data =
(struct prci_clk_desc *)dev_get_driver_data(clk->dev);
if (data->num_clks <= clk->id)
return -ENXIO;
pc = &data->clks[clk->id];
if (!pc->pd || !pc->ops->recalc_rate)
return -ENXIO;
return pc->ops->recalc_rate(pc, sifive_prci_parent_rate(pc, data));
}
static ulong sifive_prci_set_rate(struct clk *clk, ulong rate)
{
int err;
struct __prci_clock *pc;
struct prci_clk_desc *data =
(struct prci_clk_desc *)dev_get_driver_data(clk->dev);
if (data->num_clks <= clk->id)
return -ENXIO;
pc = &data->clks[clk->id];
if (!pc->pd || !pc->ops->set_rate)
return -ENXIO;
err = pc->ops->set_rate(pc, rate, sifive_prci_parent_rate(pc, data));
if (err)
return err;
return rate;
}
static int sifive_prci_enable(struct clk *clk)
{
struct __prci_clock *pc;
int ret = 0;
struct prci_clk_desc *data =
(struct prci_clk_desc *)dev_get_driver_data(clk->dev);
if (data->num_clks <= clk->id)
return -ENXIO;
pc = &data->clks[clk->id];
if (!pc->pd)
return -ENXIO;
if (pc->ops->enable_clk)
ret = pc->ops->enable_clk(pc, 1);
return ret;
}
static int sifive_prci_disable(struct clk *clk)
{
struct __prci_clock *pc;
int ret = 0;
struct prci_clk_desc *data =
(struct prci_clk_desc *)dev_get_driver_data(clk->dev);
if (data->num_clks <= clk->id)
return -ENXIO;
pc = &data->clks[clk->id];
if (!pc->pd)
return -ENXIO;
if (pc->ops->enable_clk)
ret = pc->ops->enable_clk(pc, 0);
return ret;
}
static int sifive_prci_probe(struct udevice *dev)
{
int i, err;
struct __prci_clock *pc;
struct __prci_data *pd = dev_get_priv(dev);
struct prci_clk_desc *data =
(struct prci_clk_desc *)dev_get_driver_data(dev);
pd->va = (void *)dev_read_addr(dev);
if (IS_ERR(pd->va))
return PTR_ERR(pd->va);
err = clk_get_by_index(dev, 0, &pd->parent_hfclk);
if (err)
return err;
err = clk_get_by_index(dev, 1, &pd->parent_rtcclk);
if (err)
return err;
for (i = 0; i < data->num_clks; ++i) {
pc = &data->clks[i];
pc->pd = pd;
if (pc->pwd)
__prci_wrpll_read_cfg0(pd, pc->pwd);
}
if (IS_ENABLED(CONFIG_SPL_BUILD)) {
if (device_is_compatible(dev, "sifive,fu740-c000-prci")) {
u32 prci_pll_reg;
unsigned long parent_rate;
prci_pll_reg = readl(pd->va + PRCI_PRCIPLL_OFFSET);
if (prci_pll_reg & PRCI_PRCIPLL_HFPCLKPLL) {
/*
* Only initialize the HFPCLK PLL. In this
* case the design uses hfpclk to drive
* Chiplink
*/
pc = &data->clks[PRCI_CLK_HFPCLKPLL];
parent_rate = sifive_prci_parent_rate(pc, data);
sifive_prci_wrpll_set_rate(pc, 260000000,
parent_rate);
pc->ops->enable_clk(pc, 1);
} else if (prci_pll_reg & PRCI_PRCIPLL_CLTXPLL) {
/* CLTX pll init */
pc = &data->clks[PRCI_CLK_CLTXPLL];
parent_rate = sifive_prci_parent_rate(pc, data);
sifive_prci_wrpll_set_rate(pc, 260000000,
parent_rate);
pc->ops->enable_clk(pc, 1);
}
}
}
return 0;
}
static struct clk_ops sifive_prci_ops = {
.set_rate = sifive_prci_set_rate,
.get_rate = sifive_prci_get_rate,
.enable = sifive_prci_enable,
.disable = sifive_prci_disable,
};
static int sifive_clk_bind(struct udevice *dev)
{
return sifive_reset_bind(dev, PRCI_DEVICERESETCNT);
}
static const struct udevice_id sifive_prci_ids[] = {
{ .compatible = "sifive,fu540-c000-prci", .data = (ulong)&prci_clk_fu540 },
{ .compatible = "sifive,fu740-c000-prci", .data = (ulong)&prci_clk_fu740 },
{ }
};
U_BOOT_DRIVER(sifive_prci) = {
.name = "sifive-prci",
.id = UCLASS_CLK,
.of_match = sifive_prci_ids,
.probe = sifive_prci_probe,
.ops = &sifive_prci_ops,
.priv_auto = sizeof(struct __prci_data),
.bind = sifive_clk_bind,
};
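
For context on how these ops are reached at run time: peripheral drivers consume PRCI clocks through the generic U-Boot clock API, which dispatches into sifive_prci_get_rate(), sifive_prci_set_rate() and sifive_prci_enable() above. A minimal, hypothetical consumer probe (the example_periph name is illustrative only, not part of this patch):

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <linux/err.h>

static int example_periph_probe(struct udevice *dev)
{
        struct clk clk;
        ulong rate;
        int ret;

        /* index 0 of the node's "clocks" property, e.g. <&prci PRCI_CLK_TLCLK> */
        ret = clk_get_by_index(dev, 0, &clk);
        if (ret)
                return ret;

        /* ends up in sifive_prci_enable() via the clk uclass */
        ret = clk_enable(&clk);
        if (ret)
                return ret;

        /* ends up in sifive_prci_get_rate() via the clk uclass */
        rate = clk_get_rate(&clk);
        if (IS_ERR_VALUE(rate))
                return -EINVAL;

        debug("%s: clock runs at %lu Hz\n", dev->name, rate);

        return 0;
}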


@@ -0,0 +1,323 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 SiFive, Inc.
* Wesley Terpstra
* Paul Walmsley
* Zong Li
* Pragnesh Patel
*/
#ifndef __SIFIVE_CLK_SIFIVE_PRCI_H
#define __SIFIVE_CLK_SIFIVE_PRCI_H
#include <clk.h>
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
/*
* EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
* hfclk and rtcclk
*/
#define EXPECTED_CLK_PARENT_COUNT 2
/*
* Register offsets and bitmasks
*/
/* COREPLLCFG0 */
#define PRCI_COREPLLCFG0_OFFSET 0x4
#define PRCI_COREPLLCFG0_DIVR_SHIFT 0
#define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
#define PRCI_COREPLLCFG0_DIVF_SHIFT 6
#define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
#define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
#define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
#define PRCI_COREPLLCFG0_RANGE_SHIFT 18
#define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
#define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
#define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
#define PRCI_COREPLLCFG0_FSE_SHIFT 25
#define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
#define PRCI_COREPLLCFG0_LOCK_SHIFT 31
#define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
/* COREPLLCFG1 */
#define PRCI_COREPLLCFG1_OFFSET 0x8
#define PRCI_COREPLLCFG1_CKE_SHIFT 31
#define PRCI_COREPLLCFG1_CKE_MASK (0x1 << PRCI_COREPLLCFG1_CKE_SHIFT)
/* DDRPLLCFG0 */
#define PRCI_DDRPLLCFG0_OFFSET 0xc
#define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
#define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
#define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
#define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
#define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
#define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
#define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
#define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
#define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
#define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
#define PRCI_DDRPLLCFG0_FSE_SHIFT 25
#define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
#define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
#define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
/* DDRPLLCFG1 */
#define PRCI_DDRPLLCFG1_OFFSET 0x10
#define PRCI_DDRPLLCFG1_CKE_SHIFT 31
#define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
/* PCIEAUXCFG1 */
#define PRCI_PCIEAUXCFG1_OFFSET 0x14
#define PRCI_PCIEAUXCFG1_SHIFT 0
#define PRCI_PCIEAUXCFG1_MASK (0x1 << PRCI_PCIEAUXCFG1_SHIFT)
/* GEMGXLPLLCFG0 */
#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
#define PRCI_GEMGXLPLLCFG0_DIVR_MASK \
(0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
#define PRCI_GEMGXLPLLCFG0_DIVF_MASK \
(0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
#define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
#define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
#define PRCI_GEMGXLPLLCFG0_RANGE_MASK \
(0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
#define PRCI_GEMGXLPLLCFG0_BYPASS_MASK \
(0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
#define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
#define PRCI_GEMGXLPLLCFG0_FSE_MASK \
(0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
#define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
/* GEMGXLPLLCFG1 */
#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
#define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 31
#define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
/* CORECLKSEL */
#define PRCI_CORECLKSEL_OFFSET 0x24
#define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
#define PRCI_CORECLKSEL_CORECLKSEL_MASK \
(0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
/* DEVICESRESETREG */
#define PRCI_DEVICESRESETREG_OFFSET 0x28
#define PRCI_DEVICERESETCNT 6
/* CLKMUXSTATUSREG */
#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK \
(0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
/* CLTXPLLCFG0 */
#define PRCI_CLTXPLLCFG0_OFFSET 0x30
#define PRCI_CLTXPLLCFG0_DIVR_SHIFT 0
#define PRCI_CLTXPLLCFG0_DIVR_MASK (0x3f << PRCI_CLTXPLLCFG0_DIVR_SHIFT)
#define PRCI_CLTXPLLCFG0_DIVF_SHIFT 6
#define PRCI_CLTXPLLCFG0_DIVF_MASK (0x1ff << PRCI_CLTXPLLCFG0_DIVF_SHIFT)
#define PRCI_CLTXPLLCFG0_DIVQ_SHIFT 15
#define PRCI_CLTXPLLCFG0_DIVQ_MASK (0x7 << PRCI_CLTXPLLCFG0_DIVQ_SHIFT)
#define PRCI_CLTXPLLCFG0_RANGE_SHIFT 18
#define PRCI_CLTXPLLCFG0_RANGE_MASK (0x7 << PRCI_CLTXPLLCFG0_RANGE_SHIFT)
#define PRCI_CLTXPLLCFG0_BYPASS_SHIFT 24
#define PRCI_CLTXPLLCFG0_BYPASS_MASK (0x1 << PRCI_CLTXPLLCFG0_BYPASS_SHIFT)
#define PRCI_CLTXPLLCFG0_FSE_SHIFT 25
#define PRCI_CLTXPLLCFG0_FSE_MASK (0x1 << PRCI_CLTXPLLCFG0_FSE_SHIFT)
#define PRCI_CLTXPLLCFG0_LOCK_SHIFT 31
#define PRCI_CLTXPLLCFG0_LOCK_MASK (0x1 << PRCI_CLTXPLLCFG0_LOCK_SHIFT)
/* CLTXPLLCFG1 */
#define PRCI_CLTXPLLCFG1_OFFSET 0x34
#define PRCI_CLTXPLLCFG1_CKE_SHIFT 24
#define PRCI_CLTXPLLCFG1_CKE_MASK (0x1 << PRCI_CLTXPLLCFG1_CKE_SHIFT)
/* DVFSCOREPLLCFG0 */
#define PRCI_DVFSCOREPLLCFG0_OFFSET 0x38
/* DVFSCOREPLLCFG1 */
#define PRCI_DVFSCOREPLLCFG1_OFFSET 0x3c
#define PRCI_DVFSCOREPLLCFG1_CKE_SHIFT 24
#define PRCI_DVFSCOREPLLCFG1_CKE_MASK (0x1 << PRCI_DVFSCOREPLLCFG1_CKE_SHIFT)
/* COREPLLSEL */
#define PRCI_COREPLLSEL_OFFSET 0x40
#define PRCI_COREPLLSEL_COREPLLSEL_SHIFT 0
#define PRCI_COREPLLSEL_COREPLLSEL_MASK \
(0x1 << PRCI_COREPLLSEL_COREPLLSEL_SHIFT)
/* HFPCLKPLLCFG0 */
#define PRCI_HFPCLKPLLCFG0_OFFSET 0x50
#define PRCI_HFPCLKPLL_CFG0_DIVR_SHIFT 0
#define PRCI_HFPCLKPLL_CFG0_DIVR_MASK \
(0x3f << PRCI_HFPCLKPLLCFG0_DIVR_SHIFT)
#define PRCI_HFPCLKPLL_CFG0_DIVF_SHIFT 6
#define PRCI_HFPCLKPLL_CFG0_DIVF_MASK \
(0x1ff << PRCI_HFPCLKPLLCFG0_DIVF_SHIFT)
#define PRCI_HFPCLKPLL_CFG0_DIVQ_SHIFT 15
#define PRCI_HFPCLKPLL_CFG0_DIVQ_MASK \
(0x7 << PRCI_HFPCLKPLLCFG0_DIVQ_SHIFT)
#define PRCI_HFPCLKPLL_CFG0_RANGE_SHIFT 18
#define PRCI_HFPCLKPLL_CFG0_RANGE_MASK \
(0x7 << PRCI_HFPCLKPLLCFG0_RANGE_SHIFT)
#define PRCI_HFPCLKPLL_CFG0_BYPASS_SHIFT 24
#define PRCI_HFPCLKPLL_CFG0_BYPASS_MASK \
(0x1 << PRCI_HFPCLKPLLCFG0_BYPASS_SHIFT)
#define PRCI_HFPCLKPLL_CFG0_FSE_SHIFT 25
#define PRCI_HFPCLKPLL_CFG0_FSE_MASK \
(0x1 << PRCI_HFPCLKPLLCFG0_FSE_SHIFT)
#define PRCI_HFPCLKPLL_CFG0_LOCK_SHIFT 31
#define PRCI_HFPCLKPLL_CFG0_LOCK_MASK \
(0x1 << PRCI_HFPCLKPLLCFG0_LOCK_SHIFT)
/* HFPCLKPLLCFG1 */
#define PRCI_HFPCLKPLLCFG1_OFFSET 0x54
#define PRCI_HFPCLKPLLCFG1_CKE_SHIFT 24
#define PRCI_HFPCLKPLLCFG1_CKE_MASK \
(0x1 << PRCI_HFPCLKPLLCFG1_CKE_SHIFT)
/* HFPCLKPLLSEL */
#define PRCI_HFPCLKPLLSEL_OFFSET 0x58
#define PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_SHIFT 0
#define PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK \
(0x1 << PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_SHIFT)
/* HFPCLKPLLDIV */
#define PRCI_HFPCLKPLLDIV_OFFSET 0x5c
/* PRCIPLL */
#define PRCI_PRCIPLL_OFFSET 0xe0
#define PRCI_PRCIPLL_CLTXPLL (0x1 << 0)
#define PRCI_PRCIPLL_GEMGXLPLL (0x1 << 1)
#define PRCI_PRCIPLL_DDRPLL (0x1 << 2)
#define PRCI_PRCIPLL_HFPCLKPLL (0x1 << 3)
#define PRCI_PRCIPLL_DVFSCOREPLL (0x1 << 4)
#define PRCI_PRCIPLL_COREPLL (0x1 << 5)
/* PROCMONCFG */
#define PRCI_PROCMONCFG_OFFSET 0xF0
#define PRCI_PROCMONCFG_CORE_CLOCK_SHIFT 24
#define PRCI_PROCMONCFG_CORE_CLOCK_MASK \
(0x1 << PRCI_PROCMONCFG_CORE_CLOCK_SHIFT)
/*
* Private structures
*/
/**
* struct __prci_data - per-device-instance data
* @va: base virtual address of the PRCI IP block
* @parent: parent clk instance
*
* PRCI per-device instance data
*/
struct __prci_data {
void *va;
struct clk parent_hfclk;
struct clk parent_rtcclk;
};
/**
* struct __prci_wrpll_data - WRPLL configuration and integration data
* @c: WRPLL current configuration record
* @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
* @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
* @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
* @cfg1_offs: WRPLL CFG1 register offset (in bytes) from the PRCI base address
* @release_reset: fn ptr to code to release clock reset
*
* @enable_bypass and @disable_bypass are used for WRPLL instances
* that contain a separate external glitchless clock mux downstream
* from the PLL. The WRPLL internal bypass mux is not glitchless.
*/
struct __prci_wrpll_data {
struct wrpll_cfg c;
void (*enable_bypass)(struct __prci_data *pd);
void (*disable_bypass)(struct __prci_data *pd);
u8 cfg0_offs;
u8 cfg1_offs;
void (*release_reset)(struct __prci_data *pd);
};
/**
* struct __prci_clock - describes a clock device managed by PRCI
* @name: user-readable clock name string - should match the manual
* @parent_name: parent name for this clock
* @ops: struct __prci_clock_ops for control
* @pwd: WRPLL-specific data, associated with this clock (if not NULL)
* @pd: PRCI-specific data associated with this clock (if not NULL)
*
* PRCI clock data. Used by the PRCI driver to register PRCI-provided
* clocks to the Linux clock infrastructure.
*/
struct __prci_clock {
const char *name;
const char *parent_name;
const struct __prci_clock_ops *ops;
struct __prci_wrpll_data *pwd;
struct __prci_data *pd;
};
/* struct __prci_clock_ops - clock operations */
struct __prci_clock_ops {
int (*set_rate)(struct __prci_clock *pc,
unsigned long rate,
unsigned long parent_rate);
unsigned long (*round_rate)(struct __prci_clock *pc,
unsigned long rate,
unsigned long *parent_rate);
unsigned long (*recalc_rate)(struct __prci_clock *pc,
unsigned long parent_rate);
int (*enable_clk)(struct __prci_clock *pc, bool enable);
};
/*
 * struct prci_clk_desc - describes the clocks provided by each SoC's PRCI
 * @clks: pointer to an array of __prci_clock
 * @num_clks: the number of elements in @clks
 */
struct prci_clk_desc {
struct __prci_clock *clks;
size_t num_clks;
};
void sifive_prci_ethernet_release_reset(struct __prci_data *pd);
void sifive_prci_ddr_release_reset(struct __prci_data *pd);
void sifive_prci_cltx_release_reset(struct __prci_data *pd);
/* Core clock mux control */
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd);
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd);
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd);
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd);
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd);
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd);
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd);
unsigned long sifive_prci_wrpll_round_rate(struct __prci_clock *pc,
unsigned long rate,
unsigned long *parent_rate);
/* Linux clock framework integration */
int sifive_prci_wrpll_set_rate(struct __prci_clock *pc,
unsigned long rate,
unsigned long parent_rate);
unsigned long sifive_prci_wrpll_recalc_rate(struct __prci_clock *pc,
unsigned long parent_rate);
unsigned long sifive_prci_tlclksel_recalc_rate(struct __prci_clock *pc,
unsigned long parent_rate);
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct __prci_clock *pc,
unsigned long parent_rate);
int sifive_prci_clock_enable(struct __prci_clock *pc, bool enable);
#endif /* __SIFIVE_CLK_SIFIVE_PRCI_H */


@@ -166,7 +166,7 @@ config RESET_IPQ419
 config RESET_SIFIVE
 	bool "Reset Driver for SiFive SoC's"
-	depends on DM_RESET && CLK_SIFIVE_FU540_PRCI && TARGET_SIFIVE_UNLEASHED
+	depends on DM_RESET && CLK_SIFIVE_PRCI && TARGET_SIFIVE_UNLEASHED
 	default y
 	help
 	  PRCI module within SiFive SoC's provides mechanism to reset