fsl_qbman: SDK DPAA 1.x QBMan drivers

Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Madalin Bucur, 2017-05-10 16:30:12 +03:00 (committed by Dong Aisheng)
parent 405be24697
commit 16ff861424
35 changed files with 24695 additions and 0 deletions


@@ -0,0 +1,228 @@
config FSL_SDK_DPA
bool "Freescale Datapath Queue and Buffer management"
depends on !FSL_DPAA
select FSL_QMAN_FQ_LOOKUP if PPC64
select FSL_QMAN_FQ_LOOKUP if ARM64
menu "Freescale Datapath QMan/BMan options"
depends on FSL_SDK_DPA
config FSL_DPA_CHECKING
bool "additional driver checking"
default n
---help---
Compiles in additional checks to sanity-check the drivers and any
use of it by other code. Not recommended for performance.
config FSL_DPA_CAN_WAIT
bool
default y
config FSL_DPA_CAN_WAIT_SYNC
bool
default y
config FSL_DPA_PIRQ_FAST
bool
default y
config FSL_DPA_PIRQ_SLOW
bool
default y
config FSL_DPA_PORTAL_SHARE
bool
default y
config FSL_SDK_BMAN
bool "Freescale Buffer Manager (BMan) support"
default y
if FSL_SDK_BMAN
config FSL_BMAN_CONFIG
bool "BMan device management"
default y
---help---
If this linux image is running natively, you need this option. If this
linux image is running as a guest OS under the hypervisor, only one
guest OS ("the control plane") needs this option.
config FSL_BMAN_TEST
tristate "BMan self-tests"
default n
---help---
This option compiles self-test code for BMan.
config FSL_BMAN_TEST_HIGH
bool "BMan high-level self-test"
depends on FSL_BMAN_TEST
default y
---help---
This requires the presence of cpu-affine portals, and performs
high-level API testing with them (whichever portal(s) are affine to
the cpu(s) the test executes on).
config FSL_BMAN_TEST_THRESH
bool "BMan threshold test"
depends on FSL_BMAN_TEST
default y
---help---
Multi-threaded (SMP) test of BMan pool depletion. A pool is seeded
before multiple threads (one per cpu) create pool objects to track
depletion state changes. The pool is then drained to empty by a
"drainer" thread, and the other threads that they observe exactly
the depletion state changes that are expected.
config FSL_BMAN_DEBUGFS
tristate "BMan debugfs interface"
depends on DEBUG_FS
default y
---help---
This option compiles debugfs code for BMan.
endif # FSL_SDK_BMAN
config FSL_SDK_QMAN
bool "Freescale Queue Manager (QMan) support"
default y
if FSL_SDK_QMAN
config FSL_QMAN_POLL_LIMIT
int
default 32
config FSL_QMAN_CONFIG
bool "QMan device management"
default y
---help---
If this linux image is running natively, you need this option. If this
linux image is running as a guest OS under the hypervisor, only one
guest OS ("the control plane") needs this option.
config FSL_QMAN_TEST
tristate "QMan self-tests"
default n
---help---
This option compiles self-test code for QMan.
config FSL_QMAN_TEST_STASH_POTATO
bool "QMan 'hot potato' data-stashing self-test"
depends on FSL_QMAN_TEST
default y
---help---
This performs a "hot potato" style test enqueuing/dequeuing a frame
across a series of FQs scheduled to different portals (and cpus), with
DQRR, data and context stashing always on.
config FSL_QMAN_TEST_HIGH
bool "QMan high-level self-test"
depends on FSL_QMAN_TEST
default y
---help---
This requires the presence of cpu-affine portals, and performs
high-level API testing with them (whichever portal(s) are affine to
the cpu(s) the test executes on).
config FSL_QMAN_DEBUGFS
tristate "QMan debugfs interface"
depends on DEBUG_FS
default y
---help---
This option compiles debugfs code for QMan.
# H/w settings that can be hard-coded for now.
config FSL_QMAN_FQD_SZ
int "size of Frame Queue Descriptor region"
default 10
---help---
This is the size of the FQD region defined as: PAGE_SIZE * (2^value)
ex: 10 => PAGE_SIZE * (2^10)
Note: Default device-trees now require a minimum Kconfig setting of 10.
config FSL_QMAN_PFDR_SZ
int "size of the PFDR pool"
default 13
---help---
This is the size of the PFDR pool defined as: PAGE_SIZE * (2^value)
ex: 13 => PAGE_SIZE * (2^13)
# Corenet initiator settings. Stash request queues are 4-deep to match cores'
# ability to snarf. Stash priority is 3, other priorities are 2.
config FSL_QMAN_CI_SCHED_CFG_SRCCIV
int
depends on FSL_QMAN_CONFIG
default 4
config FSL_QMAN_CI_SCHED_CFG_SRQ_W
int
depends on FSL_QMAN_CONFIG
default 3
config FSL_QMAN_CI_SCHED_CFG_RW_W
int
depends on FSL_QMAN_CONFIG
default 2
config FSL_QMAN_CI_SCHED_CFG_BMAN_W
int
depends on FSL_QMAN_CONFIG
default 2
# portal interrupt settings
config FSL_QMAN_PIRQ_DQRR_ITHRESH
int
default 12
config FSL_QMAN_PIRQ_MR_ITHRESH
int
default 4
config FSL_QMAN_PIRQ_IPERIOD
int
default 100
# 64 bit kernel support
config FSL_QMAN_FQ_LOOKUP
bool
default n
config QMAN_CEETM_UPDATE_PERIOD
int "Token update period for shaping, in nanoseconds"
default 1000
---help---
Traffic shaping works by performing token calculations (using
credits) on shaper instances periodically. This update period
sets the granularity for how often those token rate credit
updates are performed, and thus determines the accuracy and
range of traffic rates that can be configured by users. The
reference manual recommends a 1 microsecond period as providing
a good balance between granularity and range.
Unless you know what you are doing, leave this value at its default.
config FSL_QMAN_INIT_TIMEOUT
int "timeout for qman init stage, in seconds"
default 10
---help---
The timeout setting to quit the initialization loop for a non-control
partition in case the control partition fails to boot up.
endif # FSL_SDK_QMAN
config FSL_USDPAA
bool "Freescale USDPAA process driver"
depends on FSL_SDK_DPA
default y
---help---
This driver provides user-space access to kernel-managed
resource interfaces for USDPAA applications, on the assumption
that each process will open this device once. Specifically, this
device exposes functionality that would be awkward if exposed
via the portal devices - ie. this device exposes functionality
that is inherently process-wide rather than portal-specific.
This device is necessary for obtaining access to DMA memory and
for allocation of Qman and Bman resources. In short, if you wish
to use USDPAA applications, you need this.
If unsure, say Y.
endmenu


@@ -0,0 +1,28 @@
subdir-ccflags-y := -Werror
# Common
obj-$(CONFIG_FSL_SDK_DPA) += dpa_alloc.o
obj-$(CONFIG_FSL_SDK_DPA) += qbman_driver.o
# Bman
obj-$(CONFIG_FSL_SDK_BMAN) += bman_high.o
obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o
obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o
obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o
bman_tester-y = bman_test.o
bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o
bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o
bman_debugfs_interface-y = bman_debugfs.o
# Qman
obj-$(CONFIG_FSL_SDK_QMAN) += qman_high.o qman_utility.o
obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o
obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o
qman_tester-y = qman_test.o
qman_tester-$(CONFIG_FSL_QMAN_TEST_STASH_POTATO) += qman_test_hotpotato.o
qman_tester-$(CONFIG_FSL_QMAN_TEST_HIGH) += qman_test_high.o
obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o
qman_debugfs_interface-y = qman_debugfs.o
# USDPAA
obj-$(CONFIG_FSL_USDPAA) += fsl_usdpaa.o fsl_usdpaa_irq.o


@@ -0,0 +1,720 @@
/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm/cacheflush.h>
#include "bman_private.h"
#include <linux/of_reserved_mem.h>
/* Last updated for v00.79 of the BG */
struct bman;
/* Register offsets */
#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04))
#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04))
#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
#define REG_FBPR_FPC 0x0800
#define REG_STATE_IDLE 0x960
#define REG_STATE_STOP 0x964
#define REG_ECSR 0x0a00
#define REG_ECIR 0x0a04
#define REG_EADR 0x0a08
#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
#define REG_IP_REV_1 0x0bf8
#define REG_IP_REV_2 0x0bfc
#define REG_FBPR_BARE 0x0c00
#define REG_FBPR_BAR 0x0c04
#define REG_FBPR_AR 0x0c10
#define REG_SRCIDR 0x0d04
#define REG_LIODNR 0x0d08
#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */
/* Used by all error interrupt registers except 'inhibit' */
#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
/* BMAN_ECIR valid error bit */
#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI)
union bman_ecir {
u32 ecir_raw;
struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
u32 __reserved1:4;
u32 portal_num:4;
u32 __reserved2:12;
u32 numb:4;
u32 __reserved3:2;
u32 pid:6;
#else
u32 pid:6;
u32 __reserved3:2;
u32 numb:4;
u32 __reserved2:12;
u32 portal_num:4;
u32 __reserved1:4;
#endif
} __packed info;
};
union bman_eadr {
u32 eadr_raw;
struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
u32 __reserved1:5;
u32 memid:3;
u32 __reserved2:14;
u32 eadr:10;
#else
u32 eadr:10;
u32 __reserved2:14;
u32 memid:3;
u32 __reserved1:5;
#endif
} __packed info;
};
struct bman_hwerr_txt {
u32 mask;
const char *txt;
};
#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
static const struct bman_hwerr_txt bman_hwerr_txts[] = {
BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
};
#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt))
struct bman_error_info_mdata {
u16 addr_mask;
u16 bits;
const char *txt;
};
#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
static const struct bman_error_info_mdata error_mdata[] = {
BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
};
#define BMAN_ERR_MDATA_COUNT \
(sizeof(error_mdata)/sizeof(struct bman_error_info_mdata))
/* Add this in Kconfig */
#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
/**
* bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
* @v: for accessors that write values, this is the 32-bit value
*
* Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
* manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
* the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
* "write the enable register" rather than "enable the write register"!
*/
#define bm_err_isr_status_read(bm) \
__bm_err_isr_read(bm, bm_isr_status)
#define bm_err_isr_status_clear(bm, m) \
__bm_err_isr_write(bm, bm_isr_status, m)
#define bm_err_isr_enable_read(bm) \
__bm_err_isr_read(bm, bm_isr_enable)
#define bm_err_isr_enable_write(bm, v) \
__bm_err_isr_write(bm, bm_isr_enable, v)
#define bm_err_isr_disable_read(bm) \
__bm_err_isr_read(bm, bm_isr_disable)
#define bm_err_isr_disable_write(bm, v) \
__bm_err_isr_write(bm, bm_isr_disable, v)
#define bm_err_isr_inhibit(bm) \
__bm_err_isr_write(bm, bm_isr_inhibit, 1)
#define bm_err_isr_uninhibit(bm) \
__bm_err_isr_write(bm, bm_isr_inhibit, 0)
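/*
* Illustrative usage sketch (not part of the original driver; it mirrors
* the bman_isr() handler further down): read the status, mask it against
* the enable register, then write back the handled bits to clear them.
*
*   u32 pending = bm_err_isr_status_read(bm) & bm_err_isr_enable_read(bm);
*   if (pending & BM_EIRQ_SBEI)
*       bm_err_isr_status_clear(bm, BM_EIRQ_SBEI);
*/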
/*
* TODO: unimplemented registers
*
* BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
* BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
*/
/* Encapsulate "struct bman *" as a cast of the register space address. */
static struct bman *bm_create(void *regs)
{
return (struct bman *)regs;
}
static inline u32 __bm_in(struct bman *bm, u32 offset)
{
return in_be32((void *)bm + offset);
}
static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
{
out_be32((void *)bm + offset, val);
}
#define bm_in(reg) __bm_in(bm, REG_##reg)
#define bm_out(reg, val) __bm_out(bm, REG_##reg, val)
static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
{
return __bm_in(bm, REG_ERR_ISR + (n << 2));
}
static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
{
__bm_out(bm, REG_ERR_ISR + (n << 2), val);
}
static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
{
u32 v = bm_in(IP_REV_1);
*id = (v >> 16);
*major = (v >> 8) & 0xff;
*minor = v & 0xff;
}
static u32 __generate_thresh(u32 val, int roundup)
{
u32 e = 0; /* co-efficient, exponent */
int oddbit = 0;
while (val > 0xff) {
oddbit = val & 1;
val >>= 1;
e++;
if (roundup && oddbit)
val++;
}
DPA_ASSERT(e < 0x10);
return val | (e << 8);
}
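/*
* Worked example (hand-derived, not from the block guide): encoding
* val = 0x1234 with roundup = 0 shifts right until the value fits in 8
* bits: 0x1234 -> 0x91a -> 0x48d -> 0x246 -> 0x123 -> 0x91, with e = 5,
* so the returned register value is 0x91 | (5 << 8) = 0x591, i.e. an
* 8-bit coefficient plus a 4-bit exponent.
*/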
static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
u32 hwdet, u32 hwdxt)
{
DPA_ASSERT(pool < bman_pool_max);
bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
}
static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
{
u32 exp = ilog2(size);
/* choke if size isn't within range */
DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
is_power_of_2(size));
/* choke if '[e]ba' has lower-alignment than 'size' */
DPA_ASSERT(!(ba & (size - 1)));
bm_out(FBPR_BARE, upper_32_bits(ba));
bm_out(FBPR_BAR, lower_32_bits(ba));
bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
}
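/*
* Example (a sketch assuming 4 KiB pages and the DEFAULT_FBPR_SZ defined
* below): size = PAGE_SIZE << 12 = 16 MiB, so exp = ilog2(size) = 24 and
* FBPR_AR is programmed with exp - 1 = 23, OR'd with bit 30 when 'prio'
* is set.
*/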
/*****************/
/* Config driver */
/*****************/
/* TODO: Kconfig these? */
#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12)
/* We support only one of these. */
static struct bman *bm;
static struct device_node *bm_node;
/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used
* during bman_init_ccsr(). */
static dma_addr_t fbpr_a;
static size_t fbpr_sz = DEFAULT_FBPR_SZ;
static int bman_fbpr(struct reserved_mem *rmem)
{
fbpr_a = rmem->base;
fbpr_sz = rmem->size;
WARN_ON(!(fbpr_a && fbpr_sz));
return 0;
}
RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
static int __init fsl_bman_init(struct device_node *node)
{
struct resource res;
u32 __iomem *regs;
const char *s;
int ret, standby = 0;
u16 id;
u8 major, minor;
ret = of_address_to_resource(node, 0, &res);
if (ret) {
pr_err("Can't get %s property 'reg'\n",
node->full_name);
return ret;
}
s = of_get_property(node, "fsl,hv-claimable", &ret);
if (s && !strcmp(s, "standby"))
standby = 1;
/* Global configuration */
regs = ioremap(res.start, res.end - res.start + 1);
bm = bm_create(regs);
BUG_ON(!bm);
bm_node = node;
bm_get_version(bm, &id, &major, &minor);
pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor);
if ((major == 1) && (minor == 0)) {
bman_ip_rev = BMAN_REV10;
bman_pool_max = 64;
} else if ((major == 2) && (minor == 0)) {
bman_ip_rev = BMAN_REV20;
bman_pool_max = 8;
} else if ((major == 2) && (minor == 1)) {
bman_ip_rev = BMAN_REV21;
bman_pool_max = 64;
} else {
pr_warn("unknown Bman version, default to rev1.0\n");
}
if (standby) {
pr_info(" -> in standby mode\n");
return 0;
}
return 0;
}
int bman_have_ccsr(void)
{
return bm ? 1 : 0;
}
int bm_pool_set(u32 bpid, const u32 *thresholds)
{
if (!bm)
return -ENODEV;
bm_set_pool(bm, bpid, thresholds[0],
thresholds[1], thresholds[2],
thresholds[3]);
return 0;
}
EXPORT_SYMBOL(bm_pool_set);
__init int bman_init_early(void)
{
struct device_node *dn;
int ret;
for_each_compatible_node(dn, NULL, "fsl,bman") {
if (bm)
pr_err("%s: only one 'fsl,bman' allowed\n",
dn->full_name);
else {
if (!of_device_is_available(dn))
continue;
ret = fsl_bman_init(dn);
BUG_ON(ret);
}
}
return 0;
}
postcore_initcall_sync(bman_init_early);
static void log_edata_bits(u32 bit_count)
{
u32 i, j, mask = 0xffffffff;
pr_warn("Bman ErrInt, EDATA:\n");
i = bit_count/32;
if (bit_count%32) {
i++;
mask = ~(mask << bit_count%32);
}
j = 16-i;
pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask);
j++;
for (; j < 16; j++)
pr_warn(" 0x%08x\n", bm_in(EDATA(j)));
}
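/*
* Worked example (hand-derived): for the 192-bit stockpile entry,
* bit_count/32 = 6 with no remainder, so the mask stays 0xffffffff and
* words EDATA(10) through EDATA(15) are printed.
*/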
static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
{
union bman_ecir ecir_val;
union bman_eadr eadr_val;
ecir_val.ecir_raw = bm_in(ECIR);
/* Is portal info valid */
if (ecsr_val & PORTAL_ECSR_ERR) {
pr_warn("Bman ErrInt: SWP id %d, numb %d, pid %d\n",
ecir_val.info.portal_num, ecir_val.info.numb,
ecir_val.info.pid);
}
if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
eadr_val.eadr_raw = bm_in(EADR);
pr_warn("Bman ErrInt: EADR Memory: %s, 0x%x\n",
error_mdata[eadr_val.info.memid].txt,
error_mdata[eadr_val.info.memid].addr_mask
& eadr_val.info.eadr);
log_edata_bits(error_mdata[eadr_val.info.memid].bits);
}
}
/* Bman interrupt handler */
static irqreturn_t bman_isr(int irq, void *ptr)
{
u32 isr_val, ier_val, ecsr_val, isr_mask, i;
ier_val = bm_err_isr_enable_read(bm);
isr_val = bm_err_isr_status_read(bm);
ecsr_val = bm_in(ECSR);
isr_mask = isr_val & ier_val;
if (!isr_mask)
return IRQ_NONE;
for (i = 0; i < BMAN_HWE_COUNT; i++) {
if (bman_hwerr_txts[i].mask & isr_mask) {
pr_warn("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt);
if (bman_hwerr_txts[i].mask & ecsr_val) {
log_additional_error_info(isr_mask, ecsr_val);
/* Re-arm error capture registers */
bm_out(ECSR, ecsr_val);
}
if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
pr_devel("Bman un-enabling error 0x%x\n",
bman_hwerr_txts[i].mask);
ier_val &= ~bman_hwerr_txts[i].mask;
bm_err_isr_enable_write(bm, ier_val);
}
}
}
bm_err_isr_status_clear(bm, isr_val);
return IRQ_HANDLED;
}
static int __bind_irq(void)
{
int ret, err_irq;
err_irq = of_irq_to_resource(bm_node, 0, NULL);
if (err_irq == 0) {
pr_info("Can't get %s property '%s'\n", bm_node->full_name,
"interrupts");
return -ENODEV;
}
ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node);
if (ret) {
pr_err("request_irq() failed %d for '%s'\n", ret,
bm_node->full_name);
return -ENODEV;
}
/* Disable Buffer Pool State Change */
bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
/* Write-to-clear any stale bits, (eg. starvation being asserted prior
* to resource allocation during driver init). */
bm_err_isr_status_clear(bm, 0xffffffff);
/* Enable Error Interrupts */
bm_err_isr_enable_write(bm, 0xffffffff);
return 0;
}
int bman_init_ccsr(struct device_node *node)
{
int ret;
if (!bman_have_ccsr())
return 0;
if (node != bm_node)
return -EINVAL;
/* FBPR memory */
bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
pr_info("bman-fbpr addr %pad size 0x%zx\n", &fbpr_a, fbpr_sz);
ret = __bind_irq();
if (ret)
return ret;
return 0;
}
u32 bm_pool_free_buffers(u32 bpid)
{
return bm_in(POOL_CONTENT(bpid));
}
#ifdef CONFIG_SYSFS
#define DRV_NAME "fsl-bman"
#define SBEC_MAX_ID 1
#define SBEC_MIN_ID 0
static ssize_t show_fbpr_fpc(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
};
static ssize_t show_pool_count(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
u32 data;
int i;
if (!sscanf(dev_attr->attr.name, "%d", &i) || (i >= bman_pool_max))
return -EINVAL;
data = bm_in(POOL_CONTENT(i));
return snprintf(buf, PAGE_SIZE, "%d\n", data);
};
static ssize_t show_err_isr(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
};
static ssize_t show_sbec(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
int i;
if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
return -EINVAL;
if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
return -EINVAL;
return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
};
static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
/* Didn't use DEVICE_ATTR as 64 of them would be required.
* Initialize them when needed. */
static char *name_attrs_pool_count; /* "xx" + null-terminator */
static struct device_attribute *dev_attr_buffer_pool_count;
static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
static struct attribute *bman_dev_attributes[] = {
&dev_attr_fbpr_fpc.attr,
&dev_attr_err_isr.attr,
NULL
};
static struct attribute *bman_dev_ecr_attributes[] = {
&dev_attr_sbec_0.attr,
&dev_attr_sbec_1.attr,
NULL
};
static struct attribute **bman_dev_pool_count_attributes;
/* root level */
static const struct attribute_group bman_dev_attr_grp = {
.name = NULL,
.attrs = bman_dev_attributes
};
static const struct attribute_group bman_dev_ecr_grp = {
.name = "error_capture",
.attrs = bman_dev_ecr_attributes
};
static struct attribute_group bman_dev_pool_countent_grp = {
.name = "pool_count",
};
static int of_fsl_bman_remove(struct platform_device *ofdev)
{
sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
return 0;
};
static int of_fsl_bman_probe(struct platform_device *ofdev)
{
int ret, i;
ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
if (ret)
goto done;
ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
if (ret)
goto del_group_0;
name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3,
GFP_KERNEL);
if (!name_attrs_pool_count) {
pr_err("Can't alloc name_attrs_pool_count\n");
ret = -ENOMEM;
goto del_group_1;
}
dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) *
bman_pool_max, GFP_KERNEL);
if (!dev_attr_buffer_pool_count) {
pr_err("Can't alloc dev_attr-buffer_pool_count\n");
ret = -ENOMEM;
goto del_group_2;
}
bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) *
(bman_pool_max + 1), GFP_KERNEL);
if (!bman_dev_pool_count_attributes) {
pr_err("can't alloc bman_dev_pool_count_attributes\n");
ret = -ENOMEM;
goto del_group_3;
}
for (i = 0; i < bman_pool_max; i++) {
ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
if (!ret)
goto del_group_4;
dev_attr_buffer_pool_count[i].attr.name =
(name_attrs_pool_count + i * 3);
dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
dev_attr_buffer_pool_count[i].show = show_pool_count;
bman_dev_pool_count_attributes[i] =
&dev_attr_buffer_pool_count[i].attr;
sysfs_attr_init(bman_dev_pool_count_attributes[i]);
}
bman_dev_pool_count_attributes[bman_pool_max] = NULL;
bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes;
ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp);
if (ret)
goto del_group_4;
goto done;
del_group_4:
kfree(bman_dev_pool_count_attributes);
del_group_3:
kfree(dev_attr_buffer_pool_count);
del_group_2:
kfree(name_attrs_pool_count);
del_group_1:
sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
del_group_0:
sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
done:
if (ret)
dev_err(&ofdev->dev,
"Cannot create dev attributes ret=%d\n", ret);
return ret;
};
static struct of_device_id of_fsl_bman_ids[] = {
{
.compatible = "fsl,bman",
},
{}
};
MODULE_DEVICE_TABLE(of, of_fsl_bman_ids);
#ifdef CONFIG_SUSPEND
static u32 saved_isdr;
static int bman_pm_suspend_noirq(struct device *dev)
{
uint32_t idle_state;
suspend_unused_bportal();
/* save isdr, disable all, clear isr */
saved_isdr = bm_err_isr_disable_read(bm);
bm_err_isr_disable_write(bm, 0xffffffff);
bm_err_isr_status_clear(bm, 0xffffffff);
if (bman_ip_rev < BMAN_REV21) {
#ifdef CONFIG_PM_DEBUG
pr_info("Bman version doesn't have STATE_IDLE\n");
#endif
return 0;
}
idle_state = bm_in(STATE_IDLE);
if (!(idle_state & 0x1)) {
pr_err("Bman not idle 0x%x aborting\n", idle_state);
bm_err_isr_disable_write(bm, saved_isdr);
resume_unused_bportal();
return -EBUSY;
}
#ifdef CONFIG_PM_DEBUG
pr_info("Bman suspend code, IDLE_STAT = 0x%x\n", idle_state);
#endif
return 0;
}
static int bman_pm_resume_noirq(struct device *dev)
{
/* restore isdr */
bm_err_isr_disable_write(bm, saved_isdr);
resume_unused_bportal();
return 0;
}
#else
#define bman_pm_suspend_noirq NULL
#define bman_pm_resume_noirq NULL
#endif
static const struct dev_pm_ops bman_pm_ops = {
.suspend_noirq = bman_pm_suspend_noirq,
.resume_noirq = bman_pm_resume_noirq,
};
static struct platform_driver of_fsl_bman_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = of_fsl_bman_ids,
.pm = &bman_pm_ops,
},
.probe = of_fsl_bman_probe,
.remove = of_fsl_bman_remove,
};
static int bman_ctrl_init(void)
{
return platform_driver_register(&of_fsl_bman_driver);
}
static void bman_ctrl_exit(void)
{
platform_driver_unregister(&of_fsl_bman_driver);
}
module_init(bman_ctrl_init);
module_exit(bman_ctrl_exit);
#endif /* CONFIG_SYSFS */


@@ -0,0 +1,119 @@
/* Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/fsl_bman.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
static struct dentry *dfs_root; /* debugfs root directory */
/*******************************************************************************
* Query Buffer Pool State
******************************************************************************/
static int query_bp_state_show(struct seq_file *file, void *offset)
{
int ret;
struct bm_pool_state state;
int i, j;
u32 mask;
memset(&state, 0, sizeof(struct bm_pool_state));
ret = bman_query_pools(&state);
if (ret) {
seq_printf(file, "Error %d\n", ret);
return 0;
}
seq_puts(file, "bp_id free_buffers_avail bp_depleted\n");
for (i = 0; i < 2; i++) {
mask = 0x80000000;
for (j = 0; j < 32; j++) {
seq_printf(file,
" %-2u %-3s %-3s\n",
(i*32)+j,
(state.as.state.__state[i] & mask) ? "no" : "yes",
(state.ds.state.__state[i] & mask) ? "yes" : "no");
mask >>= 1;
}
}
return 0;
}
static int query_bp_state_open(struct inode *inode, struct file *file)
{
return single_open(file, query_bp_state_show, NULL);
}
static const struct file_operations query_bp_state_fops = {
.owner = THIS_MODULE,
.open = query_bp_state_open,
.read = seq_read,
.release = single_release,
};
static int __init bman_debugfs_module_init(void)
{
int ret = 0;
struct dentry *d;
dfs_root = debugfs_create_dir("bman", NULL);
if (dfs_root == NULL) {
ret = -ENOMEM;
pr_err("Cannot create bman debugfs dir\n");
goto _return;
}
d = debugfs_create_file("query_bp_state",
S_IRUGO,
dfs_root,
NULL,
&query_bp_state_fops);
if (d == NULL) {
ret = -ENOMEM;
pr_err("Cannot create query_bp_state\n");
goto _return;
}
return 0;
_return:
debugfs_remove_recursive(dfs_root);
return ret;
}
static void __exit bman_debugfs_module_exit(void)
{
debugfs_remove_recursive(dfs_root);
}
module_init(bman_debugfs_module_init);
module_exit(bman_debugfs_module_exit);
MODULE_LICENSE("Dual BSD/GPL");


@@ -0,0 +1,559 @@
/* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bman_low.h"
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpu.h>
#endif
/*
* Global variables for the max portal/pool numbers supported by this BMan version
*/
u16 bman_ip_rev;
EXPORT_SYMBOL(bman_ip_rev);
u16 bman_pool_max;
EXPORT_SYMBOL(bman_pool_max);
static u16 bman_portal_max;
/* After initialising cpus that own shared portal configs, we cache the
* resulting portals (ie. not just the configs) in this array. Then we
* initialise slave cpus that don't have their own portals, redirecting them to
* portals from this cache in a round-robin assignment. */
static struct bman_portal *shared_portals[NR_CPUS];
static int num_shared_portals;
static int shared_portals_idx;
static LIST_HEAD(unused_pcfgs);
static DEFINE_SPINLOCK(unused_pcfgs_lock);
static void *affine_bportals[NR_CPUS];
static int __init fsl_bpool_init(struct device_node *node)
{
int ret;
u32 *thresh, *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret);
if (!bpid || (ret != 4)) {
pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name);
return -ENODEV;
}
thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret);
if (thresh) {
if (ret != 16) {
pr_err("Invalid %s property '%s'\n",
node->full_name, "fsl,bpool-thresholds");
return -ENODEV;
}
}
if (thresh) {
#ifdef CONFIG_FSL_BMAN_CONFIG
ret = bm_pool_set(be32_to_cpu(*bpid), thresh);
if (ret)
pr_err("No CCSR node for %s property '%s'\n",
node->full_name, "fsl,bpool-thresholds");
return ret;
#else
pr_err("Ignoring %s property '%s', no CCSR support\n",
node->full_name, "fsl,bpool-thresholds");
#endif
}
return 0;
}
static int __init fsl_bpid_range_init(struct device_node *node)
{
int ret;
u32 *range = (u32 *)of_get_property(node, "fsl,bpid-range", &ret);
if (!range) {
pr_err("No 'fsl,bpid-range' property in node %s\n",
node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err("'fsl,bpid-range' is not a 2-cell range in node %s\n",
node->full_name);
return -EINVAL;
}
bman_seed_bpid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
pr_info("Bman: BPID allocator includes range %d:%d\n",
be32_to_cpu(range[0]), be32_to_cpu(range[1]));
return 0;
}
static struct bm_portal_config * __init parse_pcfg(struct device_node *node)
{
struct bm_portal_config *pcfg;
const u32 *index;
int irq, ret;
resource_size_t len;
pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
if (!pcfg) {
pr_err("can't allocate portal config");
return NULL;
}
if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
bman_ip_rev = BMAN_REV10;
bman_pool_max = 64;
bman_portal_max = 10;
} else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
bman_ip_rev = BMAN_REV20;
bman_pool_max = 8;
bman_portal_max = 3;
} else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0")) {
bman_ip_rev = BMAN_REV21;
bman_pool_max = 64;
bman_portal_max = 50;
} else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.1")) {
bman_ip_rev = BMAN_REV21;
bman_pool_max = 64;
bman_portal_max = 25;
} else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.2")) {
bman_ip_rev = BMAN_REV21;
bman_pool_max = 64;
bman_portal_max = 18;
} else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
bman_ip_rev = BMAN_REV21;
bman_pool_max = 64;
bman_portal_max = 10;
} else {
pr_warn("unknown BMan version in portal node,"
"default to rev1.0\n");
bman_ip_rev = BMAN_REV10;
bman_pool_max = 64;
bman_portal_max = 10;
}
ret = of_address_to_resource(node, DPA_PORTAL_CE,
&pcfg->addr_phys[DPA_PORTAL_CE]);
if (ret) {
pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
goto err;
}
ret = of_address_to_resource(node, DPA_PORTAL_CI,
&pcfg->addr_phys[DPA_PORTAL_CI]);
if (ret) {
pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
goto err;
}
index = of_get_property(node, "cell-index", &ret);
if (!index || (ret != 4)) {
pr_err("Can't get %s property '%s'\n", node->full_name,
"cell-index");
goto err;
}
if (be32_to_cpu(*index) >= bman_portal_max) {
pr_err("BMan portal cell index %d out of range, max %d\n",
be32_to_cpu(*index), bman_portal_max);
goto err;
}
pcfg->public_cfg.cpu = -1;
irq = irq_of_parse_and_map(node, 0);
if (irq == 0) {
pr_err("Can't get %s property 'interrupts'\n", node->full_name);
goto err;
}
pcfg->public_cfg.irq = irq;
pcfg->public_cfg.index = be32_to_cpu(*index);
bman_depletion_fill(&pcfg->public_cfg.mask);
len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
if (len != (unsigned long)len)
goto err;
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
pcfg->addr_phys[DPA_PORTAL_CE].start,
resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
pcfg->addr_phys[DPA_PORTAL_CI].start,
resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
#else
pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
pcfg->addr_phys[DPA_PORTAL_CE].start,
(unsigned long)len,
0);
pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
pcfg->addr_phys[DPA_PORTAL_CI].start,
resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
#endif
/* disable bp depletion */
__raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(0));
__raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(1));
return pcfg;
err:
kfree(pcfg);
return NULL;
}
static struct bm_portal_config *get_pcfg(struct list_head *list)
{
struct bm_portal_config *pcfg;
if (list_empty(list))
return NULL;
pcfg = list_entry(list->prev, struct bm_portal_config, list);
list_del(&pcfg->list);
return pcfg;
}
static struct bm_portal_config *get_pcfg_idx(struct list_head *list,
uint32_t idx)
{
struct bm_portal_config *pcfg;
if (list_empty(list))
return NULL;
list_for_each_entry(pcfg, list, list) {
if (pcfg->public_cfg.index == idx) {
list_del(&pcfg->list);
return pcfg;
}
}
return NULL;
}
struct bm_portal_config *bm_get_unused_portal(void)
{
return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
}
struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx)
{
struct bm_portal_config *ret;
spin_lock(&unused_pcfgs_lock);
if (idx == QBMAN_ANY_PORTAL_IDX)
ret = get_pcfg(&unused_pcfgs);
else
ret = get_pcfg_idx(&unused_pcfgs, idx);
spin_unlock(&unused_pcfgs_lock);
return ret;
}
void bm_put_unused_portal(struct bm_portal_config *pcfg)
{
spin_lock(&unused_pcfgs_lock);
list_add(&pcfg->list, &unused_pcfgs);
spin_unlock(&unused_pcfgs_lock);
}
static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
{
struct bman_portal *p;
p = bman_create_affine_portal(pcfg);
if (p) {
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
#endif
pr_info("Bman portal %sinitialised, cpu %d\n",
pcfg->public_cfg.is_shared ? "(shared) " : "",
pcfg->public_cfg.cpu);
affine_bportals[pcfg->public_cfg.cpu] = p;
} else
pr_crit("Bman portal failure on cpu %d\n",
pcfg->public_cfg.cpu);
return p;
}
static void init_slave(int cpu)
{
struct bman_portal *p;
p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
if (!p)
pr_err("Bman slave portal failure on cpu %d\n", cpu);
else
pr_info("Bman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
if (shared_portals_idx >= num_shared_portals)
shared_portals_idx = 0;
affine_bportals[cpu] = p;
}
/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
* parsing is in dpa_sys.h. The syntax is a comma-separated list of indexes
* and/or ranges of indexes, with each being optionally prefixed by "s" to
* explicitly mark it or them for sharing.
* Eg;
* bportals=s0,1-3,s4
* means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
* portals, and any remaining cpus share the portals that are assigned to cpus 0
* or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
* cpu 0's portal, cpu 6 would share cpu4's portal, and cpu 7 would share cpu
* 0's portal.) */
static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
static struct cpumask want_shared __initdata; /* cpus requested with "s" */
static int __init parse_bportals(char *str)
{
return parse_portals_bootarg(str, &want_shared, &want_unshared,
"bportals");
}
__setup("bportals=", parse_bportals);
static int bman_offline_cpu(unsigned int cpu)
{
struct bman_portal *p;
const struct bm_portal_config *pcfg;
p = (struct bman_portal *)affine_bportals[cpu];
if (p) {
pcfg = bman_get_bm_portal_config(p);
if (pcfg)
irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
}
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static int bman_online_cpu(unsigned int cpu)
{
struct bman_portal *p;
const struct bm_portal_config *pcfg;
p = (struct bman_portal *)affine_bportals[cpu];
if (p) {
pcfg = bman_get_bm_portal_config(p);
if (pcfg)
irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
}
return 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
/* Initialise the Bman driver. The meat of this function deals with portals. The
* following describes the flow of portal-handling, the code "steps" refer to
* this description;
* 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
* ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
* bound).
* 2. The "want_shared" and "want_unshared" lists (as filled by the
* "bportals=[...]" bootarg) are processed, allocating portals and assigning
* them to cpus, placing them in the relevant list and setting ::cpu as
* appropriate. If no "bportals" bootarg was present, the default is to try to
* assign portals to all online cpus at the time of driver initialisation.
* Any failure to allocate portals (when parsing the "want" lists or when
* using default behaviour) will be silently tolerated (the "fixup" logic in
* step 3 will determine what happens in this case).
* 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
* sharing and sharing is required (because not all cpus have been assigned
* portals), then one portal will be marked for sharing. Conversely, if no
* sharing is required, any portals marked for sharing will not be shared. It
* may be that sharing occurs when it wasn't expected, if portal allocation
* failed to honour all the requested assignments (including the default
* assignments if no bootarg is present).
* 4. Unshared portals are initialised on their respective cpus.
* 5. Shared portals are initialised on their respective cpus.
* 6. Each remaining cpu is initialised to slave to one of the shared portals,
* which are selected in a round-robin fashion.
* Any portal configs left unused are available for USDPAA allocation.
*/
__init int bman_init(void)
{
struct cpumask slave_cpus;
struct cpumask unshared_cpus = *cpu_none_mask;
struct cpumask shared_cpus = *cpu_none_mask;
LIST_HEAD(unshared_pcfgs);
LIST_HEAD(shared_pcfgs);
struct device_node *dn;
struct bm_portal_config *pcfg;
struct bman_portal *p;
int cpu, ret;
struct cpumask offline_cpus;
/* Initialise the Bman (CCSR) device */
for_each_compatible_node(dn, NULL, "fsl,bman") {
if (!bman_init_ccsr(dn))
pr_info("Bman err interrupt handler present\n");
else
pr_err("Bman CCSR setup failed\n");
}
/* Initialise any declared buffer pools */
for_each_compatible_node(dn, NULL, "fsl,bpool") {
ret = fsl_bpool_init(dn);
if (ret)
return ret;
}
/* Step 1. See comments at the beginning of the file. */
for_each_compatible_node(dn, NULL, "fsl,bman-portal") {
if (!of_device_is_available(dn))
continue;
pcfg = parse_pcfg(dn);
if (pcfg)
list_add_tail(&pcfg->list, &unused_pcfgs);
}
/* Step 2. */
for_each_possible_cpu(cpu) {
if (cpumask_test_cpu(cpu, &want_shared)) {
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
pcfg->public_cfg.cpu = cpu;
list_add_tail(&pcfg->list, &shared_pcfgs);
cpumask_set_cpu(cpu, &shared_cpus);
}
if (cpumask_test_cpu(cpu, &want_unshared)) {
if (cpumask_test_cpu(cpu, &shared_cpus))
continue;
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
pcfg->public_cfg.cpu = cpu;
list_add_tail(&pcfg->list, &unshared_pcfgs);
cpumask_set_cpu(cpu, &unshared_cpus);
}
}
if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
/* Default, give an unshared portal to each online cpu */
for_each_online_cpu(cpu) {
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
pcfg->public_cfg.cpu = cpu;
list_add_tail(&pcfg->list, &unshared_pcfgs);
cpumask_set_cpu(cpu, &unshared_cpus);
}
}
/* Step 3. */
cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
if (cpumask_empty(&slave_cpus)) {
/* No sharing required */
if (!list_empty(&shared_pcfgs)) {
/* Migrate "shared" to "unshared" */
cpumask_or(&unshared_cpus, &unshared_cpus,
&shared_cpus);
cpumask_clear(&shared_cpus);
list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
INIT_LIST_HEAD(&shared_pcfgs);
}
} else {
/* Sharing required */
if (list_empty(&shared_pcfgs)) {
/* Migrate one "unshared" to "shared" */
pcfg = get_pcfg(&unshared_pcfgs);
if (!pcfg) {
pr_crit("No BMan portals available!\n");
return 0;
}
cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
list_add_tail(&pcfg->list, &shared_pcfgs);
}
}
/* Step 4. */
list_for_each_entry(pcfg, &unshared_pcfgs, list) {
pcfg->public_cfg.is_shared = 0;
p = init_pcfg(pcfg);
if (!p) {
pr_crit("Unable to initialize bman portal\n");
return 0;
}
}
/* Step 5. */
list_for_each_entry(pcfg, &shared_pcfgs, list) {
pcfg->public_cfg.is_shared = 1;
p = init_pcfg(pcfg);
if (p)
shared_portals[num_shared_portals++] = p;
}
/* Step 6. */
if (!cpumask_empty(&slave_cpus))
for_each_cpu(cpu, &slave_cpus)
init_slave(cpu);
pr_info("Bman portals initialised\n");
cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
for_each_cpu(cpu, &offline_cpus)
bman_offline_cpu(cpu);
#ifdef CONFIG_HOTPLUG_CPU
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"soc/qbman_portal:online",
bman_online_cpu, bman_offline_cpu);
if (ret < 0) {
pr_err("bman: failed to register hotplug callbacks.\n");
return 0;
}
#endif
return 0;
}
__init int bman_resource_init(void)
{
struct device_node *dn;
int ret;
/* Initialise BPID allocation ranges */
for_each_compatible_node(dn, NULL, "fsl,bpid-range") {
ret = fsl_bpid_range_init(dn);
if (ret)
return ret;
}
return 0;
}
#ifdef CONFIG_SUSPEND
void suspend_unused_bportal(void)
{
struct bm_portal_config *pcfg;
if (list_empty(&unused_pcfgs))
return;
list_for_each_entry(pcfg, &unused_pcfgs, list) {
#ifdef CONFIG_PM_DEBUG
pr_info("Need to save bportal %d\n", pcfg->public_cfg.index);
#endif
/* save isdr, disable all via isdr, clear isr */
pcfg->saved_isdr =
__raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
__raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
0xe08);
__raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
0xe00);
}
return;
}
void resume_unused_bportal(void)
{
struct bm_portal_config *pcfg;
if (list_empty(&unused_pcfgs))
return;
list_for_each_entry(pcfg, &unused_pcfgs, list) {
#ifdef CONFIG_PM_DEBUG
pr_info("Need to resume bportal %d\n", pcfg->public_cfg.index);
#endif
/* restore isdr */
__raw_writel(pcfg->saved_isdr,
pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
}
return;
}
#endif

File diff suppressed because it is too large.


@@ -0,0 +1,565 @@
/* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bman_private.h"
/***************************/
/* Portal register assists */
/***************************/
#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH 0x0000
#define BM_REG_RCR_CI_CINH 0x0004
#define BM_REG_RCR_ITR 0x0008
#define BM_REG_CFG 0x0100
#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
#define BM_REG_ISR 0x0e00
#define BM_REG_IIR 0x0e0c
/* Cache-enabled register offsets */
#define BM_CL_CR 0x0000
#define BM_CL_RR0 0x0100
#define BM_CL_RR1 0x0140
#define BM_CL_RCR 0x1000
#define BM_CL_RCR_PI_CENA 0x3000
#define BM_CL_RCR_CI_CENA 0x3100
#endif
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH 0x3000
#define BM_REG_RCR_CI_CINH 0x3100
#define BM_REG_RCR_ITR 0x3200
#define BM_REG_CFG 0x3300
#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
#define BM_REG_ISR 0x3e00
#define BM_REG_IIR 0x3ec0
/* Cache-enabled register offsets */
#define BM_CL_CR 0x0000
#define BM_CL_RR0 0x0100
#define BM_CL_RR1 0x0140
#define BM_CL_RCR 0x1000
#define BM_CL_RCR_PI_CENA 0x3000
#define BM_CL_RCR_CI_CENA 0x3100
#endif
/* BTW, the drivers (and h/w programming model) already obtain the required
* synchronisation for portal accesses via lwsync(), hwsync(), and
* data-dependencies. Use of barrier()s or other order-preserving primitives
* simply degrade performance. Hence the use of the __raw_*() interfaces, which
* simply ensure that the compiler treats the portal registers as volatile (ie.
* non-coherent). */
/* Cache-inhibited register access. */
#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ci + (o)))
#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
(bm)->addr_ci + (o));
#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
/* Cache-enabled (index) register access */
#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ce + (o)))
#define __bm_cl_out(bm, o, val) \
do { \
u32 *__tmpclout = (bm)->addr_ce + (o); \
__raw_writel(cpu_to_be32(val), __tmpclout); \
dcbf(__tmpclout); \
} while (0)
#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
#define bm_cl_invalidate(reg)\
__bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
* analysis, look at using the "extra" bit in the ring index registers to avoid
* cyclic issues. */
static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
/* 'first' is included, 'last' is excluded */
if (first <= last)
return last - first;
return ringsize + last - first;
}
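/*
* Example (hand-derived): with ringsize = 8, first = 6 and last = 2 the
* ring index has wrapped, so the difference is 8 + 2 - 6 = 4 entries.
*/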
/* Portal modes.
* Enum types;
* pmode == production mode
* cmode == consumption mode,
* Enum values use 3 letter codes. First letter matches the portal mode,
* remaining two letters indicate;
* ci == cache-inhibited portal register
* ce == cache-enabled portal register
* vb == in-band valid-bit (cache-enabled)
*/
enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
bm_rcr_pci = 0, /* PI index, cache-inhibited */
bm_rcr_pce = 1, /* PI index, cache-enabled */
bm_rcr_pvb = 2 /* valid-bit */
};
enum bm_rcr_cmode { /* s/w-only */
bm_rcr_cci, /* CI index, cache-inhibited */
bm_rcr_cce /* CI index, cache-enabled */
};
/* ------------------------- */
/* --- Portal structures --- */
#define BM_RCR_SIZE 8
struct bm_rcr {
struct bm_rcr_entry *ring, *cursor;
u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPA_CHECKING
u32 busy;
enum bm_rcr_pmode pmode;
enum bm_rcr_cmode cmode;
#endif
};
struct bm_mc {
struct bm_mc_command *cr;
struct bm_mc_result *rr;
u8 rridx, vbit;
#ifdef CONFIG_FSL_DPA_CHECKING
enum {
/* Can only be _mc_start()ed */
mc_idle,
/* Can only be _mc_commit()ed or _mc_abort()ed */
mc_user,
/* Can only be _mc_retry()ed */
mc_hw
} state;
#endif
};
struct bm_addr {
void __iomem *addr_ce; /* cache-enabled */
void __iomem *addr_ci; /* cache-inhibited */
};
struct bm_portal {
struct bm_addr addr;
struct bm_rcr rcr;
struct bm_mc mc;
struct bm_portal_config config;
} ____cacheline_aligned;
/* --------------- */
/* --- RCR API --- */
/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
#define RCR_CARRYCLEAR(p) \
(void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
/* Bit-wise logic to convert a ring pointer to a ring index */
static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
{
return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
}
/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void RCR_INC(struct bm_rcr *rcr)
{
/* NB: this is odd-looking, but experiments show that it generates
* fast code with essentially no branching overheads. We increment to
* the next RCR pointer and handle overflow and 'vbit'. */
struct bm_rcr_entry *partial = rcr->cursor + 1;
rcr->cursor = RCR_CARRYCLEAR(partial);
if (partial != rcr->cursor)
rcr->vbit ^= BM_RCR_VERB_VBIT;
}
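/*
* Sketch of the wrap arithmetic above (assuming 64-byte RCR entries, as
* the shift by 6 implies): the 8-entry ring spans 512 bytes, so stepping
* one entry past the end sets bit 9 of the cursor; RCR_CARRYCLEAR() masks
* that bit to bring the cursor back to the ring base, and RCR_INC() flips
* the valid-bit whenever such a wrap occurs.
*/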
static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
__maybe_unused enum bm_rcr_cmode cmode)
{
/* This use of 'register', as well as all other occurrences, is because
* it has been observed to generate much faster code with gcc than is
* otherwise the case. */
register struct bm_rcr *rcr = &portal->rcr;
u32 cfg;
u8 pi;
rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
rcr->cursor = rcr->ring + pi;
rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
rcr->available = BM_RCR_SIZE - 1
- bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
rcr->ithresh = bm_in(RCR_ITR);
#ifdef CONFIG_FSL_DPA_CHECKING
rcr->busy = 0;
rcr->pmode = pmode;
rcr->cmode = cmode;
#endif
cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
bm_out(CFG, cfg);
return 0;
}
static inline void bm_rcr_finish(struct bm_portal *portal)
{
register struct bm_rcr *rcr = &portal->rcr;
u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
DPA_ASSERT(!rcr->busy);
if (pi != RCR_PTR2IDX(rcr->cursor))
pr_crit("losing uncommited RCR entries\n");
if (ci != rcr->ci)
pr_crit("missing existing RCR completions\n");
if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
pr_crit("RCR destroyed unquiesced\n");
}
static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
register struct bm_rcr *rcr = &portal->rcr;
DPA_ASSERT(!rcr->busy);
if (!rcr->available)
return NULL;
#ifdef CONFIG_FSL_DPA_CHECKING
rcr->busy = 1;
#endif
#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
dcbz_64(rcr->cursor);
#endif
return rcr->cursor;
}
static inline void bm_rcr_abort(struct bm_portal *portal)
{
__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
DPA_ASSERT(rcr->busy);
#ifdef CONFIG_FSL_DPA_CHECKING
rcr->busy = 0;
#endif
}
static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
struct bm_portal *portal, u8 myverb)
{
register struct bm_rcr *rcr = &portal->rcr;
DPA_ASSERT(rcr->busy);
DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
if (rcr->available == 1)
return NULL;
rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
dcbf_64(rcr->cursor);
RCR_INC(rcr);
rcr->available--;
#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
dcbz_64(rcr->cursor);
#endif
return rcr->cursor;
}
static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
{
register struct bm_rcr *rcr = &portal->rcr;
DPA_ASSERT(rcr->busy);
DPA_ASSERT(rcr->pmode == bm_rcr_pci);
rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
RCR_INC(rcr);
rcr->available--;
hwsync();
bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
#ifdef CONFIG_FSL_DPA_CHECKING
rcr->busy = 0;
#endif
}
static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
{
__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
DPA_ASSERT(rcr->pmode == bm_rcr_pce);
bm_cl_invalidate(RCR_PI);
bm_cl_touch_rw(RCR_PI);
}
static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
{
register struct bm_rcr *rcr = &portal->rcr;
DPA_ASSERT(rcr->busy);
DPA_ASSERT(rcr->pmode == bm_rcr_pce);
rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
RCR_INC(rcr);
rcr->available--;
lwsync();
bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
#ifdef CONFIG_FSL_DPA_CHECKING
rcr->busy = 0;
#endif
}
static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
register struct bm_rcr *rcr = &portal->rcr;
struct bm_rcr_entry *rcursor;
DPA_ASSERT(rcr->busy);
DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
lwsync();
rcursor = rcr->cursor;
rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
dcbf_64(rcursor);
RCR_INC(rcr);
rcr->available--;
#ifdef CONFIG_FSL_DPA_CHECKING
rcr->busy = 0;
#endif
}
static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
{
register struct bm_rcr *rcr = &portal->rcr;
u8 diff, old_ci = rcr->ci;
DPA_ASSERT(rcr->cmode == bm_rcr_cci);
rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
rcr->available += diff;
return diff;
}
static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
DPA_ASSERT(rcr->cmode == bm_rcr_cce);
bm_cl_touch_ro(RCR_CI);
}
static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
{
register struct bm_rcr *rcr = &portal->rcr;
u8 diff, old_ci = rcr->ci;
DPA_ASSERT(rcr->cmode == bm_rcr_cce);
rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
bm_cl_invalidate(RCR_CI);
diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
rcr->available += diff;
return diff;
}
static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
{
register struct bm_rcr *rcr = &portal->rcr;
return rcr->ithresh;
}
static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
register struct bm_rcr *rcr = &portal->rcr;
rcr->ithresh = ithresh;
bm_out(RCR_ITR, ithresh);
}
static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
{
register struct bm_rcr *rcr = &portal->rcr;
return rcr->available;
}
static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
{
register struct bm_rcr *rcr = &portal->rcr;
return BM_RCR_SIZE - 1 - rcr->available;
}
/* ------------------------------ */
/* --- Management command API --- */
static inline int bm_mc_init(struct bm_portal *portal)
{
register struct bm_mc *mc = &portal->mc;
mc->cr = portal->addr.addr_ce + BM_CL_CR;
mc->rr = portal->addr.addr_ce + BM_CL_RR0;
mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
BM_MCC_VERB_VBIT) ? 0 : 1;
mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPA_CHECKING
mc->state = mc_idle;
#endif
return 0;
}
static inline void bm_mc_finish(struct bm_portal *portal)
{
__maybe_unused register struct bm_mc *mc = &portal->mc;
DPA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPA_CHECKING
if (mc->state != mc_idle)
pr_crit("Losing incomplete MC command\n");
#endif
}
static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
register struct bm_mc *mc = &portal->mc;
DPA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPA_CHECKING
mc->state = mc_user;
#endif
#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
dcbz_64(mc->cr);
#endif
return mc->cr;
}
static inline void bm_mc_abort(struct bm_portal *portal)
{
__maybe_unused register struct bm_mc *mc = &portal->mc;
DPA_ASSERT(mc->state == mc_user);
#ifdef CONFIG_FSL_DPA_CHECKING
mc->state = mc_idle;
#endif
}
static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
register struct bm_mc *mc = &portal->mc;
struct bm_mc_result *rr = mc->rr + mc->rridx;
DPA_ASSERT(mc->state == mc_user);
lwsync();
mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
dcbf(mc->cr);
dcbit_ro(rr);
#ifdef CONFIG_FSL_DPA_CHECKING
mc->state = mc_hw;
#endif
}
static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
register struct bm_mc *mc = &portal->mc;
struct bm_mc_result *rr = mc->rr + mc->rridx;
DPA_ASSERT(mc->state == mc_hw);
/* The inactive response register's verb byte always returns zero until
* its command is submitted and completed. This includes the valid-bit,
* in case you were wondering... */
if (!__raw_readb(&rr->verb)) {
dcbit_ro(rr);
return NULL;
}
mc->rridx ^= 1;
mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPA_CHECKING
mc->state = mc_idle;
#endif
return rr;
}
/* ------------------------------------- */
/* --- Portal interrupt register API --- */
static inline int bm_isr_init(__always_unused struct bm_portal *portal)
{
return 0;
}
static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
{
}
#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31))
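/* Illustrative mapping (worked example, not from the original source): for
 * bpid 35, SCN_REG(35) selects BM_REG_SCN(1) and SCN_BIT(35) is
 * 0x80000000 >> 3 == 0x10000000, i.e. bit 3 of the second SCN register. */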
static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
int enable)
{
u32 val;
DPA_ASSERT(bpid < bman_pool_max);
/* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
val = __bm_in(&portal->addr, SCN_REG(bpid));
if (enable)
val |= SCN_BIT(bpid);
else
val &= ~SCN_BIT(bpid);
__bm_out(&portal->addr, SCN_REG(bpid), val);
}
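/* The portal ISR registers sit on a 64-byte stride on ARM/ARM64 portals and
 * a 4-byte stride elsewhere, hence the (n << 6) vs (n << 2) offsets below. */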
static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
#else
return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
#endif
}
static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
u32 val)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
__bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
#else
__bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
#endif
}
/* Buffer Pool Cleanup */
static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
{
struct bm_mc_command *bm_cmd;
struct bm_mc_result *bm_res;
int aq_count = 0;
bool stop = false;
while (!stop) {
/* Acquire buffers until empty */
bm_cmd = bm_mc_start(p);
bm_cmd->acquire.bpid = bpid;
bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
while (!(bm_res = bm_mc_result(p)))
cpu_relax();
if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
/* Pool is empty */
/* TBD: Should we do a few extra iterations in
case some other blocks keep buffers 'on deck',
which may also be problematic? */
stop = true;
} else
++aq_count;
}
return 0;
}

View File

@ -0,0 +1,166 @@
/* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "dpa_sys.h"
#include <linux/fsl_bman.h>
/* Revision info (for errata and feature handling) */
#define BMAN_REV10 0x0100
#define BMAN_REV20 0x0200
#define BMAN_REV21 0x0201
#define QBMAN_ANY_PORTAL_IDX 0xffffffff
extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
/*
* Global variable: the maximum number of buffer pools supported by this
* BMan version
*/
extern u16 bman_pool_max;
/* used by CCSR and portal interrupt code */
enum bm_isr_reg {
bm_isr_status = 0,
bm_isr_enable = 1,
bm_isr_disable = 2,
bm_isr_inhibit = 3
};
struct bm_portal_config {
/* Corenet portal addresses;
* [0]==cache-enabled, [1]==cache-inhibited. */
__iomem void *addr_virt[2];
struct resource addr_phys[2];
/* Allow these to be joined in lists */
struct list_head list;
/* User-visible portal configuration settings */
struct bman_portal_config public_cfg;
/* power management saved data */
u32 saved_isdr;
};
#ifdef CONFIG_FSL_BMAN_CONFIG
/* Hooks from bman_driver.c to bman_config.c */
int bman_init_ccsr(struct device_node *node);
#endif
/* Hooks from bman_driver.c in to bman_high.c */
struct bman_portal *bman_create_portal(
struct bman_portal *portal,
const struct bm_portal_config *config);
struct bman_portal *bman_create_affine_portal(
const struct bm_portal_config *config);
struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
int cpu);
void bman_destroy_portal(struct bman_portal *bm);
const struct bm_portal_config *bman_destroy_affine_portal(void);
/* Hooks from fsl_usdpaa.c to bman_driver.c */
struct bm_portal_config *bm_get_unused_portal(void);
struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
void bm_put_unused_portal(struct bm_portal_config *pcfg);
void bm_set_liodns(struct bm_portal_config *pcfg);
/* Pool logic in the portal driver, during initialisation, needs to know if
* there's access to CCSR or not (if not, it'll cripple the pool allocator). */
#ifdef CONFIG_FSL_BMAN_CONFIG
int bman_have_ccsr(void);
#else
#define bman_have_ccsr() 0
#endif
/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
* the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
* might fail (if the buffer pool is depleted). So this value provides some
* "stagger" in that the bman_acquire() function will only fail if lots of bufs
* are requested at once or if h/w has been tested a couple of times without
* luck. The _HIGH value: when bman_release() is called and the stockpile
* fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
* the release ring is full). So this value provides some "stagger" so that
* ring-access is retried a couple of times prior to the API returning a
* failure. The following *must* be true:
* BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
* (to avoid thrashing)
* BMAN_STOCKPILE_SZ >= 16
* (as the release logic expects to either send 8 buffers to hw prior to
* adding the given buffers to the stockpile or add the buffers to the
* stockpile before sending 8 to hw, as the API must be an all-or-nothing
* success/fail.)
*/
#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
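/* A compile-time sanity check of the constraints above could look like the
 * following sketch (illustrative only, not part of the original driver): */
#if (BMAN_STOCKPILE_HIGH - BMAN_STOCKPILE_LOW) <= 8
#error "BMan stockpile thresholds too close together (risk of thrashing)"
#endif
#if BMAN_STOCKPILE_SZ < 16
#error "BMan stockpile too small for the 8-buffer release granularity"
#endif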
/*************************************************/
/* BMan s/w corenet portal, low-level i/face */
/*************************************************/
/* Used by all portal interrupt registers except 'inhibit'
* This mask contains all the "irqsource" bits visible to API users
*/
#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
* the disable register" rather than "disable the ability to write". */
#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
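/* Typical usage sketch (illustrative, not lifted from the driver): a portal
 * interrupt handler reads the asserted-and-enabled sources, dispatches on
 * them, then acknowledges them:
 *
 *	u32 clear = bm_isr_status_read(bm) & bm_isr_enable_read(bm);
 *	... dispatch on the bits in 'clear' ...
 *	bm_isr_status_clear(bm, clear);
 */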
#ifdef CONFIG_FSL_BMAN_CONFIG
/* Set depletion thresholds associated with a buffer pool. Requires that the
* operating system have access to Bman CCSR (ie. compiled in support and
* run-time access courtesy of the device-tree). */
int bm_pool_set(u32 bpid, const u32 *thresholds);
#define BM_POOL_THRESH_SW_ENTER 0
#define BM_POOL_THRESH_SW_EXIT 1
#define BM_POOL_THRESH_HW_ENTER 2
#define BM_POOL_THRESH_HW_EXIT 3
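/* Illustrative call (the threshold values below are made up; the assumption
 * is that the array is indexed by the defines above):
 *
 *	u32 thresh[4];
 *	thresh[BM_POOL_THRESH_SW_ENTER] = 1024;
 *	thresh[BM_POOL_THRESH_SW_EXIT] = 2048;
 *	thresh[BM_POOL_THRESH_HW_ENTER] = 256;
 *	thresh[BM_POOL_THRESH_HW_EXIT] = 512;
 *	bm_pool_set(bpid, thresh);
 */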
/* Read the free buffer count for a given buffer pool */
u32 bm_pool_free_buffers(u32 bpid);
__init int bman_init(void);
__init int bman_resource_init(void);
const struct bm_portal_config *bman_get_bm_portal_config(
struct bman_portal *portal);
/* power management */
#ifdef CONFIG_SUSPEND
void suspend_unused_bportal(void);
void resume_unused_bportal(void);
#endif
#endif /* CONFIG_FSL_BMAN_CONFIG */

View File

@ -0,0 +1,56 @@
/* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bman_test.h"
MODULE_AUTHOR("Geoff Thorpe");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Bman testing");
static int test_init(void)
{
#ifdef CONFIG_FSL_BMAN_TEST_HIGH
int loop = 1;
while (loop--)
bman_test_high();
#endif
#ifdef CONFIG_FSL_BMAN_TEST_THRESH
bman_test_thresh();
#endif
return 0;
}
static void test_exit(void)
{
}
module_init(test_init);
module_exit(test_exit);

View File

@ -0,0 +1,44 @@
/* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/fsl_bman.h>
void bman_test_high(void);
void bman_test_thresh(void);

View File

@ -0,0 +1,183 @@
/* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bman_test.h"
#include "bman_private.h"
/*************/
/* constants */
/*************/
#define PORTAL_OPAQUE ((void *)0xf00dbeef)
#define POOL_OPAQUE ((void *)0xdeadabba)
#define NUM_BUFS 93
#define LOOPS 3
#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
/***************/
/* global vars */
/***************/
static struct bman_pool *pool;
static int depleted;
static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
static int bufs_received;
/* Predeclare the callback so we can instantiate pool parameters */
static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
/**********************/
/* internal functions */
/**********************/
static void bufs_init(void)
{
int i;
for (i = 0; i < NUM_BUFS; i++)
bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
bufs_received = 0;
}
static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
{
if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
/* On SoCs with Bman revision 2.0, Bman only respects the 40
* LS-bits of buffer addresses, masking off the upper 8-bits on
* release commands. The API provides for 48-bit addresses
* because some SoCs support all 48-bits. When generating
* garbage addresses for testing, we either need to zero the
* upper 8-bits when releasing to Bman (otherwise we'll be
* disappointed when the buffers we acquire back from Bman
* don't match), or we need to mask the upper 8-bits off when
* comparing. We do the latter.
*/
if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
< (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
return -1;
if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
> (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
return 1;
} else {
if (bm_buffer_get64(a) < bm_buffer_get64(b))
return -1;
if (bm_buffer_get64(a) > bm_buffer_get64(b))
return 1;
}
return 0;
}
static void bufs_confirm(void)
{
int i, j;
for (i = 0; i < NUM_BUFS; i++) {
int matches = 0;
for (j = 0; j < NUM_BUFS; j++)
if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
matches++;
BUG_ON(matches != 1);
}
}
/********/
/* test */
/********/
static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
void *pool_ctx, int __depleted)
{
BUG_ON(__pool != pool);
BUG_ON(pool_ctx != POOL_OPAQUE);
depleted = __depleted;
}
void bman_test_high(void)
{
struct bman_pool_params pparams = {
.flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
.cb = depletion_cb,
.cb_ctx = POOL_OPAQUE,
};
int i, loops = LOOPS;
struct bm_buffer tmp_buf;
bufs_init();
pr_info("BMAN: --- starting high-level test ---\n");
pool = bman_new_pool(&pparams);
BUG_ON(!pool);
/*******************/
/* Release buffers */
/*******************/
do_loop:
i = 0;
while (i < NUM_BUFS) {
u32 flags = BMAN_RELEASE_FLAG_WAIT;
int num = 8;
if ((i + num) > NUM_BUFS)
num = NUM_BUFS - i;
if ((i + num) == NUM_BUFS)
flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
if (bman_release(pool, bufs_in + i, num, flags))
panic("bman_release() failed\n");
i += num;
}
/*******************/
/* Acquire buffers */
/*******************/
while (i > 0) {
int tmp, num = 8;
if (num > i)
num = i;
tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
BUG_ON(tmp != num);
i -= num;
}
i = bman_acquire(pool, &tmp_buf, 1, 0);
BUG_ON(i > 0);
bufs_confirm();
if (--loops)
goto do_loop;
/************/
/* Clean up */
/************/
bman_free_pool(pool);
pr_info("BMAN: --- finished high-level test ---\n");
}

View File

@ -0,0 +1,196 @@
/* Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bman_test.h"
/* Test constants */
#define TEST_NUMBUFS 129728
#define TEST_EXIT 129536
#define TEST_ENTRY 129024
struct affine_test_data {
struct task_struct *t;
int cpu;
int expect_affinity;
int drain;
int num_enter;
int num_exit;
struct list_head node;
struct completion wakethread;
struct completion wakeparent;
};
static void cb_depletion(struct bman_portal *portal,
struct bman_pool *pool,
void *opaque,
int depleted)
{
struct affine_test_data *data = opaque;
int c = smp_processor_id();
pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n",
bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
/* We should be executing on the CPU of the thread that owns the pool if,
* and only if, that CPU has an affine portal (ie. it isn't slaved). */
BUG_ON((c != data->cpu) && data->expect_affinity);
BUG_ON((c == data->cpu) && !data->expect_affinity);
if (depleted)
data->num_enter++;
else
data->num_exit++;
}
/* Params used to set up a pool, this also dynamically allocates a BPID */
static const struct bman_pool_params params_nocb = {
.flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
.thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
};
/* Params used to set up each cpu's pool with callbacks enabled */
static struct bman_pool_params params_cb = {
.bpid = 0, /* will be replaced to match pool_nocb */
.flags = BMAN_POOL_FLAG_DEPLETION,
.cb = cb_depletion
};
static struct bman_pool *pool_nocb;
static LIST_HEAD(threads);
static int affine_test(void *__data)
{
struct bman_pool *pool;
struct affine_test_data *data = __data;
struct bman_pool_params my_params = params_cb;
pr_info("thread %d: starting\n", data->cpu);
/* create the pool */
my_params.cb_ctx = data;
pool = bman_new_pool(&my_params);
BUG_ON(!pool);
complete(&data->wakeparent);
wait_for_completion(&data->wakethread);
init_completion(&data->wakethread);
/* if we're the drainer, we get signalled for that */
if (data->drain) {
struct bm_buffer buf;
int ret;
pr_info("thread %d: draining...\n", data->cpu);
do {
ret = bman_acquire(pool, &buf, 1, 0);
} while (ret > 0);
pr_info("thread %d: draining done.\n", data->cpu);
complete(&data->wakeparent);
wait_for_completion(&data->wakethread);
init_completion(&data->wakethread);
}
/* cleanup */
bman_free_pool(pool);
while (!kthread_should_stop())
cpu_relax();
pr_info("thread %d: exiting\n", data->cpu);
return 0;
}
static struct affine_test_data *start_affine_test(int cpu, int drain)
{
struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
data->cpu = cpu;
data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
data->drain = drain;
data->num_enter = 0;
data->num_exit = 0;
init_completion(&data->wakethread);
init_completion(&data->wakeparent);
list_add_tail(&data->node, &threads);
data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
BUG_ON(IS_ERR(data->t));
kthread_bind(data->t, cpu);
wake_up_process(data->t);
return data;
}
void bman_test_thresh(void)
{
int loop = TEST_NUMBUFS;
int ret, num_cpus = 0;
struct affine_test_data *data, *drainer = NULL;
pr_info("bman_test_thresh: start\n");
/* allocate a BPID and seed it */
pool_nocb = bman_new_pool(&params_nocb);
BUG_ON(!pool_nocb);
while (loop--) {
struct bm_buffer buf;
bm_buffer_set64(&buf, 0x0badbeef + loop);
ret = bman_release(pool_nocb, &buf, 1,
BMAN_RELEASE_FLAG_WAIT);
BUG_ON(ret);
}
while (!bman_rcr_is_empty())
cpu_relax();
pr_info("bman_test_thresh: buffers are in\n");
/* create threads and wait for them to create pools */
params_cb.bpid = bman_get_params(pool_nocb)->bpid;
for_each_cpu(loop, cpu_online_mask) {
data = start_affine_test(loop, drainer ? 0 : 1);
BUG_ON(!data);
if (!drainer)
drainer = data;
num_cpus++;
wait_for_completion(&data->wakeparent);
}
/* signal the drainer to start draining */
complete(&drainer->wakethread);
wait_for_completion(&drainer->wakeparent);
init_completion(&drainer->wakeparent);
/* tear down */
list_for_each_entry_safe(data, drainer, &threads, node) {
complete(&data->wakethread);
ret = kthread_stop(data->t);
BUG_ON(ret);
list_del(&data->node);
/* check that we get the expected callbacks (and no others) */
BUG_ON(data->num_enter != 1);
BUG_ON(data->num_exit != 0);
kfree(data);
}
bman_free_pool(pool_nocb);
pr_info("bman_test_thresh: done\n");
}

View File

@ -0,0 +1,706 @@
/* Copyright 2009-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "dpa_sys.h"
#include <linux/fsl_qman.h>
#include <linux/fsl_bman.h>
/* Qman and Bman APIs are front-ends to the common code; */
static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */
static DECLARE_DPA_ALLOC(fqalloc); /* FQID allocator */
static DECLARE_DPA_ALLOC(qpalloc); /* pool-channel allocator */
static DECLARE_DPA_ALLOC(cgralloc); /* CGR ID allocator */
static DECLARE_DPA_ALLOC(ceetm0_challoc); /* CEETM Channel ID allocator */
static DECLARE_DPA_ALLOC(ceetm0_lfqidalloc); /* CEETM LFQID allocator */
static DECLARE_DPA_ALLOC(ceetm1_challoc); /* CEETM Channel ID allocator */
static DECLARE_DPA_ALLOC(ceetm1_lfqidalloc); /* CEETM LFQID allocator */
/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. when releasing
* FQIDs (probably from user-space), it can filter out those that aren't in the
* OOS state (better to leak a h/w resource than to crash). This function
* returns the number of invalid IDs that were not released. */
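/* For example (illustrative, not from the original source): releasing IDs
 * 10..13 where is_valid() fails for 10 and 11 leaks those two IDs, returns
 * {12,13} to the allocator via dpa_alloc_free(), and reports 2 invalid IDs. */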
static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
int (*is_valid)(u32 id))
{
int valid_mode = 0;
u32 loop = id, total_invalid = 0;
while (loop < (id + count)) {
int isvalid = is_valid ? is_valid(loop) : 1;
if (!valid_mode) {
/* We're looking for a valid ID to terminate an invalid
* range */
if (isvalid) {
/* We finished a range of invalid IDs, a valid
* range is now underway */
valid_mode = 1;
count -= (loop - id);
id = loop;
} else
total_invalid++;
} else {
/* We're looking for an invalid ID to terminate a
* valid range */
if (!isvalid) {
/* Release the range of valid IDs, an invalid
* range is now underway */
if (loop > id)
dpa_alloc_free(alloc, id, loop - id);
valid_mode = 0;
}
}
loop++;
}
/* Release any unterminated range of valid IDs */
if (valid_mode && count)
dpa_alloc_free(alloc, id, count);
return total_invalid;
}
/* BPID allocator front-end */
int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
{
return dpa_alloc_new(&bpalloc, result, count, align, partial);
}
EXPORT_SYMBOL(bman_alloc_bpid_range);
static int bp_cleanup(u32 bpid)
{
return bman_shutdown_pool(bpid) == 0;
}
void bman_release_bpid_range(u32 bpid, u32 count)
{
u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
if (total_invalid)
pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
bpid, bpid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(bman_release_bpid_range);
void bman_seed_bpid_range(u32 bpid, u32 count)
{
dpa_alloc_seed(&bpalloc, bpid, count);
}
EXPORT_SYMBOL(bman_seed_bpid_range);
int bman_reserve_bpid_range(u32 bpid, u32 count)
{
return dpa_alloc_reserve(&bpalloc, bpid, count);
}
EXPORT_SYMBOL(bman_reserve_bpid_range);
/* FQID allocator front-end */
int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
{
return dpa_alloc_new(&fqalloc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);
static int fq_cleanup(u32 fqid)
{
return qman_shutdown_fq(fqid) == 0;
}
void qman_release_fqid_range(u32 fqid, u32 count)
{
u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_cleanup);
if (total_invalid)
pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
fqid, fqid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_fqid_range);
int qman_reserve_fqid_range(u32 fqid, u32 count)
{
return dpa_alloc_reserve(&fqalloc, fqid, count);
}
EXPORT_SYMBOL(qman_reserve_fqid_range);
void qman_seed_fqid_range(u32 fqid, u32 count)
{
dpa_alloc_seed(&fqalloc, fqid, count);
}
EXPORT_SYMBOL(qman_seed_fqid_range);
/* Pool-channel allocator front-end */
int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
{
return dpa_alloc_new(&qpalloc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_pool_range);
static int qpool_cleanup(u32 qp)
{
/* We query all FQDs starting from
* FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
* whose destination channel is the pool-channel being released.
* When a non-OOS FQD is found we attempt to clean it up */
struct qman_fq fq = {
.fqid = 1
};
int err;
do {
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
if (err)
/* FQID range exceeded, found no problems */
return 1;
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
BUG_ON(err);
if (fqd.dest.channel == qp) {
/* The channel is the FQ's target, clean it */
if (qman_shutdown_fq(fq.fqid) != 0)
/* Couldn't shut down the FQ
so the pool must be leaked */
return 0;
}
}
/* Move to the next FQID */
fq.fqid++;
} while (1);
}
void qman_release_pool_range(u32 qp, u32 count)
{
u32 total_invalid = release_id_range(&qpalloc, qp,
count, qpool_cleanup);
if (total_invalid) {
/* Pool channels are almost always used individually */
if (count == 1)
pr_err("Pool channel 0x%x had %d leaks\n",
qp, total_invalid);
else
pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
qp, qp + count - 1, count, total_invalid);
}
}
EXPORT_SYMBOL(qman_release_pool_range);
void qman_seed_pool_range(u32 poolid, u32 count)
{
dpa_alloc_seed(&qpalloc, poolid, count);
}
EXPORT_SYMBOL(qman_seed_pool_range);
int qman_reserve_pool_range(u32 poolid, u32 count)
{
return dpa_alloc_reserve(&qpalloc, poolid, count);
}
EXPORT_SYMBOL(qman_reserve_pool_range);
/* CGR ID allocator front-end */
int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
{
return dpa_alloc_new(&cgralloc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);
static int cqr_cleanup(u32 cgrid)
{
/* We query all FQDs starting from
* FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
* whose CGR is the CGR being released.
*/
struct qman_fq fq = {
.fqid = 1
};
int err;
do {
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
if (err)
/* FQID range exceeded, found no problems */
return 1;
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
BUG_ON(err);
if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
(fqd.cgid == cgrid)) {
pr_err("CRGID 0x%x is being used by FQID 0x%x,"
" CGR will be leaked\n",
cgrid, fq.fqid);
return 1;
}
}
/* Move to the next FQID */
fq.fqid++;
} while (1);
}
void qman_release_cgrid_range(u32 cgrid, u32 count)
{
u32 total_invalid = release_id_range(&cgralloc, cgrid,
count, cqr_cleanup);
if (total_invalid)
pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
cgrid, cgrid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_cgrid_range);
void qman_seed_cgrid_range(u32 cgrid, u32 count)
{
dpa_alloc_seed(&cgralloc, cgrid, count);
}
EXPORT_SYMBOL(qman_seed_cgrid_range);
/* CEETM CHANNEL ID allocator front-end */
int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
int partial)
{
return dpa_alloc_new(&ceetm0_challoc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_ceetm0_channel_range);
int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
int partial)
{
return dpa_alloc_new(&ceetm1_challoc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_ceetm1_channel_range);
void qman_release_ceetm0_channel_range(u32 channelid, u32 count)
{
u32 total_invalid;
total_invalid = release_id_range(&ceetm0_challoc, channelid, count,
NULL);
if (total_invalid)
pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
channelid, channelid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_ceetm0_channel_range);
void qman_seed_ceetm0_channel_range(u32 channelid, u32 count)
{
dpa_alloc_seed(&ceetm0_challoc, channelid, count);
}
EXPORT_SYMBOL(qman_seed_ceetm0_channel_range);
void qman_release_ceetm1_channel_range(u32 channelid, u32 count)
{
u32 total_invalid;
total_invalid = release_id_range(&ceetm1_challoc, channelid, count,
NULL);
if (total_invalid)
pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
channelid, channelid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_ceetm1_channel_range);
void qman_seed_ceetm1_channel_range(u32 channelid, u32 count)
{
dpa_alloc_seed(&ceetm1_challoc, channelid, count);
}
EXPORT_SYMBOL(qman_seed_ceetm1_channel_range);
/* CEETM LFQID allocator front-end */
int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
int partial)
{
return dpa_alloc_new(&ceetm0_lfqidalloc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_ceetm0_lfqid_range);
int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
int partial)
{
return dpa_alloc_new(&ceetm1_lfqidalloc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_ceetm1_lfqid_range);
void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count)
{
u32 total_invalid;
total_invalid = release_id_range(&ceetm0_lfqidalloc, lfqid, count,
NULL);
if (total_invalid)
pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
lfqid, lfqid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range);
void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count)
{
dpa_alloc_seed(&ceetm0_lfqidalloc, lfqid, count);
}
EXPORT_SYMBOL(qman_seed_ceetm0_lfqid_range);
void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count)
{
u32 total_invalid;
total_invalid = release_id_range(&ceetm1_lfqidalloc, lfqid, count,
NULL);
if (total_invalid)
pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
lfqid, lfqid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range);
void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count)
{
dpa_alloc_seed(&ceetm1_lfqidalloc, lfqid, count);
}
EXPORT_SYMBOL(qman_seed_ceetm1_lfqid_range);
/* Everything else is the common backend to all the allocators */
/* The allocator is a (possibly-empty) list of these; */
struct alloc_node {
struct list_head list;
u32 base;
u32 num;
/* refcount and is_alloced are only set
when the node is in the used list */
unsigned int refcount;
int is_alloced;
};
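/*
 * Usage sketch for the allocator backend (illustrative, not from the driver):
 *
 *	u32 base;
 *	int num = dpa_alloc_new(&bpalloc, &base, 4, 1, 0);
 *	if (num > 0)
 *		dpa_alloc_free(&bpalloc, base, num);
 *
 * dpa_alloc_new() returns the number of IDs granted (or -ENOMEM), and
 * dpa_alloc_free() hands the range back once its refcount drops to zero.
 */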
/* #define DPA_ALLOC_DEBUG */
#ifdef DPA_ALLOC_DEBUG
#define DPRINT pr_info
static void DUMP(struct dpa_alloc *alloc)
{
int off = 0;
char buf[256];
struct alloc_node *p;
pr_info("Free Nodes\n");
list_for_each_entry(p, &alloc->free, list) {
if (off < 255)
off += snprintf(buf + off, 255-off, "{%d,%d}",
p->base, p->base + p->num - 1);
}
pr_info("%s\n", buf);
off = 0;
pr_info("Used Nodes\n");
list_for_each_entry(p, &alloc->used, list) {
if (off < 255)
off += snprintf(buf + off, 255-off, "{%d,%d}",
p->base, p->base + p->num - 1);
}
pr_info("%s\n", buf);
}
#else
#define DPRINT(x...)
#define DUMP(a)
#endif
int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
int partial)
{
struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL;
u32 base, next_best_base = 0, num = 0, next_best_num = 0;
struct alloc_node *margin_left, *margin_right;
*result = (u32)-1;
DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
DUMP(alloc);
/* If 'align' is 0, it should behave as though it was 1 */
if (!align)
align = 1;
margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
if (!margin_left)
goto err;
margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
if (!margin_right) {
kfree(margin_left);
goto err;
}
spin_lock_irq(&alloc->lock);
list_for_each_entry(i, &alloc->free, list) {
base = (i->base + align - 1) / align;
base *= align;
if ((base - i->base) >= i->num)
/* alignment is impossible, regardless of count */
continue;
num = i->num - (base - i->base);
if (num >= count) {
/* this one will do nicely */
num = count;
goto done;
}
if (num > next_best_num) {
next_best = i;
next_best_base = base;
next_best_num = num;
}
}
if (partial && next_best) {
i = next_best;
base = next_best_base;
num = next_best_num;
} else
i = NULL;
done:
if (i) {
if (base != i->base) {
margin_left->base = i->base;
margin_left->num = base - i->base;
list_add_tail(&margin_left->list, &i->list);
} else
kfree(margin_left);
if ((base + num) < (i->base + i->num)) {
margin_right->base = base + num;
margin_right->num = (i->base + i->num) -
(base + num);
list_add(&margin_right->list, &i->list);
} else
kfree(margin_right);
list_del(&i->list);
kfree(i);
*result = base;
} else {
spin_unlock_irq(&alloc->lock);
kfree(margin_left);
kfree(margin_right);
}
err:
DPRINT("returning %d\n", i ? num : -ENOMEM);
DUMP(alloc);
if (!i)
return -ENOMEM;
/* Add the allocation to the used list with a refcount of 1 */
used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
if (!used_node) {
spin_unlock_irq(&alloc->lock);
return -ENOMEM;
}
used_node->base = *result;
used_node->num = num;
used_node->refcount = 1;
used_node->is_alloced = 1;
list_add_tail(&used_node->list, &alloc->used);
spin_unlock_irq(&alloc->lock);
return (int)num;
}
/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
* forcing error-handling on to users in the deallocation path. */
static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
{
struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
BUG_ON(!node);
DPRINT("release_range(%d,%d)\n", base_id, count);
DUMP(alloc);
BUG_ON(!count);
spin_lock_irq(&alloc->lock);
node->base = base_id;
node->num = count;
list_for_each_entry(i, &alloc->free, list) {
if (i->base >= node->base) {
/* BUG_ON(any overlapping) */
BUG_ON(i->base < (node->base + node->num));
list_add_tail(&node->list, &i->list);
goto done;
}
}
list_add_tail(&node->list, &alloc->free);
done:
/* Merge to the left */
i = list_entry(node->list.prev, struct alloc_node, list);
if (node->list.prev != &alloc->free) {
BUG_ON((i->base + i->num) > node->base);
if ((i->base + i->num) == node->base) {
node->base = i->base;
node->num += i->num;
list_del(&i->list);
kfree(i);
}
}
/* Merge to the right */
i = list_entry(node->list.next, struct alloc_node, list);
if (node->list.next != &alloc->free) {
BUG_ON((node->base + node->num) > i->base);
if ((node->base + node->num) == i->base) {
node->num += i->num;
list_del(&i->list);
kfree(i);
}
}
spin_unlock_irq(&alloc->lock);
DUMP(alloc);
}
void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
{
struct alloc_node *i = NULL;
spin_lock_irq(&alloc->lock);
/* First find the node in the used list and decrement its ref count */
list_for_each_entry(i, &alloc->used, list) {
if (i->base == base_id && i->num == count) {
--i->refcount;
if (i->refcount == 0) {
list_del(&i->list);
spin_unlock_irq(&alloc->lock);
if (i->is_alloced)
_dpa_alloc_free(alloc, base_id, count);
kfree(i);
return;
}
spin_unlock_irq(&alloc->lock);
return;
}
}
/* Couldn't find the allocation */
pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
base_id, count);
spin_unlock_irq(&alloc->lock);
}
void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count)
{
/* Same as free but no previous allocation checking is needed */
_dpa_alloc_free(alloc, base_id, count);
}
int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
{
struct alloc_node *i = NULL, *used_node;
DPRINT("alloc_reserve(%d,%d)\n", base, num);
DUMP(alloc);
spin_lock_irq(&alloc->lock);
/* Check for the node in the used list.
If found, increase its refcount */
list_for_each_entry(i, &alloc->used, list) {
if ((i->base == base) && (i->num == num)) {
++i->refcount;
spin_unlock_irq(&alloc->lock);
return 0;
}
if ((base >= i->base) && (base < (i->base + i->num))) {
/* This is an attempt to reserve a region that was
already reserved or alloced with a different
base or num */
pr_err("Cannot reserve %d - %d, it overlaps with"
" existing reservation from %d - %d\n",
base, base + num - 1, i->base,
i->base + i->num - 1);
spin_unlock_irq(&alloc->lock);
return -1;
}
}
/* Check to make sure this ID isn't in the free list */
list_for_each_entry(i, &alloc->free, list) {
if ((base >= i->base) && (base < (i->base + i->num))) {
/* yep, the reservation is within this node */
pr_err("Cannot reserve %d - %d, it overlaps with"
" free range %d - %d and must be alloced\n",
base, base + num - 1,
i->base, i->base + i->num - 1);
spin_unlock_irq(&alloc->lock);
return -1;
}
}
/* Add the allocation to the used list with a refcount of 1 */
used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
if (!used_node) {
spin_unlock_irq(&alloc->lock);
return -ENOMEM;
}
used_node->base = base;
used_node->num = num;
used_node->refcount = 1;
used_node->is_alloced = 0;
list_add_tail(&used_node->list, &alloc->used);
spin_unlock_irq(&alloc->lock);
return 0;
}
int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
{
struct alloc_node *i = NULL;
DPRINT("alloc_pop()\n");
DUMP(alloc);
spin_lock_irq(&alloc->lock);
if (!list_empty(&alloc->free)) {
i = list_entry(alloc->free.next, struct alloc_node, list);
list_del(&i->list);
}
spin_unlock_irq(&alloc->lock);
DPRINT("returning %d\n", i ? 0 : -ENOMEM);
DUMP(alloc);
if (!i)
return -ENOMEM;
*result = i->base;
*count = i->num;
kfree(i);
return 0;
}
int dpa_alloc_check(struct dpa_alloc *list_head, u32 item)
{
struct alloc_node *i = NULL;
int res = 0;
DPRINT("alloc_check()\n");
spin_lock_irq(&list_head->lock);
list_for_each_entry(i, &list_head->free, list) {
if ((item >= i->base) && (item < (i->base + i->num))) {
res = 1;
break;
}
}
spin_unlock_irq(&list_head->lock);
return res;
}

View File

@ -0,0 +1,259 @@
/* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DPA_SYS_H
#define DPA_SYS_H
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/kthread.h>
#include <linux/memblock.h>
#include <linux/completion.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/uio_driver.h>
#include <linux/smp.h>
#include <linux/fsl_hypervisor.h>
#include <linux/vmalloc.h>
#include <linux/ctype.h>
#include <linux/math64.h>
#include <linux/bitops.h>
#include <linux/fsl_usdpaa.h>
/* When copying aligned words or shorts, try to avoid memcpy() */
#define CONFIG_TRY_BETTER_MEMCPY
/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
#define DPA_PORTAL_CE 0
#define DPA_PORTAL_CI 1
/***********************/
/* Misc inline assists */
/***********************/
#if defined CONFIG_PPC32
#include "dpa_sys_ppc32.h"
#elif defined CONFIG_PPC64
#include "dpa_sys_ppc64.h"
#elif defined CONFIG_ARM
#include "dpa_sys_arm.h"
#elif defined CONFIG_ARM64
#include "dpa_sys_arm64.h"
#endif
#ifdef CONFIG_FSL_DPA_CHECKING
#define DPA_ASSERT(x) \
do { \
if (!(x)) { \
pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \
__stringify_1(x)); \
dump_stack(); \
panic("assertion failure"); \
} \
} while (0)
#else
#define DPA_ASSERT(x)
#endif
/* memcpy() stuff - when you know alignments in advance */
#ifdef CONFIG_TRY_BETTER_MEMCPY
static inline void copy_words(void *dest, const void *src, size_t sz)
{
u32 *__dest = dest;
const u32 *__src = src;
size_t __sz = sz >> 2;
BUG_ON((unsigned long)dest & 0x3);
BUG_ON((unsigned long)src & 0x3);
BUG_ON(sz & 0x3);
while (__sz--)
*(__dest++) = *(__src++);
}
static inline void copy_shorts(void *dest, const void *src, size_t sz)
{
u16 *__dest = dest;
const u16 *__src = src;
size_t __sz = sz >> 1;
BUG_ON((unsigned long)dest & 0x1);
BUG_ON((unsigned long)src & 0x1);
BUG_ON(sz & 0x1);
while (__sz--)
*(__dest++) = *(__src++);
}
static inline void copy_bytes(void *dest, const void *src, size_t sz)
{
u8 *__dest = dest;
const u8 *__src = src;
while (sz--)
*(__dest++) = *(__src++);
}
#else
#define copy_words memcpy
#define copy_shorts memcpy
#define copy_bytes memcpy
#endif
/************/
/* RB-trees */
/************/
/* We encapsulate RB-trees so that it's easier to use non-linux forms in
* non-linux systems. This also encapsulates the extra plumbing that linux code
* usually provides when using RB-trees. This encapsulation assumes that the
* data type held by the tree is u32. */
struct dpa_rbtree {
struct rb_root root;
};
#define DPA_RBTREE { .root = RB_ROOT }
static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
{
tree->root = RB_ROOT;
}
#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
{ \
struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
while (*p) { \
u32 item; \
parent = *p; \
item = rb_entry(parent, type, node_field)->val_field; \
if (obj->val_field < item) \
p = &parent->rb_left; \
else if (obj->val_field > item) \
p = &parent->rb_right; \
else \
return -EBUSY; \
} \
rb_link_node(&obj->node_field, parent, p); \
rb_insert_color(&obj->node_field, &tree->root); \
return 0; \
} \
static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
{ \
rb_erase(&obj->node_field, &tree->root); \
} \
static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
{ \
type *ret; \
struct rb_node *p = tree->root.rb_node; \
while (p) { \
ret = rb_entry(p, type, node_field); \
if (val < ret->val_field) \
p = p->rb_left; \
else if (val > ret->val_field) \
p = p->rb_right; \
else \
return ret; \
} \
return NULL; \
}
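/* Illustrative instantiation (assumption, not from this file): given a type
 * that embeds an rb_node and a u32 key, e.g.
 *
 *	struct foo { struct rb_node node; u32 fqid; };
 *	IMPLEMENT_DPA_RBTREE(footree, struct foo, node, fqid)
 *
 * ...the macro generates footree_push(), footree_del() and footree_find(). */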
/************/
/* Bootargs */
/************/
/* Qman has "qportals=" and Bman has "bportals=", they use the same syntax
* though; a comma-separated list of items, each item being a cpu index and/or a
* range of cpu indices, and each item may optionally be prefixed by "s" to indicate
* that the portal associated with that cpu should be shared. See bman_driver.c
* for more specifics. */
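/* For example (illustrative), "bportals=s0,2-3" requests a shared portal on
 * cpu 0 and dedicated (unshared) portals on cpus 2 and 3. */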
static int __parse_portals_cpu(const char **s, unsigned int *cpu)
{
*cpu = 0;
if (!isdigit(**s))
return -EINVAL;
while (isdigit(**s))
*cpu = *cpu * 10 + (*((*s)++) - '0');
return 0;
}
static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
struct cpumask *want_unshared,
const char *argname)
{
const char *s = str;
unsigned int shared, cpu1, cpu2, loop;
keep_going:
if (*s == 's') {
shared = 1;
s++;
} else
shared = 0;
if (__parse_portals_cpu(&s, &cpu1))
goto err;
if (*s == '-') {
s++;
if (__parse_portals_cpu(&s, &cpu2))
goto err;
if (cpu2 < cpu1)
goto err;
} else
cpu2 = cpu1;
for (loop = cpu1; loop <= cpu2; loop++)
cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
if (*s == ',') {
s++;
goto keep_going;
} else if ((*s == '\0') || isspace(*s))
return 0;
err:
pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
(unsigned long)s - (unsigned long)str);
return -EINVAL;
}
#ifdef CONFIG_FSL_USDPAA
/* Hooks from fsl_usdpaa_irq.c to fsl_usdpaa.c */
int usdpaa_get_portal_config(struct file *filp, void *cinh,
enum usdpaa_portal_type ptype, unsigned int *irq,
void **iir_reg);
#endif
#endif /* DPA_SYS_H */

View File

@ -0,0 +1,95 @@
/* Copyright 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DPA_SYS_ARM_H
#define DPA_SYS_ARM_H
#include <asm/cacheflush.h>
#include <asm/barrier.h>
/* Implementation of ARM specific routines */
/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
* barriers and that dcb*() won't fall victim to compiler or execution
* reordering with respect to other code/instructions that manipulate the same
* cacheline. */
#define hwsync() { asm volatile("dmb st" : : : "memory"); }
#define lwsync() { asm volatile("dmb st" : : : "memory"); }
#define dcbf(p) { asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p) : "memory"); }
#define dcbt_ro(p) { asm volatile("pld [%0, #64];": : "r" (p)); }
#define dcbt_rw(p) { asm volatile("pldw [%0, #64];": : "r" (p)); }
#define dcbi(p) { asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (p) : "memory"); }
#define dcbz_64(p) { memset(p, 0, sizeof(*p)); }
#define dcbf_64(p) \
do { \
dcbf((u32)p); \
} while (0)
/* Commonly used combo */
#define dcbit_ro(p) \
do { \
dcbi((u32)p); \
dcbt_ro((u32)p); \
} while (0)
static inline u64 mfatb(void)
{
return get_cycles();
}
static inline u32 in_be32(volatile void *addr)
{
return be32_to_cpu(*((volatile u32 *) addr));
}
static inline void out_be32(void *addr, u32 val)
{
*((u32 *) addr) = cpu_to_be32(val);
}
static inline void set_bits(unsigned long mask, volatile unsigned long *p)
{
*p |= mask;
}
static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
{
*p &= ~mask;
}
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
__cpuc_flush_dcache_area((void *) start, stop - start);
}
#define hard_smp_processor_id() raw_smp_processor_id()
#endif

View File

@ -0,0 +1,102 @@
/* Copyright 2014 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DPA_SYS_ARM64_H
#define DPA_SYS_ARM64_H
#include <asm/cacheflush.h>
#include <asm/barrier.h>
/* Implementation of ARM 64 bit specific routines */
/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
* barriers and that dcb*() won't fall victim to compiler or execution
* reordering with respect to other code/instructions that manipulate the same
* cacheline. */
#define hwsync() { asm volatile("dmb st" : : : "memory"); }
#define lwsync() { asm volatile("dmb st" : : : "memory"); }
#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
#define dcbt_ro(p) { asm volatile("prfm pldl1keep, [%0, #0]" : : "r" (p)); }
#define dcbt_rw(p) { asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p)); }
#define dcbi(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define dcbz_64(p) \
do { \
dcbz(p); \
} while (0)
#define dcbf_64(p) \
do { \
dcbf(p); \
} while (0)
/* Commonly used combo */
#define dcbit_ro(p) \
do { \
dcbi(p); \
dcbt_ro(p); \
} while (0)
static inline u64 mfatb(void)
{
return get_cycles();
}
static inline u32 in_be32(volatile void *addr)
{
return be32_to_cpu(*((volatile u32 *) addr));
}
static inline void out_be32(void *addr, u32 val)
{
*((u32 *) addr) = cpu_to_be32(val);
}
static inline void set_bits(unsigned long mask, volatile unsigned long *p)
{
*p |= mask;
}
static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
{
*p &= ~mask;
}
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
__flush_dcache_area((void *) start, stop - start);
}
#define hard_smp_processor_id() raw_smp_processor_id()
#endif

View File

@ -0,0 +1,70 @@
/* Copyright 2014 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DPA_SYS_PPC32_H
#define DPA_SYS_PPC32_H
/* Implementation of PowerPC 32 bit specific routines */
/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
* barriers and that dcb*() won't fall victim to compiler or execution
* reordering with respect to other code/instructions that manipulate the same
* cacheline. */
#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
#define dcbi(p) dcbf(p)
#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
#define dcbz_64(p) dcbzl(p)
#define dcbf_64(p) dcbf(p)
/* Commonly used combo */
#define dcbit_ro(p) \
do { \
dcbi(p); \
dcbt_ro(p); \
} while (0)
static inline u64 mfatb(void)
{
u32 hi, lo, chk;
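/* Sample ATBU on either side of ATBL so that a carry between the two
* 32-bit reads is detected and the sample is retried. */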
do {
hi = mfspr(SPRN_ATBU);
lo = mfspr(SPRN_ATBL);
chk = mfspr(SPRN_ATBU);
} while (unlikely(hi != chk));
return ((u64)hi << 32) | (u64)lo;
}
#endif

View File

@ -0,0 +1,79 @@
/* Copyright 2014 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DPA_SYS_PPC64_H
#define DPA_SYS_PPC64_H
/* Implementation of PowerPC 64 bit specific routines */
/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
* barriers and that dcb*() won't fall victim to compiler or execution
* reordering with respect to other code/instructions that manipulate the same
* cacheline. */
#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
#define dcbi(p) dcbf(p)
#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
#define dcbz_64(p) \
do { \
dcbz((void*)p + 32); \
dcbz(p); \
} while (0)
#define dcbf_64(p) \
do { \
dcbf((void*)p + 32); \
dcbf(p); \
} while (0)
/* Commonly used combo */
#define dcbit_ro(p) \
do { \
dcbi(p); \
dcbi((void*)p + 32); \
dcbt_ro(p); \
dcbt_ro((void*)p + 32); \
} while (0)
static inline u64 mfatb(void)
{
u32 hi, lo, chk;
do {
hi = mfspr(SPRN_ATBU);
lo = mfspr(SPRN_ATBL);
chk = mfspr(SPRN_ATBU);
} while (unlikely(hi != chk));
return ((u64)hi << 32) | (u64)lo;
}
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,289 @@
/* Copyright (c) 2013 Freescale Semiconductor, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Define a device that allows USDPAA processes to open a file
descriptor and specify which IRQ they want to monitor using an ioctl().
When an IRQ is received, the device becomes readable so that a process
can use read() or select() type calls to wait for IRQs */
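/* Illustrative user-space flow (a sketch only; the ioctl structure and the
* portal-type enum live in linux/fsl_usdpaa.h, and portal_fd/portal_cinh are
* assumed to come from a previously mapped USDPAA portal):
*
*   int irqfd = open("/dev/fsl-usdpaa-irq", O_RDONLY);
*   struct usdpaa_ioctl_irq_map map = {
*           .type = usdpaa_portal_qman,
*           .fd = portal_fd,
*           .portal_cinh = portal_cinh,
*   };
*   ioctl(irqfd, USDPAA_IOCTL_PORTAL_IRQ_MAP, &map);
*   uint32_t count;
*   read(irqfd, &count, sizeof(count));   blocks until an IRQ arrives
*/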
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/fsl_usdpaa.h>
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include "qman_low.h"
#include "bman_low.h"
struct usdpaa_irq_ctx {
int irq_set; /* Set to true once the irq is set via ioctl */
unsigned int irq_num;
u32 last_irq_count; /* Last value returned from read */
u32 irq_count; /* Number of irqs since last read */
wait_queue_head_t wait_queue; /* Waiting processes */
spinlock_t lock;
void *inhibit_addr; /* inhibit register address */
struct file *usdpaa_filp;
char irq_name[128];
};
static int usdpaa_irq_open(struct inode *inode, struct file *filp)
{
struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->irq_set = 0;
ctx->irq_count = 0;
ctx->last_irq_count = 0;
init_waitqueue_head(&ctx->wait_queue);
spin_lock_init(&ctx->lock);
filp->private_data = ctx;
return 0;
}
static int usdpaa_irq_release(struct inode *inode, struct file *filp)
{
struct usdpaa_irq_ctx *ctx = filp->private_data;
if (ctx->irq_set) {
/* Inhibit the IRQ */
out_be32(ctx->inhibit_addr, 0x1);
irq_set_affinity_hint(ctx->irq_num, NULL);
free_irq(ctx->irq_num, ctx);
ctx->irq_set = 0;
fput(ctx->usdpaa_filp);
}
kfree(filp->private_data);
return 0;
}
static irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
{
unsigned long flags;
struct usdpaa_irq_ctx *ctx = _ctx;
spin_lock_irqsave(&ctx->lock, flags);
++ctx->irq_count;
spin_unlock_irqrestore(&ctx->lock, flags);
wake_up_all(&ctx->wait_queue);
/* Set the inhibit register. This will be re-enabled
once the USDPAA code handles the IRQ */
out_be32(ctx->inhibit_addr, 0x1);
pr_info("Inhibit at %p count %d\n", ctx->inhibit_addr, ctx->irq_count);
return IRQ_HANDLED;
}
static int map_irq(struct file *fp, struct usdpaa_ioctl_irq_map *irq_map)
{
struct usdpaa_irq_ctx *ctx = fp->private_data;
int ret;
if (ctx->irq_set) {
pr_debug("Setting USDPAA IRQ when it was already set!\n");
return -EBUSY;
}
ctx->usdpaa_filp = fget(irq_map->fd);
if (!ctx->usdpaa_filp) {
pr_debug("USDPAA fget(%d) returned NULL\n", irq_map->fd);
return -EINVAL;
}
ret = usdpaa_get_portal_config(ctx->usdpaa_filp, irq_map->portal_cinh,
irq_map->type, &ctx->irq_num,
&ctx->inhibit_addr);
if (ret) {
pr_debug("USDPAA IRQ couldn't identify portal\n");
fput(ctx->usdpaa_filp);
return ret;
}
ctx->irq_set = 1;
snprintf(ctx->irq_name, sizeof(ctx->irq_name),
"usdpaa_irq %d", ctx->irq_num);
ret = request_irq(ctx->irq_num, usdpaa_irq_handler, 0,
ctx->irq_name, ctx);
if (ret) {
pr_err("USDPAA request_irq(%d) failed, ret= %d\n",
ctx->irq_num, ret);
ctx->irq_set = 0;
fput(ctx->usdpaa_filp);
return ret;
}
ret = irq_set_affinity(ctx->irq_num, &current->cpus_allowed);
if (ret)
pr_err("USDPAA irq_set_affinity() failed, ret= %d\n", ret);
ret = irq_set_affinity_hint(ctx->irq_num, &current->cpus_allowed);
if (ret)
pr_err("USDPAA irq_set_affinity_hint() failed, ret= %d\n", ret);
return 0;
}
static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
{
int ret;
struct usdpaa_ioctl_irq_map irq_map;
if (cmd != USDPAA_IOCTL_PORTAL_IRQ_MAP) {
pr_debug("USDPAA IRQ unknown command 0x%x\n", cmd);
return -EINVAL;
}
ret = copy_from_user(&irq_map, (void __user *)arg,
sizeof(irq_map));
if (ret)
return -EFAULT;
return map_irq(fp, &irq_map);
}
static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
size_t count, loff_t *offp)
{
struct usdpaa_irq_ctx *ctx = filp->private_data;
int ret;
if (!ctx->irq_set) {
pr_debug("Reading USDPAA IRQ before it was set\n");
return -EINVAL;
}
if (count < sizeof(ctx->irq_count)) {
pr_debug("USDPAA IRQ Read too small\n");
return -EINVAL;
}
if (ctx->irq_count == ctx->last_irq_count) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(ctx->wait_queue,
ctx->irq_count != ctx->last_irq_count);
if (ret == -ERESTARTSYS)
return ret;
}
ctx->last_irq_count = ctx->irq_count;
if (copy_to_user(buff, &ctx->last_irq_count,
sizeof(ctx->last_irq_count)))
return -EFAULT;
return sizeof(ctx->irq_count);
}
static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
{
struct usdpaa_irq_ctx *ctx = filp->private_data;
unsigned int ret = 0;
unsigned long flags;
if (!ctx->irq_set)
return POLLHUP;
poll_wait(filp, &ctx->wait_queue, wait);
spin_lock_irqsave(&ctx->lock, flags);
if (ctx->irq_count != ctx->last_irq_count)
ret |= POLLIN | POLLRDNORM;
spin_unlock_irqrestore(&ctx->lock, flags);
return ret;
}
static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
unsigned long arg)
{
#ifdef CONFIG_COMPAT
void __user *a = (void __user *)arg;
#endif
switch (cmd) {
#ifdef CONFIG_COMPAT
case USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT:
{
struct compat_ioctl_irq_map input;
struct usdpaa_ioctl_irq_map converted;
if (copy_from_user(&input, a, sizeof(input)))
return -EFAULT;
converted.type = input.type;
converted.fd = input.fd;
converted.portal_cinh = compat_ptr(input.portal_cinh);
return map_irq(fp, &converted);
}
#endif
default:
return usdpaa_irq_ioctl(fp, cmd, arg);
}
}
static const struct file_operations usdpaa_irq_fops = {
.open = usdpaa_irq_open,
.release = usdpaa_irq_release,
.unlocked_ioctl = usdpaa_irq_ioctl,
.compat_ioctl = usdpaa_irq_ioctl_compat,
.read = usdpaa_irq_read,
.poll = usdpaa_irq_poll
};
static struct miscdevice usdpaa_miscdev = {
.name = "fsl-usdpaa-irq",
.fops = &usdpaa_irq_fops,
.minor = MISC_DYNAMIC_MINOR,
};
static int __init usdpaa_irq_init(void)
{
int ret;
pr_info("Freescale USDPAA process IRQ driver\n");
ret = misc_register(&usdpaa_miscdev);
if (ret)
pr_err("fsl-usdpaa-irq: failed to register misc device\n");
return ret;
}
static void __exit usdpaa_irq_exit(void)
{
misc_deregister(&usdpaa_miscdev);
}
module_init(usdpaa_irq_init);
module_exit(usdpaa_irq_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_DESCRIPTION("Freescale USDPAA process IRQ driver");

View File

@ -0,0 +1,88 @@
/* Copyright 2013 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/time.h>
#include "qman_private.h"
#include "bman_private.h"
__init void qman_init_early(void);
__init void bman_init_early(void);
static __init int qbman_init(void)
{
struct device_node *dn;
u32 is_portal_available;
bman_init();
qman_init();
is_portal_available = 0;
for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
if (!of_device_is_available(dn))
continue;
is_portal_available = 1;
}
if (!qman_have_ccsr() && is_portal_available) {
struct qman_fq fq = {
.fqid = 1
};
struct qm_mcr_queryfq_np np;
int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
struct timespec nowts, diffts, startts = current_kernel_time();
/* Loop until querying the given fqid succeeds or we time out */
while (1) {
err = qman_query_fq_np(&fq, &np);
if (!err) {
/* success, control-plane has configured QMan */
break;
} else if (err != -ERANGE) {
pr_err("QMan: I/O error, continuing anyway\n");
break;
}
nowts = current_kernel_time();
diffts = timespec_sub(nowts, startts);
if (diffts.tv_sec > 0) {
if (!retry--) {
pr_err("QMan: time out, control-plane"
" dead?\n");
break;
}
pr_warn("QMan: polling for the control-plane"
" (%d)\n", retry);
}
}
}
bman_resource_init();
qman_resource_init();
return 0;
}
subsys_initcall(qbman_init);

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,961 @@
/* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "qman_private.h"
#include <asm/smp.h> /* hard_smp_processor_id() if !CONFIG_SMP */
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpu.h>
#endif
/* Global variable containing revision id (even on non-control plane systems
* where CCSR isn't available) */
u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
u8 qman_ip_cfg;
EXPORT_SYMBOL(qman_ip_cfg);
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
EXPORT_SYMBOL(qm_channel_caam);
u16 qm_channel_pme = QMAN_CHANNEL_PME;
EXPORT_SYMBOL(qm_channel_pme);
u16 qm_channel_dce = QMAN_CHANNEL_DCE;
EXPORT_SYMBOL(qm_channel_dce);
u16 qman_portal_max;
EXPORT_SYMBOL(qman_portal_max);
u32 qman_clk;
struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
/* the qman ceetm instances on the given SoC */
u8 num_ceetms;
/* For these variables, and the portal-initialisation logic, the
* comments in bman_driver.c apply here so won't be repeated. */
static struct qman_portal *shared_portals[NR_CPUS];
static int num_shared_portals;
static int shared_portals_idx;
static LIST_HEAD(unused_pcfgs);
static DEFINE_SPINLOCK(unused_pcfgs_lock);
/* A SDQCR mask comprising all the available/visible pool channels */
static u32 pools_sdqcr;
#define STR_ERR_NOPROP "No '%s' property in node %s\n"
#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
#define STR_FQID_RANGE "fsl,fqid-range"
#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
#define STR_CGRID_RANGE "fsl,cgrid-range"
/* A "fsl,fqid-range" node; release the given range to the allocator */
static __init int fsl_fqid_range_init(struct device_node *node)
{
int ret;
const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
if (!range) {
pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
return -EINVAL;
}
qman_seed_fqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
pr_info("Qman: FQID allocator includes range %d:%d\n",
be32_to_cpu(range[0]), be32_to_cpu(range[1]));
return 0;
}
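/* An example of such a node in the device tree (values illustrative only):
*
*   qman-fqids@0 {
*           compatible = "fsl,fqid-range";
*           fsl,fqid-range = <256 256>;
*   };
*
* The two cells are the base FQID and the count; the "fsl,pool-channel-range"
* and "fsl,cgrid-range" nodes handled below follow the same base/count
* convention. */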
/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
{
int ret;
const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
if (!chanid) {
pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
return -EINVAL;
}
for (ret = 0; ret < be32_to_cpu(chanid[1]); ret++)
pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(be32_to_cpu(chanid[0]) + ret);
return 0;
}
/* A "fsl,pool-channel-range" node; release the given range to the allocator */
static __init int fsl_pool_channel_range_init(struct device_node *node)
{
int ret;
const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
if (!chanid) {
pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
return -EINVAL;
}
qman_seed_pool_range(be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
pr_info("Qman: pool channel allocator includes range %d:%d\n",
be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
return 0;
}
/* A "fsl,cgrid-range" node; release the given range to the allocator */
static __init int fsl_cgrid_range_init(struct device_node *node)
{
struct qman_cgr cgr;
int ret, errors = 0;
const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
if (!range) {
pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
return -EINVAL;
}
qman_seed_cgrid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
pr_info("Qman: CGRID allocator includes range %d:%d\n",
be32_to_cpu(range[0]), be32_to_cpu(range[1]));
for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
if (ret)
errors++;
}
if (errors)
pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
errors, (errors > 1) ? "s" : "",
be32_to_cpu(range[0]), be32_to_cpu(range[1]));
return 0;
}
static __init int fsl_ceetm_init(struct device_node *node)
{
enum qm_dc_portal dcp_portal;
struct qm_ceetm_sp *sp;
struct qm_ceetm_lni *lni;
int ret, i;
const u32 *range;
/* Find LFQID range */
range = of_get_property(node, "fsl,ceetm-lfqid-range", &ret);
if (!range) {
pr_err("No fsl,ceetm-lfqid-range in node %s\n",
node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err("fsl,ceetm-lfqid-range is not a 2-cell range in node"
" %s\n", node->full_name);
return -EINVAL;
}
dcp_portal = (be32_to_cpu(range[0]) & 0x0F0000) >> 16;
if (dcp_portal > qm_dc_portal_fman1) {
pr_err("The DCP portal %d doesn't support CEETM\n", dcp_portal);
return -EINVAL;
}
if (dcp_portal == qm_dc_portal_fman0)
qman_seed_ceetm0_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
if (dcp_portal == qm_dc_portal_fman1)
qman_seed_ceetm1_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
pr_debug("Qman: The lfqid allocator of CEETM %d includes range"
" 0x%x:0x%x\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
qman_ceetms[dcp_portal].idx = dcp_portal;
INIT_LIST_HEAD(&qman_ceetms[dcp_portal].sub_portals);
INIT_LIST_HEAD(&qman_ceetms[dcp_portal].lnis);
/* Find Sub-portal range */
range = of_get_property(node, "fsl,ceetm-sp-range", &ret);
if (!range) {
pr_err("No fsl,ceetm-sp-range in node %s\n", node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err("fsl,ceetm-sp-range is not a 2-cell range in node %s\n",
node->full_name);
return -EINVAL;
}
for (i = 0; i < be32_to_cpu(range[1]); i++) {
sp = kzalloc(sizeof(*sp), GFP_KERNEL);
if (!sp) {
pr_err("Can't alloc memory for sub-portal %d\n",
be32_to_cpu(range[0]) + i);
return -ENOMEM;
}
sp->idx = be32_to_cpu(range[0]) + i;
sp->dcp_idx = dcp_portal;
sp->is_claimed = 0;
list_add_tail(&sp->node, &qman_ceetms[dcp_portal].sub_portals);
sp++;
}
pr_debug("Qman: Reserve sub-portal %d:%d for CEETM %d\n",
be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
qman_ceetms[dcp_portal].sp_range[0] = be32_to_cpu(range[0]);
qman_ceetms[dcp_portal].sp_range[1] = be32_to_cpu(range[1]);
/* Find LNI range */
range = of_get_property(node, "fsl,ceetm-lni-range", &ret);
if (!range) {
pr_err("No fsl,ceetm-lni-range in node %s\n", node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err("fsl,ceetm-lni-range is not a 2-cell range in node %s\n",
node->full_name);
return -EINVAL;
}
for (i = 0; i < be32_to_cpu(range[1]); i++) {
lni = kzalloc(sizeof(*lni), GFP_KERNEL);
if (!lni) {
pr_err("Can't alloc memory for LNI %d\n",
be32_to_cpu(range[0]) + i);
return -ENOMEM;
}
lni->idx = be32_to_cpu(range[0]) + i;
lni->dcp_idx = dcp_portal;
lni->is_claimed = 0;
INIT_LIST_HEAD(&lni->channels);
list_add_tail(&lni->node, &qman_ceetms[dcp_portal].lnis);
lni++;
}
pr_debug("Qman: Reserve LNI %d:%d for CEETM %d\n",
be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
qman_ceetms[dcp_portal].lni_range[0] = be32_to_cpu(range[0]);
qman_ceetms[dcp_portal].lni_range[1] = be32_to_cpu(range[1]);
/* Find CEETM channel range */
range = of_get_property(node, "fsl,ceetm-channel-range", &ret);
if (!range) {
pr_err("No fsl,ceetm-channel-range in node %s\n",
node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err("fsl,ceetm-channel-range is not a 2-cell range in node"
"%s\n", node->full_name);
return -EINVAL;
}
if (dcp_portal == qm_dc_portal_fman0)
qman_seed_ceetm0_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
if (dcp_portal == qm_dc_portal_fman1)
qman_seed_ceetm1_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
pr_debug("Qman: The channel allocator of CEETM %d includes"
" range %d:%d\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
/* Set CEETM PRES register */
ret = qman_ceetm_set_prescaler(dcp_portal);
if (ret)
return ret;
return 0;
}
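/* An illustrative "fsl,qman-ceetm" node (property names are those parsed
* above; the values are examples only):
*
*   qman-ceetm@0 {
*           compatible = "fsl,qman-ceetm";
*           fsl,ceetm-lfqid-range = <0xf00000 0x1000>;
*           fsl,ceetm-sp-range = <0 12>;
*           fsl,ceetm-lni-range = <0 8>;
*           fsl,ceetm-channel-range = <0x800 32>;
*   };
*/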
static void qman_get_ip_revision(struct device_node *dn)
{
u16 ip_rev = 0;
u8 ip_cfg = QMAN_REV_CFG_0;
for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
if (!of_device_is_available(dn))
continue;
if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
BUG_ON(1);
} else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
ip_rev = QMAN_REV11;
qman_portal_max = 10;
} else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
ip_rev = QMAN_REV12;
qman_portal_max = 10;
} else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
ip_rev = QMAN_REV20;
qman_portal_max = 3;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.0.0")) {
ip_rev = QMAN_REV30;
qman_portal_max = 50;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.0.1")) {
ip_rev = QMAN_REV30;
qman_portal_max = 25;
ip_cfg = QMAN_REV_CFG_1;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.1.0")) {
ip_rev = QMAN_REV31;
qman_portal_max = 50;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.1.1")) {
ip_rev = QMAN_REV31;
qman_portal_max = 25;
ip_cfg = QMAN_REV_CFG_1;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.1.2")) {
ip_rev = QMAN_REV31;
qman_portal_max = 18;
ip_cfg = QMAN_REV_CFG_2;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.1.3")) {
ip_rev = QMAN_REV31;
qman_portal_max = 10;
ip_cfg = QMAN_REV_CFG_3;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.2.0")) {
ip_rev = QMAN_REV32;
qman_portal_max = 10;
ip_cfg = QMAN_REV_CFG_3; /* TODO: Verify for ls1043 */
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.2.1")) {
ip_rev = QMAN_REV32;
qman_portal_max = 10;
ip_cfg = QMAN_REV_CFG_3;
} else {
pr_warn("unknown QMan version in portal node,"
"default to rev1.1\n");
ip_rev = QMAN_REV11;
qman_portal_max = 10;
}
if (!qman_ip_rev) {
if (ip_rev) {
qman_ip_rev = ip_rev;
qman_ip_cfg = ip_cfg;
} else {
pr_warn("unknown Qman version,"
" default to rev1.1\n");
qman_ip_rev = QMAN_REV11;
qman_ip_cfg = QMAN_REV_CFG_0;
}
} else if (ip_rev && (qman_ip_rev != ip_rev))
pr_warn("Revision=0x%04x, but portal '%s' has"
" 0x%04x\n",
qman_ip_rev, dn->full_name, ip_rev);
if (qman_ip_rev == ip_rev)
break;
}
}
/* Parse a portal node, perform generic mapping duties and return the config. It
* is not known at this stage for what purpose (or even if) the portal will be
* used. */
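/* An illustrative portal node of the kind parsed below (values are examples
* only): a "reg" pair for the cache-enabled and cache-inhibited regions, a
* "cell-index" giving the portal index, and an "interrupts" specifier:
*
*   qportal0: qman-portal@0 {
*           compatible = "fsl,qman-portal";
*           reg = <0x0 0x4000 0x100000 0x1000>;
*           cell-index = <0x0>;
*           interrupts = <104 0x2 0 0>;
*   };
*/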
static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
{
struct qm_portal_config *pcfg;
const u32 *index_p;
u32 index, channel;
int irq, ret;
resource_size_t len;
pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
if (!pcfg) {
pr_err("can't allocate portal config");
return NULL;
}
/*
* This is a *horrible hack*, but the IOMMU/PAMU driver needs a
* 'struct device' in order to get the PAMU stashing setup and the QMan
* portal [driver] won't function at all without ring stashing
*
* Making the QMan portal driver nice and proper is part of the
* upstreaming effort
*/
pcfg->dev.bus = &platform_bus_type;
pcfg->dev.of_node = node;
#ifdef CONFIG_FSL_PAMU
pcfg->dev.archdata.iommu_domain = NULL;
#endif
ret = of_address_to_resource(node, DPA_PORTAL_CE,
&pcfg->addr_phys[DPA_PORTAL_CE]);
if (ret) {
pr_err("Can't get %s property '%s'\n", node->full_name,
"reg::CE");
goto err;
}
ret = of_address_to_resource(node, DPA_PORTAL_CI,
&pcfg->addr_phys[DPA_PORTAL_CI]);
if (ret) {
pr_err("Can't get %s property '%s'\n", node->full_name,
"reg::CI");
goto err;
}
index_p = of_get_property(node, "cell-index", &ret);
if (!index_p || (ret != 4)) {
pr_err("Can't get %s property '%s'\n", node->full_name,
"cell-index");
goto err;
}
index = be32_to_cpu(*index_p);
if (index >= qman_portal_max) {
pr_err("QMan portal index %d is beyond max (%d)\n",
index, qman_portal_max);
goto err;
}
channel = index + QM_CHANNEL_SWPORTAL0;
pcfg->public_cfg.channel = channel;
pcfg->public_cfg.cpu = -1;
irq = irq_of_parse_and_map(node, 0);
if (irq == 0) {
pr_err("Can't get %s property '%s'\n", node->full_name,
"interrupts");
goto err;
}
pcfg->public_cfg.irq = irq;
pcfg->public_cfg.index = index;
#ifdef CONFIG_FSL_QMAN_CONFIG
/* We need the same LIODN offset for all portals */
qman_liodn_fixup(pcfg->public_cfg.channel);
#endif
len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
if (len != (unsigned long)len)
goto err;
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
pcfg->addr_phys[DPA_PORTAL_CE].start,
resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
pcfg->addr_phys[DPA_PORTAL_CI].start,
resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
#else
pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
pcfg->addr_phys[DPA_PORTAL_CE].start,
(unsigned long)len,
0);
pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
pcfg->addr_phys[DPA_PORTAL_CI].start,
resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
#endif
return pcfg;
err:
kfree(pcfg);
return NULL;
}
static struct qm_portal_config *get_pcfg(struct list_head *list)
{
struct qm_portal_config *pcfg;
if (list_empty(list))
return NULL;
pcfg = list_entry(list->prev, struct qm_portal_config, list);
list_del(&pcfg->list);
return pcfg;
}
static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx)
{
struct qm_portal_config *pcfg;
if (list_empty(list))
return NULL;
list_for_each_entry(pcfg, list, list) {
if (pcfg->public_cfg.index == idx) {
list_del(&pcfg->list);
return pcfg;
}
}
return NULL;
}
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
int ret;
int window_count = 1;
struct iommu_domain_geometry geom_attr;
struct pamu_stash_attribute stash_attr;
pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
if (!pcfg->iommu_domain) {
pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
__func__);
goto _no_iommu;
}
geom_attr.aperture_start = 0;
geom_attr.aperture_end =
((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
geom_attr.force_aperture = true;
ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
&geom_attr);
if (ret < 0) {
pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
__func__, ret);
goto _iommu_domain_free;
}
ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
&window_count);
if (ret < 0) {
pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
__func__, ret);
goto _iommu_domain_free;
}
stash_attr.cpu = cpu;
stash_attr.cache = PAMU_ATTR_CACHE_L1;
/* set stash information for the window */
stash_attr.window = 0;
ret = iommu_domain_set_attr(pcfg->iommu_domain,
DOMAIN_ATTR_FSL_PAMU_STASH,
&stash_attr);
if (ret < 0) {
pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
__func__, ret);
goto _iommu_domain_free;
}
ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
IOMMU_READ | IOMMU_WRITE);
if (ret < 0) {
pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
__func__, ret);
goto _iommu_domain_free;
}
ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
if (ret < 0) {
pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
__func__, ret);
goto _iommu_domain_free;
}
ret = iommu_domain_set_attr(pcfg->iommu_domain,
DOMAIN_ATTR_FSL_PAMU_ENABLE,
&window_count);
if (ret < 0) {
pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
__func__, ret);
goto _iommu_detach_device;
}
_no_iommu:
#endif
#ifdef CONFIG_FSL_QMAN_CONFIG
if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
#endif
pr_warn("Failed to set QMan portal's stash request queue\n");
return;
#ifdef CONFIG_FSL_PAMU
_iommu_detach_device:
iommu_detach_device(pcfg->iommu_domain, NULL);
_iommu_domain_free:
iommu_domain_free(pcfg->iommu_domain);
#endif
}
struct qm_portal_config *qm_get_unused_portal_idx(u32 idx)
{
struct qm_portal_config *ret;
spin_lock(&unused_pcfgs_lock);
if (idx == QBMAN_ANY_PORTAL_IDX)
ret = get_pcfg(&unused_pcfgs);
else
ret = get_pcfg_idx(&unused_pcfgs, idx);
spin_unlock(&unused_pcfgs_lock);
/* Bind stashing LIODNs to the CPU we are currently executing on, and
* set the portal to use the stashing request queue corresponding to that
* cpu as well. The user-space driver assumes the pthread is already
* affine to a single cpu before opening a portal. If that check is
* circumvented, the only risk is a performance degradation - stashing
* will go to whatever cpu the thread happened to be running on when
* opening the device file, and if that isn't the cpu it subsequently
* binds to and does its polling on, tough. */
if (ret)
portal_set_cpu(ret, hard_smp_processor_id());
return ret;
}
struct qm_portal_config *qm_get_unused_portal(void)
{
return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
}
void qm_put_unused_portal(struct qm_portal_config *pcfg)
{
spin_lock(&unused_pcfgs_lock);
list_add(&pcfg->list, &unused_pcfgs);
spin_unlock(&unused_pcfgs_lock);
}
static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
{
struct qman_portal *p;
pcfg->iommu_domain = NULL;
portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
p = qman_create_affine_portal(pcfg, NULL);
if (p) {
u32 irq_sources = 0;
/* Determine what should be interrupt-vs-poll driven */
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
QM_PIRQ_CSCI | QM_PIRQ_CCSCI;
#endif
#ifdef CONFIG_FSL_DPA_PIRQ_FAST
irq_sources |= QM_PIRQ_DQRI;
#endif
qman_p_irqsource_add(p, irq_sources);
pr_info("Qman portal %sinitialised, cpu %d\n",
pcfg->public_cfg.is_shared ? "(shared) " : "",
pcfg->public_cfg.cpu);
} else
pr_crit("Qman portal failure on cpu %d\n",
pcfg->public_cfg.cpu);
return p;
}
static void init_slave(int cpu)
{
struct qman_portal *p;
struct cpumask oldmask = current->cpus_allowed;
set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
if (!p)
pr_err("Qman slave portal failure on cpu %d\n", cpu);
else
pr_info("Qman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
set_cpus_allowed_ptr(current, &oldmask);
if (shared_portals_idx >= num_shared_portals)
shared_portals_idx = 0;
}
static struct cpumask want_unshared __initdata;
static struct cpumask want_shared __initdata;
static int __init parse_qportals(char *str)
{
return parse_portals_bootarg(str, &want_shared, &want_unshared,
"qportals");
}
__setup("qportals=", parse_qportals);
static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
unsigned int cpu)
{
#ifdef CONFIG_FSL_PAMU
struct pamu_stash_attribute stash_attr;
int ret;
if (pcfg->iommu_domain) {
stash_attr.cpu = cpu;
stash_attr.cache = PAMU_ATTR_CACHE_L1;
/* set stash information for the window */
stash_attr.window = 0;
ret = iommu_domain_set_attr(pcfg->iommu_domain,
DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
if (ret < 0) {
pr_err("Failed to update pamu stash setting\n");
return;
}
}
#endif
#ifdef CONFIG_FSL_QMAN_CONFIG
if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
pr_warn("Failed to update portal's stash request queue\n");
#endif
}
static int qman_offline_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
p = (struct qman_portal *)affine_portals[cpu];
if (p) {
pcfg = qman_get_qm_portal_config(p);
if (pcfg) {
irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
qman_portal_update_sdest(pcfg, 0);
}
}
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static int qman_online_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
p = (struct qman_portal *)affine_portals[cpu];
if (p) {
pcfg = qman_get_qm_portal_config(p);
if (pcfg) {
irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
qman_portal_update_sdest(pcfg, cpu);
}
}
return 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
__init int qman_init(void)
{
struct cpumask slave_cpus;
struct cpumask unshared_cpus = *cpu_none_mask;
struct cpumask shared_cpus = *cpu_none_mask;
LIST_HEAD(unshared_pcfgs);
LIST_HEAD(shared_pcfgs);
struct device_node *dn;
struct qm_portal_config *pcfg;
struct qman_portal *p;
int cpu, ret;
const u32 *clk;
struct cpumask offline_cpus;
/* Initialise the Qman (CCSR) device */
for_each_compatible_node(dn, NULL, "fsl,qman") {
if (!qman_init_ccsr(dn))
pr_info("Qman err interrupt handler present\n");
else
pr_err("Qman CCSR setup failed\n");
clk = of_get_property(dn, "clock-frequency", NULL);
if (!clk)
pr_warn("Can't find Qman clock frequency\n");
else
qman_clk = be32_to_cpu(*clk);
}
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
/* Setup lookup table for FQ demux */
ret = qman_setup_fq_lookup_table(get_qman_fqd_size()/64);
if (ret)
return ret;
#endif
/* Get qman ip revision */
qman_get_ip_revision(dn);
if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
qm_channel_pme = QMAN_CHANNEL_PME_REV3;
}
if ((qman_ip_rev == QMAN_REV31) && (qman_ip_cfg == QMAN_REV_CFG_2))
qm_channel_dce = QMAN_CHANNEL_DCE_QMANREV312;
/*
* Parse the ceetm node to get how many ceetm instances are supported
* on the current silicon. num_ceetms must be confirmed before portals
* are initialised.
*/
num_ceetms = 0;
for_each_compatible_node(dn, NULL, "fsl,qman-ceetm")
num_ceetms++;
/* Parse pool channels into the SDQCR mask. (Must happen before portals
* are initialised.) */
for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
ret = fsl_pool_channel_range_sdqcr(dn);
if (ret)
return ret;
}
memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
/* Initialise portals. See bman_driver.c for comments */
for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
if (!of_device_is_available(dn))
continue;
pcfg = parse_pcfg(dn);
if (pcfg) {
pcfg->public_cfg.pools = pools_sdqcr;
list_add_tail(&pcfg->list, &unused_pcfgs);
}
}
for_each_possible_cpu(cpu) {
if (cpumask_test_cpu(cpu, &want_shared)) {
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
pcfg->public_cfg.cpu = cpu;
list_add_tail(&pcfg->list, &shared_pcfgs);
cpumask_set_cpu(cpu, &shared_cpus);
}
if (cpumask_test_cpu(cpu, &want_unshared)) {
if (cpumask_test_cpu(cpu, &shared_cpus))
continue;
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
pcfg->public_cfg.cpu = cpu;
list_add_tail(&pcfg->list, &unshared_pcfgs);
cpumask_set_cpu(cpu, &unshared_cpus);
}
}
if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
for_each_online_cpu(cpu) {
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
pcfg->public_cfg.cpu = cpu;
list_add_tail(&pcfg->list, &unshared_pcfgs);
cpumask_set_cpu(cpu, &unshared_cpus);
}
}
cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
if (cpumask_empty(&slave_cpus)) {
if (!list_empty(&shared_pcfgs)) {
cpumask_or(&unshared_cpus, &unshared_cpus,
&shared_cpus);
cpumask_clear(&shared_cpus);
list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
INIT_LIST_HEAD(&shared_pcfgs);
}
} else {
if (list_empty(&shared_pcfgs)) {
pcfg = get_pcfg(&unshared_pcfgs);
if (!pcfg) {
pr_crit("No QMan portals available!\n");
return 0;
}
cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
list_add_tail(&pcfg->list, &shared_pcfgs);
}
}
list_for_each_entry(pcfg, &unshared_pcfgs, list) {
pcfg->public_cfg.is_shared = 0;
p = init_pcfg(pcfg);
if (!p) {
pr_crit("Unable to configure portals\n");
return 0;
}
}
list_for_each_entry(pcfg, &shared_pcfgs, list) {
pcfg->public_cfg.is_shared = 1;
p = init_pcfg(pcfg);
if (p)
shared_portals[num_shared_portals++] = p;
}
if (!cpumask_empty(&slave_cpus))
for_each_cpu(cpu, &slave_cpus)
init_slave(cpu);
pr_info("Qman portals initialised\n");
cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
for_each_cpu(cpu, &offline_cpus)
qman_offline_cpu(cpu);
#ifdef CONFIG_HOTPLUG_CPU
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"soc/qman_portal:online",
qman_online_cpu, qman_offline_cpu);
if (ret < 0) {
pr_err("qman: failed to register hotplug callbacks.\n");
return ret;
}
#endif
return 0;
}
__init int qman_resource_init(void)
{
struct device_node *dn;
int ret;
/* Initialise FQID allocation ranges */
for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
ret = fsl_fqid_range_init(dn);
if (ret)
return ret;
}
/* Initialise CGRID allocation ranges */
for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
ret = fsl_cgrid_range_init(dn);
if (ret)
return ret;
}
/* Parse pool channels into the allocator. (Must happen after portals
* are initialised.) */
for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
ret = fsl_pool_channel_range_init(dn);
if (ret)
return ret;
}
/* Parse CEETM */
for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") {
ret = fsl_ceetm_init(dn);
if (ret)
return ret;
}
return 0;
}
#ifdef CONFIG_SUSPEND
void suspend_unused_qportal(void)
{
struct qm_portal_config *pcfg;
if (list_empty(&unused_pcfgs))
return;
list_for_each_entry(pcfg, &unused_pcfgs, list) {
#ifdef CONFIG_PM_DEBUG
pr_info("Need to save qportal %d\n", pcfg->public_cfg.index);
#endif
/* save isdr, disable all via isdr, clear isr */
pcfg->saved_isdr =
__raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
__raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
0xe08);
__raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
0xe00);
}
return;
}
void resume_unused_qportal(void)
{
struct qm_portal_config *pcfg;
if (list_empty(&unused_pcfgs))
return;
list_for_each_entry(pcfg, &unused_pcfgs, list) {
#ifdef CONFIG_PM_DEBUG
pr_info("Need to resume qportal %d\n", pcfg->public_cfg.index);
#endif
/* restore isdr */
__raw_writel(pcfg->saved_isdr,
pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
}
return;
}
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,398 @@
/* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "dpa_sys.h"
#include <linux/fsl_qman.h>
#include <linux/iommu.h>
#if defined(CONFIG_FSL_PAMU)
#include <asm/fsl_pamu_stash.h>
#endif
#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64)
#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP"
#endif
#define QBMAN_ANY_PORTAL_IDX 0xffffffff
/* ----------------- */
/* Congestion Groups */
/* ----------------- */
/* This wrapper represents a bit-array for the state of the 256 Qman congestion
* groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
* those that don't concern us. We harness the structure and accessor details
* already used in the management command to query congestion groups. */
struct qman_cgrs {
struct __qm_mcr_querycongestion q;
};
static inline void qman_cgrs_init(struct qman_cgrs *c)
{
memset(c, 0, sizeof(*c));
}
static inline void qman_cgrs_fill(struct qman_cgrs *c)
{
memset(c, 0xff, sizeof(*c));
}
static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
{
return QM_MCR_QUERYCONGESTION(&c->q, num);
}
static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
{
c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
}
static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
{
c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
}
static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
{
while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
;
return num;
}
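/* An illustrative walk over the groups set in a snapshot (a sketch only;
* 'state' would typically hold the result of a query-congestion management
* command):
*
*   int i;
*   for (i = qman_cgrs_next(&state, -1); i < __CGR_NUM;
*        i = qman_cgrs_next(&state, i))
*           ...handle congested group 'i'...
*/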
static inline void qman_cgrs_cp(struct qman_cgrs *dest,
const struct qman_cgrs *src)
{
*dest = *src;
}
static inline void qman_cgrs_and(struct qman_cgrs *dest,
const struct qman_cgrs *a, const struct qman_cgrs *b)
{
int ret;
u32 *_d = dest->q.__state;
const u32 *_a = a->q.__state;
const u32 *_b = b->q.__state;
for (ret = 0; ret < 8; ret++)
*(_d++) = *(_a++) & *(_b++);
}
static inline void qman_cgrs_xor(struct qman_cgrs *dest,
const struct qman_cgrs *a, const struct qman_cgrs *b)
{
int ret;
u32 *_d = dest->q.__state;
const u32 *_a = a->q.__state;
const u32 *_b = b->q.__state;
for (ret = 0; ret < 8; ret++)
*(_d++) = *(_a++) ^ *(_b++);
}
/* ----------------------- */
/* CEETM Congestion Groups */
/* ----------------------- */
/* This wrapper represents a bit-array for the state of the 512 Qman CEETM
* congestion groups.
*/
struct qman_ccgrs {
struct __qm_mcr_querycongestion q[2];
};
static inline void qman_ccgrs_init(struct qman_ccgrs *c)
{
memset(c, 0, sizeof(*c));
}
static inline void qman_ccgrs_fill(struct qman_ccgrs *c)
{
memset(c, 0xff, sizeof(*c));
}
static inline int qman_ccgrs_get(struct qman_ccgrs *c, int num)
{
if (num < __CGR_NUM)
return QM_MCR_QUERYCONGESTION(&c->q[0], num);
else
return QM_MCR_QUERYCONGESTION(&c->q[1], (num - __CGR_NUM));
}
static inline int qman_ccgrs_next(struct qman_ccgrs *c, int num)
{
while ((++num < __CGR_NUM) && !qman_ccgrs_get(c, num))
;
return num;
}
static inline void qman_ccgrs_cp(struct qman_ccgrs *dest,
const struct qman_ccgrs *src)
{
*dest = *src;
}
static inline void qman_ccgrs_and(struct qman_ccgrs *dest,
const struct qman_ccgrs *a, const struct qman_ccgrs *b)
{
int ret, i;
u32 *_d;
const u32 *_a, *_b;
for (i = 0; i < 2; i++) {
_d = dest->q[i].__state;
_a = a->q[i].__state;
_b = b->q[i].__state;
for (ret = 0; ret < 8; ret++)
*(_d++) = *(_a++) & *(_b++);
}
}
static inline void qman_ccgrs_xor(struct qman_ccgrs *dest,
const struct qman_ccgrs *a, const struct qman_ccgrs *b)
{
int ret, i;
u32 *_d;
const u32 *_a, *_b;
for (i = 0; i < 2; i++) {
_d = dest->q[i].__state;
_a = a->q[i].__state;
_b = b->q[i].__state;
for (ret = 0; ret < 8; ret++)
*(_d++) = *(_a++) ^ *(_b++);
}
}
/* used by CCSR and portal interrupt code */
enum qm_isr_reg {
qm_isr_status = 0,
qm_isr_enable = 1,
qm_isr_disable = 2,
qm_isr_inhibit = 3
};
struct qm_portal_config {
/* Corenet portal addresses;
* [0]==cache-enabled, [1]==cache-inhibited. */
__iomem void *addr_virt[2];
struct resource addr_phys[2];
struct device dev;
struct iommu_domain *iommu_domain;
/* Allow these to be joined in lists */
struct list_head list;
/* User-visible portal configuration settings */
struct qman_portal_config public_cfg;
/* power management saved data */
u32 saved_isdr;
};
/* Revision info (for errata and feature handling) */
#define QMAN_REV11 0x0101
#define QMAN_REV12 0x0102
#define QMAN_REV20 0x0200
#define QMAN_REV30 0x0300
#define QMAN_REV31 0x0301
#define QMAN_REV32 0x0302
/* QMan REV_2 register contains the Cfg option */
#define QMAN_REV_CFG_0 0x0
#define QMAN_REV_CFG_1 0x1
#define QMAN_REV_CFG_2 0x2
#define QMAN_REV_CFG_3 0x3
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
extern u8 qman_ip_cfg;
extern u32 qman_clk;
extern u16 qman_portal_max;
#ifdef CONFIG_FSL_QMAN_CONFIG
/* Hooks from qman_driver.c to qman_config.c */
int qman_init_ccsr(struct device_node *node);
void qman_liodn_fixup(u16 channel);
int qman_set_sdest(u16 channel, unsigned int cpu_idx);
size_t get_qman_fqd_size(void);
#else
static inline size_t get_qman_fqd_size(void)
{
return (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ);
}
#endif
int qm_set_wpm(int wpm);
int qm_get_wpm(int *wpm);
/* Hooks from qman_driver.c in to qman_high.c */
struct qman_portal *qman_create_portal(
struct qman_portal *portal,
const struct qm_portal_config *config,
const struct qman_cgrs *cgrs);
struct qman_portal *qman_create_affine_portal(
const struct qm_portal_config *config,
const struct qman_cgrs *cgrs);
struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
int cpu);
const struct qm_portal_config *qman_destroy_affine_portal(void);
void qman_destroy_portal(struct qman_portal *qm);
/* Hooks from fsl_usdpaa.c to qman_driver.c */
struct qm_portal_config *qm_get_unused_portal(void);
struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
void qm_put_unused_portal(struct qm_portal_config *pcfg);
void qm_set_liodns(struct qm_portal_config *pcfg);
/* This CGR feature is supported by h/w and required by unit-tests and the
* debugfs hooks, so is implemented in the driver. However it allows an explicit
* corruption of h/w fields by s/w that are usually incorruptible (because the
* counters are usually maintained entirely within h/w). As such, we declare
* this API internally. */
int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
struct qm_mcr_cgrtestwrite *result);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
/* If the fq object pointer is wider than the context_b field (e.g. on 64-bit
 * platforms), then a lookup table is required. */
int qman_setup_fq_lookup_table(size_t num_entries);
#endif
/*************************************************/
/* QMan s/w corenet portal, low-level i/face */
/*************************************************/
/* Note: most functions are only used by the high-level interface, so are
* inlined from qman_low.h. The stuff below is for use by other parts of the
* driver. */
/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
* dequeue TYPE. Choose TOKEN (8-bit).
* If SOURCE == CHANNELS,
* Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
* You can choose DEDICATED_PRECEDENCE if the portal channel should have
* priority.
* If SOURCE == SPECIFICWQ,
* Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
* channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
* work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
* same value.
*/
#define QM_SDQCR_SOURCE_CHANNELS 0x0
#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
#define QM_SDQCR_COUNT_EXACT1 0x0
#define QM_SDQCR_COUNT_UPTO3 0x20000000
#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
#define QM_SDQCR_TYPE_MASK 0x03000000
#define QM_SDQCR_TYPE_NULL 0x0
#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
#define QM_SDQCR_TYPE_ACTIVE 0x03000000
#define QM_SDQCR_TOKEN_MASK 0x00ff0000
#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
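/* Illustrative composition only (not a new definition): an SDQCR that dequeues
 * up to 3 frames per command from the dedicated channel plus pool channel 2,
 * with intra-class prioritisation and a token of 0x98:
 *
 *   u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |
 *               QM_SDQCR_COUNT_UPTO3 |
 *               QM_SDQCR_TYPE_PRIO_QOS |
 *               QM_SDQCR_TOKEN_SET(0x98) |
 *               QM_SDQCR_CHANNELS_DEDICATED |
 *               QM_SDQCR_CHANNELS_POOL(2);
 */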
/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
#define QM_VDQCR_FQID_MASK 0x00ffffff
#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
* If MODE==SCHEDULED
* Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
* If CHANNELS,
* Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
* You can choose DEDICATED_PRECEDENCE if the portal channel should have
* priority.
* If SPECIFICWQ,
* Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
* channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
* work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
* same value.
* If MODE==UNSCHEDULED
* Choose FQID().
*/
#define QM_PDQCR_MODE_SCHEDULED 0x0
#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
#define QM_PDQCR_COUNT_EXACT1 0x0
#define QM_PDQCR_COUNT_UPTO3 0x20000000
#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
#define QM_PDQCR_TYPE_MASK 0x03000000
#define QM_PDQCR_TYPE_NULL 0x0
#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
#define QM_PDQCR_TYPE_ACTIVE 0x03000000
#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
#define QM_PDQCR_FQID(n) ((n) & 0xffffff)
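/* Illustrative composition only: a PDQCR performing an unscheduled dequeue of
 * exactly one frame from a hypothetical FQID 0x42:
 *
 *   u32 pdqcr = QM_PDQCR_MODE_UNSCHEDULED |
 *               QM_PDQCR_COUNT_EXACT1 |
 *               QM_PDQCR_FQID(0x42);
 */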
/* Used by all portal interrupt registers except 'inhibit'.
 * The low 16 bits flag channels with frame availability.
 */
#define QM_PIRQ_DQAVAIL 0x0000ffff
/* The DQAVAIL interrupt fields break down into these bits; */
#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
#define QM_DQAVAIL_MASK 0xffff
/* This mask contains all the "irqsource" bits visible to API users */
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
* the disable register" rather than "disable the ability to write". */
#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
/* TODO: unfortunate name-clash here, reword? */
#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
#ifdef CONFIG_FSL_QMAN_CONFIG
int qman_have_ccsr(void);
#else
#define qman_have_ccsr 0
#endif
__init int qman_init(void);
__init int qman_resource_init(void);
/* CEETM related */
#define QMAN_CEETM_MAX 2
extern u8 num_ceetms;
extern struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
int qman_ceetm_set_prescaler(enum qm_dc_portal portal);
int qman_ceetm_get_prescaler(u16 *pres);
int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
struct qm_mcr_ceetm_cq_query *cq_query);
int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
struct qm_mcr_ceetm_ccgr_query *response);
int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num);
extern void *affine_portals[NR_CPUS];
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
/* power management */
#ifdef CONFIG_SUSPEND
void suspend_unused_qportal(void);
void resume_unused_qportal(void);
#endif

View File

@ -0,0 +1,57 @@
/* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "qman_test.h"
MODULE_AUTHOR("Geoff Thorpe");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Qman testing");
static int test_init(void)
{
int loop = 1;
while (loop--) {
#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO
qman_test_hotpotato();
#endif
#ifdef CONFIG_FSL_QMAN_TEST_HIGH
qman_test_high();
#endif
}
return 0;
}
static void test_exit(void)
{
}
module_init(test_init);
module_exit(test_exit);

View File

@ -0,0 +1,45 @@
/* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/fsl_qman.h>
void qman_test_hotpotato(void);
void qman_test_high(void);

View File

@ -0,0 +1,216 @@
/* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "qman_test.h"
/*************/
/* constants */
/*************/
#define CGR_ID 27
#define POOL_ID 2
#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
#define NUM_ENQUEUES 10
#define NUM_PARTIAL 4
#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
QM_SDQCR_TYPE_PRIO_QOS | \
QM_SDQCR_TOKEN_SET(0x98) | \
QM_SDQCR_CHANNELS_DEDICATED | \
QM_SDQCR_CHANNELS_POOL(POOL_ID))
#define PORTAL_OPAQUE ((void *)0xf00dbeef)
#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
/*************************************/
/* Predeclarations (eg. for fq_base) */
/*************************************/
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
struct qman_fq *,
const struct qm_dqrr_entry *);
static void cb_ern(struct qman_portal *, struct qman_fq *,
const struct qm_mr_entry *);
static void cb_fqs(struct qman_portal *, struct qman_fq *,
const struct qm_mr_entry *);
/***************/
/* global vars */
/***************/
static struct qm_fd fd, fd_dq;
static struct qman_fq fq_base = {
.cb.dqrr = cb_dqrr,
.cb.ern = cb_ern,
.cb.fqs = cb_fqs
};
static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
static int retire_complete, sdqcr_complete;
/**********************/
/* internal functions */
/**********************/
/* Helpers for initialising and "incrementing" a frame descriptor */
static void fd_init(struct qm_fd *__fd)
{
qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
__fd->format = qm_fd_contig_big;
__fd->length29 = 0x0000ffff;
__fd->cmd = 0xfeedf00d;
}
static void fd_inc(struct qm_fd *__fd)
{
u64 t = qm_fd_addr_get64(__fd);
int z = t >> 40;
t <<= 1;
if (z)
t |= 1;
qm_fd_addr_set64(__fd, t);
__fd->length29--;
__fd->cmd++;
}
/* The only part of the 'fd' we can't memcmp() is the ppid */
static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
{
int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
if (!r)
r = a->format - b->format;
if (!r)
r = a->opaque - b->opaque;
if (!r)
r = a->cmd - b->cmd;
return r;
}
/********/
/* test */
/********/
static void do_enqueues(struct qman_fq *fq)
{
unsigned int loop;
for (loop = 0; loop < NUM_ENQUEUES; loop++) {
if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
(((loop + 1) == NUM_ENQUEUES) ?
QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
panic("qman_enqueue() failed\n");
fd_inc(&fd);
}
}
void qman_test_high(void)
{
unsigned int flags;
int res;
struct qman_fq *fq = &fq_base;
pr_info("qman_test_high starting\n");
fd_init(&fd);
fd_init(&fd_dq);
/* Initialise (parked) FQ */
if (qman_create_fq(0, FQ_FLAGS, fq))
panic("qman_create_fq() failed\n");
if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
panic("qman_init_fq() failed\n");
/* Do enqueues + VDQCR, twice. (Parked FQ) */
do_enqueues(fq);
pr_info("VDQCR (till-empty);\n");
if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
QM_VDQCR_NUMFRAMES_TILLEMPTY))
panic("qman_volatile_dequeue() failed\n");
do_enqueues(fq);
pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
panic("qman_volatile_dequeue() failed\n");
pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
NUM_ENQUEUES);
if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
panic("qman_volatile_dequeue() failed\n");
do_enqueues(fq);
pr_info("scheduled dequeue (till-empty)\n");
if (qman_schedule_fq(fq))
panic("qman_schedule_fq() failed\n");
wait_event(waitqueue, sdqcr_complete);
/* Retire and OOS the FQ */
res = qman_retire_fq(fq, &flags);
if (res < 0)
panic("qman_retire_fq() failed\n");
wait_event(waitqueue, retire_complete);
if (flags & QMAN_FQ_STATE_BLOCKOOS)
panic("leaking frames\n");
if (qman_oos_fq(fq))
panic("qman_oos_fq() failed\n");
qman_destroy_fq(fq, 0);
pr_info("qman_test_high finished\n");
}
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
if (fd_cmp(&fd_dq, &dq->fd)) {
pr_err("BADNESS: dequeued frame doesn't match;\n");
pr_err("Expected 0x%llx, got 0x%llx\n",
(unsigned long long)fd_dq.length29,
(unsigned long long)dq->fd.length29);
BUG();
}
fd_inc(&fd_dq);
if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
sdqcr_complete = 1;
wake_up(&waitqueue);
}
return qman_cb_dqrr_consume;
}
static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
const struct qm_mr_entry *msg)
{
panic("cb_ern() unimplemented");
}
static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
const struct qm_mr_entry *msg)
{
u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
panic("unexpected FQS message");
pr_info("Retirement message received\n");
retire_complete = 1;
wake_up(&waitqueue);
}

View File

@ -0,0 +1,502 @@
/* Copyright 2009-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include "qman_test.h"
/* Algorithm:
*
* Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
* an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
* organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
* shuttle a "hot potato" frame around them such that every forwarding action
* moves it from one cpu to another. (The use of more than one handler per cpu
* is to allow enough handlers/FQs to truly test the significance of caching -
* ie. when cache-expiries are occurring.)
*
* The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
* first and last words of the frame data will undergo a transformation step on
* each forwarding action. To achieve this, each handler will be assigned a
* 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
* received by a handler, the mixer of the expected sender is XOR'd into all
* words of the entire frame, which is then validated against the original
* values. Then, before forwarding, the entire frame is XOR'd with the mixer of
* the current handler. Apart from validating that the frame is taking the
* expected path, this also provides some quasi-realistic overheads to each
* forwarding action - dereferencing *all* the frame data, computation, and
* conditional branching. There is a "special" handler designated to act as the
* instigator of the test by creating and enqueuing the "hot potato" frame, and
* to determine when the test has completed by counting HP_LOOPS iterations.
*
* Init phases:
*
* 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
* into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
* handlers and link-list them (but do no other handler setup).
*
* 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
* hp_cpu's 'iterator' to point to its first handler. With each loop,
* allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
* and advance the iterator for the next loop. This includes a final fixup,
* which connects the last handler to the first (and which is why phase 2
* and 3 are separate).
*
* 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
* hp_cpu's 'iterator' to point to its first handler. With each loop,
* initialise FQ objects and advance the iterator for the next loop.
* Moreover, do this initialisation on the cpu it applies to so that Rx FQ
* initialisation targets the correct cpu.
*/
/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
* the fn from irq context, which is too restrictive). */
struct bstrap {
void (*fn)(void);
atomic_t started;
};
static int bstrap_fn(void *__bstrap)
{
struct bstrap *bstrap = __bstrap;
atomic_inc(&bstrap->started);
bstrap->fn();
while (!kthread_should_stop())
msleep(1);
return 0;
}
static int on_all_cpus(void (*fn)(void))
{
int cpu;
for_each_cpu(cpu, cpu_online_mask) {
struct bstrap bstrap = {
.fn = fn,
.started = ATOMIC_INIT(0)
};
struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
"hotpotato%d", cpu);
int ret;
if (IS_ERR(k))
return -ENOMEM;
kthread_bind(k, cpu);
wake_up_process(k);
/* If we call kthread_stop() before the "wake up" has had an
* effect, then the thread may exit with -EINTR without ever
* running the function. So poll until it's started before
* requesting it to stop. */
while (!atomic_read(&bstrap.started))
msleep(10);
ret = kthread_stop(k);
if (ret)
return ret;
}
return 0;
}
struct hp_handler {
/* The following data is stashed when 'rx' is dequeued; */
/* -------------- */
/* The Rx FQ, dequeues of which will stash the entire hp_handler */
struct qman_fq rx;
/* The Tx FQ we should forward to */
struct qman_fq tx;
/* The value we XOR post-dequeue, prior to validating */
u32 rx_mixer;
/* The value we XOR pre-enqueue, after validating */
u32 tx_mixer;
/* what the hotpotato address should be on dequeue */
dma_addr_t addr;
u32 *frame_ptr;
/* The following data isn't (necessarily) stashed on dequeue; */
/* -------------- */
u32 fqid_rx, fqid_tx;
/* list node for linking us into 'hp_cpu' */
struct list_head node;
/* Just to check ... */
unsigned int processor_id;
} ____cacheline_aligned;
struct hp_cpu {
/* identify the cpu we run on; */
unsigned int processor_id;
/* root node for the per-cpu list of handlers */
struct list_head handlers;
/* list node for linking us into 'hp_cpu_list' */
struct list_head node;
/* when repeatedly scanning 'hp_list', each time linking the n'th
* handlers together, this is used as per-cpu iterator state */
struct hp_handler *iterator;
};
/* Each cpu has one of these */
static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
static unsigned int hp_cpu_list_length;
/* the "special" handler, that starts and terminates the test. */
static struct hp_handler *special_handler;
static int loop_counter;
/* handlers are allocated out of this, so they're properly aligned. */
static struct kmem_cache *hp_handler_slab;
/* this is the frame data */
static void *__frame_ptr;
static u32 *frame_ptr;
static dma_addr_t frame_dma;
/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);
#define HP_PER_CPU 2
#define HP_LOOPS 8
/* 80 32-bit words (320 bytes) of frame data; larger than a small ethernet
 * frame and spilling across multiple cachelines */
#define HP_NUM_WORDS 80
/* First word of the LFSR-based frame data */
#define HP_FIRST_WORD 0xabbaf00d
static inline u32 do_lfsr(u32 prev)
{
return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
}
static void allocate_frame_data(void)
{
u32 lfsr = HP_FIRST_WORD;
int loop;
struct platform_device *pdev = platform_device_alloc("foobar", -1);
if (!pdev)
panic("platform_device_alloc() failed");
if (platform_device_add(pdev))
panic("platform_device_add() failed");
__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
if (!__frame_ptr)
panic("kmalloc() failed");
frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
~(unsigned long)63);
for (loop = 0; loop < HP_NUM_WORDS; loop++) {
frame_ptr[loop] = lfsr;
lfsr = do_lfsr(lfsr);
}
frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
DMA_BIDIRECTIONAL);
platform_device_del(pdev);
platform_device_put(pdev);
}
static void deallocate_frame_data(void)
{
kfree(__frame_ptr);
}
static inline void process_frame_data(struct hp_handler *handler,
const struct qm_fd *fd)
{
u32 *p = handler->frame_ptr;
u32 lfsr = HP_FIRST_WORD;
int loop;
if (qm_fd_addr_get64(fd) != (handler->addr & 0xffffffffff)) {
pr_err("Got 0x%llx expected 0x%llx\n",
qm_fd_addr_get64(fd), handler->addr);
panic("bad frame address");
}
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
*p ^= handler->rx_mixer;
if (*p != lfsr)
panic("corrupt frame data");
*p ^= handler->tx_mixer;
lfsr = do_lfsr(lfsr);
}
}
static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dqrr)
{
struct hp_handler *handler = (struct hp_handler *)fq;
process_frame_data(handler, &dqrr->fd);
if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
panic("qman_enqueue() failed");
return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dqrr)
{
struct hp_handler *handler = (struct hp_handler *)fq;
process_frame_data(handler, &dqrr->fd);
if (++loop_counter < HP_LOOPS) {
if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
panic("qman_enqueue() failed");
} else {
pr_info("Received final (%dth) frame\n", loop_counter);
wake_up(&queue);
}
return qman_cb_dqrr_consume;
}
static void create_per_cpu_handlers(void)
{
struct hp_handler *handler;
int loop;
struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
hp_cpu->processor_id = smp_processor_id();
spin_lock(&hp_lock);
list_add_tail(&hp_cpu->node, &hp_cpu_list);
hp_cpu_list_length++;
spin_unlock(&hp_lock);
INIT_LIST_HEAD(&hp_cpu->handlers);
for (loop = 0; loop < HP_PER_CPU; loop++) {
handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
if (!handler)
panic("kmem_cache_alloc() failed");
handler->processor_id = hp_cpu->processor_id;
handler->addr = frame_dma;
handler->frame_ptr = frame_ptr;
list_add_tail(&handler->node, &hp_cpu->handlers);
}
put_cpu_var(hp_cpus);
}
static void destroy_per_cpu_handlers(void)
{
struct list_head *loop, *tmp;
struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
spin_lock(&hp_lock);
list_del(&hp_cpu->node);
spin_unlock(&hp_lock);
list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
u32 flags;
struct hp_handler *handler = list_entry(loop, struct hp_handler,
node);
if (qman_retire_fq(&handler->rx, &flags))
panic("qman_retire_fq(rx) failed");
BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
if (qman_oos_fq(&handler->rx))
panic("qman_oos_fq(rx) failed");
qman_destroy_fq(&handler->rx, 0);
qman_destroy_fq(&handler->tx, 0);
qman_release_fqid(handler->fqid_rx);
list_del(&handler->node);
kmem_cache_free(hp_handler_slab, handler);
}
put_cpu_var(hp_cpus);
}
static inline u8 num_cachelines(u32 offset)
{
u8 res = (offset + (L1_CACHE_BYTES - 1))
/ (L1_CACHE_BYTES);
if (res > 3)
return 3;
return res;
}
#define STASH_DATA_CL \
num_cachelines(HP_NUM_WORDS * 4)
#define STASH_CTX_CL \
num_cachelines(offsetof(struct hp_handler, fqid_rx))
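/* Worked example (assuming a 64-byte L1 cacheline): HP_NUM_WORDS * 4 is 320
 * bytes, so num_cachelines() yields 5 and is clamped to the maximum of 3,
 * making STASH_DATA_CL equal to 3. STASH_CTX_CL covers the prefix of struct
 * hp_handler that is stashed on dequeue, i.e. everything before fqid_rx. */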
static void init_handler(void *__handler)
{
struct qm_mcc_initfq opts;
struct hp_handler *handler = __handler;
BUG_ON(handler->processor_id != smp_processor_id());
/* Set up rx */
memset(&handler->rx, 0, sizeof(handler->rx));
if (handler == special_handler)
handler->rx.cb.dqrr = special_dqrr;
else
handler->rx.cb.dqrr = normal_dqrr;
if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
panic("qman_create_fq(rx) failed");
memset(&opts, 0, sizeof(opts));
opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
QMAN_INITFQ_FLAG_LOCAL, &opts))
panic("qman_init_fq(rx) failed");
/* Set up tx */
memset(&handler->tx, 0, sizeof(handler->tx));
if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
&handler->tx))
panic("qman_create_fq(tx) failed");
}
static void init_phase2(void)
{
int loop;
u32 fqid = 0;
u32 lfsr = 0xdeadbeef;
struct hp_cpu *hp_cpu;
struct hp_handler *handler;
for (loop = 0; loop < HP_PER_CPU; loop++) {
list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
int ret;
if (!loop)
hp_cpu->iterator = list_first_entry(
&hp_cpu->handlers,
struct hp_handler, node);
else
hp_cpu->iterator = list_entry(
hp_cpu->iterator->node.next,
struct hp_handler, node);
/* Rx FQID is the previous handler's Tx FQID */
hp_cpu->iterator->fqid_rx = fqid;
/* Allocate new FQID for Tx */
ret = qman_alloc_fqid(&fqid);
if (ret)
panic("qman_alloc_fqid() failed");
hp_cpu->iterator->fqid_tx = fqid;
/* Rx mixer is the previous handler's Tx mixer */
hp_cpu->iterator->rx_mixer = lfsr;
/* Get new mixer for Tx */
lfsr = do_lfsr(lfsr);
hp_cpu->iterator->tx_mixer = lfsr;
}
}
/* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
handler->fqid_rx = fqid;
handler->rx_mixer = lfsr;
/* and tag it as our "special" handler */
special_handler = handler;
}
static void init_phase3(void)
{
int loop;
struct hp_cpu *hp_cpu;
for (loop = 0; loop < HP_PER_CPU; loop++) {
list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
if (!loop)
hp_cpu->iterator = list_first_entry(
&hp_cpu->handlers,
struct hp_handler, node);
else
hp_cpu->iterator = list_entry(
hp_cpu->iterator->node.next,
struct hp_handler, node);
preempt_disable();
if (hp_cpu->processor_id == smp_processor_id())
init_handler(hp_cpu->iterator);
else
smp_call_function_single(hp_cpu->processor_id,
init_handler, hp_cpu->iterator, 1);
preempt_enable();
}
}
}
static void send_first_frame(void *ignore)
{
u32 *p = special_handler->frame_ptr;
u32 lfsr = HP_FIRST_WORD;
int loop;
struct qm_fd fd;
BUG_ON(special_handler->processor_id != smp_processor_id());
memset(&fd, 0, sizeof(fd));
qm_fd_addr_set64(&fd, special_handler->addr);
fd.format = qm_fd_contig_big;
fd.length29 = HP_NUM_WORDS * 4;
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
if (*p != lfsr)
panic("corrupt frame data");
*p ^= special_handler->tx_mixer;
lfsr = do_lfsr(lfsr);
}
pr_info("Sending first frame\n");
if (qman_enqueue(&special_handler->tx, &fd, 0))
panic("qman_enqueue() failed");
}
void qman_test_hotpotato(void)
{
if (cpumask_weight(cpu_online_mask) < 2) {
pr_info("qman_test_hotpotato, skip - only 1 CPU\n");
return;
}
pr_info("qman_test_hotpotato starting\n");
hp_cpu_list_length = 0;
loop_counter = 0;
hp_handler_slab = kmem_cache_create("hp_handler_slab",
sizeof(struct hp_handler), L1_CACHE_BYTES,
SLAB_HWCACHE_ALIGN, NULL);
if (!hp_handler_slab)
panic("kmem_cache_create() failed");
allocate_frame_data();
/* Init phase 1 */
pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
if (on_all_cpus(create_per_cpu_handlers))
panic("on_each_cpu() failed");
pr_info("Number of cpus: %d, total of %d handlers\n",
hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
init_phase2();
init_phase3();
preempt_disable();
if (special_handler->processor_id == smp_processor_id())
send_first_frame(NULL);
else
smp_call_function_single(special_handler->processor_id,
send_first_frame, NULL, 1);
preempt_enable();
wait_event(queue, loop_counter == HP_LOOPS);
deallocate_frame_data();
if (on_all_cpus(destroy_per_cpu_handlers))
panic("on_each_cpu() failed");
kmem_cache_destroy(hp_handler_slab);
pr_info("qman_test_hotpotato finished\n");
}

View File

@ -0,0 +1,129 @@
/* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "qman_private.h"
/* ----------------- */
/* --- FQID Pool --- */
struct qman_fqid_pool {
/* Base and size of the FQID range */
u32 fqid_base;
u32 total;
/* Number of FQIDs currently "allocated" */
u32 used;
/* Allocation optimisation. When 'used<total', it is the index of an
* available FQID. Otherwise there are no available FQIDs, and this
* will be set when the next deallocation occurs. */
u32 next;
/* A bit-field representation of the FQID range. */
unsigned long *bits;
};
#define QLONG_BYTES sizeof(unsigned long)
#define QLONG_BITS (QLONG_BYTES * 8)
/* Number of 'longs' required for the given number of bits */
#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS)
/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES)
/* And in bits */
#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS)
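/* For example, on a 64-bit build (QLONG_BITS == 64) a pool of 100 FQIDs needs
 * QNUM_LONGS(100) == 2 longs, i.e. QNUM_BYTES(100) == 16 bytes and
 * QNUM_BITS(100) == 128 bits of backing storage. */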
struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
{
struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
unsigned int i;
BUG_ON(!num);
if (!pool)
return NULL;
pool->fqid_base = fqid_start;
pool->total = num;
pool->used = 0;
pool->next = 0;
pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
if (!pool->bits) {
kfree(pool);
return NULL;
}
/* If num is not an even multiple of QLONG_BITS (or even 8, for
* byte-oriented searching) then we fill the trailing bits with 1, to
* make them look allocated (permanently). */
for (i = num + 1; i < QNUM_BITS(num); i++)
set_bit(i, pool->bits);
return pool;
}
EXPORT_SYMBOL(qman_fqid_pool_create);
int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
{
int ret = pool->used;
kfree(pool->bits);
kfree(pool);
return ret;
}
EXPORT_SYMBOL(qman_fqid_pool_destroy);
int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
{
int ret;
if (pool->used == pool->total)
return -ENOMEM;
*fqid = pool->fqid_base + pool->next;
ret = test_and_set_bit(pool->next, pool->bits);
BUG_ON(ret);
if (++pool->used == pool->total)
return 0;
pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
if (pool->next >= pool->total)
pool->next = find_first_zero_bit(pool->bits, pool->total);
BUG_ON(pool->next >= pool->total);
return 0;
}
EXPORT_SYMBOL(qman_fqid_pool_alloc);
void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
{
int ret;
fqid -= pool->fqid_base;
ret = test_and_clear_bit(fqid, pool->bits);
BUG_ON(!ret);
if (pool->used-- == pool->total)
pool->next = fqid;
}
EXPORT_SYMBOL(qman_fqid_pool_free);
u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
{
return pool->used;
}
EXPORT_SYMBOL(qman_fqid_pool_used);
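/* Usage sketch (illustrative only; the base/count values are placeholders):
 *
 *   struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 64);
 *   u32 fqid;
 *
 *   if (!pool)
 *       return -ENOMEM;
 *   if (!qman_fqid_pool_alloc(pool, &fqid)) {
 *       ... use 'fqid' ...
 *       qman_fqid_pool_free(pool, fqid);
 *   }
 *   qman_fqid_pool_destroy(pool);
 */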

532
include/linux/fsl_bman.h Normal file
View File

@ -0,0 +1,532 @@
/* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FSL_BMAN_H
#define FSL_BMAN_H
#ifdef __cplusplus
extern "C" {
#endif
/* Last updated for v00.79 of the BG */
/* Portal processing (interrupt) sources */
#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */
/* This wrapper represents a bit-array for the depletion state of the 64 Bman
* buffer pools. */
struct bman_depletion {
u32 __state[2];
};
#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
#define __bmdep_word(x) ((x) >> 5)
#define __bmdep_shift(x) ((x) & 0x1f)
#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
static inline void bman_depletion_init(struct bman_depletion *c)
{
c->__state[0] = c->__state[1] = 0;
}
static inline void bman_depletion_fill(struct bman_depletion *c)
{
c->__state[0] = c->__state[1] = ~0;
}
static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
{
return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
}
static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
{
c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
}
static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
{
c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
}
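/* For example (illustrative only), building a mask that covers buffer pools 3
 * and 40 and testing membership:
 *
 *   struct bman_depletion mask;
 *
 *   bman_depletion_init(&mask);
 *   bman_depletion_set(&mask, 3);
 *   bman_depletion_set(&mask, 40);
 *   if (bman_depletion_get(&mask, 3))
 *       ... pool 3 is in the mask ...
 */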
/* ------------------------------------------------------- */
/* --- Bman data structures (and associated constants) --- */
/* Represents s/w corenet portal mapped data structures */
struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
struct bm_mc_command; /* MC (Management Command) command */
struct bm_mc_result; /* MC result */
/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
* pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
* BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
struct bm_buffer {
union {
struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
u8 __reserved1;
u8 bpid;
u16 hi; /* High 16-bits of 48-bit address */
u32 lo; /* Low 32-bits of 48-bit address */
#else
u32 lo;
u16 hi;
u8 bpid;
u8 __reserved;
#endif
};
struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
u64 __notaddress:16;
u64 addr:48;
#else
u64 addr:48;
u64 __notaddress:16;
#endif
};
u64 opaque;
};
} __aligned(8);
static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
{
return buf->addr;
}
static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
{
return (dma_addr_t)buf->addr;
}
/* Macro, so we compile better if 'v' isn't always 64-bit */
#define bm_buffer_set64(buf, v) \
do { \
struct bm_buffer *__buf931 = (buf); \
__buf931->hi = upper_32_bits(v); \
__buf931->lo = lower_32_bits(v); \
} while (0)
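/* For example (illustrative sketch; 'dma_addr' and the bpid are placeholders),
 * preparing a single buffer entry for a multi-BPID release:
 *
 *   struct bm_buffer buf = { .opaque = 0 };
 *
 *   bm_buffer_set64(&buf, dma_addr);
 *   buf.bpid = 7;
 */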
/* See 1.5.3.5.4: "Release Command" */
struct bm_rcr_entry {
union {
struct {
u8 __dont_write_directly__verb;
u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
u8 __reserved1[62];
};
struct bm_buffer bufs[8];
};
} __packed;
#define BM_RCR_VERB_VBIT 0x80
#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
/* See 1.5.3.1: "Acquire Command" */
/* See 1.5.3.2: "Query Command" */
struct bm_mcc_acquire {
u8 bpid;
u8 __reserved1[62];
} __packed;
struct bm_mcc_query {
u8 __reserved2[63];
} __packed;
struct bm_mc_command {
u8 __dont_write_directly__verb;
union {
struct bm_mcc_acquire acquire;
struct bm_mcc_query query;
};
} __packed;
#define BM_MCC_VERB_VBIT 0x80
#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
#define BM_MCC_VERB_CMD_ACQUIRE 0x10
#define BM_MCC_VERB_CMD_QUERY 0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
/* See 1.5.3.3: "Acquire Response" */
/* See 1.5.3.4: "Query Response" */
struct bm_pool_state {
u8 __reserved1[32];
/* "availability state" and "depletion state" */
struct {
u8 __reserved1[8];
/* Access using bman_depletion_***() */
struct bman_depletion state;
} as, ds;
};
struct bm_mc_result {
union {
struct {
u8 verb;
u8 __reserved1[63];
};
union {
struct {
u8 __reserved1;
u8 bpid;
u8 __reserved2[62];
};
struct bm_buffer bufs[8];
} acquire;
struct bm_pool_state query;
};
} __packed;
#define BM_MCR_VERB_VBIT 0x80
#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
#define BM_MCR_VERB_CMD_ERR_ECC 0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
/* Determine the "availability state" of pool 'p' from a query result 'r' */
#define BM_MCR_QUERY_AVAILABILITY(r, p) \
bman_depletion_get(&r->query.as.state, p)
/* Determine the "depletion state" of pool 'p' from a query result 'r' */
#define BM_MCR_QUERY_DEPLETION(r, p) \
bman_depletion_get(&r->query.ds.state, p)
/*******************************************************************/
/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
/*******************************************************************/
/* Portal and Buffer Pools */
/* ----------------------- */
/* Represents a managed portal */
struct bman_portal;
/* This object type represents Bman buffer pools. */
struct bman_pool;
struct bman_portal_config {
/* This is used for any "core-affine" portals, ie. default portals
* associated to the corresponding cpu. -1 implies that there is no core
* affinity configured. */
int cpu;
/* portal interrupt line */
int irq;
/* the unique index of this portal */
u32 index;
/* Is this portal shared? (If so, it has coarser locking and demuxes
* processing on behalf of other CPUs.) */
int is_shared;
/* These are the buffer pool IDs that may be used via this portal. */
struct bman_depletion mask;
};
/* This callback type is used when handling pool depletion entry/exit. The
* 'cb_ctx' value is the opaque value associated with the pool object in
* bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
* depletion-exit. */
typedef void (*bman_cb_depletion)(struct bman_portal *bm,
struct bman_pool *pool, void *cb_ctx, int depleted);
/* This struct specifies parameters for a bman_pool object. */
struct bman_pool_params {
/* index of the buffer pool to encapsulate (0-63), ignored if
* BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
u32 bpid;
/* bit-mask of BMAN_POOL_FLAG_*** options */
u32 flags;
/* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
bman_cb_depletion cb;
/* opaque user value passed as a parameter to 'cb' */
void *cb_ctx;
/* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
* this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
* when run in the control plane (which controls Bman CCSR). This array
* matches the definition of bm_pool_set(). */
u32 thresholds[4];
};
/* Flags to bman_new_pool() */
#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */
#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */
/* Flags to bman_release() */
#ifdef CONFIG_FSL_DPA_CAN_WAIT
#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */
#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */
#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
#endif
#endif
#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
/* Flags to bman_acquire() */
#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */
/* Portal Management */
/* ----------------- */
/**
* bman_get_portal_config - get portal configuration settings
*
* This returns a read-only view of the current cpu's affine portal settings.
*/
const struct bman_portal_config *bman_get_portal_config(void);
/**
* bman_irqsource_get - return the portal work that is interrupt-driven
*
* Returns a bitmask of BM_PIRQ_**I processing sources that are currently
* enabled for interrupt handling on the current cpu's affine portal. These
* sources will trigger the portal interrupt and the interrupt handler (or a
* tasklet/bottom-half it defers to) will perform the corresponding processing
* work. The bman_poll_***() functions will only process sources that are not in
* this bitmask. If the current CPU is sharing a portal hosted on another CPU,
* this always returns zero.
*/
u32 bman_irqsource_get(void);
/**
* bman_irqsource_add - add processing sources to be interrupt-driven
* @bits: bitmask of BM_PIRQ_**I processing sources
*
* Adds processing sources that should be interrupt-driven (rather than
* processed via bman_poll_***() functions). Returns zero for success, or
* -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
int bman_irqsource_add(u32 bits);
/**
* bman_irqsource_remove - remove processing sources from being interrupt-driven
* @bits: bitmask of BM_PIRQ_**I processing sources
*
* Removes processing sources from being interrupt-driven, so that they will
* instead be processed via bman_poll_***() functions. Returns zero for success,
* or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
int bman_irqsource_remove(u32 bits);
/**
* bman_affine_cpus - return a mask of cpus that have affine portals
*/
const cpumask_t *bman_affine_cpus(void);
/**
* bman_poll_slow - process anything that isn't interrupt-driven.
*
* This function does any portal processing that isn't interrupt-driven. If the
* current CPU is sharing a portal hosted on another CPU, this function will
* return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
* indicating what interrupt sources were actually processed by the call.
*
* NB, unlike the legacy wrapper bman_poll(), this function will
* deterministically check for the presence of portal processing work and do it,
* which implies some latency even if there's nothing to do. The bman_poll()
* wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
* checking for (and doing) portal processing infrequently. Ie. such that
* qman_poll() and bman_poll() can be called from core-processing loops. Use
* bman_poll_slow() when you yourself are deciding when to incur the overhead of
* processing.
*/
u32 bman_poll_slow(void);
/**
* bman_poll - process anything that isn't interrupt-driven.
*
* Dispatcher logic on a cpu can use this to trigger any maintenance of the
* affine portal. This function does whatever processing is not triggered by
* interrupts. This is a legacy wrapper that can be used in core-processing
* loops but mitigates the performance overhead of portal processing by
* adaptively bypassing true portal processing most of the time. (Processing is
* done once every 10 calls if the previous processing revealed that work needed
* to be done, or once every 1000 calls if the previous processing revealed no
* work needed doing.) If you wish to control this yourself, call
* bman_poll_slow() instead, which always checks for portal processing work.
*/
void bman_poll(void);
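/* A minimal dispatcher sketch (illustrative only): let depletion-state
 * notifications be interrupt-driven and do everything else from the polling
 * loop, where 'stop' is whatever termination condition the caller uses:
 *
 *   if (bman_irqsource_add(BM_PIRQ_BSCN))
 *       return -EINVAL;
 *   while (!stop)
 *       bman_poll();
 */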
/**
* bman_rcr_is_empty - Determine if portal's RCR is empty
*
* For use in situations where a cpu-affine caller needs to determine when all
* releases for the local portal have been processed by Bman but can't use the
* BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
* The function forces tracking of RCR consumption (which normally doesn't
* happen until release processing needs to find space to put new release
* commands), and returns zero if the ring still has unprocessed entries,
* non-zero if it is empty.
*/
int bman_rcr_is_empty(void);
/**
* bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
* @result: is set by the API to the base BPID of the allocated range
* @count: the number of BPIDs required
* @align: required alignment of the allocated range
* @partial: non-zero if the API can return fewer than @count BPIDs
*
* Returns the number of buffer pools allocated, or a negative error code. If
* @partial is non zero, the allocation request may return a smaller range of
* BPs than requested (though alignment will be as requested). If @partial is
* zero, the return value will either be 'count' or negative.
*/
int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
static inline int bman_alloc_bpid(u32 *result)
{
int ret = bman_alloc_bpid_range(result, 1, 0, 0);
return (ret > 0) ? 0 : ret;
}
/**
* bman_release_bpid_range - Release the specified range of buffer pool IDs
* @bpid: the base BPID of the range to deallocate
* @count: the number of BPIDs in the range
*
* This function can also be used to seed the allocator with ranges of BPIDs
* that it can subsequently allocate from.
*/
void bman_release_bpid_range(u32 bpid, unsigned int count);
static inline void bman_release_bpid(u32 bpid)
{
bman_release_bpid_range(bpid, 1);
}
int bman_reserve_bpid_range(u32 bpid, unsigned int count);
static inline int bman_reserve_bpid(u32 bpid)
{
return bman_reserve_bpid_range(bpid, 1);
}
void bman_seed_bpid_range(u32 bpid, unsigned int count);
int bman_shutdown_pool(u32 bpid);
/* Pool management */
/* --------------- */
/**
* bman_new_pool - Allocates a Buffer Pool object
* @params: parameters specifying the buffer pool ID and behaviour
*
* Creates a pool object for the given @params. A portal and the depletion
* callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
* is set. NB, the fields from @params are copied into the new pool object, so
* the structure provided by the caller can be released or reused after the
* function returns.
*/
struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
/**
* bman_free_pool - Deallocates a Buffer Pool object
* @pool: the pool object to release
*
*/
void bman_free_pool(struct bman_pool *pool);
/**
* bman_get_params - Returns a pool object's parameters.
* @pool: the pool object
*
* The returned pointer refers to state within the pool object so must not be
* modified and can no longer be read once the pool object is destroyed.
*/
const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
/**
* bman_release - Release buffer(s) to the buffer pool
* @pool: the buffer pool object to release to
* @bufs: an array of buffers to release
* @num: the number of buffers in @bufs (1-8)
* @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
*
* Adds the given buffers to RCR entries. If the portal in use was created with the
* "COMPACT" flag, then it will be using a compaction algorithm to improve
* utilisation of RCR. As such, these buffers may join an existing ring entry
* and/or it may not be issued right away so as to allow future releases to join
* the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
* behaviour by committing the RCR entry (or entries) right away. If the RCR
* ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
* is selected, in which case it will sleep waiting for space to become
* available in RCR. If the function receives a signal before such time (and
* BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
* it returns zero.
*/
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
u32 flags);
/**
* bman_acquire - Acquire buffer(s) from a buffer pool
* @pool: the buffer pool object to acquire from
* @bufs: array for storing the acquired buffers
* @num: the number of buffers desired (@bufs is at least this big)
*
* Issues an "Acquire" command via the portal's management command interface.
* The return value will be the number of buffers obtained from the pool, or a
* negative error code if a h/w error or pool starvation was encountered. In
* the latter case, the content of @bufs is undefined.
*/
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
u32 flags);
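/* Putting the above together (illustrative sketch; error handling is trimmed
 * and 'dma_addr' is a placeholder buffer address):
 *
 *   struct bman_pool_params params = {
 *       .flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
 *   };
 *   struct bman_pool *pool = bman_new_pool(&params);
 *   struct bm_buffer buf;
 *
 *   bm_buffer_set64(&buf, dma_addr);
 *   if (!bman_release(pool, &buf, 1, 0))
 *       bman_acquire(pool, &buf, 1, 0);
 *   bman_free_pool(pool);
 */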
/**
* bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
* @pool: the buffer pool object the stockpile belongs to
* @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
*
* Adds stockpile buffers to RCR entries until the stockpile is empty.
* The return value will be a negative error code if a h/w error occurred.
* If BMAN_RELEASE_FLAG_NOW flag is passed and RCR ring is full,
* -EAGAIN will be returned.
*/
int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
/**
* bman_query_pools - Query all buffer pool states
* @state: storage for the queried availability and depletion states
*/
int bman_query_pools(struct bm_pool_state *state);
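/* For example (illustrative only), checking whether buffer pool 'bpid' is
 * currently in its depleted state:
 *
 *   struct bm_pool_state state;
 *
 *   if (!bman_query_pools(&state) &&
 *       bman_depletion_get(&state.ds.state, bpid))
 *       ... the pool is depleted ...
 */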
#ifdef CONFIG_FSL_BMAN_CONFIG
/**
* bman_query_free_buffers - Query how many free buffers are in buffer pool
* @pool: the buffer pool object to query
*
* Returns the number of free buffers in the pool
*/
u32 bman_query_free_buffers(struct bman_pool *pool);
/**
* bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
* @pool: the buffer pool object to which the thresholds will be set
* @thresholds: the new thresholds
*/
int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
#endif
/**
* The bman_p_***() variant below may be called even when the cpu to which the
* portal is affine is not yet online.
* @p specifies which portal the API will use.
*/
int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
#ifdef __cplusplus
}
#endif
#endif /* FSL_BMAN_H */

3888
include/linux/fsl_qman.h Normal file

File diff suppressed because it is too large

372
include/linux/fsl_usdpaa.h Normal file
View File

@ -0,0 +1,372 @@
/* Copyright 2011-2012 Freescale Semiconductor, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef FSL_USDPAA_H
#define FSL_USDPAA_H
#ifdef __cplusplus
extern "C" {
#endif
#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/fsl_qman.h> /* For "enum qm_channel" */
#include <linux/compat.h>
#ifdef CONFIG_FSL_USDPAA
/******************************/
/* Allocation of resource IDs */
/******************************/
/* This enum is used to distinguish between the type of underlying object being
* manipulated. */
enum usdpaa_id_type {
usdpaa_id_fqid,
usdpaa_id_bpid,
usdpaa_id_qpool,
usdpaa_id_cgrid,
usdpaa_id_ceetm0_lfqid,
usdpaa_id_ceetm0_channelid,
usdpaa_id_ceetm1_lfqid,
usdpaa_id_ceetm1_channelid,
usdpaa_id_max /* <-- not a valid type, represents the number of types */
};
#define USDPAA_IOCTL_MAGIC 'u'
struct usdpaa_ioctl_id_alloc {
uint32_t base; /* Return value, the start of the allocated range */
enum usdpaa_id_type id_type; /* what kind of resource(s) to allocate */
uint32_t num; /* how many IDs to allocate (and return value) */
uint32_t align; /* must be a power of 2, 0 is treated like 1 */
int partial; /* whether to allow less than 'num' */
};
struct usdpaa_ioctl_id_release {
/* Input; */
enum usdpaa_id_type id_type;
uint32_t base;
uint32_t num;
};
struct usdpaa_ioctl_id_reserve {
enum usdpaa_id_type id_type;
uint32_t base;
uint32_t num;
};
/* ioctl() commands */
#define USDPAA_IOCTL_ID_ALLOC \
_IOWR(USDPAA_IOCTL_MAGIC, 0x01, struct usdpaa_ioctl_id_alloc)
#define USDPAA_IOCTL_ID_RELEASE \
_IOW(USDPAA_IOCTL_MAGIC, 0x02, struct usdpaa_ioctl_id_release)
#define USDPAA_IOCTL_ID_RESERVE \
_IOW(USDPAA_IOCTL_MAGIC, 0x0A, struct usdpaa_ioctl_id_reserve)
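/*
 * Illustrative user-space sketch: allocate four contiguous FQIDs and release
 * them again. The device node path "/dev/fsl-usdpaa" and the minimal error
 * handling are assumptions made for the example.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fsl_usdpaa.h>
 *
 *	struct usdpaa_ioctl_id_alloc alloc = {
 *		.id_type = usdpaa_id_fqid,
 *		.num = 4,
 *		.align = 4,
 *		.partial = 0,
 *	};
 *	struct usdpaa_ioctl_id_release rel;
 *	int fd = open("/dev/fsl-usdpaa", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, USDPAA_IOCTL_ID_ALLOC, &alloc) == 0) {
 *		rel.id_type = usdpaa_id_fqid;
 *		rel.base = alloc.base;
 *		rel.num = alloc.num;
 *		ioctl(fd, USDPAA_IOCTL_ID_RELEASE, &rel);
 *	}
 */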
/**********************/
/* Mapping DMA memory */
/**********************/
/* Maximum length for a map name, including NULL-terminator */
#define USDPAA_DMA_NAME_MAX 16
/* Flags for requesting DMA maps. Maps are private+unnamed or sharable+named.
* For a sharable and named map, specify _SHARED (whether creating one or
* binding to an existing one). If _SHARED is specified and _CREATE is not, then
* the mapping must already exist. If _SHARED and _CREATE are specified and the
* mapping doesn't already exist, it will be created. If _SHARED and _CREATE are
* specified and the mapping already exists, the mapping will fail unless _LAZY
* is specified. When mapping to a pre-existing sharable map, the length must be
* an exact match. Lengths must be a power-of-4 multiple of page size.
*
* Note that this does not actually map the memory to user-space; that is done
* by a subsequent mmap() using the page offset returned from this ioctl(). The
* ioctl() is what gives the process permission to do this, and a page-offset
* with which to do so.
*/
#define USDPAA_DMA_FLAG_SHARE 0x01
#define USDPAA_DMA_FLAG_CREATE 0x02
#define USDPAA_DMA_FLAG_LAZY 0x04
#define USDPAA_DMA_FLAG_RDONLY 0x08
struct usdpaa_ioctl_dma_map {
/* Output parameters - virtual and physical addresses */
void *ptr;
uint64_t phys_addr;
/* Input parameter, the length of the region to be created (or if
* mapping an existing region, this must match it). Must be a power-of-4
* multiple of page size. */
uint64_t len;
/* Input parameter, the USDPAA_DMA_FLAG_* settings. */
uint32_t flags;
/* If _FLAG_SHARE is specified, the name of the region to be created (or
* of the existing mapping to use). */
char name[USDPAA_DMA_NAME_MAX];
/* If this ioctl() creates the mapping, this is an input parameter
* stating whether the region supports locking. If mapping an existing
* region, this is a return value indicating the same thing. */
int has_locking;
/* In the case of a successful map with _CREATE and _LAZY, this return
* value indicates whether we created the mapped region or whether it
* already existed. */
int did_create;
};
#ifdef CONFIG_COMPAT
struct usdpaa_ioctl_dma_map_compat {
/* Output parameters - virtual and physical addresses */
compat_uptr_t ptr;
uint64_t phys_addr;
/* Input parameter, the length of the region to be created (or if
* mapping an existing region, this must match it). Must be a power-of-4
* multiple of page size. */
uint64_t len;
/* Input parameter, the USDPAA_DMA_FLAG_* settings. */
uint32_t flags;
/* If _FLAG_SHARE is specified, the name of the region to be created (or
* of the existing mapping to use). */
char name[USDPAA_DMA_NAME_MAX];
/* If this ioctl() creates the mapping, this is an input parameter
* stating whether the region supports locking. If mapping an existing
* region, this is a return value indicating the same thing. */
int has_locking;
/* In the case of a successful map with _CREATE and _LAZY, this return
* value indicates whether we created the mapped region or whether it
* already existed. */
int did_create;
};
#define USDPAA_IOCTL_DMA_MAP_COMPAT \
_IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map_compat)
#endif
#define USDPAA_IOCTL_DMA_MAP \
_IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map)
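/*
 * Illustrative user-space sketch: create (or lazily attach to) a named,
 * shared 4MB DMA region and mmap() it afterwards, as described above. 'fd'
 * is an open descriptor on the USDPAA device node; reusing the returned
 * 'ptr' value as the mmap() offset cookie is an assumption made for this
 * example, not a documented guarantee.
 *
 *	struct usdpaa_ioctl_dma_map map = {
 *		.len = 4 * 1024 * 1024,
 *		.flags = USDPAA_DMA_FLAG_SHARE | USDPAA_DMA_FLAG_CREATE |
 *			 USDPAA_DMA_FLAG_LAZY,
 *		.name = "pkt_bufs",
 *	};
 *	void *va = MAP_FAILED;
 *
 *	if (ioctl(fd, USDPAA_IOCTL_DMA_MAP, &map) == 0)
 *		va = mmap(NULL, map.len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, (off_t)(uintptr_t)map.ptr);
 */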
/* munmap() does not remove the DMA map, just the user-space mapping to it.
* This ioctl will do both (though you can munmap() before calling the ioctl
* too). */
#define USDPAA_IOCTL_DMA_UNMAP \
_IOW(USDPAA_IOCTL_MAGIC, 0x04, unsigned char)
/* We implement a cross-process locking scheme per DMA map. Call this ioctl()
* with a mmap()'d address, and the process will sleep (interruptibly) if the
* lock is already held by another process. Process destruction will
* automatically clean up any held locks. */
#define USDPAA_IOCTL_DMA_LOCK \
_IOW(USDPAA_IOCTL_MAGIC, 0x05, unsigned char)
#define USDPAA_IOCTL_DMA_UNLOCK \
_IOW(USDPAA_IOCTL_MAGIC, 0x06, unsigned char)
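/*
 * Illustrative sketch of the cross-process lock described above: the ioctl
 * argument is the mmap()'d address of the region ('va' from the previous
 * sketch), and the call sleeps interruptibly while another process holds
 * the lock.
 *
 *	if (ioctl(fd, USDPAA_IOCTL_DMA_LOCK, va) == 0) {
 *		... touch the shared region ...
 *		ioctl(fd, USDPAA_IOCTL_DMA_UNLOCK, va);
 *	}
 */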
/***************************************/
/* Mapping and using QMan/BMan portals */
/***************************************/
enum usdpaa_portal_type {
usdpaa_portal_qman,
usdpaa_portal_bman,
};
#define QBMAN_ANY_PORTAL_IDX 0xffffffff
struct usdpaa_ioctl_portal_map {
/* Input parameter, is a qman or bman portal required. */
enum usdpaa_portal_type type;
/* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
 * for don't care. The portal index will be populated by the
 * driver when the ioctl() successfully completes. */
uint32_t index;
/* Return value if the map succeeds, this gives the mapped
* cache-inhibited (cinh) and cache-enabled (cena) addresses. */
struct usdpaa_portal_map {
void *cinh;
void *cena;
} addr;
/* Qman-specific return values */
uint16_t channel;
uint32_t pools;
};
#ifdef CONFIG_COMPAT
struct compat_usdpaa_ioctl_portal_map {
/* Input parameter, is a qman or bman portal required. */
enum usdpaa_portal_type type;
/* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
 * for don't care. The portal index will be populated by the
 * driver when the ioctl() successfully completes. */
uint32_t index;
/* Return value if the map succeeds, this gives the mapped
* cache-inhibited (cinh) and cache-enabled (cena) addresses. */
struct usdpaa_portal_map_compat {
compat_uptr_t cinh;
compat_uptr_t cena;
} addr;
/* Qman-specific return values */
uint16_t channel;
uint32_t pools;
};
#define USDPAA_IOCTL_PORTAL_MAP_COMPAT \
_IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct compat_usdpaa_ioctl_portal_map)
#define USDPAA_IOCTL_PORTAL_UNMAP_COMPAT \
_IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map_compat)
#endif
#define USDPAA_IOCTL_PORTAL_MAP \
_IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct usdpaa_ioctl_portal_map)
#define USDPAA_IOCTL_PORTAL_UNMAP \
_IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map)
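/*
 * Illustrative user-space sketch: map any free QMan portal and report the
 * addresses and channel returned by the driver. 'fd' is an open descriptor
 * on the USDPAA device node (path assumed earlier in these examples).
 *
 *	struct usdpaa_ioctl_portal_map pmap = {
 *		.type = usdpaa_portal_qman,
 *		.index = QBMAN_ANY_PORTAL_IDX,
 *	};
 *
 *	if (ioctl(fd, USDPAA_IOCTL_PORTAL_MAP, &pmap) == 0)
 *		printf("portal %u: channel %u, pools 0x%x, cinh %p cena %p\n",
 *		       pmap.index, pmap.channel, pmap.pools,
 *		       pmap.addr.cinh, pmap.addr.cena);
 */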
struct usdpaa_ioctl_irq_map {
enum usdpaa_portal_type type; /* Type of portal to map */
int fd; /* File descriptor that contains the portal */
void *portal_cinh; /* Cache inhibited area to identify the portal */
};
#define USDPAA_IOCTL_PORTAL_IRQ_MAP \
_IOW(USDPAA_IOCTL_MAGIC, 0x09, struct usdpaa_ioctl_irq_map)
#ifdef CONFIG_COMPAT
struct compat_ioctl_irq_map {
enum usdpaa_portal_type type; /* Type of portal to map */
compat_int_t fd; /* File descriptor that contains the portal */
compat_uptr_t portal_cinh; /* Used to identify the portal */
};
#define USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT \
_IOW(USDPAA_IOCTL_MAGIC, 0x09, struct compat_ioctl_irq_map)
#endif
/* ioctl to query the amount of DMA memory used in the system */
struct usdpaa_ioctl_dma_used {
uint64_t free_bytes;
uint64_t total_bytes;
};
#define USDPAA_IOCTL_DMA_USED \
_IOR(USDPAA_IOCTL_MAGIC, 0x0B, struct usdpaa_ioctl_dma_used)
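/*
 * Illustrative sketch: query how much of the USDPAA-managed DMA memory is
 * still available.
 *
 *	struct usdpaa_ioctl_dma_used used;
 *
 *	if (ioctl(fd, USDPAA_IOCTL_DMA_USED, &used) == 0)
 *		printf("%llu of %llu bytes free\n",
 *		       (unsigned long long)used.free_bytes,
 *		       (unsigned long long)used.total_bytes);
 */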
/* ioctl to allocate a raw portal */
struct usdpaa_ioctl_raw_portal {
/* inputs */
enum usdpaa_portal_type type; /* Type of portal to allocate */
/* set to non-zero to turn on stashing */
uint8_t enable_stash;
/* Stashing attributes for the portal */
uint32_t cpu;
uint32_t cache;
uint32_t window;
/* Specifies the stash request queue this portal should use */
uint8_t sdest;
/* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
 * for don't care. The portal index will be populated by the
 * driver when the ioctl() successfully completes. */
uint32_t index;
/* outputs */
uint64_t cinh;
uint64_t cena;
};
#define USDPAA_IOCTL_ALLOC_RAW_PORTAL \
_IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct usdpaa_ioctl_raw_portal)
#define USDPAA_IOCTL_FREE_RAW_PORTAL \
_IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct usdpaa_ioctl_raw_portal)
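/*
 * Illustrative sketch: allocate a raw BMan portal with stashing disabled,
 * then hand it back. How the returned physical cinh/cena addresses are
 * subsequently mapped and used is outside the scope of this header and is
 * left as an assumption here.
 *
 *	struct usdpaa_ioctl_raw_portal raw = {
 *		.type = usdpaa_portal_bman,
 *		.enable_stash = 0,
 *		.index = QBMAN_ANY_PORTAL_IDX,
 *	};
 *
 *	if (ioctl(fd, USDPAA_IOCTL_ALLOC_RAW_PORTAL, &raw) == 0) {
 *		... use raw.cinh / raw.cena ...
 *		ioctl(fd, USDPAA_IOCTL_FREE_RAW_PORTAL, &raw);
 *	}
 */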
#ifdef CONFIG_COMPAT
struct compat_ioctl_raw_portal {
/* inputs */
enum usdpaa_portal_type type; /* Type of portal to allocate */
/* set to non-zero to turn on stashing */
uint8_t enable_stash;
/* Stashing attributes for the portal */
uint32_t cpu;
uint32_t cache;
uint32_t window;
/* Specifies the stash request queue this portal should use */
uint8_t sdest;
/* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
 * for don't care. The portal index will be populated by the
 * driver when the ioctl() successfully completes. */
uint32_t index;
/* outputs */
uint64_t cinh;
uint64_t cena;
};
#define USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT \
_IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct compat_ioctl_raw_portal)
#define USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT \
_IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct compat_ioctl_raw_portal)
#endif
#ifdef __KERNEL__
/* Early-boot hook */
int __init fsl_usdpaa_init_early(void);
/* Fault-handling in arch/powerpc/mm/mem.c gives USDPAA an opportunity to detect
* faults within its ranges via this hook. */
int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size);
#endif /* __KERNEL__ */
#endif /* CONFIG_FSL_USDPAA */
#ifdef __KERNEL__
/* This interface is needed in a few places and though it's not specific to
* USDPAA as such, creating a new header for it doesn't make any sense. The
* qbman kernel driver implements this interface and uses it as the backend for
* both the FQID and BPID allocators. The fsl_usdpaa driver also uses this
* interface for tracking per-process allocations handed out to user-space. */
struct dpa_alloc {
struct list_head free;
spinlock_t lock;
struct list_head used;
};
#define DECLARE_DPA_ALLOC(name) \
struct dpa_alloc name = { \
.free = { \
.prev = &name.free, \
.next = &name.free \
}, \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.used = { \
.prev = &name.used, \
.next = &name.used \
} \
}
static inline void dpa_alloc_init(struct dpa_alloc *alloc)
{
INIT_LIST_HEAD(&alloc->free);
INIT_LIST_HEAD(&alloc->used);
spin_lock_init(&alloc->lock);
}
int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
int partial);
void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count);
void dpa_alloc_seed(struct dpa_alloc *alloc, u32 fqid, u32 count);
/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
* desired range is not available, or 0 for success. */
int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base_id, u32 count);
/* Pops and returns contiguous ranges from the allocator. Returns -ENOMEM when
* 'alloc' is empty. */
int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count);
/* Returns 1 if the specified id is allocated, 0 otherwise */
int dpa_alloc_check(struct dpa_alloc *list, u32 id);
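/*
 * Illustrative kernel-side sketch of the allocator interface above: seed a
 * range of IDs, carve out an aligned sub-range, and hand it back later. The
 * exact success return convention of dpa_alloc_new() is not stated here, so
 * only the negative-error case is assumed.
 *
 *	DECLARE_DPA_ALLOC(my_ids);
 *
 *	void example(void)
 *	{
 *		u32 id;
 *
 *		dpa_alloc_seed(&my_ids, 0x100, 64);
 *		if (dpa_alloc_new(&my_ids, &id, 8, 8, 0) < 0)
 *			return;
 *		... use ids id .. id + 7 ...
 *		dpa_alloc_free(&my_ids, id, 8);
 *	}
 */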
#endif /* __KERNEL__ */
#ifdef __cplusplus
}
#endif
#endif /* FSL_USDPAA_H */