thunderbolt: Add support for DMA tunnels

In addition to PCIe and Display Port tunnels it is also possible to
create tunnels that forward DMA traffic from the host interface adapter
(NHI) to a NULL port that is connected to another domain through a
Thunderbolt cable. These tunnels can be used to carry software messages
such as networking packets.

To support this we introduce another tunnel type (TB_TUNNEL_DMA) that
supports paths from NHI to NULL port and back.
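
For illustration only (not part of this patch), a software connection
manager could bring such a tunnel up roughly as follows; the ring number
and HopIDs are made-up example values:

	static int example_dma_tunnel_up(struct tb *tb, struct tb_port *nhi,
					 struct tb_port *dst)
	{
		struct tb_tunnel *tunnel;
		int ret;

		/* One TX and one RX path between the NHI and the NULL port */
		tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 0, 8, 0, 8);
		if (!tunnel)
			return -ENOMEM;

		/*
		 * Programs both paths; for DMA tunnels this also sets the
		 * NHI initial credits through the activate hook.
		 */
		ret = tb_tunnel_activate(tunnel);
		if (ret) {
			tb_tunnel_free(tunnel);
			return ret;
		}

		/* Software messages can now be carried over the NHI rings */
		return 0;
	}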

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Mika Westerberg 2018-09-28 16:35:32 +03:00
parent 3b4b3235ca
commit 44242d6c97
6 changed files with 149 additions and 5 deletions

View File

@@ -341,7 +341,8 @@ static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
}
}
static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index)
static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
bool clear_fc)
{
struct tb_regs_hop hop;
ktime_t timeout;
@@ -369,8 +370,20 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index)
if (ret)
return ret;
if (!hop.pending)
if (!hop.pending) {
if (clear_fc) {
/* Clear flow control */
hop.ingress_fc = 0;
hop.egress_fc = 0;
hop.ingress_shared_buffer = 0;
hop.egress_shared_buffer = 0;
return tb_port_write(port, &hop, TB_CFG_HOPS,
2 * hop_index, 2);
}
return 0;
}
usleep_range(10, 20);
} while (ktime_before(ktime_get(), timeout));
@@ -384,7 +397,8 @@ static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
for (i = first_hop; i < path->path_length; i++) {
res = __tb_path_deactivate_hop(path->hops[i].in_port,
path->hops[i].in_hop_index);
path->hops[i].in_hop_index,
path->clear_fc);
if (res && res != -ENODEV)
tb_port_warn(path->hops[i].in_port,
"hop deactivation failed for hop %d, index %d\n",
@@ -459,7 +473,7 @@ int tb_path_activate(struct tb_path *path)
/* If it is left active deactivate it first */
__tb_path_deactivate_hop(path->hops[i].in_port,
path->hops[i].in_hop_index);
path->hops[i].in_hop_index, path->clear_fc);
/* dword 0 */
hop.next_hop = path->hops[i].next_hop_index;

View File

@@ -555,6 +555,28 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
TB_CFG_PORT, 4, 1);
}
/**
* tb_port_set_initial_credits() - Set initial port link credits allocated
* @port: Port to set the initial credits
* @credits: Number of credits to allocate
*
* Set initial credits value to be used for ingress shared buffering.
*/
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
u32 data;
int ret;
ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
if (ret)
return ret;
data &= ~TB_PORT_LCA_MASK;
data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
}
/**
* tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
*
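
As a concrete illustration of the bit layout used by
tb_port_set_initial_credits() above (the starting register value is an
assumption), writing the 13-credit cap used by tb_dma_credits() further
below only touches the LCA field, bits 28:22 of port config dword 5:

	u32 data = 0x00000000;		/* assumed current dword 5 contents */

	data &= ~TB_PORT_LCA_MASK;	/* clear bits 28:22 */
	data |= (13 << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
	/* data == 13 << 22 == 0x03400000, everything else is preserved */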

View File

@@ -207,6 +207,8 @@ enum tb_path_port {
* @weight: Weight of the path inside the priority group
* @drop_packages: Drop packages from queue tail or head
* @activated: Is the path active
* @clear_fc: Clear all flow control from the path config space entries
* when deactivating this path
* @hops: Path hops
* @path_length: How many hops the path uses
*
@@ -227,6 +229,7 @@ struct tb_path {
int weight:4;
bool drop_packages;
bool activated;
bool clear_fc;
struct tb_path_hop *hops;
int path_length;
};
@@ -583,6 +586,7 @@ static inline bool tb_switch_is_fr(const struct tb_switch *sw)
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_in_hopid(struct tb_port *port, int hopid);

View File

@@ -215,6 +215,9 @@ struct tb_regs_port_header {
#define TB_PORT_NFC_CREDITS_MASK GENMASK(19, 0)
#define TB_PORT_MAX_CREDITS_SHIFT 20
#define TB_PORT_MAX_CREDITS_MASK GENMASK(26, 20)
/* DWORD 5 */
#define TB_PORT_LCA_SHIFT 22
#define TB_PORT_LCA_MASK GENMASK(28, 22)
/* Display Port adapter registers */

View File

@@ -27,7 +27,10 @@
#define TB_DP_AUX_PATH_OUT 1
#define TB_DP_AUX_PATH_IN 2
static const char * const tb_tunnel_names[] = { "PCI", "DP" };
#define TB_DMA_PATH_OUT 0
#define TB_DMA_PATH_IN 1
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
do { \
@@ -471,6 +474,94 @@ err_free:
return NULL;
}
static u32 tb_dma_credits(struct tb_port *nhi)
{
u32 max_credits;
max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
TB_PORT_MAX_CREDITS_SHIFT;
return min(max_credits, 13U);
}
static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
struct tb_port *nhi = tunnel->src_port;
u32 credits;
credits = active ? tb_dma_credits(nhi) : 0;
return tb_port_set_initial_credits(nhi, credits);
}
static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
unsigned int efc, u32 credits)
{
int i;
path->egress_fc_enable = efc;
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = isb;
path->priority = 5;
path->weight = 1;
path->clear_fc = true;
for (i = 0; i < path->path_length; i++)
path->hops[i].initial_credits = credits;
}
/**
* tb_tunnel_alloc_dma() - allocate a DMA tunnel
* @tb: Pointer to the domain structure
* @nhi: Host controller port
* @dst: Destination null port which the other domain is connected to
* @transmit_ring: NHI ring number used to send packets towards the
* other domain
* @transmit_path: HopID used for transmitting packets
* @receive_ring: NHI ring number used to receive packets from the
* other domain
* @receive_path: HopID used for receiving packets
*
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring,
int transmit_path, int receive_ring,
int receive_path)
{
struct tb_tunnel *tunnel;
struct tb_path *path;
u32 credits;
tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
if (!tunnel)
return NULL;
tunnel->activate = tb_dma_activate;
tunnel->src_port = nhi;
tunnel->dst_port = dst;
credits = tb_dma_credits(nhi);
path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
if (!path) {
tb_tunnel_free(tunnel);
return NULL;
}
tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
credits);
tunnel->paths[TB_DMA_PATH_IN] = path;
path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
if (!path) {
tb_tunnel_free(tunnel);
return NULL;
}
tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
tunnel->paths[TB_DMA_PATH_OUT] = path;
return tunnel;
}
/**
* tb_tunnel_free() - free a tunnel
* @tunnel: Tunnel to be freed
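
To spell out how the tb_tunnel_alloc_dma() parameters map onto the two
tb_path_alloc() calls above, here is a sketch using example numbers only
(transmit_ring 0, transmit_path 8, receive_ring 0, receive_path 8):

	/*
	 * "DMA TX": NHI HopID 0 (= transmit_ring)
	 *               ---> NULL port HopID 8 (= transmit_path)
	 * "DMA RX": NULL port HopID 8 (= receive_path)
	 *               ---> NHI HopID 0 (= receive_ring)
	 */

As the calls show, the NHI ring numbers are used directly as the HopIDs on
the NHI end of each path, while transmit_path and receive_path are the
HopIDs used on the NULL port that faces the other domain.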

View File

@@ -14,6 +14,7 @@
enum tb_tunnel_type {
TB_TUNNEL_PCI,
TB_TUNNEL_DP,
TB_TUNNEL_DMA,
};
/**
@@ -47,6 +48,10 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring,
int transmit_path, int receive_ring,
int receive_path);
void tb_tunnel_free(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
@@ -64,5 +69,10 @@ static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_DP;
}
static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
{
return tunnel->type == TB_TUNNEL_DMA;
}
#endif