thunderbolt: Add support for DMA configuration based mailbox

The DMA (NHI) port of a switch provides access to the NVM of the host
controller (and devices starting from Intel Alpine Ridge). The NVM
also contains a more complete DROM for the root switch, including
vendor and device identification strings.

This patch looks for the DMA port capability on each switch and, if
found, populates sw->dma_port. tb_drom_read() is then taught to read
the DROM information from the NVM of the root switch when available.

The DMA port capability also supports upgrading the NVM for both the
host controller and devices; support for this will be added in
subsequent patches.
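
As a rough sketch of the intended use (the helper below is illustrative
only and not added by this patch; it merely strings together the
dma_port calls introduced in dma_port.h), reading the start of the
active NVM of a switch looks like this:

/* Illustrative only: read the first @len bytes of the active NVM of @sw */
static int example_read_active_nvm(struct tb_switch *sw, void *buf, size_t len)
{
        struct tb_dma_port *dma;
        int ret;

        /* Returns NULL if the NHI port has no mailbox capability */
        dma = dma_port_alloc(sw);
        if (!dma)
                return -ENODEV;

        ret = dma_port_flash_read(dma, 0, buf, len);

        dma_port_free(dma);
        return ret;
}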

This code is based on the work done by Amir Levy and Michael Jamet.

Signed-off-by: Michael Jamet <michael.jamet@intel.com>
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: Mika Westerberg
Date:   2017-06-06 15:25:14 +03:00
Committer: Greg Kroah-Hartman
commit 3e13676862
parent 2c3c4197c9
6 changed files with 644 additions and 2 deletions

drivers/thunderbolt/Makefile

@@ -1,3 +1,3 @@
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
thunderbolt-objs += domain.o
thunderbolt-objs += domain.o dma_port.o

drivers/thunderbolt/dma_port.c

@@ -0,0 +1,524 @@
/*
* Thunderbolt DMA configuration based mailbox support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "dma_port.h"
#include "tb_regs.h"
#define DMA_PORT_CAP 0x3e
#define MAIL_DATA 1
#define MAIL_DATA_DWORDS 16
#define MAIL_IN 17
#define MAIL_IN_CMD_SHIFT 28
#define MAIL_IN_CMD_MASK GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE 0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH 0x1
#define MAIL_IN_CMD_FLASH_READ 0x2
#define MAIL_IN_CMD_POWER_CYCLE 0x4
#define MAIL_IN_DWORDS_SHIFT 24
#define MAIL_IN_DWORDS_MASK GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT 2
#define MAIL_IN_ADDRESS_MASK GENMASK(23, 2)
#define MAIL_IN_CSS BIT(1)
#define MAIL_IN_OP_REQUEST BIT(0)
#define MAIL_OUT 18
#define MAIL_OUT_STATUS_RESPONSE BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT 4
#define MAIL_OUT_STATUS_CMD_MASK GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED 0
#define MAIL_OUT_STATUS_ERR_AUTH 1
#define MAIL_OUT_STATUS_ERR_ACCESS 2
#define DMA_PORT_TIMEOUT 5000 /* ms */
#define DMA_PORT_RETRIES 3
/**
* struct tb_dma_port - DMA control port
* @sw: Switch the DMA port belongs to
* @port: Switch port number where DMA capability is found
* @base: Start offset of the mailbox registers
* @buf: Temporary buffer to store a single block
*/
struct tb_dma_port {
struct tb_switch *sw;
u8 port;
u32 base;
u8 *buf;
};
/*
* When the switch is in safe mode it supports very little functionality
* so we don't validate that much here.
*/
static bool dma_port_match(const struct tb_cfg_request *req,
const struct ctl_pkg *pkg)
{
u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
if (pkg->frame.eof == TB_CFG_PKG_ERROR)
return true;
if (pkg->frame.eof != req->response_type)
return false;
if (route != tb_cfg_get_route(req->request))
return false;
if (pkg->frame.size != req->response_size)
return false;
return true;
}
static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
memcpy(req->response, pkg->buffer, req->response_size);
return true;
}
static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
u32 port, u32 offset, u32 length, int timeout_msec)
{
struct cfg_read_pkg request = {
.header = tb_cfg_make_header(route),
.addr = {
.seq = 1,
.port = port,
.space = TB_CFG_PORT,
.offset = offset,
.length = length,
},
};
struct tb_cfg_request *req;
struct cfg_write_pkg reply;
struct tb_cfg_result res;
req = tb_cfg_request_alloc();
if (!req)
return -ENOMEM;
req->match = dma_port_match;
req->copy = dma_port_copy;
req->request = &request;
req->request_size = sizeof(request);
req->request_type = TB_CFG_PKG_READ;
req->response = &reply;
req->response_size = 12 + 4 * length;
req->response_type = TB_CFG_PKG_READ;
res = tb_cfg_request_sync(ctl, req, timeout_msec);
tb_cfg_request_put(req);
if (res.err)
return res.err;
memcpy(buffer, &reply.data, 4 * length);
return 0;
}
static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
u32 port, u32 offset, u32 length, int timeout_msec)
{
struct cfg_write_pkg request = {
.header = tb_cfg_make_header(route),
.addr = {
.seq = 1,
.port = port,
.space = TB_CFG_PORT,
.offset = offset,
.length = length,
},
};
struct tb_cfg_request *req;
struct cfg_read_pkg reply;
struct tb_cfg_result res;
memcpy(&request.data, buffer, length * 4);
req = tb_cfg_request_alloc();
if (!req)
return -ENOMEM;
req->match = dma_port_match;
req->copy = dma_port_copy;
req->request = &request;
req->request_size = 12 + 4 * length;
req->request_type = TB_CFG_PKG_WRITE;
req->response = &reply;
req->response_size = sizeof(reply);
req->response_type = TB_CFG_PKG_WRITE;
res = tb_cfg_request_sync(ctl, req, timeout_msec);
tb_cfg_request_put(req);
return res.err;
}
static int dma_find_port(struct tb_switch *sw)
{
int port, ret;
u32 type;
/*
* The DMA (NHI) port is either 3 or 5 depending on the
* controller. Try both starting from 5 which is more common.
*/
port = 5;
ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
DMA_PORT_TIMEOUT);
if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
return port;
port = 3;
ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
DMA_PORT_TIMEOUT);
if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
return port;
return -ENODEV;
}
/**
* dma_port_alloc() - Finds DMA control port from a switch pointed by route
* @sw: Switch from which to find the DMA port
*
* The function checks whether the switch NHI port supports the DMA
* configuration based mailbox capability and, if it does, allocates and
* initializes a DMA port structure. Returns %NULL if the capability was
* not found.
*
* The DMA control port is functional even when the switch is in safe
* mode.
*/
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
struct tb_dma_port *dma;
int port;
port = dma_find_port(sw);
if (port < 0)
return NULL;
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma)
return NULL;
dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
if (!dma->buf) {
kfree(dma);
return NULL;
}
dma->sw = sw;
dma->port = port;
dma->base = DMA_PORT_CAP;
return dma;
}
/**
* dma_port_free() - Release DMA control port structure
* @dma: DMA control port
*/
void dma_port_free(struct tb_dma_port *dma)
{
if (dma) {
kfree(dma->buf);
kfree(dma);
}
}
static int dma_port_wait_for_completion(struct tb_dma_port *dma,
unsigned int timeout)
{
unsigned long end = jiffies + msecs_to_jiffies(timeout);
struct tb_switch *sw = dma->sw;
do {
int ret;
u32 in;
ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
dma->base + MAIL_IN, 1, 50);
if (ret) {
if (ret != -ETIMEDOUT)
return ret;
} else if (!(in & MAIL_IN_OP_REQUEST)) {
return 0;
}
usleep_range(50, 100);
} while (time_before(jiffies, end));
return -ETIMEDOUT;
}
static int status_to_errno(u32 status)
{
switch (status & MAIL_OUT_STATUS_MASK) {
case MAIL_OUT_STATUS_COMPLETED:
return 0;
case MAIL_OUT_STATUS_ERR_AUTH:
return -EINVAL;
case MAIL_OUT_STATUS_ERR_ACCESS:
return -EACCES;
}
return -EIO;
}
static int dma_port_request(struct tb_dma_port *dma, u32 in,
unsigned int timeout)
{
struct tb_switch *sw = dma->sw;
u32 out;
int ret;
ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
if (ret)
return ret;
ret = dma_port_wait_for_completion(dma, timeout);
if (ret)
return ret;
ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
if (ret)
return ret;
return status_to_errno(out);
}
static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
void *buf, u32 size)
{
struct tb_switch *sw = dma->sw;
u32 in, dwaddress, dwords;
int ret;
dwaddress = address / 4;
dwords = size / 4;
in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
if (dwords < MAIL_DATA_DWORDS)
in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
in |= MAIL_IN_OP_REQUEST;
ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
if (ret)
return ret;
return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}
static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
const void *buf, u32 size)
{
struct tb_switch *sw = dma->sw;
u32 in, dwaddress, dwords;
int ret;
dwords = size / 4;
/* Write the block to MAIL_DATA registers */
ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
/* CSS header write is always done to the same magic address */
if (address >= DMA_PORT_CSS_ADDRESS) {
dwaddress = DMA_PORT_CSS_ADDRESS;
in |= MAIL_IN_CSS;
} else {
dwaddress = address / 4;
}
in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
in |= MAIL_IN_OP_REQUEST;
return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}
/**
* dma_port_flash_read() - Read from active flash region
* @dma: DMA control port
* @address: Address relative to the start of active region
* @buf: Buffer where the read data is stored
* @size: Size of the buffer
*/
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
void *buf, size_t size)
{
unsigned int retries = DMA_PORT_RETRIES;
unsigned int offset;
offset = address & 3;
address = address & ~3;
do {
u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
int ret;
ret = dma_port_flash_read_block(dma, address, dma->buf,
ALIGN(nbytes, 4));
if (ret) {
if (ret == -ETIMEDOUT) {
if (retries--)
continue;
ret = -EIO;
}
return ret;
}
memcpy(buf, dma->buf + offset, nbytes);
size -= nbytes;
address += nbytes;
buf += nbytes;
} while (size > 0);
return 0;
}
/**
* dma_port_flash_write() - Write to non-active flash region
* @dma: DMA control port
* @address: Address relative to the start of non-active region
* @buf: Data to write
* @size: Size of the buffer
*
* Writes a block of data to the non-active flash region of the switch.
* If the address is given as %DMA_PORT_CSS_ADDRESS the block is written
* using the CSS command.
*/
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
const void *buf, size_t size)
{
unsigned int retries = DMA_PORT_RETRIES;
unsigned int offset;
if (address >= DMA_PORT_CSS_ADDRESS) {
offset = 0;
if (size > DMA_PORT_CSS_MAX_SIZE)
return -E2BIG;
} else {
offset = address & 3;
address = address & ~3;
}
do {
u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
int ret;
memcpy(dma->buf + offset, buf, nbytes);
ret = dma_port_flash_write_block(dma, address, buf, nbytes);
if (ret) {
if (ret == -ETIMEDOUT) {
if (retries--)
continue;
ret = -EIO;
}
return ret;
}
size -= nbytes;
address += nbytes;
buf += nbytes;
} while (size > 0);
return 0;
}
/**
* dma_port_flash_update_auth() - Starts flash authentication cycle
* @dma: DMA control port
*
* Starts the flash update authentication cycle. If the image in the
* non-active area is valid, the switch starts the upgrade process in
* which the active and non-active areas are swapped at the end. The
* caller should use dma_port_flash_update_auth_status() to get the
* status of this command, because if the switch in question is the
* root switch the Thunderbolt host controller gets reset as well.
*/
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
u32 in;
in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
in |= MAIL_IN_OP_REQUEST;
return dma_port_request(dma, in, 150);
}
/**
* dma_port_flash_update_auth_status() - Reads status of update auth command
* @dma: DMA control port
* @status: Status code of the operation
*
* The function checks if there is status available from the last update
* auth command. Returns %0 if there is no status and no further
* action is required. If there is status, %1 is returned instead and
* @status holds the failure code.
*
* Negative return means there was an error reading status from the
* switch.
*/
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
struct tb_switch *sw = dma->sw;
u32 out, cmd;
int ret;
ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
if (ret)
return ret;
/* Check if the status relates to flash update auth */
cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
if (status)
*status = out & MAIL_OUT_STATUS_MASK;
/* Reset is needed in any case */
return 1;
}
return 0;
}
/**
* dma_port_power_cycle() - Power cycles the switch
* @dma: DMA control port
*
* Triggers a power cycle of the switch.
*/
int dma_port_power_cycle(struct tb_dma_port *dma)
{
u32 in;
in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
in |= MAIL_IN_OP_REQUEST;
return dma_port_request(dma, in, 150);
}
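
As a worked example of the register encoding above (the numbers follow
directly from the masks defined at the top of the file), this is the
MAIL_IN value dma_port_flash_read_block() composes for a full 16-dword
read at dword address 0x40:

static u32 example_flash_read_mail_in(void)
{
        u32 in;

        in  = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;             /* 0x20000000 */
        /* dwords == MAIL_DATA_DWORDS here, so the dword count field stays zero */
        in |= (0x40 << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;  /* 0x00000100 */
        in |= MAIL_IN_OP_REQUEST;                                       /* 0x00000001 */

        return in;      /* 0x20000101 */
}

dma_port_request() writes this value to MAIL_IN, polls until
MAIL_IN_OP_REQUEST clears, then reads MAIL_OUT and converts the status
field to an errno with status_to_errno().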

drivers/thunderbolt/dma_port.h

@@ -0,0 +1,34 @@
/*
* Thunderbolt DMA configuration based mailbox support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef DMA_PORT_H_
#define DMA_PORT_H_
#include "tb.h"
struct tb_switch;
struct tb_dma_port;
#define DMA_PORT_CSS_ADDRESS 0x3fffff
#define DMA_PORT_CSS_MAX_SIZE SZ_128
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw);
void dma_port_free(struct tb_dma_port *dma);
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
void *buf, size_t size);
int dma_port_flash_update_auth(struct tb_dma_port *dma);
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status);
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
const void *buf, size_t size);
int dma_port_power_cycle(struct tb_dma_port *dma);
#endif
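
The header above is the whole public surface of the mailbox. A
hypothetical NVM upgrade sequence driven through it (upgrade support
itself only arrives in subsequent patches, and this sketch ignores the
host controller reset that authenticating the root switch triggers)
would look roughly like this:

static int example_nvm_upgrade(struct tb_dma_port *dma,
                               const void *image, size_t image_size)
{
        u32 status;
        int ret;

        /* Write the new image to the non-active flash region */
        ret = dma_port_flash_write(dma, 0, image, image_size);
        if (ret)
                return ret;

        /* Ask the firmware to authenticate the image and swap the regions */
        ret = dma_port_flash_update_auth(dma);
        if (ret)
                return ret;

        /* Once the switch is reachable again, check how it went */
        ret = dma_port_flash_update_auth_status(dma, &status);
        if (ret < 0)
                return ret;             /* could not read the status */
        if (ret == 1 && status)
                return -EINVAL;         /* @status holds the failure code */

        return 0;
}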

drivers/thunderbolt/eeprom.c

@@ -429,6 +429,50 @@ err:
return -EINVAL;
}
static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
{
u32 drom_offset;
int ret;
if (!sw->dma_port)
return -ENODEV;
ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
sw->cap_plug_events + 12, 1);
if (ret)
return ret;
if (!drom_offset)
return -ENODEV;
ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
sizeof(*size));
if (ret)
return ret;
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
sw->drom = kzalloc(*size, GFP_KERNEL);
if (!sw->drom)
return -ENOMEM;
ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
if (ret)
goto err_free;
/*
* Read UID from the minimal DROM because the one in NVM is just
* a placeholder.
*/
tb_drom_read_uid_only(sw, &sw->uid);
return 0;
err_free:
kfree(sw->drom);
sw->drom = NULL;
return ret;
}
/**
* tb_drom_read - copy drom to sw->drom and parse it
*/
@@ -450,6 +494,10 @@ int tb_drom_read(struct tb_switch *sw)
if (tb_drom_copy_efi(sw, &size) == 0)
goto parse;
/* Non-Apple hardware has the DROM as part of NVM */
if (tb_drom_copy_nvm(sw, &size) == 0)
goto parse;
/*
* The root switch contains only a dummy drom (header only,
* no entries). Hardcode the configuration here.
@@ -510,7 +558,8 @@ parse:
header->uid_crc8, crc);
goto err;
}
sw->uid = header->uid;
if (!sw->uid)
sw->uid = header->uid;
sw->vendor = header->vendor_id;
sw->device = header->model_id;
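
Spelled out, the arithmetic in tb_drom_copy_nvm() above amounts to the
following (byte offsets taken straight from the code; the names are
informal labels, not structures from the driver):

/*
 * drom_offset = config space dword at sw->cap_plug_events + 12
 *               (NVM offset of the root switch DROM; 0 means no DROM)
 * data size   = 16-bit value read from NVM at drom_offset + 14
 * DROM size   = data size + 1 (CRC8) + 8 (UID) + 4 (CRC32)
 *
 * The range [drom_offset, drom_offset + DROM size) is then copied into
 * sw->drom with dma_port_flash_read() and parsed as usual.
 */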

drivers/thunderbolt/switch.c

@@ -377,6 +377,8 @@ static void tb_switch_release(struct device *dev)
{
struct tb_switch *sw = tb_to_switch(dev);
dma_port_free(sw->dma_port);
kfree(sw->uuid);
kfree(sw->device_name);
kfree(sw->vendor_name);
@@ -570,6 +572,25 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}
static void tb_switch_add_dma_port(struct tb_switch *sw)
{
switch (sw->generation) {
case 3:
break;
case 2:
/* Only root switch can be upgraded */
if (tb_route(sw))
return;
break;
default:
return;
}
sw->dma_port = dma_port_alloc(sw);
}
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -586,6 +607,15 @@ int tb_switch_add(struct tb_switch *sw)
{
int i, ret;
/*
* Initialize the DMA control port now, before we read the DROM.
* Recent host controllers have a more complete DROM in NVM that
* includes vendor and model identification strings, which we then
* expose to userspace. The NVM can be accessed through the DMA
* configuration based mailbox.
*/
tb_switch_add_dma_port(sw);
/* read drom */
ret = tb_drom_read(sw);
if (ret) {

drivers/thunderbolt/tb.h

@@ -12,12 +12,16 @@
#include "tb_regs.h"
#include "ctl.h"
#include "dma_port.h"
/**
* struct tb_switch - a thunderbolt switch
* @dev: Device for the switch
* @config: Switch configuration
* @ports: Ports in this switch
* @dma_port: If the switch has a port supporting DMA configuration
* based mailbox, this holds a pointer to it (%NULL
* otherwise).
* @tb: Pointer to the domain the switch belongs to
* @uid: Unique ID of the switch
* @uuid: UUID of the switch (or %NULL if not supported)
@@ -34,6 +38,7 @@ struct tb_switch {
struct device dev;
struct tb_regs_switch_header config;
struct tb_port *ports;
struct tb_dma_port *dma_port;
struct tb *tb;
u64 uid;
uuid_be *uuid;