IB/hfi1: Rework the IRQ API to be more flexible

The current IRQ API is an all-or-nothing interface.  This has two
problems:

  1. All IRQs are enabled regardless of use
  2. Moving from general interrupt handling to MSI-X handling is difficult

Introduce a new API to enable/disable specific IRQs or a range of IRQs.

Do not enable and disable all IRQs in one step.

Rework various modules to enable/disable IRQs when needed.
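
For illustration only (not part of the patch text), the sketch below shows
how the range-based call is used by the changes in this commit; the wrapper
function name is hypothetical, and the arguments mirror calls made in the
diff (per-context enable, send-credit range enable, full mask on shutdown):

  /*
   * Usage sketch only: assumes the usual hfi1 types (struct hfi1_devdata,
   * struct hfi1_ctxtdata) and the IS_* source defines added by this patch.
   */
  static void example_irq_usage(struct hfi1_devdata *dd,
                                struct hfi1_ctxtdata *rcd)
  {
          /* mask every source, replacing the old set_intr_state(dd, 0) */
          set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);

          /* first == last targets a single source bit */
          set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
                        IS_RCVAVAIL_START + rcd->ctxt, true);

          /* a contiguous, inclusive range enables a whole block of sources */
          set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
  }

Modules that own an MSIx vector (e.g. the SDMA engines via
enable_sdma_srcs()) enable their own sources when the vector is requested.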

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Sadanand Warrier <sadanand.warrier@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Authored by Michael J. Ruhl on 2018-08-15 23:04:22 -07:00; committed by Doug Ledford
parent e63bb50d19
commit a2f7bbdc2d
6 changed files with 167 additions and 109 deletions


@@ -1098,9 +1098,9 @@ struct err_reg_info {
const char *desc;
};
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
/*
* Helpers for building HFI and DC error interrupt table entries. Different
@@ -8176,7 +8176,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
/**
* is_rcv_urgent_int() - User receive context urgent IRQ handler
* @dd: valid dd
* @source: logical IRQ source (ofse from IS_RCVURGENT_START)
* @source: logical IRQ source (offset from IS_RCVURGENT_START)
*
* RX block receive urgent interrupt. Source is < 160.
*
@@ -8226,7 +8226,7 @@ static const struct is_table is_table[] = {
is_sdma_eng_err_name, is_sdma_eng_err_int },
{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
is_sendctxt_err_name, is_sendctxt_err_int },
{ IS_SDMA_START, IS_SDMA_END,
{ IS_SDMA_START, IS_SDMA_IDLE_END,
is_sdma_eng_name, is_sdma_eng_int },
{ IS_VARIOUS_START, IS_VARIOUS_END,
is_various_name, is_various_int },
@@ -8252,7 +8252,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
/* avoids a double compare by walking the table in-order */
for (entry = &is_table[0]; entry->is_name; entry++) {
if (source < entry->end) {
if (source <= entry->end) {
trace_hfi1_interrupt(dd, entry, source);
entry->is_int(dd, source - entry->start);
return;
@@ -9646,30 +9646,10 @@ void qsfp_event(struct work_struct *work)
}
}
static void init_qsfp_int(struct hfi1_devdata *dd)
void init_qsfp_int(struct hfi1_devdata *dd)
{
struct hfi1_pportdata *ppd = dd->pport;
u64 qsfp_mask, cce_int_mask;
const int qsfp1_int_smask = QSFP1_INT % 64;
const int qsfp2_int_smask = QSFP2_INT % 64;
/*
* disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
* Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
* therefore just one of QSFP1_INT/QSFP2_INT can be used to find
* the index of the appropriate CSR in the CCEIntMask CSR array
*/
cce_int_mask = read_csr(dd, CCE_INT_MASK +
(8 * (QSFP1_INT / 64)));
if (dd->hfi1_id) {
cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
cce_int_mask);
} else {
cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
cce_int_mask);
}
u64 qsfp_mask;
qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
/* Clear current status to avoid spurious interrupts */
@@ -9686,6 +9666,12 @@ static void init_qsfp_int(struct hfi1_devdata *dd)
write_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
qsfp_mask);
/* Enable the appropriate QSFP IRQ source */
if (!dd->hfi1_id)
set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
else
set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
}
/*
@@ -11926,10 +11912,16 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
}
if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
IS_RCVAVAIL_START + rcd->ctxt, true);
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
}
if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
IS_RCVAVAIL_START + rcd->ctxt, false);
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
}
if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
@@ -12957,57 +12949,65 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
return ret;
}
/**
* get_int_mask - get 64 bit int mask
* @dd - the devdata
* @i - the csr (relative to CCE_INT_MASK)
*
* Returns the mask with the urgent interrupt mask
* bit clear for kernel receive contexts.
*/
static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
{
u64 mask = U64_MAX; /* default to no change */
if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
int j = (i - (IS_RCVURGENT_START / 64)) * 64;
int k = !j ? IS_RCVURGENT_START % 64 : 0;
if (j)
j -= IS_RCVURGENT_START % 64;
/* j = 0..dd->first_dyn_alloc_ctxt - 1,k = 0..63 */
for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
/* convert to bit in mask and clear */
mask &= ~BIT_ULL(k);
}
return mask;
}
/* ========================================================================= */
/*
* Enable/disable chip from delivering interrupts.
/**
* read_mod_write() - Calculate the IRQ register index and set/clear the bits
* @dd: valid devdata
* @src: IRQ source to determine register index from
* @bits: the bits to set or clear
* @set: true == set the bits, false == clear the bits
*
*/
void set_intr_state(struct hfi1_devdata *dd, u32 enable)
static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
bool set)
{
int i;
u64 reg;
u16 idx = src / BITS_PER_REGISTER;
/*
* In HFI, the mask needs to be 1 to allow interrupts.
*/
if (enable) {
/* enable all interrupts but urgent on kernel contexts */
for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
u64 mask = get_int_mask(dd, i);
spin_lock(&dd->irq_src_lock);
reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
if (set)
reg |= bits;
else
reg &= ~bits;
write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
spin_unlock(&dd->irq_src_lock);
}
write_csr(dd, CCE_INT_MASK + (8 * i), mask);
/**
* set_intr_bits() - Enable/disable a range (one or more) IRQ sources
* @dd: valid devdata
* @first: first IRQ source to set/clear
* @last: last IRQ source (inclusive) to set/clear
* @set: true == set the bits, false == clear the bits
*
* If first == last, set the exact source.
*/
int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
{
u64 bits = 0;
u64 bit;
u16 src;
if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
return -EINVAL;
if (last < first)
return -ERANGE;
for (src = first; src <= last; src++) {
bit = src % BITS_PER_REGISTER;
/* wrapped to next register? */
if (!bit && bits) {
read_mod_write(dd, src - 1, bits, set);
bits = 0;
}
init_qsfp_int(dd);
} else {
for (i = 0; i < CCE_NUM_INT_CSRS; i++)
write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
bits |= BIT_ULL(bit);
}
read_mod_write(dd, src, bits, set);
return 0;
}
/*
@@ -13074,12 +13074,9 @@ void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
* SDMAProgress
* SDMAIdle
*/
remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
msix_intr);
remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
msix_intr);
remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
msix_intr);
remap_intr(dd, IS_SDMA_START + engine, msix_intr);
remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
}
/*
@@ -13109,7 +13106,8 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
int ret;
/* mask all interrupts */
set_intr_state(dd, 0);
set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
/* clear all pending interrupts */
clear_all_interrupts(dd);


@@ -52,9 +52,7 @@
*/
/* sizes */
#define CCE_NUM_MSIX_VECTORS 256
#define CCE_NUM_INT_CSRS 12
#define CCE_NUM_INT_MAP_CSRS 96
#define BITS_PER_REGISTER (BITS_PER_BYTE * sizeof(u64))
#define NUM_INTERRUPT_SOURCES 768
#define RXE_NUM_CONTEXTS 160
#define RXE_PER_CONTEXT_SIZE 0x1000 /* 4k */
@@ -161,34 +159,49 @@
(CR_CREDIT_RETURN_DUE_TO_FORCE_MASK << \
CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT)
/* interrupt source numbers */
#define IS_GENERAL_ERR_START 0
#define IS_SDMAENG_ERR_START 16
#define IS_SENDCTXT_ERR_START 32
#define IS_SDMA_START 192 /* includes SDmaProgress,SDmaIdle */
/* Specific IRQ sources */
#define CCE_ERR_INT 0
#define RXE_ERR_INT 1
#define MISC_ERR_INT 2
#define PIO_ERR_INT 4
#define SDMA_ERR_INT 5
#define EGRESS_ERR_INT 6
#define TXE_ERR_INT 7
#define PBC_INT 240
#define GPIO_ASSERT_INT 241
#define QSFP1_INT 242
#define QSFP2_INT 243
#define TCRIT_INT 244
/* interrupt source ranges */
#define IS_FIRST_SOURCE CCE_ERR_INT
#define IS_GENERAL_ERR_START 0
#define IS_SDMAENG_ERR_START 16
#define IS_SENDCTXT_ERR_START 32
#define IS_SDMA_START 192
#define IS_SDMA_PROGRESS_START 208
#define IS_SDMA_IDLE_START 224
#define IS_VARIOUS_START 240
#define IS_DC_START 248
#define IS_RCVAVAIL_START 256
#define IS_RCVURGENT_START 416
#define IS_SENDCREDIT_START 576
#define IS_RESERVED_START 736
#define IS_MAX_SOURCES 768
#define IS_LAST_SOURCE 767
/* derived interrupt source values */
#define IS_GENERAL_ERR_END IS_SDMAENG_ERR_START
#define IS_SDMAENG_ERR_END IS_SENDCTXT_ERR_START
#define IS_SENDCTXT_ERR_END IS_SDMA_START
#define IS_SDMA_END IS_VARIOUS_START
#define IS_VARIOUS_END IS_DC_START
#define IS_DC_END IS_RCVAVAIL_START
#define IS_RCVAVAIL_END IS_RCVURGENT_START
#define IS_RCVURGENT_END IS_SENDCREDIT_START
#define IS_SENDCREDIT_END IS_RESERVED_START
#define IS_RESERVED_END IS_MAX_SOURCES
/* absolute interrupt numbers for QSFP1Int and QSFP2Int */
#define QSFP1_INT 242
#define QSFP2_INT 243
#define IS_GENERAL_ERR_END 7
#define IS_SDMAENG_ERR_END 31
#define IS_SENDCTXT_ERR_END 191
#define IS_SDMA_END 207
#define IS_SDMA_PROGRESS_END 223
#define IS_SDMA_IDLE_END 239
#define IS_VARIOUS_END 244
#define IS_DC_END 255
#define IS_RCVAVAIL_END 415
#define IS_RCVURGENT_END 575
#define IS_SENDCREDIT_END 735
#define IS_RESERVED_END IS_LAST_SOURCE
/* DCC_CFG_PORT_CONFIG logical link states */
#define LSTATE_DOWN 0x1
@@ -1421,6 +1434,8 @@ irqreturn_t sdma_interrupt(int irq, void *data);
irqreturn_t receive_context_interrupt(int irq, void *data);
irqreturn_t receive_context_thread(int irq, void *data);
int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set);
void init_qsfp_int(struct hfi1_devdata *dd);
void clear_all_interrupts(struct hfi1_devdata *dd);
void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);


@@ -639,6 +639,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
set_intr_bits(dd, IS_RCVURGENT_START + uctxt->ctxt,
IS_RCVURGENT_START + uctxt->ctxt, false);
flush_wc();
/* drain user sdma queue */
hfi1_user_sdma_free_queues(fdata, uctxt);
@@ -1217,6 +1220,10 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
fd->uctxt = uctxt;
hfi1_rcd_get(uctxt);
/* Enable the Urgent IRQ for this user context */
set_intr_bits(dd, IS_RCVURGENT_START + uctxt->ctxt,
IS_RCVURGENT_START + uctxt->ctxt, true);
done:
if (uctxt->subctxt_cnt) {
/*


@@ -1213,9 +1213,6 @@ struct hfi1_devdata {
struct diag_client *diag_client;
/* MSI-X information */
struct hfi1_msix_info msix_info;
/* general interrupt: mask of handled interrupts */
u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1229,6 +1226,9 @@ struct hfi1_devdata {
*/
struct timer_list synth_stats_timer;
/* MSI-X information */
struct hfi1_msix_info msix_info;
/*
* device counters
*/
@@ -1355,6 +1355,8 @@ struct hfi1_devdata {
/* vnic data */
struct hfi1_vnic_data vnic;
/* Lock to protect IRQ SRC register access */
spinlock_t irq_src_lock;
};
static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)


@@ -831,6 +831,23 @@ wq_error:
return -ENOMEM;
}
/**
* enable_general_intr() - Enable the IRQs that will be handled by the
* general interrupt handler.
* @dd: valid devdata
*
*/
static void enable_general_intr(struct hfi1_devdata *dd)
{
set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}
/**
* hfi1_init - do the actual initialization sequence on the chip
* @dd: the hfi1_ib device
@@ -915,6 +932,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
ret = lastfail;
}
/* enable IRQ */
hfi1_rcd_put(rcd);
}
@@ -953,7 +971,8 @@ done:
HFI1_STATUS_INITTED;
if (!ret) {
/* enable all interrupts from the chip */
set_intr_state(dd, 1);
enable_general_intr(dd);
init_qsfp_int(dd);
/* chip is OK for user apps; mark it as initialized */
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
@@ -1050,8 +1069,8 @@ static void shutdown_device(struct hfi1_devdata *dd)
}
dd->flags &= ~HFI1_INITTED;
/* mask and clean up interrupts, but not errors */
set_intr_state(dd, 0);
/* mask and clean up interrupts */
set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
msix_clean_up_interrupts(dd);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
@@ -1312,6 +1331,7 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
spin_lock_init(&dd->pio_map_lock);
mutex_init(&dd->dc8051_lock);
init_waitqueue_head(&dd->event_queue);
spin_lock_init(&dd->irq_src_lock);
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {


@@ -240,6 +240,21 @@ int msix_request_sdma_irq(struct sdma_engine *sde)
return 0;
}
/**
* enable_sdma_srcs() - Helper to enable SDMA IRQ srcs
* @dd: valid devdata structure
* @i: index of SDMA engine
*/
static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
{
set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
IS_SDMA_PROGRESS_START + i, true);
set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
true);
}
/**
* msix_request_irqs() - Allocate all MSIx IRQs
* @dd: valid devdata structure
@@ -262,6 +277,7 @@ int msix_request_irqs(struct hfi1_devdata *dd)
ret = msix_request_sdma_irq(sde);
if (ret)
return ret;
enable_sdma_srcs(sde->dd, i);
}
for (i = 0; i < dd->n_krcv_queues; i++) {