Merge branch 'sa1100-for-next'; commit 'riscpc^{/ARM: riscpc: enable chained scatterlist support}' into for-arm-soc

Russell King 2019-07-03 11:44:46 +01:00
commit 1f6db18fbd
12 changed files with 169 additions and 181 deletions

MAINTAINERS

@@ -1229,7 +1229,7 @@ F: include/uapi/drm/panfrost_drm.h
ARM MFM AND FLOPPY DRIVERS
M: Ian Molton <spyro@f2s.com>
S: Maintained
F: arch/arm/lib/floppydma.S
F: arch/arm/mach-rpc/floppydma.S
F: arch/arm/include/asm/floppy.h
ARM PMU PROFILING AND DEBUGGING

arch/arm/Kconfig

@@ -528,7 +528,7 @@ config ARCH_RPC
select ARCH_ACORN
select ARCH_MAY_HAVE_PC_FDC
select ARCH_SPARSEMEM_ENABLE
select ARCH_USES_GETTIMEOFFSET
select ARM_HAS_SG_CHAIN
select CPU_SA110
select FIQ
select HAVE_IDE

arch/arm/lib/Makefile

@@ -31,7 +31,6 @@ else
endif
ifeq ($(CONFIG_ARCH_RPC),y)
lib-y += ecard.o io-acorn.o floppydma.o
AFLAGS_delay-loop.o += -march=armv4
endif

arch/arm/mach-rpc/Makefile

@@ -5,4 +5,5 @@
# Object file lists.
obj-y := dma.o ecard.o fiq.o irq.o riscpc.o time.o
obj-y := dma.o ecard.o ecard-loader.o fiq.o floppydma.o io-acorn.o irq.o \
riscpc.o time.o

arch/arm/mach-rpc/dma.c

@@ -27,10 +27,11 @@
struct iomd_dma {
struct dma_struct dma;
unsigned int state;
unsigned long base; /* Controller base address */
void __iomem *base; /* Controller base address */
int irq; /* Controller IRQ */
struct scatterlist cur_sg; /* Current controller buffer */
unsigned int state;
dma_addr_t cur_addr;
unsigned int cur_len;
dma_addr_t dma_addr;
unsigned int dma_len;
};
@@ -53,13 +54,13 @@ typedef enum {
#define CR (IOMD_IO0CR - IOMD_IO0CURA)
#define ST (IOMD_IO0ST - IOMD_IO0CURA)
static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma)
static void iomd_get_next_sg(struct iomd_dma *idma)
{
unsigned long end, offset, flags = 0;
if (idma->dma.sg) {
sg->dma_address = idma->dma_addr;
offset = sg->dma_address & ~PAGE_MASK;
idma->cur_addr = idma->dma_addr;
offset = idma->cur_addr & ~PAGE_MASK;
end = offset + idma->dma_len;
@@ -69,7 +70,7 @@ static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma)
if (offset + TRANSFER_SIZE >= end)
flags |= DMA_END_L;
sg->length = end - TRANSFER_SIZE;
idma->cur_len = end - TRANSFER_SIZE;
idma->dma_len -= end - offset;
idma->dma_addr += end - offset;
@@ -87,52 +88,49 @@ static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma)
}
} else {
flags = DMA_END_S | DMA_END_L;
sg->dma_address = 0;
sg->length = 0;
idma->cur_addr = 0;
idma->cur_len = 0;
}
sg->length |= flags;
idma->cur_len |= flags;
}
static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
{
struct iomd_dma *idma = dev_id;
unsigned long base = idma->base;
void __iomem *base = idma->base;
unsigned int state = idma->state;
unsigned int status, cur, end;
do {
unsigned int status;
status = iomd_readb(base + ST);
status = readb(base + ST);
if (!(status & DMA_ST_INT))
return IRQ_HANDLED;
goto out;
if ((idma->state ^ status) & DMA_ST_AB)
iomd_get_next_sg(&idma->cur_sg, idma);
if ((state ^ status) & DMA_ST_AB)
iomd_get_next_sg(idma);
switch (status & (DMA_ST_OFL | DMA_ST_AB)) {
case DMA_ST_OFL: /* OIA */
case DMA_ST_AB: /* .IB */
iomd_writel(idma->cur_sg.dma_address, base + CURA);
iomd_writel(idma->cur_sg.length, base + ENDA);
idma->state = DMA_ST_AB;
break;
case DMA_ST_OFL | DMA_ST_AB: /* OIB */
case 0: /* .IA */
iomd_writel(idma->cur_sg.dma_address, base + CURB);
iomd_writel(idma->cur_sg.length, base + ENDB);
idma->state = 0;
break;
// This efficiently implements state = OFL != AB ? AB : 0
state = ((status >> 2) ^ status) & DMA_ST_AB;
if (state) {
cur = CURA;
end = ENDA;
} else {
cur = CURB;
end = ENDB;
}
writel(idma->cur_addr, base + cur);
writel(idma->cur_len, base + end);
if (status & DMA_ST_OFL &&
idma->cur_sg.length == (DMA_END_S|DMA_END_L))
idma->cur_len == (DMA_END_S|DMA_END_L))
break;
} while (1);
idma->state = ~DMA_ST_AB;
disable_irq(irq);
state = ~DMA_ST_AB;
disable_irq_nosync(irq);
out:
idma->state = state;
return IRQ_HANDLED;
}
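The shifted-XOR expression above replaces the old four-way switch; a small standalone check (a sketch, not kernel code, assuming the usual IOMD DMA status bit values DMA_ST_OFL = 4 and DMA_ST_AB = 1) confirms the two forms agree for every status value:

/* Standalone sketch: ((status >> 2) ^ status) & DMA_ST_AB picks bank A
 * (DMA_ST_AB) when exactly one of OFL/AB is set and bank B (0) otherwise,
 * exactly as the old switch did. The bit values are assumptions taken from
 * the driver, not part of this diff. */
#include <stdio.h>

#define DMA_ST_OFL	4
#define DMA_ST_AB	1

static unsigned int old_switch(unsigned int status)
{
	switch (status & (DMA_ST_OFL | DMA_ST_AB)) {
	case DMA_ST_OFL:		/* OIA */
	case DMA_ST_AB:			/* .IB */
		return DMA_ST_AB;	/* program bank A next */
	default:			/* OIB and .IA */
		return 0;		/* program bank B next */
	}
}

static unsigned int new_expr(unsigned int status)
{
	return ((status >> 2) ^ status) & DMA_ST_AB;
}

int main(void)
{
	unsigned int status;

	for (status = 0; status < 8; status++)
		printf("status=%u old=%u new=%u\n",
		       status, old_switch(status), new_expr(status));
	return 0;
}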
@@ -160,7 +158,7 @@ static struct device isa_dma_dev = {
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
unsigned long dma_base = idma->base;
void __iomem *base = idma->base;
unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;
if (idma->dma.invalid) {
@@ -180,27 +178,30 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
iomd_writeb(DMA_CR_C, dma_base + CR);
idma->dma_addr = idma->dma.sg->dma_address;
idma->dma_len = idma->dma.sg->length;
writeb(DMA_CR_C, base + CR);
idma->state = DMA_ST_AB;
}
if (idma->dma.dma_mode == DMA_MODE_READ)
ctrl |= DMA_CR_D;
iomd_writeb(ctrl, dma_base + CR);
writeb(ctrl, base + CR);
enable_irq(idma->irq);
}
static void iomd_disable_dma(unsigned int chan, dma_t *dma)
{
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
unsigned long dma_base = idma->base;
void __iomem *base = idma->base;
unsigned long flags;
local_irq_save(flags);
if (idma->state != ~DMA_ST_AB)
disable_irq(idma->irq);
iomd_writeb(0, dma_base + CR);
writeb(0, base + CR);
local_irq_restore(flags);
}
@@ -363,17 +364,17 @@ static int __init rpc_dma_init(void)
*/
iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);
iomd_dma[DMA_0].base = IOMD_IO0CURA;
iomd_dma[DMA_0].base = IOMD_BASE + IOMD_IO0CURA;
iomd_dma[DMA_0].irq = IRQ_DMA0;
iomd_dma[DMA_1].base = IOMD_IO1CURA;
iomd_dma[DMA_1].base = IOMD_BASE + IOMD_IO1CURA;
iomd_dma[DMA_1].irq = IRQ_DMA1;
iomd_dma[DMA_2].base = IOMD_IO2CURA;
iomd_dma[DMA_2].base = IOMD_BASE + IOMD_IO2CURA;
iomd_dma[DMA_2].irq = IRQ_DMA2;
iomd_dma[DMA_3].base = IOMD_IO3CURA;
iomd_dma[DMA_3].base = IOMD_BASE + IOMD_IO3CURA;
iomd_dma[DMA_3].irq = IRQ_DMA3;
iomd_dma[DMA_S0].base = IOMD_SD0CURA;
iomd_dma[DMA_S0].base = IOMD_BASE + IOMD_SD0CURA;
iomd_dma[DMA_S0].irq = IRQ_DMAS0;
iomd_dma[DMA_S1].base = IOMD_SD1CURA;
iomd_dma[DMA_S1].base = IOMD_BASE + IOMD_SD1CURA;
iomd_dma[DMA_S1].irq = IRQ_DMAS1;
for (i = DMA_0; i <= DMA_S1; i++) {

arch/arm/mach-rpc/ecard.c

@@ -70,17 +70,21 @@ struct expcard_blacklist {
unsigned short manufacturer;
unsigned short product;
const char *type;
void (*init)(ecard_t *ec);
};
static ecard_t *cards;
static ecard_t *slot_to_expcard[MAX_ECARDS];
static unsigned int ectcr;
static void atomwide_3p_quirk(ecard_t *ec);
/* List of descriptions of cards which don't have an extended
* identification, or chunk directories containing a description.
*/
static struct expcard_blacklist __initdata blacklist[] = {
{ MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" }
{ MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" },
{ MANU_ATOMWIDE, PROD_ATOMWIDE_3PSERIAL, NULL, atomwide_3p_quirk },
};
asmlinkage extern int
@@ -496,18 +500,21 @@ static void ecard_dump_irq_state(void)
printk("Expansion card IRQ state:\n");
for (ec = cards; ec; ec = ec->next) {
const char *claimed;
if (ec->slot_no == 8)
continue;
printk(" %d: %sclaimed, ",
ec->slot_no, ec->claimed ? "" : "not ");
claimed = ec->claimed ? "" : "not ";
if (ec->ops && ec->ops->irqpending &&
ec->ops != &ecard_default_ops)
printk("irq %spending\n",
printk(" %d: %sclaimed irq %spending\n",
ec->slot_no, claimed,
ec->ops->irqpending(ec) ? "" : "not ");
else
printk("irqaddr %p, mask = %02X, status = %02X\n",
printk(" %d: %sclaimed irqaddr %p, mask = %02X, status = %02X\n",
ec->slot_no, claimed,
ec->irqaddr, ec->irqmask, readb(ec->irqaddr));
}
}
@@ -868,6 +875,16 @@ void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
}
EXPORT_SYMBOL(ecardm_iomap);
static void atomwide_3p_quirk(ecard_t *ec)
{
void __iomem *addr = __ecard_address(ec, ECARD_IOC, ECARD_SYNC);
unsigned int i;
/* Disable interrupts on each port */
for (i = 0x2000; i <= 0x2800; i += 0x0400)
writeb(0, addr + i + 4);
}
/*
* Probe for an expansion card.
*
@@ -924,7 +941,10 @@ static int __init ecard_probe(int slot, unsigned irq, card_type_t type)
for (i = 0; i < ARRAY_SIZE(blacklist); i++)
if (blacklist[i].manufacturer == ec->cid.manufacturer &&
blacklist[i].product == ec->cid.product) {
ec->card_desc = blacklist[i].type;
if (blacklist[i].type)
ec->card_desc = blacklist[i].type;
if (blacklist[i].init)
blacklist[i].init(ec);
break;
}
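The blacklist table now carries an optional per-card init hook next to the optional description override, and ecard_probe applies whichever fields are present. A standalone sketch of that lookup pattern (all IDs, names and helpers below are illustrative, not the real Acorn/Atomwide tables) might look like:

/* Sketch of a quirk table with an optional description and init hook. */
#include <stdio.h>
#include <stddef.h>

struct card {
	unsigned short manufacturer, product;
	const char *desc;
};

struct quirk {
	unsigned short manufacturer, product;
	const char *type;		/* optional description override */
	void (*init)(struct card *c);	/* optional fix-up hook */
};

static void example_init_quirk(struct card *c)
{
	printf("applying init quirk for %04x:%04x\n",
	       c->manufacturer, c->product);
}

static const struct quirk quirks[] = {
	{ 0x0011, 0x0001, "Example Ether card", NULL },
	{ 0x0017, 0x0005, NULL, example_init_quirk },
};

static void apply_quirks(struct card *c)
{
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].manufacturer == c->manufacturer &&
		    quirks[i].product == c->product) {
			if (quirks[i].type)
				c->desc = quirks[i].type;
			if (quirks[i].init)
				quirks[i].init(c);
			break;
		}
}

int main(void)
{
	struct card c = { 0x0017, 0x0005, "serial card" };

	apply_quirks(&c);
	printf("description: %s\n", c.desc);
	return 0;
}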

arch/arm/mach-rpc/include/mach/uncompress.h

@@ -118,29 +118,22 @@ static void arch_decomp_setup(void)
struct tag *t = (struct tag *)params;
unsigned int nr_pages = 0, page_size = PAGE_SIZE;
if (t->hdr.tag == ATAG_CORE)
{
for (; t->hdr.size; t = tag_next(t))
{
if (t->hdr.tag == ATAG_VIDEOTEXT)
{
if (t->hdr.tag == ATAG_CORE) {
for (; t->hdr.size; t = tag_next(t)) {
if (t->hdr.tag == ATAG_VIDEOTEXT) {
video_num_rows = t->u.videotext.video_lines;
video_num_cols = t->u.videotext.video_cols;
bytes_per_char_h = t->u.videotext.video_points;
bytes_per_char_v = t->u.videotext.video_points;
video_x = t->u.videotext.x;
video_y = t->u.videotext.y;
}
if (t->hdr.tag == ATAG_MEM)
{
} else if (t->hdr.tag == ATAG_VIDEOLFB) {
bytes_per_char_h = t->u.videolfb.lfb_depth;
bytes_per_char_v = 8;
} else if (t->hdr.tag == ATAG_MEM) {
page_size = PAGE_SIZE;
nr_pages += (t->u.mem.size / PAGE_SIZE);
}
}
}
else
{
} else {
nr_pages = params->nr_pages;
page_size = params->page_size;
video_num_rows = params->video_num_rows;

arch/arm/mach-rpc/irq.c

@@ -8,117 +8,71 @@
#include <asm/irq.h>
#include <asm/fiq.h>
static void iomd_ack_irq_a(struct irq_data *d)
{
unsigned int val, mask;
// These are offsets from the stat register for each IRQ bank
#define STAT 0x00
#define REQ 0x04
#define CLR 0x04
#define MASK 0x08
mask = 1 << d->irq;
val = iomd_readb(IOMD_IRQMASKA);
iomd_writeb(val & ~mask, IOMD_IRQMASKA);
iomd_writeb(mask, IOMD_IRQCLRA);
static void __iomem *iomd_get_base(struct irq_data *d)
{
void *cd = irq_data_get_irq_chip_data(d);
return (void __iomem *)(unsigned long)cd;
}
static void iomd_mask_irq_a(struct irq_data *d)
static void iomd_set_base_mask(unsigned int irq, void __iomem *base, u32 mask)
{
unsigned int val, mask;
struct irq_data *d = irq_get_irq_data(irq);
mask = 1 << d->irq;
val = iomd_readb(IOMD_IRQMASKA);
iomd_writeb(val & ~mask, IOMD_IRQMASKA);
d->mask = mask;
irq_set_chip_data(irq, (void *)(unsigned long)base);
}
static void iomd_unmask_irq_a(struct irq_data *d)
static void iomd_irq_mask_ack(struct irq_data *d)
{
unsigned int val, mask;
void __iomem *base = iomd_get_base(d);
unsigned int val, mask = d->mask;
mask = 1 << d->irq;
val = iomd_readb(IOMD_IRQMASKA);
iomd_writeb(val | mask, IOMD_IRQMASKA);
val = readb(base + MASK);
writeb(val & ~mask, base + MASK);
writeb(mask, base + CLR);
}
static struct irq_chip iomd_a_chip = {
.irq_ack = iomd_ack_irq_a,
.irq_mask = iomd_mask_irq_a,
.irq_unmask = iomd_unmask_irq_a,
static void iomd_irq_mask(struct irq_data *d)
{
void __iomem *base = iomd_get_base(d);
unsigned int val, mask = d->mask;
val = readb(base + MASK);
writeb(val & ~mask, base + MASK);
}
static void iomd_irq_unmask(struct irq_data *d)
{
void __iomem *base = iomd_get_base(d);
unsigned int val, mask = d->mask;
val = readb(base + MASK);
writeb(val | mask, base + MASK);
}
static struct irq_chip iomd_chip_clr = {
.irq_mask_ack = iomd_irq_mask_ack,
.irq_mask = iomd_irq_mask,
.irq_unmask = iomd_irq_unmask,
};
static void iomd_mask_irq_b(struct irq_data *d)
{
unsigned int val, mask;
mask = 1 << (d->irq & 7);
val = iomd_readb(IOMD_IRQMASKB);
iomd_writeb(val & ~mask, IOMD_IRQMASKB);
}
static void iomd_unmask_irq_b(struct irq_data *d)
{
unsigned int val, mask;
mask = 1 << (d->irq & 7);
val = iomd_readb(IOMD_IRQMASKB);
iomd_writeb(val | mask, IOMD_IRQMASKB);
}
static struct irq_chip iomd_b_chip = {
.irq_ack = iomd_mask_irq_b,
.irq_mask = iomd_mask_irq_b,
.irq_unmask = iomd_unmask_irq_b,
};
static void iomd_mask_irq_dma(struct irq_data *d)
{
unsigned int val, mask;
mask = 1 << (d->irq & 7);
val = iomd_readb(IOMD_DMAMASK);
iomd_writeb(val & ~mask, IOMD_DMAMASK);
}
static void iomd_unmask_irq_dma(struct irq_data *d)
{
unsigned int val, mask;
mask = 1 << (d->irq & 7);
val = iomd_readb(IOMD_DMAMASK);
iomd_writeb(val | mask, IOMD_DMAMASK);
}
static struct irq_chip iomd_dma_chip = {
.irq_ack = iomd_mask_irq_dma,
.irq_mask = iomd_mask_irq_dma,
.irq_unmask = iomd_unmask_irq_dma,
};
static void iomd_mask_irq_fiq(struct irq_data *d)
{
unsigned int val, mask;
mask = 1 << (d->irq & 7);
val = iomd_readb(IOMD_FIQMASK);
iomd_writeb(val & ~mask, IOMD_FIQMASK);
}
static void iomd_unmask_irq_fiq(struct irq_data *d)
{
unsigned int val, mask;
mask = 1 << (d->irq & 7);
val = iomd_readb(IOMD_FIQMASK);
iomd_writeb(val | mask, IOMD_FIQMASK);
}
static struct irq_chip iomd_fiq_chip = {
.irq_ack = iomd_mask_irq_fiq,
.irq_mask = iomd_mask_irq_fiq,
.irq_unmask = iomd_unmask_irq_fiq,
static struct irq_chip iomd_chip_noclr = {
.irq_mask = iomd_irq_mask,
.irq_unmask = iomd_irq_unmask,
};
extern unsigned char rpc_default_fiq_start, rpc_default_fiq_end;
void __init rpc_init_irq(void)
{
unsigned int irq, clr, set = 0;
unsigned int irq, clr, set;
iomd_writeb(0, IOMD_IRQMASKA);
iomd_writeb(0, IOMD_IRQMASKB);
@@ -130,6 +84,7 @@ void __init rpc_init_irq(void)
for (irq = 0; irq < NR_IRQS; irq++) {
clr = IRQ_NOREQUEST;
set = 0;
if (irq <= 6 || (irq >= 9 && irq <= 15))
clr |= IRQ_NOPROBE;
@@ -140,30 +95,37 @@
switch (irq) {
case 0 ... 7:
irq_set_chip_and_handler(irq, &iomd_a_chip,
irq_set_chip_and_handler(irq, &iomd_chip_clr,
handle_level_irq);
irq_modify_status(irq, clr, set);
iomd_set_base_mask(irq, IOMD_BASE + IOMD_IRQSTATA,
BIT(irq));
break;
case 8 ... 15:
irq_set_chip_and_handler(irq, &iomd_b_chip,
irq_set_chip_and_handler(irq, &iomd_chip_noclr,
handle_level_irq);
irq_modify_status(irq, clr, set);
iomd_set_base_mask(irq, IOMD_BASE + IOMD_IRQSTATB,
BIT(irq - 8));
break;
case 16 ... 21:
irq_set_chip_and_handler(irq, &iomd_dma_chip,
irq_set_chip_and_handler(irq, &iomd_chip_noclr,
handle_level_irq);
irq_modify_status(irq, clr, set);
iomd_set_base_mask(irq, IOMD_BASE + IOMD_DMASTAT,
BIT(irq - 16));
break;
case 64 ... 71:
irq_set_chip(irq, &iomd_fiq_chip);
irq_set_chip(irq, &iomd_chip_noclr);
irq_modify_status(irq, clr, set);
iomd_set_base_mask(irq, IOMD_BASE + IOMD_FIQSTAT,
BIT(irq - 64));
break;
}
}
init_FIQ(FIQ_START);
}
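All four bank-specific irq_chips collapse into two generic ones because each irq_data now carries its register-bank base (stored as chip data) and its bit (in d->mask). A userspace-style model of the three operations, with fake register banks standing in for the IOMD (the STAT/CLR/MASK offsets come from the patch, everything else is illustrative), is sketched here:

/* Model of the consolidated IOMD irq_chip: one mask/unmask/mask_ack set
 * parameterised only by a bank base and a bit. */
#include <stdio.h>
#include <stdint.h>

#define STAT	0x00
#define CLR	0x04
#define MASK	0x08

static uint8_t bank_a[16], bank_b[16];	/* fake IRQ register banks */

struct irq_data {
	uint8_t *base;	/* stands in for irq_data_get_irq_chip_data() */
	uint8_t mask;	/* stands in for d->mask */
};

static void iomd_irq_mask(struct irq_data *d)
{
	d->base[MASK] &= ~d->mask;
}

static void iomd_irq_unmask(struct irq_data *d)
{
	d->base[MASK] |= d->mask;
}

static void iomd_irq_mask_ack(struct irq_data *d)	/* banks with a CLR reg */
{
	iomd_irq_mask(d);
	d->base[CLR] = d->mask;
}

int main(void)
{
	struct irq_data irq3 = { bank_a, 1 << 3 };		/* IRQ 3  */
	struct irq_data irq12 = { bank_b, 1 << (12 - 8) };	/* IRQ 12 */

	iomd_irq_unmask(&irq3);
	iomd_irq_unmask(&irq12);
	iomd_irq_mask_ack(&irq3);
	printf("bank A: mask=%02x clr=%02x  bank B: mask=%02x\n",
	       bank_a[MASK], bank_a[CLR], bank_b[MASK]);
	return 0;
}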

arch/arm/mach-rpc/time.c

@@ -13,7 +13,7 @@
* 04-Dec-1997 RMK Updated for new arch/arm/time.c
* 13-Jun-2004 DS Moved to arch/arm/common b/c shared w/CLPS7500
*/
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -27,11 +27,15 @@
#define RPC_CLOCK_FREQ 2000000
#define RPC_LATCH DIV_ROUND_CLOSEST(RPC_CLOCK_FREQ, HZ)
static u32 ioc_timer_gettimeoffset(void)
static u32 ioc_time;
static u64 ioc_timer_read(struct clocksource *cs)
{
unsigned int count1, count2, status;
long offset;
unsigned long flags;
u32 ticks;
local_irq_save(flags);
ioc_writeb (0, IOC_T0LATCH);
barrier ();
count1 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);
@@ -41,27 +45,34 @@ static u32 ioc_timer_gettimeoffset(void)
ioc_writeb (0, IOC_T0LATCH);
barrier ();
count2 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);
ticks = ioc_time + RPC_LATCH - count2;
local_irq_restore(flags);
offset = count2;
if (count2 < count1) {
/*
* We have not had an interrupt between reading count1
* and count2.
* The timer has not reloaded between reading count1 and
* count2, check whether an interrupt was actually pending.
*/
if (status & (1 << 5))
offset -= RPC_LATCH;
ticks += RPC_LATCH;
} else if (count2 > count1) {
/*
* We have just had another interrupt between reading
* count1 and count2.
* The timer has reloaded, so count2 indicates the new
* count since the wrap. The interrupt would not have
* been processed, so add the missed ticks.
*/
offset -= RPC_LATCH;
ticks += RPC_LATCH;
}
offset = (RPC_LATCH - offset) * (tick_nsec / 1000);
return DIV_ROUND_CLOSEST(offset, RPC_LATCH) * 1000;
return ticks;
}
static struct clocksource ioctime_clocksource = {
.read = ioc_timer_read,
.mask = CLOCKSOURCE_MASK(32),
.rating = 100,
};
void __init ioctime_init(void)
{
ioc_writeb(RPC_LATCH & 255, IOC_T0LTCHL);
@@ -72,6 +83,7 @@ void __init ioctime_init(void)
static irqreturn_t
ioc_timer_interrupt(int irq, void *dev_id)
{
ioc_time += RPC_LATCH;
timer_tick();
return IRQ_HANDLED;
}
@@ -86,7 +98,7 @@ static struct irqaction ioc_timer_irq = {
*/
void __init ioc_timer_init(void)
{
arch_gettimeoffset = ioc_timer_gettimeoffset;
WARN_ON(clocksource_register_hz(&ioctime_clocksource, RPC_CLOCK_FREQ));
ioctime_init();
setup_irq(IRQ_TIMER0, &ioc_timer_irq);
}
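With the conversion from arch_gettimeoffset to a clocksource, ioc_time advances by RPC_LATCH on every timer interrupt and ioc_timer_read adds the part of the current period that has already elapsed, compensating when the counter reloaded (or the interrupt is still pending) between the two reads. A standalone model of just that arithmetic (a sketch; model_read is a hypothetical helper, and HZ = 100 is assumed so RPC_LATCH works out to 20000) is:

/* Model of the ioc_timer_read() bookkeeping: ioc_time counts whole
 * periods, count1/count2 are two successive reads of the down-counter,
 * and "pending" stands in for IOC status bit 5 (timer IRQ pending). */
#include <stdio.h>
#include <stdint.h>

#define RPC_CLOCK_FREQ	2000000
#define HZ		100				/* assumed for this sketch */
#define RPC_LATCH	((RPC_CLOCK_FREQ + HZ / 2) / HZ)	/* 20000 */

static uint32_t model_read(uint32_t ioc_time, unsigned int count1,
			   unsigned int count2, int pending)
{
	uint32_t ticks = ioc_time + RPC_LATCH - count2;

	if (count2 < count1) {
		/* No reload between the reads, but an interrupt may already
		 * be pending and not yet reflected in ioc_time. */
		if (pending)
			ticks += RPC_LATCH;
	} else if (count2 > count1) {
		/* The counter reloaded between the reads; its interrupt has
		 * not been serviced yet, so add the missed period. */
		ticks += RPC_LATCH;
	}
	return ticks;
}

int main(void)
{
	/* Mid-period, nothing pending: 20000 - 15000 ticks into the period. */
	printf("%u\n", (unsigned int)model_read(40000, 15010, 15000, 0));
	/* Reload between the reads: the missed period is added back. */
	printf("%u\n", (unsigned int)model_read(40000, 3, 19998, 0));
	return 0;
}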