[Qemu-devel] [PATCH 2/3] xilinx: Add AXIENET & DMA models
From: edgar . iglesias
Subject: [Qemu-devel] [PATCH 2/3] xilinx: Add AXIENET & DMA models
Date: Mon, 14 Mar 2011 14:46:03 +0100
From: Edgar E. Iglesias <address@hidden>
Signed-off-by: Edgar E. Iglesias <address@hidden>
---
Makefile.target | 2 +
hw/xilinx_axidma.c | 463 +++++++++++++++++++++++++++
hw/xilinx_axidma.h | 40 +++
hw/xilinx_axienet.c | 857 +++++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 1362 insertions(+), 0 deletions(-)
create mode 100644 hw/xilinx_axidma.c
create mode 100644 hw/xilinx_axidma.h
create mode 100644 hw/xilinx_axienet.c
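Not shown in this patch is how a board wires the two models together; a minimal
sketch, assuming the qdev/sysbus API of this QEMU generation, might look like
the following. The device names, the "dmach"/"freqhz"/"phyaddr" properties and
the shared struct xlx_dma_ch come from the patch below; the base addresses, IRQ
indices and the function name are illustrative only (the real hookup presumably
arrives with the board patch later in this series).

/* Illustrative board-side wiring sketch, not part of this patch. */
static struct xlx_dma_ch axi_dmach;

static void board_add_axienet(NICInfo *nd, qemu_irq *irq)
{
    DeviceState *dev;

    /* AXI Ethernet MAC, handed the shared DMA channel descriptor. */
    qemu_check_nic_model(nd, "xilinx,axienet");
    dev = qdev_create(NULL, "xilinx,axienet");
    qdev_set_nic_properties(dev, nd);
    qdev_prop_set_uint32(dev, "phyaddr", 7);
    qdev_prop_set_ptr(dev, "dmach", &axi_dmach);
    qdev_init_nofail(dev);
    sysbus_mmio_map(sysbus_from_qdev(dev), 0, 0x82780000);
    sysbus_connect_irq(sysbus_from_qdev(dev), 0, irq[0]);

    /* AXI DMA, sharing the same channel so the two models can push
       stream data to each other through xilinx_axidma.h. */
    dev = qdev_create(NULL, "xilinx,axidma");
    qdev_prop_set_uint32(dev, "freqhz", 100 * 1000000);
    qdev_prop_set_ptr(dev, "dmach", &axi_dmach);
    qdev_init_nofail(dev);
    sysbus_mmio_map(sysbus_from_qdev(dev), 0, 0x84600000);
    /* IRQ index 0 is the S2MM stream, index 1 the MM2S stream
       (see the sysbus_init_irq order in xilinx_axidma_init). */
    sysbus_connect_irq(sysbus_from_qdev(dev), 0, irq[1]);
    sysbus_connect_irq(sysbus_from_qdev(dev), 1, irq[2]);
}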
diff --git a/Makefile.target b/Makefile.target
index f0df98e..d11eb4f 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -272,6 +272,8 @@ obj-microblaze-y += xilinx_intc.o
obj-microblaze-y += xilinx_timer.o
obj-microblaze-y += xilinx_uartlite.o
obj-microblaze-y += xilinx_ethlite.o
+obj-microblaze-y += xilinx_axidma.o
+obj-microblaze-y += xilinx_axienet.o
obj-microblaze-$(CONFIG_FDT) += device_tree.o
diff --git a/hw/xilinx_axidma.c b/hw/xilinx_axidma.c
new file mode 100644
index 0000000..ca56625
--- /dev/null
+++ b/hw/xilinx_axidma.c
@@ -0,0 +1,463 @@
+/*
+ * QEMU model of Xilinx AXI-DMA block.
+ *
+ * Copyright (c) 2011 Edgar E. Iglesias.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "sysbus.h"
+#include "qemu-char.h"
+#include "qemu-timer.h"
+#include "qemu-log.h"
+#include "qdev-addr.h"
+
+#include "xilinx_axidma.h"
+
+#define D(x)
+
+#define R_DMACR (0x00 / 4)
+#define R_DMASR (0x04 / 4)
+#define R_CURDESC (0x08 / 4)
+#define R_TAILDESC (0x10 / 4)
+#define R_MAX (0x44 / 4)
+
+struct sdesc
+{
+ uint64_t nxtdesc;
+ uint64_t buffer_address;
+ uint64_t reserved;
+ uint32_t control;
+ uint32_t status;
+ uint32_t app[6];
+};
+
+struct axi_stream
+{
+ QEMUBH *bh;
+ ptimer_state *ptimer;
+ qemu_irq irq;
+
+ int nr;
+
+ struct sdesc desc;
+ int pos;
+ unsigned int complete_cnt;
+ uint32_t regs[R_MAX];
+};
+
+struct xlx_axidma
+{
+ SysBusDevice busdev;
+ uint32_t freqhz;
+ void *dmach;
+
+ struct axi_stream streams[2];
+};
+
+/*
+ * Helper calls to extract info from descriptors and other trivial
+ * state from regs.
+ */
+static inline int stream_desc_sof(struct sdesc *d)
+{
+ return d->control & (1 << 27);
+}
+
+static inline int stream_desc_eof(struct sdesc *d)
+{
+ return d->control & (1 << 26);
+}
+
+static inline int stream_resetting(struct axi_stream *s)
+{
+ return !!(s->regs[R_DMACR] & (1 << 2));
+}
+
+static inline int stream_running(struct axi_stream *s)
+{
+ return s->regs[R_DMACR] & 1;
+}
+
+static inline int stream_halted(struct axi_stream *s)
+{
+ return s->regs[R_DMASR] & 1;
+}
+
+static inline int stream_idle(struct axi_stream *s)
+{
+ return !!(s->regs[R_DMASR] & 2);
+}
+
+static void stream_reset(struct axi_stream *s)
+{
+ s->regs[R_DMASR] = 1; /* starts up halted. */
+ s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold. */
+}
+
+/* Map an offset addr into a channel index. */
+static inline int streamid_from_addr(target_phys_addr_t addr)
+{
+ int sid;
+
+ sid = addr / (0x30);
+ sid &= 1;
+ return sid;
+}
+
+#if 0
+static void stream_desc_show(struct sdesc *d)
+{
+ qemu_log("buffer_addr = %lx\n", d->buffer_address);
+ qemu_log("nxtdesc = %lx\n", d->nxtdesc);
+ qemu_log("control = %x\n", d->control);
+ qemu_log("status = %x\n", d->control);
+}
+#endif
+
+static void stream_desc_load(struct axi_stream *s, target_phys_addr_t addr)
+{
+ cpu_physical_memory_read(addr, (void *) &s->desc, sizeof s->desc);
+}
+
+static void stream_desc_store(struct axi_stream *s, target_phys_addr_t addr)
+{
+ cpu_physical_memory_write(addr, (void *) &s->desc, sizeof s->desc);
+}
+
+static void stream_update_irq(struct axi_stream *s)
+{
+ unsigned int pending, mask, irq;
+
+ pending = (s->regs[R_DMASR] >> 12) & 0x7;
+ mask = (s->regs[R_DMACR] >> 12) & 0x7;
+
+ irq = pending & mask;
+
+ qemu_set_irq(s->irq, !!irq);
+}
+
+static void stream_reload_complete_cnt(struct axi_stream *s)
+{
+ unsigned int comp_th;
+ comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
+ s->complete_cnt = comp_th;
+}
+
+static void timer_hit(void *opaque)
+{
+ struct axi_stream *s = opaque;
+
+ stream_reload_complete_cnt(s);
+ s->regs[R_DMASR] |= 1 << 13;
+ stream_update_irq(s);
+}
+
+static void stream_complete(struct axi_stream *s)
+{
+ unsigned int comp_delay;
+
+ /* Start the delayed timer. */
+ comp_delay = s->regs[R_DMACR] >> 24;
+ if (comp_delay) {
+ ptimer_stop(s->ptimer);
+ ptimer_set_count(s->ptimer, comp_delay);
+ ptimer_run(s->ptimer, 1);
+ }
+
+ s->complete_cnt--;
+ if (s->complete_cnt == 0) {
+ /* Raise the IOC irq. */
+ s->regs[R_DMASR] |= 1 << 12;
+ stream_reload_complete_cnt(s);
+ }
+}
+
+static void stream_process_mem2s(struct axi_stream *s, struct xlx_dma_ch *dmach)
+{
+ uint32_t prev_d;
+ unsigned char txbuf[16 * 1024];
+ unsigned int txlen;
+ uint32_t app[6];
+
+ if (!stream_running(s) || stream_idle(s)) {
+ return;
+ }
+
+ while (1) {
+ stream_desc_load(s, s->regs[R_CURDESC]);
+
+ if (s->desc.status & (1 << 31)) {
+ s->regs[R_DMASR] |= 2;
+ break;
+ }
+
+ if (stream_desc_sof(&s->desc)) {
+ s->pos = 0;
+ memcpy(app, s->desc.app, sizeof app);
+ }
+
+ txlen = s->desc.control & ((1 << 23) - 1);
+ if ((txlen + s->pos) > sizeof txbuf) {
+ hw_error("%s: too small internal txbuf! %d\n", __func__,
+ txlen + s->pos);
+ }
+
+ cpu_physical_memory_read(s->desc.buffer_address,
+ txbuf + s->pos, txlen);
+ s->pos += txlen;
+
+ if (stream_desc_eof(&s->desc)) {
+ xlx_dma_push_to_client(dmach, txbuf, s->pos, app);
+ s->pos = 0;
+ stream_complete(s);
+ }
+
+ /* Update the descriptor. */
+ s->desc.status = txlen | (1 << 31); /* Complete. */
+ stream_desc_store(s, s->regs[R_CURDESC]);
+
+ /* Advance. */
+ prev_d = s->regs[R_CURDESC];
+ s->regs[R_CURDESC] = s->desc.nxtdesc;
+ if (prev_d == s->regs[R_TAILDESC]) {
+ s->regs[R_DMASR] |= 2;
+ break;
+ }
+ }
+}
+
+static void stream_process_s2mem(struct axi_stream *s,
+ unsigned char *buf, size_t len, uint32_t *app)
+{
+ uint32_t prev_d;
+ unsigned int rxlen;
+ int pos = 0;
+ int sof = 1;
+
+ if (!stream_running(s) || stream_idle(s)) {
+ return;
+ }
+
+ while (len) {
+ stream_desc_load(s, s->regs[R_CURDESC]);
+
+ if (s->desc.status & (1 << 31)) {
+ s->regs[R_DMASR] |= 2;
+ break;
+ }
+
+ rxlen = s->desc.control & ((1 << 23) - 1);
+ if (rxlen > len) {
+ /* The remaining data fits in this descriptor; clamp the copy length. */
+ rxlen = len;
+ }
+
+ cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
+ len -= rxlen;
+ pos += rxlen;
+
+ /* Update the descriptor. */
+ if (!len) {
+ int i;
+
+ stream_complete(s);
+ for (i = 0; i < 5; i++) {
+ s->desc.app[i] = app[i];
+ }
+ s->desc.status |= 1 << 26; /* EOF. */
+ }
+
+ s->desc.status |= sof << 27; /* SOF. */
+ s->desc.status |= 1 << 31; /* Complete. */
+ stream_desc_store(s, s->regs[R_CURDESC]);
+ sof = 0;
+
+ /* Advance. */
+ prev_d = s->regs[R_CURDESC];
+ s->regs[R_CURDESC] = s->desc.nxtdesc;
+ if (prev_d == s->regs[R_TAILDESC]) {
+ s->regs[R_DMASR] |= 2;
+ break;
+ }
+ }
+}
+
+static
+void axidma_push(void *opaque, unsigned char *buf, size_t len, uint32_t *app)
+{
+ struct xlx_axidma *d = opaque;
+ struct axi_stream *s = &d->streams[1];
+
+ if (!app) {
+ hw_error("No stream app data!\n");
+ }
+ stream_process_s2mem(s, buf, len, app);
+ stream_update_irq(s);
+}
+
+static uint32_t axidma_readl (void *opaque, target_phys_addr_t addr)
+{
+ struct xlx_axidma *d = opaque;
+ struct axi_stream *s;
+ uint32_t r = 0;
+ int sid;
+
+ sid = streamid_from_addr(addr);
+ s = &d->streams[sid];
+
+ addr = addr % 0x30;
+ addr >>= 2;
+ switch (addr)
+ {
+ case R_DMACR:
+ /* Simulate one cycle of reset delay. */
+ s->regs[addr] &= ~(1 << 2);
+ r = s->regs[addr];
+ break;
+ case R_DMASR:
+ s->regs[addr] &= 0xffff;
+ s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
+ s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
+ r = s->regs[addr];
+ break;
+ default:
+ if (addr < ARRAY_SIZE(s->regs))
+ r = s->regs[addr];
+ D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
+ __func__, sid, addr * 4, r));
+ break;
+ }
+ return r;
+
+}
+
+static void
+axidma_writel (void *opaque, target_phys_addr_t addr, uint32_t value)
+{
+ struct xlx_axidma *d = opaque;
+ struct axi_stream *s;
+ int sid;
+
+ sid = streamid_from_addr(addr);
+ s = &d->streams[sid];
+
+ addr = addr % 0x30;
+ addr >>= 2;
+ switch (addr)
+ {
+ case R_DMACR:
+ /* Tailptr mode is always on. */
+ value |= 2;
+ /* Remember our previous reset state. */
+ value |= (s->regs[addr] & (1 << 2));
+ s->regs[addr] = value;
+
+ if (value & (1 << 2)) {
+ stream_reset(s);
+ }
+
+ if ((value & 1) && !stream_resetting(s)) {
+ /* Start processing. */
+ s->regs[R_DMASR] &= ~3;
+ }
+ stream_reload_complete_cnt(s);
+ break;
+
+ case R_DMASR:
+ /* Mask away write to clear irq lines. */
+ value &= ~(value & (7 << 12));
+ s->regs[addr] = value;
+ break;
+
+ case R_TAILDESC:
+ s->regs[addr] = value;
+ s->regs[R_DMASR] &= ~2; /* Not idle. */
+ if (!sid) {
+ stream_process_mem2s(s, d->dmach);
+ }
+ break;
+ default:
+ D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
+ __func__, sid, addr * 4, value));
+ if (addr < ARRAY_SIZE(s->regs))
+ s->regs[addr] = value;
+ break;
+ }
+ stream_update_irq(s);
+}
+
+static CPUReadMemoryFunc * const axidma_read[] = {
+ &axidma_readl,
+ &axidma_readl,
+ &axidma_readl,
+};
+
+static CPUWriteMemoryFunc * const axidma_write[] = {
+ &axidma_writel,
+ &axidma_writel,
+ &axidma_writel,
+};
+
+static int xilinx_axidma_init(SysBusDevice *dev)
+{
+ struct xlx_axidma *s = FROM_SYSBUS(typeof (*s), dev);
+ int axidma_regs;
+ int i;
+
+ sysbus_init_irq(dev, &s->streams[1].irq);
+ sysbus_init_irq(dev, &s->streams[0].irq);
+
+ if (!s->dmach) {
+ hw_error("Unconnected DMA channel.\n");
+ }
+
+ xlx_dma_connect_dma(s->dmach, s, axidma_push);
+
+ axidma_regs = cpu_register_io_memory(axidma_read, axidma_write, s,
+ DEVICE_NATIVE_ENDIAN);
+ sysbus_init_mmio(dev, R_MAX * 4, axidma_regs);
+
+ for (i = 0; i < 2; i++) {
+ stream_reset(&s->streams[i]);
+ s->streams[i].nr = i;
+ s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
+ s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
+ ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
+ }
+ return 0;
+}
+
+static SysBusDeviceInfo axidma_info = {
+ .init = xilinx_axidma_init,
+ .qdev.name = "xilinx,axidma",
+ .qdev.size = sizeof(struct xlx_axidma),
+ .qdev.props = (Property[]) {
+ DEFINE_PROP_UINT32("freqhz", struct xlx_axidma, freqhz, 50000000),
+ DEFINE_PROP_PTR("dmach", struct xlx_axidma, dmach),
+ DEFINE_PROP_END_OF_LIST(),
+ }
+};
+
+static void xilinx_axidma_register(void)
+{
+ sysbus_register_withprop(&axidma_info);
+}
+
+device_init(xilinx_axidma_register)
diff --git a/hw/xilinx_axidma.h b/hw/xilinx_axidma.h
new file mode 100644
index 0000000..ed4a8c6
--- /dev/null
+++ b/hw/xilinx_axidma.h
@@ -0,0 +1,40 @@
+/* AXI DMA connection. Used until qdev provides a generic way. */
+typedef void (*dma_push_fn)(void *opaque,
+ unsigned char *buf, size_t len, uint32_t *app);
+
+struct xlx_dma_ch
+{
+ void *dma;
+ void *client;
+
+ dma_push_fn to_dma;
+ dma_push_fn to_client;
+};
+
+static inline void xlx_dma_connect_client(struct xlx_dma_ch *dmach,
+ void *c, dma_push_fn f)
+{
+ dmach->client = c;
+ dmach->to_client = f;
+}
+
+static inline void xlx_dma_connect_dma(struct xlx_dma_ch *dmach,
+ void *d, dma_push_fn f)
+{
+ dmach->dma = d;
+ dmach->to_dma = f;
+}
+
+static inline
+void xlx_dma_push_to_dma(struct xlx_dma_ch *dmach,
+ uint8_t *buf, size_t len, uint32_t *app)
+{
+ dmach->to_dma(dmach->dma, buf, len, app);
+}
+static inline
+void xlx_dma_push_to_client(struct xlx_dma_ch *dmach,
+ uint8_t *buf, size_t len, uint32_t *app)
+{
+ dmach->to_client(dmach->client, buf, len, app);
+}
+
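The glue above is symmetric: the DMA model registers its inbound path with
xlx_dma_connect_dma() and hands transmit data to the MAC via
xlx_dma_push_to_client(), while the client (the AXI Ethernet model below) does
the mirror image with xlx_dma_connect_client() and xlx_dma_push_to_dma(). As a
usage sketch only, a hypothetical loopback client (the loopback_* names do not
exist anywhere in this series) would hook in like this:

/* Hypothetical client of the glue layer; illustrative only. */
#include <stddef.h>
#include <stdint.h>
#include "xilinx_axidma.h"

struct loopback {
    struct xlx_dma_ch *dmach;
};

/* Receives MEM2S (transmit) data pushed by the DMA model... */
static void loopback_push(void *opaque, unsigned char *buf, size_t len,
                          uint32_t *app)
{
    struct loopback *lb = opaque;

    /* ...and bounces it straight back into the S2MM (receive) path. */
    xlx_dma_push_to_dma(lb->dmach, buf, len, app);
}

static void loopback_attach(struct loopback *lb, struct xlx_dma_ch *dmach)
{
    lb->dmach = dmach;
    xlx_dma_connect_client(dmach, lb, loopback_push);
}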
diff --git a/hw/xilinx_axienet.c b/hw/xilinx_axienet.c
new file mode 100644
index 0000000..a35b8f2
--- /dev/null
+++ b/hw/xilinx_axienet.c
@@ -0,0 +1,857 @@
+/*
+ * QEMU model of Xilinx AXI-Ethernet.
+ *
+ * Copyright (c) 2011 Edgar E. Iglesias.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "sysbus.h"
+#include "qemu-char.h"
+#include "qemu-log.h"
+#include "net.h"
+#include "net/checksum.h"
+
+#include "xilinx_axidma.h"
+
+#define DPHY(x)
+
+/* Advertisement control register. */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+
+struct qemu_phy
+{
+ uint32_t regs[32];
+
+ int link;
+
+ unsigned int (*read)(struct qemu_phy *phy, unsigned int req);
+ void (*write)(struct qemu_phy *phy, unsigned int req,
+ unsigned int data);
+};
+
+static unsigned int tdk_read(struct qemu_phy *phy, unsigned int req)
+{
+ int regnum;
+ unsigned r = 0;
+
+ regnum = req & 0x1f;
+
+ switch (regnum) {
+ case 1:
+ if (!phy->link)
+ break;
+ /* MR1. */
+ /* Speeds and modes. */
+ r |= (1 << 13) | (1 << 14);
+ r |= (1 << 11) | (1 << 12);
+ r |= (1 << 5); /* Autoneg complete. */
+ r |= (1 << 3); /* Autoneg able. */
+ r |= (1 << 2); /* link. */
+ r |= (1 << 1); /* link. */
+ break;
+ case 5:
+ /* Link partner ability.
+ We are kind; always agree with whatever best mode
+ the guest advertises. */
+ r = 1 << 14; /* Success. */
+ /* Copy advertised modes. */
+ r |= phy->regs[4] & (15 << 5);
+ /* Autoneg support. */
+ r |= 1;
+ break;
+ case 17:
+ /* Marvell PHY on many xilinx boards. */
+ r = 0x8000; /* 1000Mb */
+ break;
+ case 18:
+ {
+ /* Diagnostics reg. */
+ int duplex = 0;
+ int speed_100 = 0;
+
+ if (!phy->link)
+ break;
+
+ /* Are we advertising 100 half or 100 full duplex ? */
+ speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF);
+ speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL);
+
+ /* Are we advertising 10 full or 100 full duplex ? */
+ duplex = !!(phy->regs[4] & ADVERTISE_100FULL);
+ duplex |= !!(phy->regs[4] & ADVERTISE_10FULL);
+ r = (speed_100 << 10) | (duplex << 11);
+ }
+ break;
+
+ default:
+ r = phy->regs[regnum];
+ break;
+ }
+ DPHY(qemu_log("\n%s %x = reg[%d]\n", __func__, r, regnum));
+ return r;
+}
+
+static void
+tdk_write(struct qemu_phy *phy, unsigned int req, unsigned int data)
+{
+ int regnum;
+
+ regnum = req & 0x1f;
+ DPHY(qemu_log("%s reg[%d] = %x\n", __func__, regnum, data));
+ switch (regnum) {
+ default:
+ phy->regs[regnum] = data;
+ break;
+ }
+}
+
+static void
+tdk_init(struct qemu_phy *phy)
+{
+ phy->regs[0] = 0x3100;
+ /* PHY Id. */
+ phy->regs[2] = 0x0300;
+ phy->regs[3] = 0xe400;
+ /* Autonegotiation advertisement reg. */
+ phy->regs[4] = 0x01E1;
+ phy->link = 1;
+
+ phy->read = tdk_read;
+ phy->write = tdk_write;
+}
+
+struct qemu_mdio
+{
+ /* bus. */
+ int mdc;
+ int mdio;
+
+ /* decoder. */
+ enum {
+ PREAMBLE,
+ SOF,
+ OPC,
+ ADDR,
+ REQ,
+ TURNAROUND,
+ DATA
+ } state;
+ unsigned int drive;
+
+ unsigned int cnt;
+ unsigned int addr;
+ unsigned int opc;
+ unsigned int req;
+ unsigned int data;
+
+ struct qemu_phy *devs[32];
+};
+
+static void
+mdio_attach(struct qemu_mdio *bus, struct qemu_phy *phy, unsigned int addr)
+{
+ bus->devs[addr & 0x1f] = phy;
+}
+
+#ifdef USE_THIS_DEAD_CODE
+static void
+mdio_detach(struct qemu_mdio *bus, struct qemu_phy *phy, unsigned int addr)
+{
+ bus->devs[addr & 0x1f] = NULL;
+}
+#endif
+
+static uint16_t mdio_read_req(struct qemu_mdio *bus, unsigned int addr,
+ unsigned int reg)
+{
+ struct qemu_phy *phy;
+ uint16_t data;
+
+ phy = bus->devs[addr];
+ if (phy && phy->read)
+ data = phy->read(phy, reg);
+ else
+ data = 0xffff;
+ DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
+ return data;
+}
+
+static void mdio_write_req(struct qemu_mdio *bus, unsigned int addr,
+ unsigned int reg, uint16_t data)
+{
+ struct qemu_phy *phy;
+
+ DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
+ phy = bus->devs[addr];
+ if (phy && phy->write)
+ phy->write(phy, reg, data);
+}
+
+#define DENET(x)
+
+#define R_RAF (0x000 / 4)
+#define RAF_EMCF_EN (1 << 12)
+#define RAF_NEWFUNC_EN (1 << 11)
+
+#define R_IS (0x00C / 4)
+#define R_IP (0x010 / 4)
+#define R_IE (0x014 / 4)
+#define R_UAWL (0x020 / 4)
+#define R_UAWU (0x024 / 4)
+#define R_PPST (0x030 / 4)
+
+#define R_STATS_RX_BYTESL (0x200 / 4)
+#define R_STATS_RX_BYTESH (0x204 / 4)
+#define R_STATS_TX_BYTESL (0x208 / 4)
+#define R_STATS_TX_BYTESH (0x20C / 4)
+#define R_STATS_RXL (0x290 / 4)
+#define R_STATS_RXH (0x294 / 4)
+#define R_STATS_RX_BCASTL (0x2a0 / 4)
+#define R_STATS_RX_BCASTH (0x2a4 / 4)
+#define R_STATS_RX_MCASTL (0x2a8 / 4)
+#define R_STATS_RX_MCASTH (0x2ac / 4)
+
+#define R_RCW0 (0x400 / 4)
+#define R_RCW1 (0x404 / 4)
+#define R_TC (0x408 / 4)
+#define R_EMMC (0x410 / 4)
+#define R_PHYC (0x414 / 4)
+
+#define R_MC (0x500 / 4)
+#define MC_EN (1 << 6)
+
+#define R_MCR (0x504 / 4)
+#define R_MWD (0x508 / 4)
+#define R_MRD (0x50c / 4)
+#define R_MIS (0x600 / 4)
+#define R_MIP (0x620 / 4)
+#define R_MIE (0x640 / 4)
+#define R_MIC (0x640 / 4)
+
+#define R_UAW0 (0x700 / 4)
+#define R_UAW1 (0x704 / 4)
+#define R_FMI (0x708 / 4)
+#define R_AF0 (0x710 / 4)
+#define R_AF1 (0x714 / 4)
+#define R_MAX (0x34 / 4)
+
+/* Indirect registers. */
+struct temac
+{
+ struct qemu_mdio mdio_bus;
+ struct qemu_phy phy;
+
+ void *parent;
+};
+
+struct xlx_axienet
+{
+ SysBusDevice busdev;
+ qemu_irq irq;
+ void *dmach;
+ NICState *nic;
+ NICConf conf;
+
+
+ uint32_t c_rxmem;
+ uint32_t c_txmem;
+ uint32_t c_phyaddr;
+
+ struct temac temac;
+
+ /* MII regs. */
+ union {
+ uint32_t regs[4];
+ struct {
+ uint32_t mc;
+ uint32_t mcr;
+ uint32_t mwd;
+ uint32_t mrd;
+ };
+ } mii;
+
+ struct {
+ uint64_t rx_bytes;
+ uint64_t tx_bytes;
+
+ uint64_t rx;
+ uint64_t rx_bcast;
+ uint64_t rx_mcast;
+ } stats;
+
+ /* Receive configuration words. */
+ uint32_t rcw[2];
+ /* Transmit config. */
+ uint32_t tc;
+ uint32_t emmc;
+ uint32_t phyc;
+
+ /* Unicast Address Word. */
+ uint32_t uaw[2];
+ /* Unicast address filter used with extended mcast. */
+ uint32_t ext_uaw[2];
+ uint32_t fmi;
+
+ uint32_t regs[R_MAX];
+
+ /* Multicast filter addrs. */
+ uint32_t maddr[4][2];
+ /* 32K x 1 lookup filter. */
+ uint32_t ext_mtable[1024];
+
+
+ uint8_t *rxmem;
+};
+
+static void axienet_rx_reset(struct xlx_axienet *s)
+{
+ s->rcw[1] = (1 << 30) | (1 << 29) | (1 << 28) | (1 << 27);
+}
+
+static void axienet_tx_reset(struct xlx_axienet *s)
+{
+ s->tc = (1 << 30) | (1 << 28) | (1 << 27);
+}
+
+static inline int axienet_rx_resetting(struct xlx_axienet *s)
+{
+ return s->rcw[1] & (1 << 31);
+}
+
+static inline int axienet_rx_enabled(struct xlx_axienet *s)
+{
+ return s->rcw[1] & (1 << 28);
+}
+
+static inline int axienet_extmcf_enabled(struct xlx_axienet *s)
+{
+ return !!(s->regs[R_RAF] & RAF_EMCF_EN);
+}
+
+static inline int axienet_newfunc_enabled(struct xlx_axienet *s)
+{
+ return !!(s->regs[R_RAF] & RAF_NEWFUNC_EN);
+}
+
+static void axienet_reset (struct xlx_axienet *s)
+{
+ axienet_rx_reset(s);
+ axienet_tx_reset(s);
+
+ s->regs[R_PPST] = 1 | (1 << 7);
+ s->regs[R_IS] = (1 << 1) | (1 << 6) | (1 << 7) | (1 << 8);
+
+ s->emmc = (1 << 30);
+}
+
+static void enet_update_irq(struct xlx_axienet *s)
+{
+ s->regs[R_IP] = s->regs[R_IS] & s->regs[R_IE];
+ qemu_set_irq(s->irq, !!s->regs[R_IP]);
+}
+
+static uint32_t enet_readl (void *opaque, target_phys_addr_t addr)
+{
+ struct xlx_axienet *s = opaque;
+ uint32_t r = 0;
+ addr >>= 2;
+
+ switch (addr)
+ {
+ case R_RCW0:
+ case R_RCW1:
+ r = s->rcw[addr & 1];
+ break;
+
+ case R_TC:
+ r = s->tc;
+ break;
+
+ case R_EMMC:
+ r = s->emmc;
+ break;
+
+ case R_PHYC:
+ r = s->phyc;
+ break;
+
+ case R_MCR:
+ r = s->mii.regs[addr & 3] | (1 << 7); /* Always ready. */
+ break;
+
+ case R_STATS_RX_BYTESL:
+ case R_STATS_RX_BYTESH:
+ r = s->stats.rx_bytes >> (32 * (addr & 1));
+ break;
+
+ case R_STATS_TX_BYTESL:
+ case R_STATS_TX_BYTESH:
+ r = s->stats.tx_bytes >> (32 * (addr & 1));
+ break;
+
+ case R_STATS_RXL:
+ case R_STATS_RXH:
+ r = s->stats.rx >> (32 * (addr & 1));
+ break;
+ case R_STATS_RX_BCASTL:
+ case R_STATS_RX_BCASTH:
+ r = s->stats.rx_bcast >> (32 * (addr & 1));
+ break;
+ case R_STATS_RX_MCASTL:
+ case R_STATS_RX_MCASTH:
+ r = s->stats.rx_mcast >> (32 * (addr & 1));
+ break;
+
+ case R_MC:
+ case R_MWD:
+ case R_MRD:
+ r = s->mii.regs[addr & 3];
+ break;
+
+ case R_UAW0:
+ case R_UAW1:
+ r = s->uaw[addr & 1];
+ break;
+
+ case R_UAWU:
+ case R_UAWL:
+ r = s->ext_uaw[addr & 1];
+ break;
+
+ case R_FMI:
+ r = s->fmi;
+ break;
+
+ case R_AF0:
+ case R_AF1:
+ r = s->maddr[s->fmi & 3][addr & 1];
+ break;
+
+ case 0x8000 ... 0x83ff:
+ r = s->ext_mtable[addr - 0x8000];
+ break;
+
+ default:
+ if (addr < ARRAY_SIZE(s->regs)) {
+ r = s->regs[addr];
+ }
+ DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
+ __func__, addr * 4, r));
+ break;
+ }
+ return r;
+}
+
+static void
+enet_writel (void *opaque, target_phys_addr_t addr, uint32_t value)
+{
+ struct xlx_axienet *s = opaque;
+ struct temac *t = &s->temac;
+
+ addr >>= 2;
+ switch (addr)
+ {
+ case R_RCW0:
+ case R_RCW1:
+ s->rcw[addr & 1] = value;
+ if ((addr & 1) && value & (1 << 31)) {
+ axienet_rx_reset(s);
+ }
+ break;
+
+ case R_TC:
+ s->tc = value;
+ if (value & (1 << 31)) {
+ axienet_tx_reset(s);
+ }
+ break;
+
+ case R_EMMC:
+ s->emmc = value;
+ break;
+
+ case R_PHYC:
+ s->phyc = value;
+ break;
+
+ case R_MC:
+ value &= ((1 << 7) - 1);
+
+ /* Enable the MII. */
+ if (value & MC_EN) {
+ unsigned int miiclkdiv = value & ((1 << 6) - 1);
+ if (!miiclkdiv) {
+ qemu_log("AXIENET: MDIO enabled but MDIOCLK is zero!\n");
+ }
+ }
+ s->mii.mc = value;
+ break;
+
+ case R_MCR: {
+ unsigned int phyaddr = (value >> 24) & 0x1f;
+ unsigned int regaddr = (value >> 16) & 0x1f;
+ unsigned int op = (value >> 14) & 3;
+ unsigned int initiate = (value >> 11) & 1;
+
+ if (initiate) {
+ if (op == 1) {
+ mdio_write_req(&t->mdio_bus, phyaddr, regaddr, s->mii.mwd);
+ } else if (op == 2) {
+ s->mii.mrd = mdio_read_req(&t->mdio_bus, phyaddr, regaddr);
+ } else {
+ qemu_log("AXIENET: invalid MDIO OP=%d\n", op);
+ }
+ }
+ s->mii.mcr = value;
+ break;
+ }
+
+ case R_MWD:
+ case R_MRD:
+ s->mii.regs[addr & 3] = value;
+ break;
+
+
+ case R_UAW0:
+ case R_UAW1:
+ s->uaw[addr & 1] = value;
+ break;
+
+ case R_UAWL:
+ case R_UAWU:
+ s->ext_uaw[addr & 1] = value;
+ break;
+
+ case R_FMI:
+ s->fmi = value;
+ break;
+
+ case R_AF0:
+ case R_AF1:
+ s->maddr[s->fmi & 3][addr & 1] = value;
+ break;
+
+ case 0x8000 ... 0x83ff:
+ s->ext_mtable[addr - 0x8000] = value;
+ break;
+
+ default:
+ DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
+ __func__, addr * 4, value));
+ if (addr < ARRAY_SIZE(s->regs))
+ s->regs[addr] = value;
+ break;
+ }
+ enet_update_irq(s);
+}
+
+static CPUReadMemoryFunc * const enet_read[] = {
+ &enet_readl,
+ &enet_readl,
+ &enet_readl,
+};
+
+static CPUWriteMemoryFunc * const enet_write[] = {
+ &enet_writel,
+ &enet_writel,
+ &enet_writel,
+};
+
+static int eth_can_rx(VLANClientState *nc)
+{
+ struct xlx_axienet *s = DO_UPCAST(NICState, nc, nc)->opaque;
+
+ /* RX enabled? */
+ return !axienet_rx_resetting(s) && axienet_rx_enabled(s);
+}
+
+static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
+{
+ int match = 1;
+
+ if (memcmp(buf, &f0, 4)) {
+ match = 0;
+ }
+
+ if (buf[4] != (f1 & 0xff) || buf[5] != ((f1 >> 8) & 0xff)) {
+ match = 0;
+ }
+
+ return match;
+}
+
+static ssize_t eth_rx(VLANClientState *nc, const uint8_t *buf, size_t size)
+{
+ struct xlx_axienet *s = DO_UPCAST(NICState, nc, nc)->opaque;
+ const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x5e};
+ uint32_t app[6] = {0};
+ int promisc = s->fmi & (1 << 31);
+ int unicast, broadcast, multicast, ip_multicast = 0;
+ uint32_t csum32;
+ uint16_t csum16;
+ int i;
+
+ DENET(qemu_log("%s: %zd bytes\n", __func__, size));
+
+ unicast = ~buf[0] & 0x1;
+ broadcast = memcmp(buf, sa_bcast, 6) == 0;
+ multicast = !unicast && !broadcast;
+ if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) {
+ ip_multicast = 1;
+ }
+
+ /* Jumbo or vlan sizes ? */
+ if (!(s->rcw[1] & (1 << 30))) {
+ if (size > 1518 && size <= 1522 && !(s->rcw[1] & (1 << 27))) {
+ return size;
+ }
+ }
+
+ /* Basic Address filters. If you want to use the extended filters
+ you'll generally have to place the ethernet mac into promiscuous mode
+ to avoid the basic filtering from dropping most frames. */
+ if (!promisc) {
+ if (unicast) {
+ if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) {
+ return size;
+ }
+ } else {
+ if (broadcast) {
+ /* Broadcast. */
+ if (s->regs[R_RAF] & 4) {
+ return size;
+ }
+ } else {
+ int drop = 1;
+
+ /* Multicast. */
+ if (s->regs[R_RAF] & 2) {
+ return size;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) {
+ drop = 0;
+ break;
+ }
+ }
+
+ if (drop) {
+ return size;
+ }
+ }
+ }
+ }
+
+ /* Extended mcast filtering enabled? */
+ if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) {
+ if (unicast) {
+ if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) {
+ return size;
+ }
+ } else {
+ if (broadcast) {
+ /* Broadcast. ??? */
+ if (s->regs[R_RAF] & 4) {
+ return size;
+ }
+ } else {
+ int idx, bit;
+
+ /* Multicast. */
+ if (!memcmp(buf, sa_ipmcast, 3)) {
+ return size;
+ }
+
+ idx = (buf[4] & 0x7f) << 8;
+ idx |= buf[5];
+
+ bit = 1 << (idx & 0x1f);
+ idx >>= 5;
+
+ if (!(s->ext_mtable[idx] & bit)) {
+ return size;
+ }
+ }
+ }
+ }
+
+ if (size < 12) {
+ s->regs[R_IS] |= 1 << 3;
+ enet_update_irq(s);
+ return -1;
+ }
+
+ if (size > s->c_rxmem) {
+ size = s->c_rxmem;
+ }
+
+ memcpy(s->rxmem, buf, size);
+ memset(s->rxmem + size, 0, 4); /* Clear the FCS. */
+
+ if (s->rcw[1] & (1 << 29)) {
+ size += 4; /* fcs is inband. */
+ }
+
+ app[0] = 5 << 28;
+ csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14);
+ /* Fold it once. */
+ csum32 = (csum32 & 0xffff) + (csum32 >> 16);
+ /* And twice to get rid of possible carries. */
+ csum16 = (csum32 & 0xffff) + (csum32 >> 16);
+ app[3] = csum16;
+ app[4] = size & 0xffff;
+
+ s->stats.rx_bytes += size;
+ s->stats.rx++;
+ if (multicast) {
+ s->stats.rx_mcast++;
+ app[2] |= 1 | (ip_multicast << 1);
+ } else if (broadcast) {
+ s->stats.rx_bcast++;
+ app[2] |= 1 << 3;
+ }
+
+ /* Good frame. */
+ app[2] |= 1 << 6;
+
+ xlx_dma_push_to_dma(s->dmach, (void *)s->rxmem, size, app);
+
+ s->regs[R_IS] |= 1 << 2;
+ enet_update_irq(s);
+ return size;
+}
+
+static void eth_cleanup(VLANClientState *nc)
+{
+ /* FIXME. */
+ struct xlx_axienet *s = DO_UPCAST(NICState, nc, nc)->opaque;
+ qemu_free(s->rxmem);
+ qemu_free(s);
+}
+
+static void
+axienet_stream_push(void *opaque, uint8_t *buf, size_t size, uint32_t *hdr)
+{
+ struct xlx_axienet *s = opaque;
+
+ /* TX enable ? */
+ if (!(s->tc & (1 << 28))) {
+ return;
+ }
+
+ /* Jumbo or vlan sizes ? */
+ if (!(s->tc & (1 << 30))) {
+ if (size > 1518 && size <= 1522 && !(s->tc & (1 << 27))) {
+ return;
+ }
+ }
+
+ if (hdr[0] & 1) {
+ unsigned int start_off = hdr[1] >> 16;
+ unsigned int write_off = hdr[1] & 0xffff;
+ uint32_t tmp_csum;
+ uint16_t csum;
+
+ tmp_csum = net_checksum_add(size - start_off,
+ (uint8_t *)buf + start_off);
+ /* Accumulate the seed. */
+ tmp_csum += hdr[2] & 0xffff;
+
+ /* Fold the 32bit partial checksum. */
+ csum = net_checksum_finish(tmp_csum);
+
+ /* Writeback. */
+ buf[write_off] = csum >> 8;
+ buf[write_off + 1] = csum & 0xff;
+ }
+
+ qemu_send_packet(&s->nic->nc, buf, size);
+
+ s->stats.tx_bytes += size;
+ s->regs[R_IS] |= 1 << 5;
+ enet_update_irq(s);
+}
+
+static NetClientInfo net_xilinx_enet_info = {
+ .type = NET_CLIENT_TYPE_NIC,
+ .size = sizeof(NICState),
+ .can_receive = eth_can_rx,
+ .receive = eth_rx,
+ .cleanup = eth_cleanup,
+};
+
+static int xilinx_enet_init(SysBusDevice *dev)
+{
+ struct xlx_axienet *s = FROM_SYSBUS(typeof (*s), dev);
+ int enet_regs;
+
+ sysbus_init_irq(dev, &s->irq);
+
+ if (!s->dmach) {
+ hw_error("Unconnected Xilinx Ethernet MAC.\n");
+ }
+
+ xlx_dma_connect_client(s->dmach, s, axienet_stream_push);
+
+ enet_regs = cpu_register_io_memory(enet_read, enet_write, s,
+ DEVICE_LITTLE_ENDIAN);
+ sysbus_init_mmio(dev, 0x40000, enet_regs);
+
+ qemu_macaddr_default_if_unset(&s->conf.macaddr);
+ s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf,
+ dev->qdev.info->name, dev->qdev.id, s);
+ qemu_format_nic_info_str(&s->nic->nc, s->conf.macaddr.a);
+
+ tdk_init(&s->temac.phy);
+ mdio_attach(&s->temac.mdio_bus, &s->temac.phy, s->c_phyaddr);
+
+ s->temac.parent = s;
+
+ s->rxmem = qemu_malloc(s->c_rxmem);
+ axienet_reset(s);
+
+ return 0;
+}
+
+static SysBusDeviceInfo xilinx_enet_info = {
+ .init = xilinx_enet_init,
+ .qdev.name = "xilinx,axienet",
+ .qdev.size = sizeof(struct xlx_axienet),
+ .qdev.props = (Property[]) {
+ DEFINE_PROP_UINT32("phyaddr", struct xlx_axienet, c_phyaddr, 7),
+ DEFINE_PROP_UINT32("c_rxmem", struct xlx_axienet, c_rxmem, 0x1000),
+ DEFINE_PROP_UINT32("c_txmem", struct xlx_axienet, c_txmem, 0x1000),
+ DEFINE_PROP_PTR("dmach", struct xlx_axienet, dmach),
+ DEFINE_NIC_PROPERTIES(struct xlx_axienet, conf),
+ DEFINE_PROP_END_OF_LIST(),
+ }
+};
+static void xilinx_enet_register(void)
+{
+ sysbus_register_withprop(&xilinx_enet_info);
+}
+
+device_init(xilinx_enet_register)
--
1.7.3.4