/* drivers/net/tulip/de2104x.c: PCI Ethernet driver for the Intel/Digital
 * 21040/21041 ("Tulip") chips. */
#define DRV_NAME "de2104x"
#define DRV_VERSION "0.7"
#define DRV_RELDATE "Mar 17, 2004"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/compiler.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
static char version[] =
KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static int debug = -1;
module_param (debug, int, 0);
MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
defined(CONFIG_SPARC) || defined(__ia64__) || \
defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
module_param (rx_copybreak, int, 0);
MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
#define PFX DRV_NAME ": "
#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK | \
NETIF_MSG_IFDOWN | \
NETIF_MSG_IFUP | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
#ifndef CONFIG_DE2104X_DSL
#define DSL 0
#else
#define DSL CONFIG_DE2104X_DSL
#endif
#define DE_RX_RING_SIZE 64
#define DE_TX_RING_SIZE 64
#define DE_RING_BYTES \
((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
(sizeof(struct de_desc) * DE_TX_RING_SIZE))
#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
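/* Number of free TX slots.  One descriptor is always kept unused so that a
 * full ring (head catching up to tail after wrapping) can be told apart
 * from an empty one. */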
#define TX_BUFFS_AVAIL(CP) \
(((CP)->tx_tail <= (CP)->tx_head) ? \
(CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
(CP)->tx_tail - (CP)->tx_head - 1)
#define PKT_BUF_SZ 1536
#define RX_OFFSET 2
#define DE_SETUP_SKB ((struct sk_buff *) 1)
#define DE_DUMMY_SKB ((struct sk_buff *) 2)
#define DE_SETUP_FRAME_WORDS 96
#define DE_EEPROM_WORDS 256
#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
#define DE_MAX_MEDIA 5
#define DE_MEDIA_TP_AUTO 0
#define DE_MEDIA_BNC 1
#define DE_MEDIA_AUI 2
#define DE_MEDIA_TP 3
#define DE_MEDIA_TP_FD 4
#define DE_MEDIA_INVALID DE_MAX_MEDIA
#define DE_MEDIA_FIRST 0
#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
#define DE_TIMER_LINK (60 * HZ)
#define DE_TIMER_NO_LINK (5 * HZ)
#define DE_NUM_REGS 16
#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
#define DE_REGS_VER 1
#define TX_TIMEOUT (6*HZ)
#define FULL_DUPLEX_MAGIC 0x6969
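/* CSR register offsets within the MMIO window, plus bit definitions for
 * descriptors, interrupt status, MAC mode, EEPROM access and SROM parsing. */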
enum {
BusMode = 0x00,
TxPoll = 0x08,
RxPoll = 0x10,
RxRingAddr = 0x18,
TxRingAddr = 0x20,
MacStatus = 0x28,
MacMode = 0x30,
IntrMask = 0x38,
RxMissed = 0x40,
ROMCmd = 0x48,
CSR11 = 0x58,
SIAStatus = 0x60,
CSR13 = 0x68,
CSR14 = 0x70,
CSR15 = 0x78,
PCIPM = 0x40,
CmdReset = (1 << 0),
CacheAlign16 = 0x00008000,
BurstLen4 = 0x00000400,
DescSkipLen = (DSL << 2),
NormalTxPoll = (1 << 0),
NormalRxPoll = (1 << 0),
DescOwn = (1 << 31),
RxError = (1 << 15),
RxErrLong = (1 << 7),
RxErrCRC = (1 << 1),
RxErrFIFO = (1 << 0),
RxErrRunt = (1 << 11),
RxErrFrame = (1 << 14),
RingEnd = (1 << 25),
FirstFrag = (1 << 29),
LastFrag = (1 << 30),
TxError = (1 << 15),
TxFIFOUnder = (1 << 1),
TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
TxMaxCol = (1 << 8),
TxOWC = (1 << 9),
TxJabber = (1 << 14),
SetupFrame = (1 << 27),
TxSwInt = (1 << 31),
IntrOK = (1 << 16),
IntrErr = (1 << 15),
RxIntr = (1 << 6),
RxEmpty = (1 << 7),
TxIntr = (1 << 0),
TxEmpty = (1 << 2),
PciErr = (1 << 13),
TxState = (1 << 22) | (1 << 21) | (1 << 20),
RxState = (1 << 19) | (1 << 18) | (1 << 17),
LinkFail = (1 << 12),
LinkPass = (1 << 4),
RxStopped = (1 << 8),
TxStopped = (1 << 1),
TxEnable = (1 << 13),
RxEnable = (1 << 1),
RxTx = TxEnable | RxEnable,
FullDuplex = (1 << 9),
AcceptAllMulticast = (1 << 7),
AcceptAllPhys = (1 << 6),
BOCnt = (1 << 5),
MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
EE_SHIFT_CLK = 0x02,
EE_CS = 0x01,
EE_DATA_WRITE = 0x04,
EE_WRITE_0 = 0x01,
EE_WRITE_1 = 0x05,
EE_DATA_READ = 0x08,
EE_ENB = (0x4800 | EE_CS),
EE_READ_CMD = 6,
RxMissedOver = (1 << 16),
RxMissedMask = 0xffff,
SROMC0InfoLeaf = 27,
MediaBlockMask = 0x3f,
MediaCustomCSRs = (1 << 6),
PM_Sleep = (1 << 31),
PM_Snooze = (1 << 30),
PM_Mask = PM_Sleep | PM_Snooze,
NWayState = (1 << 14) | (1 << 13) | (1 << 12),
NWayRestart = (1 << 12),
NonselPortActive = (1 << 9),
SelPortActive = (1 << 8),
LinkFailStatus = (1 << 2),
NetCxnErr = (1 << 1),
};
static const u32 de_intr_mask =
IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
LinkPass | LinkFail | PciErr;
static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
struct de_srom_media_block {
u8 opts;
u16 csr13;
u16 csr14;
u16 csr15;
} __packed;
struct de_srom_info_leaf {
u16 default_media;
u8 n_blocks;
u8 unused;
} __packed;
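/* Hardware descriptor shared with the chip: opts1 carries the ownership bit
 * and completion status, opts2 carries control flags and buffer length,
 * addr1/addr2 are DMA buffer addresses. */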
struct de_desc {
__le32 opts1;
__le32 opts2;
__le32 addr1;
__le32 addr2;
#if DSL
__le32 skip[DSL];
#endif
};
struct media_info {
u16 type;
u16 csr13;
u16 csr14;
u16 csr15;
};
struct ring_info {
struct sk_buff *skb;
dma_addr_t mapping;
};
struct de_private {
unsigned tx_head;
unsigned tx_tail;
unsigned rx_tail;
void __iomem *regs;
struct net_device *dev;
spinlock_t lock;
struct de_desc *rx_ring;
struct de_desc *tx_ring;
struct ring_info tx_skb[DE_TX_RING_SIZE];
struct ring_info rx_skb[DE_RX_RING_SIZE];
unsigned rx_buf_sz;
dma_addr_t ring_dma;
u32 msg_enable;
struct net_device_stats net_stats;
struct pci_dev *pdev;
u16 setup_frame[DE_SETUP_FRAME_WORDS];
u32 media_type;
u32 media_supported;
u32 media_advertise;
struct media_info media[DE_MAX_MEDIA];
struct timer_list media_timer;
u8 *ee_data;
unsigned board_idx;
unsigned de21040 : 1;
unsigned media_lock : 1;
};
static void de_set_rx_mode (struct net_device *dev);
static void de_tx (struct de_private *de);
static void de_clean_rings (struct de_private *de);
static void de_media_interrupt (struct de_private *de, u32 status);
static void de21040_media_timer (unsigned long data);
static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ },
};
MODULE_DEVICE_TABLE(pci, de_pci_tbl);
static const char * const media_name[DE_MAX_MEDIA] = {
"10baseT auto",
"BNC",
"AUI",
"10baseT-HD",
"10baseT-FD"
};
static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
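/* Register accessors; both expect a local variable named 'de' in scope. */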
#define dr32(reg) ioread32(de->regs + (reg))
#define dw32(reg, val) iowrite32((val), de->regs + (reg))
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
u32 status, u32 len)
{
if (netif_msg_rx_err (de))
printk (KERN_DEBUG
"%s: rx err, slot %d status 0x%x len %d\n",
de->dev->name, rx_tail, status, len);
if ((status & 0x38000300) != 0x0300) {
if ((status & 0xffff) != 0x7fff) {
if (netif_msg_rx_err(de))
dev_warn(&de->dev->dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
de->net_stats.rx_length_errors++;
}
} else if (status & RxError) {
de->net_stats.rx_errors++;
if (status & 0x0890) de->net_stats.rx_length_errors++;
if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
}
}
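/* Receive path: walk the RX ring until a descriptor is still owned by the
 * chip.  Packets no longer than rx_copybreak are copied into a small fresh
 * skb; larger ones are handed up directly and the ring slot is refilled
 * with a newly allocated buffer.  Each processed descriptor is returned to
 * the hardware by setting DescOwn. */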
static void de_rx (struct de_private *de)
{
unsigned rx_tail = de->rx_tail;
unsigned rx_work = DE_RX_RING_SIZE;
unsigned drop = 0;
int rc;
while (--rx_work) {
u32 status, len;
dma_addr_t mapping;
struct sk_buff *skb, *copy_skb;
unsigned copying_skb, buflen;
skb = de->rx_skb[rx_tail].skb;
BUG_ON(!skb);
rmb();
status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
if (status & DescOwn)
break;
len = ((status >> 16) & 0x7ff) - 4;
mapping = de->rx_skb[rx_tail].mapping;
if (unlikely(drop)) {
de->net_stats.rx_dropped++;
goto rx_next;
}
if (unlikely((status & 0x38008300) != 0x0300)) {
de_rx_err_acct(de, rx_tail, status, len);
goto rx_next;
}
copying_skb = (len <= rx_copybreak);
if (unlikely(netif_msg_rx_status(de)))
printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
de->dev->name, rx_tail, status, len,
copying_skb);
buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
copy_skb = dev_alloc_skb (buflen);
if (unlikely(!copy_skb)) {
de->net_stats.rx_dropped++;
drop = 1;
rx_work = 100;
goto rx_next;
}
if (!copying_skb) {
pci_unmap_single(de->pdev, mapping,
buflen, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
mapping =
de->rx_skb[rx_tail].mapping =
pci_map_single(de->pdev, copy_skb->data,
buflen, PCI_DMA_FROMDEVICE);
de->rx_skb[rx_tail].skb = copy_skb;
} else {
pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
skb_reserve(copy_skb, RX_OFFSET);
skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
len);
pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
skb = copy_skb;
}
skb->protocol = eth_type_trans (skb, de->dev);
de->net_stats.rx_packets++;
de->net_stats.rx_bytes += skb->len;
rc = netif_rx (skb);
if (rc == NET_RX_DROP)
drop = 1;
rx_next:
if (rx_tail == (DE_RX_RING_SIZE - 1))
de->rx_ring[rx_tail].opts2 =
cpu_to_le32(RingEnd | de->rx_buf_sz);
else
de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
wmb();
de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
rx_tail = NEXT_RX(rx_tail);
}
if (!rx_work)
dev_warn(&de->dev->dev, "rx work limit reached\n");
de->rx_tail = rx_tail;
}
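/* Interrupt handler: acknowledge the latched status bits, run RX completion
 * outside de->lock, then TX completion and link-change handling under it,
 * and report (and clear) any PCI bus error the chip recorded. */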
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct de_private *de = netdev_priv(dev);
u32 status;
status = dr32(MacStatus);
if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
return IRQ_NONE;
if (netif_msg_intr(de))
printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
dev->name, status, dr32(MacMode),
de->rx_tail, de->tx_head, de->tx_tail);
dw32(MacStatus, status);
if (status & (RxIntr | RxEmpty)) {
de_rx(de);
if (status & RxEmpty)
dw32(RxPoll, NormalRxPoll);
}
spin_lock(&de->lock);
if (status & (TxIntr | TxEmpty))
de_tx(de);
if (status & (LinkPass | LinkFail))
de_media_interrupt(de, status);
spin_unlock(&de->lock);
if (status & PciErr) {
u16 pci_status;
pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
dev_err(&de->dev->dev,
"PCI bus error, status=%08x, PCI status=%04x\n",
status, pci_status);
}
return IRQ_HANDLED;
}
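/* Reap completed TX descriptors between tx_tail and tx_head, skipping the
 * DE_DUMMY_SKB/DE_SETUP_SKB sentinels used for setup frames, updating the
 * error counters, and waking the queue once enough slots are free again. */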
static void de_tx (struct de_private *de)
{
unsigned tx_head = de->tx_head;
unsigned tx_tail = de->tx_tail;
while (tx_tail != tx_head) {
struct sk_buff *skb;
u32 status;
rmb();
status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
if (status & DescOwn)
break;
skb = de->tx_skb[tx_tail].skb;
BUG_ON(!skb);
if (unlikely(skb == DE_DUMMY_SKB))
goto next;
if (unlikely(skb == DE_SETUP_SKB)) {
pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
sizeof(de->setup_frame), PCI_DMA_TODEVICE);
goto next;
}
pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
skb->len, PCI_DMA_TODEVICE);
if (status & LastFrag) {
if (status & TxError) {
if (netif_msg_tx_err(de))
printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
de->dev->name, status);
de->net_stats.tx_errors++;
if (status & TxOWC)
de->net_stats.tx_window_errors++;
if (status & TxMaxCol)
de->net_stats.tx_aborted_errors++;
if (status & TxLinkFail)
de->net_stats.tx_carrier_errors++;
if (status & TxFIFOUnder)
de->net_stats.tx_fifo_errors++;
} else {
de->net_stats.tx_packets++;
de->net_stats.tx_bytes += skb->len;
if (netif_msg_tx_done(de))
printk(KERN_DEBUG "%s: tx done, slot %d\n",
de->dev->name, tx_tail);
}
dev_kfree_skb_irq(skb);
}
next:
de->tx_skb[tx_tail].skb = NULL;
tx_tail = NEXT_TX(tx_tail);
}
de->tx_tail = tx_tail;
if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
netif_wake_queue(de->dev);
}
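/* Transmit: each packet uses a single descriptor (FirstFrag | LastFrag).
 * The queue is stopped when the ring fills up, and a write to TxPoll kicks
 * the chip into fetching the new descriptor. */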
static netdev_tx_t de_start_xmit (struct sk_buff *skb,
struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
unsigned int entry, tx_free;
u32 mapping, len, flags = FirstFrag | LastFrag;
struct de_desc *txd;
spin_lock_irq(&de->lock);
tx_free = TX_BUFFS_AVAIL(de);
if (tx_free == 0) {
netif_stop_queue(dev);
spin_unlock_irq(&de->lock);
return NETDEV_TX_BUSY;
}
tx_free--;
entry = de->tx_head;
txd = &de->tx_ring[entry];
len = skb->len;
mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
if (entry == (DE_TX_RING_SIZE - 1))
flags |= RingEnd;
if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
flags |= TxSwInt;
flags |= len;
txd->opts2 = cpu_to_le32(flags);
txd->addr1 = cpu_to_le32(mapping);
de->tx_skb[entry].skb = skb;
de->tx_skb[entry].mapping = mapping;
wmb();
txd->opts1 = cpu_to_le32(DescOwn);
wmb();
de->tx_head = NEXT_TX(entry);
if (netif_msg_tx_queued(de))
printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
dev->name, entry, skb->len);
if (tx_free == 0)
netif_stop_queue(dev);
spin_unlock_irq(&de->lock);
dw32(TxPoll, NormalTxPoll);
return NETDEV_TX_OK;
}
#undef set_bit_le
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
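/* Multicast filtering on this chip is programmed with a "setup frame":
 * either a 512-bit hash table plus the station address (hash mode, used
 * when there are more than 14 multicast addresses) or a list of up to 16
 * perfect-match addresses.  Each 16-bit word is written twice, in the
 * layout the chip expects. */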
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
u16 hash_table[32];
struct netdev_hw_addr *ha;
int i;
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table);
netdev_for_each_mc_addr(ha, dev) {
int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
set_bit_le(index, hash_table);
}
for (i = 0; i < 32; i++) {
*setup_frm++ = hash_table[i];
*setup_frm++ = hash_table[i];
}
setup_frm = &de->setup_frame[13*6];
eaddrs = (u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
struct netdev_hw_addr *ha;
u16 *eaddrs;
netdev_for_each_mc_addr(ha, dev) {
eaddrs = (u16 *) ha->addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
}
memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
setup_frm = &de->setup_frame[15*6];
eaddrs = (u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
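/* Program the RX filter.  Promiscuous and all-multicast modes only touch
 * MacMode; otherwise a setup frame is built and queued on the TX ring as a
 * special descriptor (tagged with the DE_SETUP_SKB sentinel, preceded by a
 * dummy descriptor unless it lands in slot 0). */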
static void __de_set_rx_mode (struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
u32 macmode;
unsigned int entry;
u32 mapping;
struct de_desc *txd;
struct de_desc *dummy_txd = NULL;
macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
if (dev->flags & IFF_PROMISC) {
macmode |= AcceptAllMulticast | AcceptAllPhys;
goto out;
}
if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
macmode |= AcceptAllMulticast;
goto out;
}
if (netdev_mc_count(dev) > 14)
build_setup_frame_hash (de->setup_frame, dev);
else
build_setup_frame_perfect (de->setup_frame, dev);
entry = de->tx_head;
if (entry != 0) {
de->tx_skb[entry].skb = DE_DUMMY_SKB;
dummy_txd = &de->tx_ring[entry];
dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
cpu_to_le32(RingEnd) : 0;
dummy_txd->addr1 = 0;
entry = NEXT_TX(entry);
}
de->tx_skb[entry].skb = DE_SETUP_SKB;
de->tx_skb[entry].mapping = mapping =
pci_map_single (de->pdev, de->setup_frame,
sizeof (de->setup_frame), PCI_DMA_TODEVICE);
txd = &de->tx_ring[entry];
if (entry == (DE_TX_RING_SIZE - 1))
txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
else
txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
txd->addr1 = cpu_to_le32(mapping);
wmb();
txd->opts1 = cpu_to_le32(DescOwn);
wmb();
if (dummy_txd) {
dummy_txd->opts1 = cpu_to_le32(DescOwn);
wmb();
}
de->tx_head = NEXT_TX(entry);
if (TX_BUFFS_AVAIL(de) == 0)
netif_stop_queue(dev);
dw32(TxPoll, NormalTxPoll);
out:
if (macmode != dr32(MacMode))
dw32(MacMode, macmode);
}
static void de_set_rx_mode (struct net_device *dev)
{
unsigned long flags;
struct de_private *de = netdev_priv(dev);
spin_lock_irqsave (&de->lock, flags);
__de_set_rx_mode(dev);
spin_unlock_irqrestore (&de->lock, flags);
}
static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
{
if (unlikely(rx_missed & RxMissedOver))
de->net_stats.rx_missed_errors += RxMissedMask;
else
de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
}
static void __de_get_stats(struct de_private *de)
{
u32 tmp = dr32(RxMissed);
de_rx_missed(de, tmp);
}
static struct net_device_stats *de_get_stats(struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
spin_lock_irq(&de->lock);
if (netif_running(dev) && netif_device_present(dev))
__de_get_stats(de);
spin_unlock_irq(&de->lock);
return &de->net_stats;
}
static inline int de_is_running (struct de_private *de)
{
return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
}
static void de_stop_rxtx (struct de_private *de)
{
u32 macmode;
unsigned int i = 1300/100;
macmode = dr32(MacMode);
if (macmode & RxTx) {
dw32(MacMode, macmode & ~RxTx);
dr32(MacMode);
}
while (--i) {
if (!de_is_running(de))
return;
udelay(100);
}
dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
}
static inline void de_start_rxtx (struct de_private *de)
{
u32 macmode;
macmode = dr32(MacMode);
if ((macmode & RxTx) != RxTx) {
dw32(MacMode, macmode | RxTx);
dr32(MacMode);
}
}
static void de_stop_hw (struct de_private *de)
{
udelay(5);
dw32(IntrMask, 0);
de_stop_rxtx(de);
dw32(MacStatus, dr32(MacStatus));
udelay(10);
de->rx_tail = 0;
de->tx_head = de->tx_tail = 0;
}
static void de_link_up(struct de_private *de)
{
if (!netif_carrier_ok(de->dev)) {
netif_carrier_on(de->dev);
if (netif_msg_link(de))
dev_info(&de->dev->dev, "link up, media %s\n",
media_name[de->media_type]);
}
}
static void de_link_down(struct de_private *de)
{
if (netif_carrier_ok(de->dev)) {
netif_carrier_off(de->dev);
if (netif_msg_link(de))
dev_info(&de->dev->dev, "link down\n");
}
}
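/* Program the SIA registers (CSR13/14/15) for the currently selected media
 * and set the full-duplex bit in MacMode accordingly.  The chip is expected
 * to be idle while this runs. */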
static void de_set_media (struct de_private *de)
{
unsigned media = de->media_type;
u32 macmode = dr32(MacMode);
if (de_is_running(de))
dev_warn(&de->dev->dev,
"chip is running while changing media!\n");
if (de->de21040)
dw32(CSR11, FULL_DUPLEX_MAGIC);
dw32(CSR13, 0);
dw32(CSR14, de->media[media].csr14);
dw32(CSR15, de->media[media].csr15);
dw32(CSR13, de->media[media].csr13);
mdelay(10);
if (media == DE_MEDIA_TP_FD)
macmode |= FullDuplex;
else
macmode &= ~FullDuplex;
if (netif_msg_link(de))
dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
if (netif_msg_hw(de)) {
dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
dr32(MacMode), dr32(SIAStatus),
dr32(CSR13), dr32(CSR14), dr32(CSR15));
dev_info(&de->dev->dev,
"set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
macmode, de->media[media].csr13,
de->media[media].csr14, de->media[media].csr15);
}
if (macmode != dr32(MacMode))
dw32(MacMode, macmode);
}
static void de_next_media (struct de_private *de, const u32 *media,
unsigned int n_media)
{
unsigned int i;
for (i = 0; i < n_media; i++) {
if (de_ok_to_advertise(de, media[i])) {
de->media_type = media[i];
return;
}
}
}
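/* 21040 link poll: while carrier is present, re-arm a slow timer; on loss
 * of carrier, alternate between TP and AUI (unless the user locked the
 * media) and retry on a short timer. */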
static void de21040_media_timer (unsigned long data)
{
struct de_private *de = (struct de_private *) data;
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
unsigned long flags;
carrier = (status & NetCxnErr) ? 0 : 1;
if (carrier) {
if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
goto no_link_yet;
de->media_timer.expires = jiffies + DE_TIMER_LINK;
add_timer(&de->media_timer);
if (!netif_carrier_ok(dev))
de_link_up(de);
else
if (netif_msg_timer(de))
dev_info(&dev->dev, "%s link ok, status %x\n",
media_name[de->media_type], status);
return;
}
de_link_down(de);
if (de->media_lock)
return;
if (de->media_type == DE_MEDIA_AUI) {
static const u32 next_state = DE_MEDIA_TP;
de_next_media(de, &next_state, 1);
} else {
static const u32 next_state = DE_MEDIA_AUI;
de_next_media(de, &next_state, 1);
}
spin_lock_irqsave(&de->lock, flags);
de_stop_rxtx(de);
spin_unlock_irqrestore(&de->lock, flags);
de_set_media(de);
de_start_rxtx(de);
no_link_yet:
de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
add_timer(&de->media_timer);
if (netif_msg_timer(de))
dev_info(&dev->dev, "no link, trying media %s, status %x\n",
media_name[de->media_type], status);
}
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
{
switch (new_media) {
case DE_MEDIA_TP_AUTO:
if (!(de->media_advertise & ADVERTISED_Autoneg))
return 0;
if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
return 0;
break;
case DE_MEDIA_BNC:
if (!(de->media_advertise & ADVERTISED_BNC))
return 0;
break;
case DE_MEDIA_AUI:
if (!(de->media_advertise & ADVERTISED_AUI))
return 0;
break;
case DE_MEDIA_TP:
if (!(de->media_advertise & ADVERTISED_10baseT_Half))
return 0;
break;
case DE_MEDIA_TP_FD:
if (!(de->media_advertise & ADVERTISED_10baseT_Full))
return 0;
break;
}
return 1;
}
static void de21041_media_timer (unsigned long data)
{
struct de_private *de = (struct de_private *) data;
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
unsigned long flags;
dw32(SIAStatus, NonselPortActive | SelPortActive);
carrier = (status & NetCxnErr) ? 0 : 1;
if (carrier) {
if ((de->media_type == DE_MEDIA_TP_AUTO ||
de->media_type == DE_MEDIA_TP ||
de->media_type == DE_MEDIA_TP_FD) &&
(status & LinkFailStatus))
goto no_link_yet;
de->media_timer.expires = jiffies + DE_TIMER_LINK;
add_timer(&de->media_timer);
if (!netif_carrier_ok(dev))
de_link_up(de);
else
if (netif_msg_timer(de))
dev_info(&dev->dev,
"%s link ok, mode %x status %x\n",
media_name[de->media_type],
dr32(MacMode), status);
return;
}
de_link_down(de);
if (de->media_lock)
goto set_media;
if (status & NonselPortActive) {
unsigned int have_media = 1;
if (de->media_type == DE_MEDIA_AUI ||
de->media_type == DE_MEDIA_BNC) {
if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
de->media_type = DE_MEDIA_TP_AUTO;
else
have_media = 0;
}
else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
de_ok_to_advertise(de, DE_MEDIA_BNC))
de->media_type = DE_MEDIA_BNC;
else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
de_ok_to_advertise(de, DE_MEDIA_AUI))
de->media_type = DE_MEDIA_AUI;
else
have_media = 0;
if (have_media)
goto set_media;
}
if (de->media_type == DE_MEDIA_AUI) {
static const u32 next_states[] = {
DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
};
de_next_media(de, next_states, ARRAY_SIZE(next_states));
} else if (de->media_type == DE_MEDIA_BNC) {
static const u32 next_states[] = {
DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
};
de_next_media(de, next_states, ARRAY_SIZE(next_states));
} else {
static const u32 next_states[] = {
DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
};
de_next_media(de, next_states, ARRAY_SIZE(next_states));
}
set_media:
spin_lock_irqsave(&de->lock, flags);
de_stop_rxtx(de);
spin_unlock_irqrestore(&de->lock, flags);
de_set_media(de);
de_start_rxtx(de);
no_link_yet:
de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
add_timer(&de->media_timer);
if (netif_msg_timer(de))
dev_info(&dev->dev, "no link, trying media %s, status %x\n",
media_name[de->media_type], status);
}
static void de_media_interrupt (struct de_private *de, u32 status)
{
if (status & LinkPass) {
if ((de->media_type == DE_MEDIA_AUI ||
de->media_type == DE_MEDIA_BNC) &&
(de->media_lock ||
!de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
return;
if ((de->media_type == DE_MEDIA_AUI ||
de->media_type == DE_MEDIA_BNC)) {
de->media_type = DE_MEDIA_TP_AUTO;
de_stop_rxtx(de);
de_set_media(de);
de_start_rxtx(de);
}
de_link_up(de);
mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
return;
}
BUG_ON(!(status & LinkFail));
if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
de->media_type != DE_MEDIA_BNC) {
de_link_down(de);
mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
}
}
static int de_reset_mac (struct de_private *de)
{
u32 status, tmp;
if (dr32(BusMode) == 0xffffffff)
return -EBUSY;
dw32 (BusMode, CmdReset);
mdelay (1);
dw32 (BusMode, de_bus_mode);
mdelay (1);
for (tmp = 0; tmp < 5; tmp++) {
dr32 (BusMode);
mdelay (1);
}
mdelay (1);
status = dr32(MacStatus);
if (status & (RxState | TxState))
return -EBUSY;
if (status == 0xffffffff)
return -ENODEV;
return 0;
}
static void de_adapter_wake (struct de_private *de)
{
u32 pmctl;
if (de->de21040)
return;
pci_read_config_dword(de->pdev, PCIPM, &pmctl);
if (pmctl & PM_Mask) {
pmctl &= ~PM_Mask;
pci_write_config_dword(de->pdev, PCIPM, pmctl);
msleep(10);
}
}
static void de_adapter_sleep (struct de_private *de)
{
u32 pmctl;
if (de->de21040)
return;
dw32(CSR13, 0);
pci_read_config_dword(de->pdev, PCIPM, &pmctl);
pmctl |= PM_Sleep;
pci_write_config_dword(de->pdev, PCIPM, pmctl);
}
static int de_init_hw (struct de_private *de)
{
struct net_device *dev = de->dev;
u32 macmode;
int rc;
de_adapter_wake(de);
macmode = dr32(MacMode) & ~MacModeClear;
rc = de_reset_mac(de);
if (rc)
return rc;
de_set_media(de);
dw32(RxRingAddr, de->ring_dma);
dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
dw32(MacMode, RxTx | macmode);
dr32(RxMissed);
dw32(IntrMask, de_intr_mask);
de_set_rx_mode(dev);
return 0;
}
static int de_refill_rx (struct de_private *de)
{
unsigned i;
for (i = 0; i < DE_RX_RING_SIZE; i++) {
struct sk_buff *skb;
skb = dev_alloc_skb(de->rx_buf_sz);
if (!skb)
goto err_out;
skb->dev = de->dev;
de->rx_skb[i].mapping = pci_map_single(de->pdev,
skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
de->rx_skb[i].skb = skb;
de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
if (i == (DE_RX_RING_SIZE - 1))
de->rx_ring[i].opts2 =
cpu_to_le32(RingEnd | de->rx_buf_sz);
else
de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
de->rx_ring[i].addr2 = 0;
}
return 0;
err_out:
de_clean_rings(de);
return -ENOMEM;
}
static int de_init_rings (struct de_private *de)
{
memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
de->rx_tail = 0;
de->tx_head = de->tx_tail = 0;
return de_refill_rx (de);
}
static int de_alloc_rings (struct de_private *de)
{
de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
if (!de->rx_ring)
return -ENOMEM;
de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
return de_init_rings(de);
}
static void de_clean_rings (struct de_private *de)
{
unsigned i;
memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
wmb();
memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
wmb();
for (i = 0; i < DE_RX_RING_SIZE; i++) {
if (de->rx_skb[i].skb) {
pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
de->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(de->rx_skb[i].skb);
}
}
for (i = 0; i < DE_TX_RING_SIZE; i++) {
struct sk_buff *skb = de->tx_skb[i].skb;
if ((skb) && (skb != DE_DUMMY_SKB)) {
if (skb != DE_SETUP_SKB) {
de->net_stats.tx_dropped++;
pci_unmap_single(de->pdev,
de->tx_skb[i].mapping,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
} else {
pci_unmap_single(de->pdev,
de->tx_skb[i].mapping,
sizeof(de->setup_frame),
PCI_DMA_TODEVICE);
}
}
}
memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}
static void de_free_rings (struct de_private *de)
{
de_clean_rings(de);
pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
de->rx_ring = NULL;
de->tx_ring = NULL;
}
static int de_open (struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
int rc;
if (netif_msg_ifup(de))
printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
rc = de_alloc_rings(de);
if (rc) {
dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
return rc;
}
dw32(IntrMask, 0);
rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
if (rc) {
dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
dev->irq, rc);
goto err_out_free;
}
rc = de_init_hw(de);
if (rc) {
dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
goto err_out_free_irq;
}
netif_start_queue(dev);
mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
return 0;
err_out_free_irq:
free_irq(dev->irq, dev);
err_out_free:
de_free_rings(de);
return rc;
}
static int de_close (struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
unsigned long flags;
if (netif_msg_ifdown(de))
printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
del_timer_sync(&de->media_timer);
spin_lock_irqsave(&de->lock, flags);
de_stop_hw(de);
netif_stop_queue(dev);
netif_carrier_off(dev);
spin_unlock_irqrestore(&de->lock, flags);
free_irq(dev->irq, dev);
de_free_rings(de);
de_adapter_sleep(de);
return 0;
}
static void de_tx_timeout (struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
de->rx_tail, de->tx_head, de->tx_tail);
del_timer_sync(&de->media_timer);
disable_irq(dev->irq);
spin_lock_irq(&de->lock);
de_stop_hw(de);
netif_stop_queue(dev);
netif_carrier_off(dev);
spin_unlock_irq(&de->lock);
enable_irq(dev->irq);
__de_get_stats(de);
synchronize_irq(dev->irq);
de_clean_rings(de);
de_init_rings(de);
de_init_hw(de);
netif_wake_queue(dev);
}
static void __de_get_regs(struct de_private *de, u8 *buf)
{
int i;
u32 *rbuf = (u32 *)buf;
for (i = 0; i < DE_NUM_REGS; i++)
rbuf[i] = dr32(i * 8);
de_rx_missed(de, rbuf[8]);
}
static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
ecmd->supported = de->media_supported;
ecmd->transceiver = XCVR_INTERNAL;
ecmd->phy_address = 0;
ecmd->advertising = de->media_advertise;
switch (de->media_type) {
case DE_MEDIA_AUI:
ecmd->port = PORT_AUI;
ecmd->speed = 5;
break;
case DE_MEDIA_BNC:
ecmd->port = PORT_BNC;
ecmd->speed = 2;
break;
default:
ecmd->port = PORT_TP;
ecmd->speed = SPEED_10;
break;
}
if (dr32(MacMode) & FullDuplex)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
if (de->media_lock)
ecmd->autoneg = AUTONEG_DISABLE;
else
ecmd->autoneg = AUTONEG_ENABLE;
return 0;
}
static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
u32 new_media;
unsigned int media_lock;
if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
return -EINVAL;
if (de->de21040 && ecmd->speed == 2)
return -EINVAL;
if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
return -EINVAL;
if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
return -EINVAL;
if (de->de21040 && ecmd->port == PORT_BNC)
return -EINVAL;
if (ecmd->transceiver != XCVR_INTERNAL)
return -EINVAL;
if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
return -EINVAL;
if (ecmd->advertising & ~de->media_supported)
return -EINVAL;
if (ecmd->autoneg == AUTONEG_ENABLE &&
(!(ecmd->advertising & ADVERTISED_Autoneg)))
return -EINVAL;
switch (ecmd->port) {
case PORT_AUI:
new_media = DE_MEDIA_AUI;
if (!(ecmd->advertising & ADVERTISED_AUI))
return -EINVAL;
break;
case PORT_BNC:
new_media = DE_MEDIA_BNC;
if (!(ecmd->advertising & ADVERTISED_BNC))
return -EINVAL;
break;
default:
if (ecmd->autoneg == AUTONEG_ENABLE)
new_media = DE_MEDIA_TP_AUTO;
else if (ecmd->duplex == DUPLEX_FULL)
new_media = DE_MEDIA_TP_FD;
else
new_media = DE_MEDIA_TP;
if (!(ecmd->advertising & ADVERTISED_TP))
return -EINVAL;
if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
return -EINVAL;
break;
}
media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
if ((new_media == de->media_type) &&
(media_lock == de->media_lock) &&
(ecmd->advertising == de->media_advertise))
return 0;
de_link_down(de);
mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
de_stop_rxtx(de);
de->media_type = new_media;
de->media_lock = media_lock;
de->media_advertise = ecmd->advertising;
de_set_media(de);
if (netif_running(de->dev))
de_start_rxtx(de);
return 0;
}
static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
strcpy (info->driver, DRV_NAME);
strcpy (info->version, DRV_VERSION);
strcpy (info->bus_info, pci_name(de->pdev));
info->eedump_len = DE_EEPROM_SIZE;
}
static int de_get_regs_len(struct net_device *dev)
{
return DE_REGS_SIZE;
}
static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct de_private *de = netdev_priv(dev);
int rc;
spin_lock_irq(&de->lock);
rc = __de_get_settings(de, ecmd);
spin_unlock_irq(&de->lock);
return rc;
}
static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct de_private *de = netdev_priv(dev);
int rc;
spin_lock_irq(&de->lock);
rc = __de_set_settings(de, ecmd);
spin_unlock_irq(&de->lock);
return rc;
}
static u32 de_get_msglevel(struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
return de->msg_enable;
}
static void de_set_msglevel(struct net_device *dev, u32 msglvl)
{
struct de_private *de = netdev_priv(dev);
de->msg_enable = msglvl;
}
static int de_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct de_private *de = netdev_priv(dev);
if (!de->ee_data)
return -EOPNOTSUPP;
if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
(eeprom->len != DE_EEPROM_SIZE))
return -EINVAL;
memcpy(data, de->ee_data, eeprom->len);
return 0;
}
static int de_nway_reset(struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
u32 status;
if (de->media_type != DE_MEDIA_TP_AUTO)
return -EINVAL;
if (netif_carrier_ok(de->dev))
de_link_down(de);
status = dr32(SIAStatus);
dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
if (netif_msg_link(de))
dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
status, dr32(SIAStatus));
return 0;
}
static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *data)
{
struct de_private *de = netdev_priv(dev);
regs->version = (DE_REGS_VER << 2) | de->de21040;
spin_lock_irq(&de->lock);
__de_get_regs(de, data);
spin_unlock_irq(&de->lock);
}
static const struct ethtool_ops de_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_drvinfo = de_get_drvinfo,
.get_regs_len = de_get_regs_len,
.get_settings = de_get_settings,
.set_settings = de_set_settings,
.get_msglevel = de_get_msglevel,
.set_msglevel = de_set_msglevel,
.get_eeprom = de_get_eeprom,
.nway_reset = de_nway_reset,
.get_regs = de_get_regs,
};
static void __devinit de21040_get_mac_address (struct de_private *de)
{
unsigned i;
dw32 (ROMCmd, 0);
udelay(5);
for (i = 0; i < 6; i++) {
int value, boguscnt = 100000;
do {
value = dr32(ROMCmd);
rmb();
} while (value < 0 && --boguscnt > 0);
de->dev->dev_addr[i] = value;
udelay(1);
if (boguscnt <= 0)
pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
}
}
static void __devinit de21040_get_media_info(struct de_private *de)
{
unsigned int i;
de->media_type = DE_MEDIA_TP;
de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
SUPPORTED_10baseT_Half | SUPPORTED_AUI;
de->media_advertise = de->media_supported;
for (i = 0; i < DE_MAX_MEDIA; i++) {
switch (i) {
case DE_MEDIA_AUI:
case DE_MEDIA_TP:
case DE_MEDIA_TP_FD:
de->media[i].type = i;
de->media[i].csr13 = t21040_csr13[i];
de->media[i].csr14 = t21040_csr14[i];
de->media[i].csr15 = t21040_csr15[i];
break;
default:
de->media[i].type = DE_MEDIA_INVALID;
break;
}
}
}
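/* Bit-bang one 16-bit word out of the serial EEPROM through the ROM command
 * register: clock out the read command and address, then clock in the 16
 * data bits. */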
static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
{
int i;
unsigned retval = 0;
void __iomem *ee_addr = regs + ROMCmd;
int read_cmd = location | (EE_READ_CMD << addr_len);
writel(EE_ENB & ~EE_CS, ee_addr);
writel(EE_ENB, ee_addr);
for (i = 4 + addr_len; i >= 0; i--) {
short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
writel(EE_ENB | dataval, ee_addr);
readl(ee_addr);
writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
readl(ee_addr);
retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
}
writel(EE_ENB, ee_addr);
readl(ee_addr);
for (i = 16; i > 0; i--) {
writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
readl(ee_addr);
retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
writel(EE_ENB, ee_addr);
readl(ee_addr);
}
writel(EE_ENB & ~EE_CS, ee_addr);
return retval;
}
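/* Read the whole SROM, extract the station address, then parse the info
 * leaf and its media blocks to learn which ports exist and which custom
 * CSR13/14/15 values (if any) they require.  A malformed SROM falls back
 * to advertising every media type with the built-in CSR tables. */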
static void __devinit de21041_get_srom_info (struct de_private *de)
{
unsigned i, sa_offset = 0, ofs;
u8 ee_data[DE_EEPROM_SIZE + 6] = {};
unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
struct de_srom_info_leaf *il;
void *bufp;
for (i = 0; i < DE_EEPROM_WORDS; i++)
((__le16 *)ee_data)[i] =
cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
#ifndef CONFIG_MIPS_COBALT
for (i = 0; i < 8; i ++)
if (ee_data[i] != ee_data[16+i])
sa_offset = 20;
#endif
for (i = 0; i < 6; i ++)
de->dev->dev_addr[i] = ee_data[i + sa_offset];
ofs = ee_data[SROMC0InfoLeaf];
if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
goto bad_srom;
il = (struct de_srom_info_leaf *) &ee_data[ofs];
if (il->n_blocks == 0)
goto bad_srom;
if ((sizeof(ee_data) - ofs) <
(sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
goto bad_srom;
switch (get_unaligned(&il->default_media)) {
case 0x0001: de->media_type = DE_MEDIA_BNC; break;
case 0x0002: de->media_type = DE_MEDIA_AUI; break;
case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
default: de->media_type = DE_MEDIA_TP_AUTO; break;
}
if (netif_msg_probe(de))
pr_info("de%d: SROM leaf offset %u, default media %s\n",
de->board_idx, ofs, media_name[de->media_type]);
for (i = 0; i < DE_MAX_MEDIA; i++) {
de->media[i].type = DE_MEDIA_INVALID;
de->media[i].csr13 = 0xffff;
de->media[i].csr14 = 0xffff;
de->media[i].csr15 = 0xffff;
}
bufp = ((void *)il) + sizeof(*il);
for (i = 0; i < il->n_blocks; i++) {
struct de_srom_media_block *ib = bufp;
unsigned idx;
switch(ib->opts & MediaBlockMask) {
case 0:
de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
| SUPPORTED_Autoneg;
idx = DE_MEDIA_TP;
de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
break;
case 1:
de->media_supported |= SUPPORTED_BNC;
idx = DE_MEDIA_BNC;
break;
case 2:
de->media_supported |= SUPPORTED_AUI;
idx = DE_MEDIA_AUI;
break;
case 4:
de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
| SUPPORTED_Autoneg;
idx = DE_MEDIA_TP_FD;
de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
break;
default:
goto bad_srom;
}
de->media[idx].type = idx;
if (netif_msg_probe(de))
pr_info("de%d: media block #%u: %s",
de->board_idx, i,
media_name[de->media[idx].type]);
bufp += sizeof (ib->opts);
if (ib->opts & MediaCustomCSRs) {
de->media[idx].csr13 = get_unaligned(&ib->csr13);
de->media[idx].csr14 = get_unaligned(&ib->csr14);
de->media[idx].csr15 = get_unaligned(&ib->csr15);
bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
sizeof(ib->csr15);
if (netif_msg_probe(de))
pr_cont(" (%x,%x,%x)\n",
de->media[idx].csr13,
de->media[idx].csr14,
de->media[idx].csr15);
} else if (netif_msg_probe(de))
pr_cont("\n");
if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
break;
}
de->media_advertise = de->media_supported;
fill_defaults:
for (i = 0; i < DE_MAX_MEDIA; i++) {
if (de->media[i].csr13 == 0xffff)
de->media[i].csr13 = t21041_csr13[i];
if (de->media[i].csr14 == 0xffff) {
if (de->pdev->revision < 0x20)
de->media[i].csr14 = t21041_csr14_brk[i];
else
de->media[i].csr14 = t21041_csr14[i];
}
if (de->media[i].csr15 == 0xffff)
de->media[i].csr15 = t21041_csr15[i];
}
de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
return;
bad_srom:
for (i = 0; i < DE_MAX_MEDIA; i++)
de->media[i].type = i;
de->media_supported =
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP |
SUPPORTED_AUI |
SUPPORTED_BNC;
goto fill_defaults;
}
static const struct net_device_ops de_netdev_ops = {
.ndo_open = de_open,
.ndo_stop = de_close,
.ndo_set_multicast_list = de_set_rx_mode,
.ndo_start_xmit = de_start_xmit,
.ndo_get_stats = de_get_stats,
.ndo_tx_timeout = de_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __devinit de_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev;
struct de_private *de;
int rc;
void __iomem *regs;
unsigned long pciaddr;
static int board_idx = -1;
board_idx++;
#ifndef MODULE
if (board_idx == 0)
printk("%s", version);
#endif
dev = alloc_etherdev(sizeof(struct de_private));
if (!dev)
return -ENOMEM;
dev->netdev_ops = &de_netdev_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
dev->ethtool_ops = &de_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
de = netdev_priv(dev);
de->de21040 = ent->driver_data == 0 ? 1 : 0;
de->pdev = pdev;
de->dev = dev;
de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
de->board_idx = board_idx;
spin_lock_init (&de->lock);
init_timer(&de->media_timer);
if (de->de21040)
de->media_timer.function = de21040_media_timer;
else
de->media_timer.function = de21041_media_timer;
de->media_timer.data = (unsigned long) de;
netif_carrier_off(dev);
rc = pci_enable_device(pdev);
if (rc)
goto err_out_free;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out_disable;
if (pdev->irq < 2) {
rc = -EIO;
pr_err(PFX "invalid irq (%d) for pci dev %s\n",
pdev->irq, pci_name(pdev));
goto err_out_res;
}
dev->irq = pdev->irq;
pciaddr = pci_resource_start(pdev, 1);
if (!pciaddr) {
rc = -EIO;
pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
goto err_out_res;
}
if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
rc = -EIO;
pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
(unsigned long long)pci_resource_len(pdev, 1),
pci_name(pdev));
goto err_out_res;
}
regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
(unsigned long long)pci_resource_len(pdev, 1),
pciaddr, pci_name(pdev));
goto err_out_res;
}
dev->base_addr = (unsigned long) regs;
de->regs = regs;
de_adapter_wake(de);
rc = de_reset_mac(de);
if (rc) {
pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
goto err_out_iomap;
}
if (de->de21040) {
de21040_get_mac_address(de);
de21040_get_media_info(de);
} else {
de21041_get_srom_info(de);
}
rc = register_netdev(dev);
if (rc)
goto err_out_iomap;
dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
de->de21040 ? "21040" : "21041",
dev->base_addr,
dev->dev_addr,
dev->irq);
pci_set_drvdata(pdev, dev);
pci_set_master(pdev);
de_adapter_sleep(de);
return 0;
err_out_iomap:
kfree(de->ee_data);
iounmap(regs);
err_out_res:
pci_release_regions(pdev);
err_out_disable:
pci_disable_device(pdev);
err_out_free:
free_netdev(dev);
return rc;
}
static void __devexit de_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct de_private *de = netdev_priv(dev);
BUG_ON(!dev);
unregister_netdev(dev);
kfree(de->ee_data);
iounmap(de->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
#ifdef CONFIG_PM
static int de_suspend (struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct de_private *de = netdev_priv(dev);
rtnl_lock();
if (netif_running (dev)) {
del_timer_sync(&de->media_timer);
disable_irq(dev->irq);
spin_lock_irq(&de->lock);
de_stop_hw(de);
netif_stop_queue(dev);
netif_device_detach(dev);
netif_carrier_off(dev);
spin_unlock_irq(&de->lock);
enable_irq(dev->irq);
__de_get_stats(de);
synchronize_irq(dev->irq);
de_clean_rings(de);
de_adapter_sleep(de);
pci_disable_device(pdev);
} else {
netif_device_detach(dev);
}
rtnl_unlock();
return 0;
}
static int de_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct de_private *de = netdev_priv(dev);
int retval = 0;
rtnl_lock();
if (netif_device_present(dev))
goto out;
if (!netif_running(dev))
goto out_attach;
if ((retval = pci_enable_device(pdev))) {
dev_err(&dev->dev, "pci_enable_device failed in resume\n");
goto out;
}
pci_set_master(pdev);
de_init_rings(de);
de_init_hw(de);
out_attach:
netif_device_attach(dev);
out:
rtnl_unlock();
return 0;
}
#endif
static struct pci_driver de_driver = {
.name = DRV_NAME,
.id_table = de_pci_tbl,
.probe = de_init_one,
.remove = __devexit_p(de_remove_one),
#ifdef CONFIG_PM
.suspend = de_suspend,
.resume = de_resume,
#endif
};
static int __init de_init (void)
{
#ifdef MODULE
printk("%s", version);
#endif
return pci_register_driver(&de_driver);
}
static void __exit de_exit (void)
{
pci_unregister_driver (&de_driver);
}
module_init(de_init);
module_exit(de_exit);