+ return limit > 0 ? limit : 0;
+
+}
+
+/*
+ * Register one vote for PCC (Packet Completion Coalescing) level @atmp.
+ * Consecutive identical votes accumulate in dpi->cnt; jme_dynamic_pcc()
+ * only reprograms the hardware once the same level has won enough
+ * rounds, which prevents the coalescing level from flapping.
+ */
+static void
+jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
+{
+	if (likely(atmp == dpi->cur)) {
+		/* Already running at the suggested level: clear the streak. */
+		dpi->cnt = 0;
+		return;
+	}
+
+	if (dpi->attempt != atmp) {
+		/* Suggestion changed: restart voting for the new level. */
+		dpi->attempt = atmp;
+		dpi->cnt = 0;
+		return;
+	}
+
+	/* Same suggestion as last round: extend the streak. */
+	++(dpi->cnt);
+}
+
+/*
+ * Choose the PCC (Packet Completion Coalescing) level matching the RX
+ * load observed since the last PCC timer tick and vote for it through
+ * jme_attempt_pcc().  The hardware is only reprogrammed after the same
+ * level has won more than 5 consecutive rounds, so bursty traffic does
+ * not cause the coalescing level to flap.
+ *
+ * Fix: dropped the archaic `register' storage class on dpi -- modern
+ * compilers ignore it and kernel style avoids it.
+ */
+static void
+jme_dynamic_pcc(struct jme_adapter *jme)
+{
+	struct dynpcc_info *dpi = &(jme->dpi);
+
+	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
+		/* Heavy byte throughput: coalesce hardest. */
+		jme_attempt_pcc(dpi, PCC_P3);
+	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
+		|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
+		/* Many packets or many interrupts: medium coalescing. */
+		jme_attempt_pcc(dpi, PCC_P2);
+	else
+		/* Light load: favour latency over interrupt mitigation. */
+		jme_attempt_pcc(dpi, PCC_P1);
+
+	/* Reprogram the NIC only after a stable run of identical votes. */
+	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
+		jme_set_rx_pcc(jme, dpi->attempt);
+		dpi->cur = dpi->attempt;
+		dpi->cnt = 0;
+	}
+}
+
+/*
+ * Arm the chip's interval timer that drives jme_pcc_tasklet(), and take
+ * a baseline snapshot of the RX statistics so the next tick can measure
+ * the traffic accumulated during the interval.
+ */
+static void
+jme_start_pcc_timer(struct jme_adapter *jme)
+{
+ struct dynpcc_info *dpi = &(jme->dpi);
+ /* Snapshot the RX counters; jme_dynamic_pcc() diffs against these. */
+ dpi->last_bytes = NET_STAT(jme).rx_bytes;
+ dpi->last_pkts = NET_STAT(jme).rx_packets;
+ dpi->intr_cnt = 0;
+ /*
+ * Program the 24-bit count-up timer so it wraps (and fires) after
+ * PCC_INTERVAL_US ticks, and set the enable bit.  NOTE(review): the
+ * name suggests microsecond ticks -- confirm against the datasheet.
+ */
+ jwrite32(jme, JME_TMCSR,
+ TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
+}
+
+/* Stop the PCC interval timer: writing 0 clears TMCSR_EN and the count. */
+__always_inline static void
+jme_stop_pcc_timer(struct jme_adapter *jme)
+{
+ jwrite32(jme, JME_TMCSR, 0);
+}
+
+/*
+ * PCC interval-timer tasklet.  Each time the hardware timer fires this
+ * re-tunes RX interrupt coalescing for the traffic seen during the last
+ * interval, then re-arms the timer.  The timer is stopped outright when
+ * the link is down or changing, and dynamic tuning is skipped while
+ * NAPI polling mode is active.
+ */
+static void
+jme_pcc_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+	struct net_device *netdev = jme->dev;
+
+	/* No point coalescing while the link is down or renegotiating. */
+	if (unlikely(!netif_carrier_ok(netdev) ||
+		     (atomic_read(&jme->link_changing) != 1))) {
+		jme_stop_pcc_timer(jme);
+		return;
+	}
+
+	/* Dynamic tuning applies only in interrupt (non-poll) mode. */
+	if (!(jme->flags & JME_FLAG_POLL))
+		jme_dynamic_pcc(jme);
+
+	jme_start_pcc_timer(jme);
+}
+
+/* Enter NAPI polling mode: turn RX interrupt coalescing off entirely. */
+__always_inline static void
+jme_polling_mode(struct jme_adapter *jme)
+{
+ jme_set_rx_pcc(jme, PCC_OFF);
+}
+
+/* Return to interrupt mode at the lightest coalescing level (P1). */
+__always_inline static void
+jme_interrupt_mode(struct jme_adapter *jme)
+{
+ jme_set_rx_pcc(jme, PCC_P1);
+}
+
+/*
+ * Link-change tasklet: tear the data path down and bring it back up
+ * after a link-state or MTU transition.  Serialized against the other
+ * tasklets through the link_changing atomic, which idles at 1 and is
+ * used here as a try-lock (dec-and-test claims it, the atomic_inc at
+ * `out' releases it on every path).
+ */
+static void
+jme_link_change_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter*)arg;
+ struct net_device *netdev = jme->dev;
+ int timeout = WAIT_TASKLET_TIMEOUT;
+ int rc;
+
+ /* Try-lock: another link change is already in progress if this fails. */
+ if(!atomic_dec_and_test(&jme->link_changing))
+ goto out;
+
+ /* Nothing to do when the link state and the MTU are both unchanged. */
+ if(jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
+ goto out;
+
+ jme->old_mtu = netdev->mtu;
+ netif_stop_queue(netdev);
+
+ /*
+ * Busy-wait (up to WAIT_TASKLET_TIMEOUT iterations of 1 ms) for the
+ * RX/TX clean tasklets to drain; their counters idle at 1 while not
+ * running.  NOTE(review): if the timeout expires we proceed anyway --
+ * confirm that racing with a still-active clean tasklet is safe here.
+ */
+ while(--timeout > 0 &&
+ (
+ atomic_read(&jme->rx_cleaning) != 1 ||
+ atomic_read(&jme->tx_cleaning) != 1
+ )) {
+
+ mdelay(1);
+ }
+
+ /* The link was previously up: tear the old data path down first. */
+ if(netif_carrier_ok(netdev)) {
+ jme_stop_pcc_timer(jme);
+ jme_reset_mac_processor(jme);
+ jme_free_rx_resources(jme);
+ jme_free_tx_resources(jme);
+
+ if(jme->flags & JME_FLAG_POLL) {
+ jme_polling_mode(jme);
+ napi_disable(&jme->napi);
+ }
+ }
+
+ /*
+ * Re-evaluate the link; presumably the second argument selects
+ * test-only (1) vs. apply-configuration (0) mode -- confirm against
+ * jme_check_link().  Carrier state is current after this call.
+ */
+ jme_check_link(netdev, 0);
+ /* New link is up: rebuild the rings and restart the engines. */
+ if(netif_carrier_ok(netdev)) {
+ rc = jme_setup_rx_resources(jme);
+ if(rc) {
+ jeprintk(netdev->name,
+ "Allocating resources for RX error"
+ ", Device STOPPED!\n");
+ goto out;
+ }
+
+
+ rc = jme_setup_tx_resources(jme);
+ if(rc) {
+ jeprintk(netdev->name,
+ "Allocating resources for TX error"
+ ", Device STOPPED!\n");
+ goto err_out_free_rx_resources;
+ }
+
+ jme_enable_rx_engine(jme);
+ jme_enable_tx_engine(jme);
+
+ netif_start_queue(netdev);
+
+ if(jme->flags & JME_FLAG_POLL) {
+ napi_enable(&jme->napi);
+ jme_interrupt_mode(jme);
+ }
+
+ jme_start_pcc_timer(jme);
+ }
+
+ goto out;
+
+err_out_free_rx_resources:
+ jme_free_rx_resources(jme);
+out:
+ /* Release the link_changing try-lock (restore its idle value of 1). */
+ atomic_inc(&jme->link_changing);
+}
+
+/*
+ * RX clean tasklet: hand completed RX descriptors to the stack and
+ * count the event for the dynamic-PCC heuristic.
+ */
+static void
+jme_rx_clean_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter*)arg;
+ struct dynpcc_info *dpi = &(jme->dpi);
+
+ /* Process up to a full ring's worth of received frames. */
+ jme_process_receive(jme, jme->rx_ring_size);
+ /* One more RX interrupt this PCC interval; read by jme_dynamic_pcc(). */
+ ++(dpi->intr_cnt);
+
+ /*
+ * NOTE(review): unlike jme_tx_clean_tasklet() there is no
+ * rx_cleaning / link_changing guard here, yet the link-change
+ * tasklet waits on rx_cleaning -- presumably the guard lives inside
+ * jme_process_receive(); confirm.
+ */
+}
+
+/*
+ * NAPI poll handler: receive up to `budget' frames, replay any "RX ring
+ * empty" stalls latched while polling, and drop back to interrupt mode
+ * once the ring is drained.
+ */
+static int
+jme_poll(struct napi_struct *napi, int budget)
+{
+ struct jme_adapter *jme = container_of(napi, struct jme_adapter, napi);
+ struct net_device *netdev = jme->dev;
+ int rest;
+
+ /* NOTE(review): assumes jme_process_receive() returns the unused
+ * portion of `budget' -- confirm against its definition. */
+ rest = jme_process_receive(jme, budget);
+
+ /*
+ * rx_empty idles at 1; each latched ring-empty event presumably
+ * raised it above that (confirm where it is incremented).  Every
+ * extra decrement seen here is one stall: account a drop and
+ * restart the RX engine, then restore the idle value below.
+ */
+ while(!atomic_dec_and_test(&jme->rx_empty)) {
+ ++(NET_STAT(jme).rx_dropped);
+ jme_restart_rx_engine(jme);
+ }
+ atomic_inc(&jme->rx_empty);
+
+ /* Budget not exhausted: ring drained, return to interrupt mode. */
+ if(rest) {
+ netif_rx_complete(netdev, napi);
+ jme_interrupt_mode(jme);
+ }
+
+ /* NAPI contract: report the number of packets actually consumed. */
+ return budget - rest;
+}
+
+/*
+ * "RX ring empty" tasklet: the NIC stalled because no free RX
+ * descriptors were available.  Reclaim completed buffers and kick the
+ * RX engine again, unless the link is changing or already down.
+ */
+static void
+jme_rx_empty_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+
+	/* Stand down while link renegotiation owns the rings or link is off. */
+	if (unlikely((atomic_read(&jme->link_changing) != 1) ||
+		     !netif_carrier_ok(jme->dev)))
+		return;
+
+	queue_dbg(jme->dev->name, "RX Queue Full!\n");
+
+	/* Drain what the hardware has finished with, then restart RX. */
+	jme_rx_clean_tasklet(arg);
+	jme_restart_rx_engine(jme);
+}
+
+/*
+ * Wake the TX queue if it was stopped for lack of descriptors and
+ * enough have since been freed (nr_free >= tx_wake_threshold).
+ */
+static void
+jme_wake_queue_if_stopped(struct jme_adapter *jme)
+{
+ struct jme_ring *txring = jme->txring;
+
+ /*
+ * NOTE(review): a write barrier immediately before reads is
+ * unusual; this presumably pairs with a barrier on the queue-stop
+ * side so the nr_free update is visible before the stopped test.
+ * Confirm the intended pairing (smp_mb() may be what is meant).
+ */
+ smp_wmb();
+ if(unlikely(netif_queue_stopped(jme->dev) &&
+ atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
+
+ queue_dbg(jme->dev->name, "TX Queue Waked.\n");
+ netif_wake_queue(jme->dev);
+
+ }
+
+}
+
+/*
+ * TX completion tasklet: walk the TX ring from next_to_clean, reclaim
+ * every packet the hardware has finished with (ownership bit cleared in
+ * the write-back flags), unmap its DMA buffers, free the skb, account
+ * statistics, then return the freed descriptors to the ring and wake
+ * the queue if it had been stopped.
+ */
+static void
+jme_tx_clean_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter*)arg;
+ struct jme_ring *txring = &(jme->txring[0]);
+ /* volatile: descwb flags are written back by the NIC via DMA. */
+ volatile struct txdesc *txdesc = txring->desc;
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
+ int i, j, cnt = 0, max, err, mask;
+
+ /*
+ * tx_cleaning idles at 1 and doubles as a try-lock; a failed
+ * dec-and-test means another instance is running.  Released by the
+ * atomic_inc() at `out' on every path.
+ */
+ if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
+ goto out;
+
+ /* Stand down while the link-change tasklet rebuilds the rings. */
+ if(unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out;
+
+ if(unlikely(!netif_carrier_ok(jme->dev)))
+ goto out;
+
+ /* Upper bound: only this many descriptors can be in flight. */
+ max = jme->tx_ring_size - atomic_read(&txring->nr_free);
+ mask = jme->tx_ring_mask;
+
+ tx_dbg(jme->dev->name, "Tx Tasklet: In\n");
+
+ for(i = txring->next_to_clean ; cnt < max ; ) {
+
+ ctxbi = txbi + i;
+
+ /* Reclaimable only when an skb is attached and the NIC has
+ * released ownership of the packet's first descriptor. */
+ if(likely(ctxbi->skb &&
+ !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
+
+ /* Non-zero if the NIC reported any error for this packet. */
+ err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
+
+ tx_dbg(jme->dev->name,
+ "Tx Tasklet: Clean %d+%d\n",
+ i, ctxbi->nr_desc);
+
+ /*
+ * Unmap the data fragments.  NOTE(review): j starts at 1,
+ * so descriptor 0 of a packet apparently carries no DMA
+ * mapping of its own -- confirm against the TX setup path.
+ */
+ for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
+ ttxbi = txbi + ((i + j) & (mask));
+ txdesc[(i + j) & (mask)].dw[0] = 0;
+
+ pci_unmap_page(jme->pdev,
+ ttxbi->mapping,
+ ttxbi->len,
+ PCI_DMA_TODEVICE);
+
+ ttxbi->mapping = 0;
+ ttxbi->len = 0;
+ }
+
+ dev_kfree_skb(ctxbi->skb);
+
+ cnt += ctxbi->nr_desc;
+
+ /* Any hardware error bit counts as a carrier error. */
+ if(unlikely(err))
+ ++(NET_STAT(jme).tx_carrier_errors);
+ else {
+ ++(NET_STAT(jme).tx_packets);
+ NET_STAT(jme).tx_bytes += ctxbi->len;
+ }
+
+ ctxbi->skb = NULL;
+ ctxbi->len = 0;
+ }
+ else {
+ /* Either an empty slot or the NIC still owns it: stop here. */
+ if(!ctxbi->skb)
+ tx_dbg(jme->dev->name,
+ "Tx Tasklet:"
+ " Stopped due to no skb.\n");
+ else
+ tx_dbg(jme->dev->name,
+ "Tx Tasklet:"
+ "Stopped due to not done.\n");
+ break;
+ }
+
+ /* Advance past the whole multi-descriptor packet. */
+ i = (i + ctxbi->nr_desc) & mask;
+
+ ctxbi->nr_desc = 0;
+ }
+
+ tx_dbg(jme->dev->name,
+ "Tx Tasklet: Stop %d Jiffies %lu\n",
+ i, jiffies);
+ txring->next_to_clean = i;
+
+ /* Return the reclaimed descriptors to the free pool. */
+ atomic_add(cnt, &txring->nr_free);
+
+ jme_wake_queue_if_stopped(jme);
+
+out:
+ /* Release the tx_cleaning try-lock (restore its idle value of 1). */
+ atomic_inc(&jme->tx_cleaning);
+}
+
+static void
+jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
+{