-rw-r--r--  drivers/net/can/c_can/c_can.h      | 11
-rw-r--r--  drivers/net/can/c_can/c_can_main.c | 23
2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 9b4e54c950a6..08b6efa7a1a7 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -238,16 +238,7 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
{
- u8 head = c_can_get_tx_head(ring);
- u8 tail = c_can_get_tx_tail(ring);
-
- /* This is not a FIFO. C/D_CAN sends out the buffers
- * prioritized. The lowest buffer number wins.
- */
- if (head < tail)
- return 0;
-
- return ring->obj_num - head;
+ return ring->obj_num - (ring->head - ring->tail);
}
#endif /* C_CAN_H */
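
For reference, here is a minimal standalone sketch of the ring accounting introduced above, assuming head and tail are free-running unsigned counters and obj_num is the total number of TX message objects (the struct and names below are illustrative stand-ins, not the driver's types):

#include <stdio.h>

/* Illustrative stand-in for struct c_can_tx_ring: head/tail are
 * free-running counters, obj_num is the number of TX objects.
 */
struct tx_ring {
        unsigned int head;
        unsigned int tail;
        unsigned int obj_num;
};

/* Mirrors the new c_can_get_tx_free(): head - tail is the number of
 * frames in flight, and the unsigned subtraction stays correct even
 * after the counters wrap around.
 */
static unsigned int tx_free(const struct tx_ring *r)
{
        return r->obj_num - (r->head - r->tail);
}

int main(void)
{
        struct tx_ring r = { .head = 18, .tail = 5, .obj_num = 16 };

        /* 13 frames in flight out of 16 objects -> 3 slots free */
        printf("free = %u\n", tx_free(&r));
        return 0;
}
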
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index eb324fffab09..52671d1ea17d 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -456,7 +456,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
struct can_frame *frame = (struct can_frame *)skb->data;
struct c_can_priv *priv = netdev_priv(dev);
struct c_can_tx_ring *tx_ring = &priv->tx;
- u32 idx, obj;
+ u32 idx, obj, cmd = IF_COMM_TX;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
@@ -469,7 +469,8 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
if (c_can_get_tx_free(tx_ring) == 0)
netif_stop_queue(dev);
- obj = idx + priv->msg_obj_tx_first;
+ if (idx < c_can_get_tx_tail(tx_ring))
+ cmd &= ~IF_COMM_TXRQST; /* Cache the message */
/* Store the message in the interface so we can call
* can_put_echo_skb(). We must do this before we enable
@@ -478,9 +479,8 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
c_can_setup_tx_object(dev, IF_TX, frame, idx);
priv->dlc[idx] = frame->len;
can_put_echo_skb(skb, dev, idx, 0);
-
- /* Start transmission */
- c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
+ obj = idx + priv->msg_obj_tx_first;
+ c_can_object_put(dev, IF_TX, obj, cmd);
return NETDEV_TX_OK;
}
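
A small sketch of the transmit-side decision in the hunks above, assuming IF_COMM_TX includes the transmission-request bit so that clearing IF_COMM_TXRQST stores the frame in the message object without starting it; the constants below are illustrative placeholders rather than the real register values:

#include <stdint.h>
#include <stdio.h>

#define CMD_TX      0x1fu   /* placeholder for IF_COMM_TX             */
#define CMD_TXRQST  0x04u   /* placeholder for the IF_COMM_TXRQST bit */

/* Mirrors the new start_xmit() logic: if the slot being filled (idx)
 * lies below the current tail, a lower-numbered message object is
 * still queued.  C/D_CAN always sends the lowest-numbered object
 * first, so requesting transmission now would reorder frames; the
 * frame is therefore only cached and kicked later from the TX
 * completion handler.
 */
static uint32_t tx_command(unsigned int idx, unsigned int tail)
{
        uint32_t cmd = CMD_TX;

        if (idx < tail)
                cmd &= ~CMD_TXRQST;     /* cache the message */

        return cmd;
}

int main(void)
{
        /* idx above tail: transmit immediately */
        printf("cmd = 0x%02x\n", (unsigned int)tx_command(3, 1));
        /* idx below tail: cache only, TXRQST cleared */
        printf("cmd = 0x%02x\n", (unsigned int)tx_command(0, 5));
        return 0;
}
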
@@ -725,6 +725,7 @@ static void c_can_do_tx(struct net_device *dev)
struct c_can_tx_ring *tx_ring = &priv->tx;
struct net_device_stats *stats = &dev->stats;
u32 idx, obj, pkts = 0, bytes = 0, pend;
+ u8 tail;
if (priv->msg_obj_tx_last > 32)
pend = priv->read_reg32(priv, C_CAN_INTPND3_REG);
@@ -761,6 +762,18 @@ static void c_can_do_tx(struct net_device *dev)
stats->tx_bytes += bytes;
stats->tx_packets += pkts;
can_led_event(dev, CAN_LED_EVENT_TX);
+
+ tail = c_can_get_tx_tail(tx_ring);
+
+ if (tail == 0) {
+ u8 head = c_can_get_tx_head(tx_ring);
+
+ /* Start transmission for all cached messages */
+ for (idx = tail; idx < head; idx++) {
+ obj = idx + priv->msg_obj_tx_first;
+ c_can_object_put(dev, IF_NAPI, obj, IF_COMM_TXRQST);
+ }
+ }
}
/* If we have a gap in the pending bits, that means we either
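
Finally, a sketch of the completion-side flush added in the last hunk, under the assumption that the tail index only returns to 0 once all in-flight objects have completed, at which point the cached frames between tail and head can be started in ascending index order (the helper and constant below are hypothetical placeholders for c_can_object_put() and priv->msg_obj_tx_first):

#include <stdio.h>

#define MSG_OBJ_TX_FIRST 17     /* placeholder for priv->msg_obj_tx_first */

/* Placeholder for c_can_object_put(dev, IF_NAPI, obj, IF_COMM_TXRQST) */
static void request_transmission(unsigned int obj)
{
        printf("kick message object %u\n", obj);
}

/* Once the tail index has wrapped back to 0, every previously queued
 * object has completed, so the frames that were only cached (TXRQST
 * cleared) between tail and head are now started in ascending order,
 * preserving FIFO ordering on priority-based C/D_CAN hardware.
 */
static void flush_cached_frames(unsigned int tail, unsigned int head)
{
        unsigned int idx;

        if (tail != 0)
                return;

        for (idx = tail; idx < head; idx++)
                request_transmission(idx + MSG_OBJ_TX_FIRST);
}

int main(void)
{
        flush_cached_frames(0, 3);      /* kicks objects 17, 18, 19 */
        return 0;
}
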