This patch moves RX queue allocation to alloc_netdev_mq and the freeing of
the queues to free_netdev (symmetric to TX queue allocation). Each
RX queue kobject takes a reference to the queue's device so that the
device can't be freed before all the kobjects have been released -- this
obviates the need for reference counts specific to RX queues.
Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
struct rps_map __rcu *rps_map;
struct rps_dev_flow_table __rcu *rps_flow_table;
struct kobject kobj;
struct rps_map __rcu *rps_map;
struct rps_dev_flow_table __rcu *rps_flow_table;
struct kobject kobj;
- struct netdev_rx_queue *first;
- atomic_t count;
+ struct net_device *dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
- /*
- * Set a pointer to first element in the array which holds the
- * reference count.
- */
for (i = 0; i < count; i++)
for (i = 0; i < count; i++)
- ret = netif_alloc_rx_queues(dev);
- if (ret)
- goto out;
-
netdev_init_queues(dev);
/* Init, if this function is available */
netdev_init_queues(dev);
/* Init, if this function is available */
#ifdef CONFIG_RPS
dev->num_rx_queues = queue_count;
dev->real_num_rx_queues = queue_count;
#ifdef CONFIG_RPS
dev->num_rx_queues = queue_count;
dev->real_num_rx_queues = queue_count;
+ if (netif_alloc_rx_queues(dev))
+ goto free_pcpu;
#endif
dev->gso_max_size = GSO_MAX_SIZE;
#endif
dev->gso_max_size = GSO_MAX_SIZE;
free_pcpu:
free_percpu(dev->pcpu_refcnt);
kfree(dev->_tx);
free_pcpu:
free_percpu(dev->pcpu_refcnt);
kfree(dev->_tx);
+#ifdef CONFIG_RPS
+ kfree(dev->_rx);
+#endif
+
free_p:
kfree(p);
return NULL;
free_p:
kfree(p);
return NULL;
release_net(dev_net(dev));
kfree(dev->_tx);
release_net(dev_net(dev));
kfree(dev->_tx);
+#ifdef CONFIG_RPS
+ kfree(dev->_rx);
+#endif
kfree(rcu_dereference_raw(dev->ingress_queue));
kfree(rcu_dereference_raw(dev->ingress_queue));
static void rx_queue_release(struct kobject *kobj)
{
struct netdev_rx_queue *queue = to_rx_queue(kobj);
static void rx_queue_release(struct kobject *kobj)
{
struct netdev_rx_queue *queue = to_rx_queue(kobj);
- struct netdev_rx_queue *first = queue->first;
struct rps_map *map;
struct rps_dev_flow_table *flow_table;
struct rps_map *map;
struct rps_dev_flow_table *flow_table;
if (flow_table)
call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
if (flow_table)
call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
- if (atomic_dec_and_test(&first->count))
- kfree(first);
}
static struct kobj_type rx_queue_ktype = {
}
static struct kobj_type rx_queue_ktype = {
static int rx_queue_add_kobject(struct net_device *net, int index)
{
struct netdev_rx_queue *queue = net->_rx + index;
static int rx_queue_add_kobject(struct net_device *net, int index)
{
struct netdev_rx_queue *queue = net->_rx + index;
- struct netdev_rx_queue *first = queue->first;
struct kobject *kobj = &queue->kobj;
int error = 0;
struct kobject *kobj = &queue->kobj;
int error = 0;
}
kobject_uevent(kobj, KOBJ_ADD);
}
kobject_uevent(kobj, KOBJ_ADD);
- atomic_inc(&first->count);