Diffstat (limited to 'net/core')
-rw-r--r--net/core/dev.c12
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/link_watch.c2
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sock.c10
6 files changed, 16 insertions, 16 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 563ddc28139d..56c3e00098c0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2071,7 +2071,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
u32 features;
/*
- * If device doesnt need skb->dst, release it right now while
+ * If device doesn't need skb->dst, release it right now while
* its hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
@@ -2131,7 +2131,7 @@ gso:
nskb->next = NULL;
/*
- * If device doesnt need nskb->dst, release it right now while
+ * If device doesn't need nskb->dst, release it right now while
* its hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
@@ -2950,8 +2950,8 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
* when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
* a compare and 2 stores extra right now if we dont have it on
* but have CONFIG_NET_CLS_ACT
- * NOTE: This doesnt stop any functionality; if you dont have
- * the ingress scheduler, you just cant add policies on ingress.
+ * NOTE: This doesn't stop any functionality; if you dont have
+ * the ingress scheduler, you just can't add policies on ingress.
*
*/
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
@@ -3780,7 +3780,7 @@ static void net_rx_action(struct softirq_action *h)
* with netpoll's poll_napi(). Only the entity which
* obtains the lock and sees NAPI_STATE_SCHED set will
* actually make the ->poll() call. Therefore we avoid
- * accidently calling ->poll() when NAPI is not scheduled.
+ * accidentally calling ->poll() when NAPI is not scheduled.
*/
work = 0;
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
@@ -6316,7 +6316,7 @@ static void __net_exit default_device_exit(struct net *net)
if (dev->rtnl_link_ops)
continue;
- /* Push remaing network devices to init_net */
+ /* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 232b1873bb28..afb8afb066bb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -425,7 +425,7 @@ EXPORT_SYMBOL(sk_run_filter);
* As we dont want to clear mem[] array for each packet going through
* sk_run_filter(), we check that filter loaded by user never try to read
* a cell if not previously written, and we check all branches to be sure
- * a malicious user doesnt try to abuse us.
+ * a malicious user doesn't try to abuse us.
*/
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
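The check_load_and_stores() comment above describes the classic-BPF sanity pass: a load from the scratch mem[] array is only accepted if every path to it has already stored to the same cell, so a malicious filter cannot read uninitialized scratch memory. A minimal userspace sketch of the idea for straight-line programs (the struct insn layout and opcode names are illustrative, not the real sock_filter encoding, and the kernel version additionally follows both targets of conditional jumps) could be:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define MEMWORDS 16             /* size of the scratch mem[] array */

        /* Illustrative instruction encoding, not the real sock_filter layout. */
        enum op { OP_ST, OP_LD_MEM, OP_OTHER };

        struct insn {
                enum op  code;
                uint32_t k;             /* mem[] cell index for OP_ST / OP_LD_MEM */
        };

        /* Return true iff no instruction reads a mem[] cell before writing it. */
        static bool check_load_and_stores(const struct insn *prog, int len)
        {
                uint16_t written = 0;   /* bit i set once mem[i] has been stored to */

                for (int pc = 0; pc < len; pc++) {
                        uint32_t k = prog[pc].k;

                        switch (prog[pc].code) {
                        case OP_ST:
                                if (k >= MEMWORDS)
                                        return false;
                                written |= 1u << k;
                                break;
                        case OP_LD_MEM:
                                if (k >= MEMWORDS || !(written & (1u << k)))
                                        return false;   /* read before write */
                                break;
                        case OP_OTHER:
                                break;
                        }
                }
                return true;
        }

        int main(void)
        {
                struct insn good[] = { { OP_ST, 3 }, { OP_LD_MEM, 3 } };
                struct insn bad[]  = { { OP_LD_MEM, 3 }, { OP_ST, 3 } };

                printf("good: %d\n", check_load_and_stores(good, 2));  /* 1 */
                printf("bad:  %d\n", check_load_and_stores(bad, 2));   /* 0 */
                return 0;
        }
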
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 01a1101b5936..a7b342131869 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -129,7 +129,7 @@ static void linkwatch_schedule_work(int urgent)
if (!cancel_delayed_work(&linkwatch_work))
return;
- /* Otherwise we reschedule it again for immediate exection. */
+ /* Otherwise we reschedule it again for immediate execution. */
schedule_delayed_work(&linkwatch_work, 0);
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 49f7ea5b4c75..d7c4bb4b1820 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -196,7 +196,7 @@ EXPORT_SYMBOL_GPL(__rtnl_register);
* as failure of this function is very unlikely, it can only happen due
* to lack of memory when allocating the chain to store all message
* handlers for a protocol. Meant for use in init functions where lack
- * of memory implies no sense in continueing.
+ * of memory implies no sense in continuing.
*/
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func doit, rtnl_dumpit_func dumpit)
@@ -1440,7 +1440,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
errout:
if (err < 0 && modified && net_ratelimit())
printk(KERN_WARNING "A link change request failed with "
- "some changes comitted already. Interface %s may "
+ "some changes committed already. Interface %s may "
"have been left with an inconsistent configuration, "
"please check.\n", dev->name);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 801dd08908f9..7ebeed0a877c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2267,7 +2267,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
* of bytes already consumed and the next call to
* skb_seq_read() will return the remaining part of the block.
*
- * Note 1: The size of each block of data returned can be arbitary,
+ * Note 1: The size of each block of data returned can be arbitrary,
* this limitation is the cost for zerocopy seqeuental
* reads of potentially non linear data.
*
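The skb_seq_read() kernel-doc above notes that each call may hand back a block of arbitrary size, since the data can live in non-linear fragments. A hedged sketch of the usual caller loop follows; it is an in-kernel fragment (it only builds in kernel/module context), and walk_skb() with its byte counter is a made-up example, not something from skbuff.c:

        #include <linux/skbuff.h>

        /* Illustrative: count bytes in skb[from, to) without linearizing it. */
        static unsigned int walk_skb(struct sk_buff *skb, unsigned int from,
                                     unsigned int to)
        {
                struct skb_seq_state st;
                const u8 *data;
                unsigned int consumed = 0, len;

                skb_prepare_seq_read(skb, from, to, &st);

                /* Each call returns the next block; its length is whatever the
                 * underlying fragment happens to provide. 0 means the requested
                 * area is exhausted. */
                while ((len = skb_seq_read(consumed, &data, &st)) != 0)
                        consumed += len;        /* inspect data[0..len-1] here */

                skb_abort_seq_read(&st);        /* drop any mapping still held */
                return consumed;
        }
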
diff --git a/net/core/sock.c b/net/core/sock.c
index 7dfed792434d..6e819780c232 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -215,7 +215,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
-/* Maximal space eaten by iovec or ancilliary data plus some space */
+/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
@@ -1175,7 +1175,7 @@ static void __sk_free(struct sock *sk)
void sk_free(struct sock *sk)
{
/*
- * We substract one from sk_wmem_alloc and can know if
+ * We subtract one from sk_wmem_alloc and can know if
* some packets are still in some tx queue.
* If not null, sock_wfree() will call __sk_free(sk) later
*/
@@ -1185,10 +1185,10 @@ void sk_free(struct sock *sk)
EXPORT_SYMBOL(sk_free);
/*
- * Last sock_put should drop referrence to sk->sk_net. It has already
- * been dropped in sk_change_net. Taking referrence to stopping namespace
+ * Last sock_put should drop reference to sk->sk_net. It has already
+ * been dropped in sk_change_net. Taking reference to stopping namespace
* is not an option.
- * Take referrence to a socket to remove it from hash _alive_ and after that
+ * Take reference to a socket to remove it from hash _alive_ and after that
* destroy it in the context of init_net.
*/
void sk_release_kernel(struct sock *sk)
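The sk_free() comment above depends on sk_wmem_alloc being biased by one when the socket is created: dropping that bias tells the caller whether transmit packets still hold references, and if they do, the last write-memory release frees the socket instead. A small userspace sketch of the same pattern (struct fake_sock and the fake_sock_* helpers are made-up names for illustration, not the kernel API) might be:

        #include <stdatomic.h>
        #include <stdio.h>
        #include <stdlib.h>

        /* Illustrative stand-in for a socket whose wmem counter doubles as the
         * "packets still in flight" reference count. */
        struct fake_sock {
                atomic_int wmem_alloc;          /* 1 (the bias) + bytes queued for tx */
        };

        static struct fake_sock *fake_sock_alloc(void)
        {
                struct fake_sock *sk = malloc(sizeof(*sk));

                atomic_init(&sk->wmem_alloc, 1);  /* the extra one that sk_free drops */
                return sk;
        }

        static void __fake_sock_free(struct fake_sock *sk)
        {
                printf("really freeing the socket\n");
                free(sk);
        }

        /* Called when a queued packet is consumed (the sock_wfree role). */
        static void fake_sock_wfree(struct fake_sock *sk, int charge)
        {
                if (atomic_fetch_sub(&sk->wmem_alloc, charge) == charge)
                        __fake_sock_free(sk);   /* we dropped the last reference */
        }

        /* Called when the owner releases the socket (the sk_free role). */
        static void fake_sock_free(struct fake_sock *sk)
        {
                /* Subtract the bias; if that leaves zero, nothing is queued and we
                 * free right away, otherwise the last wfree call frees it later. */
                if (atomic_fetch_sub(&sk->wmem_alloc, 1) == 1)
                        __fake_sock_free(sk);
        }

        int main(void)
        {
                struct fake_sock *sk = fake_sock_alloc();

                atomic_fetch_add(&sk->wmem_alloc, 100);   /* one packet still queued */
                fake_sock_free(sk);                       /* deferred: packet in flight */
                fake_sock_wfree(sk, 100);                 /* last reference: freed here */
                return 0;
        }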