[netfilter-cvslog] r3700 - trunk/patch-o-matic-ng/nf_conntrack/linux-2.6/net/netfilter

laforge at netfilter.org laforge at netfilter.org
Tue Feb 15 02:51:26 CET 2005


Author: laforge at netfilter.org
Date: 2005-02-15 02:51:25 +0100 (Tue, 15 Feb 2005)
New Revision: 3700

Modified:
   trunk/patch-o-matic-ng/nf_conntrack/linux-2.6/net/netfilter/nf_conntrack_core.c
Log:
netfilter: Fix ip_ct_selective_cleanup(), and rename it to ip_ct_iterate_cleanup()

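This revision brings the nf_conntrack port in line with the corresponding ip_conntrack change: conntracks that have been created by init_conntrack() but not yet inserted into the hash by __nf_conntrack_confirm() are now kept on a new "unconfirmed" list, overloading the ORIGINAL-direction tuplehash list head as the link. The cleanup iterator walks that list in addition to the hash buckets, so helper unregistration and nf_conntrack_cleanup() no longer miss entries that are still in flight, and the function is renamed to reflect that it iterates rather than only kills.

A minimal sketch of a caller of the renamed iterator follows. Only nf_ct_iterate_cleanup() and the callback signature come from this patch; the kill_unreplied() policy, the ct->status test, IPS_SEEN_REPLY_BIT and the header path are assumptions about this tree.

    #include <net/netfilter/nf_conntrack.h>	/* header path assumed for this tree */

    /* Hedged example: delete every conntrack that has not yet seen a
     * reply.  A non-zero return from the callback makes get_next_corpse()
     * hand the entry back to nf_ct_iterate_cleanup(), which runs its
     * timer down via death_by_timeout(). */
    static int kill_unreplied(struct nf_conn *ct, void *data)
    {
    	return !test_bit(IPS_SEEN_REPLY_BIT, &ct->status);
    }

    static void drop_unreplied_conntracks(void)
    {
    	nf_ct_iterate_cleanup(kill_unreplied, NULL);
    }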

Modified: trunk/patch-o-matic-ng/nf_conntrack/linux-2.6/net/netfilter/nf_conntrack_core.c
===================================================================
--- trunk/patch-o-matic-ng/nf_conntrack/linux-2.6/net/netfilter/nf_conntrack_core.c	2005-02-15 01:41:57 UTC (rev 3699)
+++ trunk/patch-o-matic-ng/nf_conntrack/linux-2.6/net/netfilter/nf_conntrack_core.c	2005-02-15 01:51:25 UTC (rev 3700)
@@ -79,6 +79,7 @@
 static kmem_cache_t *nf_conntrack_expect_cachep;
 struct nf_conn nf_conntrack_untracked;
 unsigned int nf_ct_log_invalid;
+static LIST_HEAD(unconfirmed);
 
 DEFINE_PER_CPU(struct nf_conntrack_stat, nf_conntrack_stat);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
@@ -526,6 +527,12 @@
 	if (ct->expecting)
 		remove_expectations(ct, 1);
 
+	/* We overload first tuple to link into unconfirmed list. */
+	if (!is_confirmed(ct)) {
+		BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
+		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+	}
+
 	/* Delete our master expectation */
 	if (ct->master) {
 		if (ct->master->expectant) {
@@ -641,6 +648,7 @@
 	DEBUGP("Confirming conntrack %p\n", ct);
 
 	WRITE_LOCK(&nf_conntrack_lock);
+
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
 	   not in the hash.  If there is, we lost race. */
@@ -652,6 +660,9 @@
 			  conntrack_tuple_cmp,
 			  struct nf_conntrack_tuple_hash *,
 			  &ct->tuplehash[NF_CT_DIR_REPLY].tuple, NULL)) {
+		/* Remove from unconfirmed list */
+		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+
 		list_prepend(&nf_conntrack_hash[hash],
 			     &ct->tuplehash[NF_CT_DIR_ORIGINAL]);
 		list_prepend(&nf_conntrack_hash[repl_hash],
@@ -851,6 +862,10 @@
 
 		/* this is a braindead... --pablo */
 		atomic_inc(&nf_conntrack_count);
+
+		/* Overload tuple linked list to put us in unconfirmed list. */
+		list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list,
+			 &unconfirmed);
 		WRITE_UNLOCK(&nf_conntrack_lock);
 
 		if (expected->expectfn)
@@ -866,7 +881,11 @@
 			conntrack->helper = nf_ct_find_helper(&repl_tuple);
         }
 
-end:	NF_CT_STAT_INC(new);
+end:	
+	/* Overload tuple linked list to put us in unconfirmed list. */
+	list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);
+
+	NF_CT_STAT_INC(new);
 	atomic_inc(&nf_conntrack_count);
 	WRITE_UNLOCK(&nf_conntrack_lock);
 
@@ -1337,6 +1356,7 @@
 	LIST_DELETE(&helpers, me);
 
 	/* Get rid of expecteds, set helpers to NULL. */
+	LIST_FIND_W(&unconfirmed, unhelp, struct nf_conntrack_tuple_hash*, me);
 	for (i = 0; i < nf_conntrack_htable_size; i++)
 		LIST_FIND_W(&nf_conntrack_hash[i], unhelp,
 			    struct nf_conntrack_tuple_hash *, me);
@@ -1405,40 +1425,45 @@
 }
 
 static inline int
-do_kill(const struct nf_conntrack_tuple_hash *i,
-	int (*kill)(const struct nf_conn *i, void *data),
+do_iter(const struct nf_conntrack_tuple_hash *i,
+	int (*iter)(struct nf_conn *i, void *data),
 	void *data)
 {
-	return kill(i->ctrack, data);
+	return iter(i->ctrack, data);
 }
 
 /* Bring out ya dead! */
 static struct nf_conntrack_tuple_hash *
-get_next_corpse(int (*kill)(const struct nf_conn *i, void *data),
+get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
 		void *data, unsigned int *bucket)
 {
 	struct nf_conntrack_tuple_hash *h = NULL;
 
-	READ_LOCK(&nf_conntrack_lock);
-	for (; !h && *bucket < nf_conntrack_htable_size; (*bucket)++) {
-		h = LIST_FIND(&nf_conntrack_hash[*bucket], do_kill,
-			      struct nf_conntrack_tuple_hash *, kill, data);
-	}
+	WRITE_LOCK(&nf_conntrack_lock);
+
+	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+		h = LIST_FIND_W(&nf_conntrack_hash[*bucket], do_iter,
+				struct nf_conntrack_tuple_hash *, iter, data);
+		if (h)
+			break;
+ 	}
+	if (!h)
+		h = LIST_FIND_W(&unconfirmed, do_iter,
+				struct nf_conntrack_tuple_hash *, iter, data);
 	if (h)
 		atomic_inc(&h->ctrack->ct_general.use);
-	READ_UNLOCK(&nf_conntrack_lock);
+	WRITE_UNLOCK(&nf_conntrack_lock);
 
 	return h;
 }
 
 void
-nf_ct_selective_cleanup(int (*kill)(const struct nf_conn *i, void *data),
-			void *data)
+nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
 {
 	struct nf_conntrack_tuple_hash *h;
 	unsigned int bucket = 0;
 
-	while ((h = get_next_corpse(kill, data, &bucket)) != NULL) {
+	while ((h = get_next_corpse(iter, data, &bucket)) != NULL) {
 		/* Time to push up daises... */
 		if (del_timer(&h->ctrack->timeout))
 			death_by_timeout((unsigned long)h->ctrack);
@@ -1448,7 +1473,7 @@
 	}
 }
 
-static int kill_all(const struct nf_conn *i, void *data)
+static int kill_all(struct nf_conn *i, void *data)
 {
 	return 1;
 }
@@ -1465,7 +1490,7 @@
 	synchronize_net();
 
  i_see_dead_people:
-	nf_ct_selective_cleanup(kill_all, NULL);
+	nf_ct_iterate_cleanup(kill_all, NULL);
 	if (atomic_read(&nf_conntrack_count) != 0) {
 		schedule();
 		goto i_see_dead_people;

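Two details worth noting in the last hunks: the callback loses its const qualifier, so an iter function may now modify entries in place (as unhelp-style users do), which is consistent with get_next_corpse() switching from READ_LOCK/LIST_FIND to WRITE_LOCK/LIST_FIND_W for both the hash buckets and the new unconfirmed list. A callback can still distinguish the two populations if it needs to; a small sketch, assuming only the is_confirmed() helper already used in the hunks above is visible to the caller:

    /* Hedged example: act only on entries that made it into the hash;
     * unconfirmed ones are skipped by returning 0. */
    static int kill_confirmed_only(struct nf_conn *ct, void *data)
    {
    	return is_confirmed(ct);
    }

    /* nf_ct_iterate_cleanup(kill_confirmed_only, NULL); */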


