[netfilter-cvslog] r3401 - in trunk/nfsim: core kernelenv kernelenv/include

rusty at netfilter.org
Thu Dec 16 02:37:16 CET 2004


Author: rusty at netfilter.org
Date: 2004-12-16 02:37:15 +0100 (Thu, 16 Dec 2004)
New Revision: 3401

Modified:
   trunk/nfsim/core/core.c
   trunk/nfsim/kernelenv/include/kernelenv.h
   trunk/nfsim/kernelenv/kernelenv.c
Log:
Fix up schedule() (it should not be a macro any more).
Add should_i_fail_once() to call_elem_hook.
Uninline a whole heap of functions in kernelenv.h, so gcov doesn't see them.
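
For readers unfamiliar with nfsim's fault injection: the guard added to call_elem_hook() below keys each potential failure to a "function:hooknumber" string (e.g. "call_elem_hook:2") and returns NF_DROP when the test harness has requested a failure at that site. should_i_fail_once() itself is defined elsewhere in nfsim and is not part of this diff; the standalone program below is only a hypothetical sketch of the one-shot, name-keyed pattern it provides (queue_failure_once() and should_i_fail_once_demo() are illustrative names, not nfsim APIs):

/* Hypothetical sketch of one-shot, name-keyed fault injection: a test
 * queues a failure for a named call site, and the first time that site
 * asks, it is told to fail; afterwards it proceeds normally. */
#include <stdio.h>
#include <string.h>

#define MAX_FAILURES 16

static const char *queued[MAX_FAILURES];
static int nqueued;

/* A test script would call this to request one failure at the named site. */
static void queue_failure_once(const char *site)
{
	if (nqueued < MAX_FAILURES)
		queued[nqueued++] = site;
}

/* Called at an injection point such as call_elem_hook(); returns 1 exactly
 * once per queued request for this site name, 0 otherwise. */
static int should_i_fail_once_demo(const char *site)
{
	int i;

	for (i = 0; i < nqueued; i++) {
		if (strcmp(queued[i], site) == 0) {
			queued[i] = queued[--nqueued];	/* consume the entry */
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	queue_failure_once("call_elem_hook:2");

	/* First call at the site fails (the packet would be NF_DROPped)... */
	printf("%d\n", should_i_fail_once_demo("call_elem_hook:2"));	/* prints 1 */
	/* ...subsequent calls proceed normally. */
	printf("%d\n", should_i_fail_once_demo("call_elem_hook:2"));	/* prints 0 */
	return 0;
}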


Modified: trunk/nfsim/core/core.c
===================================================================
--- trunk/nfsim/core/core.c	2004-12-16 01:34:50 UTC (rev 3400)
+++ trunk/nfsim/core/core.c	2004-12-16 01:37:15 UTC (rev 3401)
@@ -131,7 +131,14 @@
 			    int (*okfn)(struct sk_buff *))
 {
 	unsigned int ret;
+	char *hookname = talloc_asprintf(NULL, "%s:%i", __func__, hooknum);
 
+	if (should_i_fail_once(hookname)) {
+		talloc_free(hookname);
+		return NF_DROP;
+	}
+	talloc_free(hookname);
+
 	ret = ops->hook(hooknum, skb, in, out, okfn);
 	nfsim_log(LOG_HOOK, "hook:%s %s %s%s",
 		   nf_hooknames[PF_INET][hooknum], ops->owner->name,
@@ -346,13 +353,3 @@
 	/* change for protocol... */
 	return ip_rcv_local(skb);
 }
-
-unsigned int skb_checksum(const struct sk_buff *skb, int offset,
-			  int len, unsigned int csum)
-{
-	assert(offset + len <= skb->len);
-
-	csum = csum_partial(skb->data + offset, len, csum);
-
-	return csum;
-}

Modified: trunk/nfsim/kernelenv/include/kernelenv.h
===================================================================
--- trunk/nfsim/kernelenv/include/kernelenv.h	2004-12-16 01:34:50 UTC (rev 3400)
+++ trunk/nfsim/kernelenv/include/kernelenv.h	2004-12-16 01:37:15 UTC (rev 3401)
@@ -110,7 +110,7 @@
 
 #define dump_stack()
 
-#define schedule()
+void schedule(void);
 
 #define BUG_ON(x) do { if (x) barf("%s:%u", __FILE__, __LINE__); } while(0)
 #define BUG() BUG_ON(1)
@@ -178,10 +178,7 @@
 /* err.h */
 #define ERR_PTR(x) ((void *)(x))
 #define PTR_ERR(x) ((long)(x))
-static inline long IS_ERR(const void *ptr)
-{
-         return (unsigned long)ptr > (unsigned long)-1000L;
-}
+int IS_ERR(const void *ptr);
 
 /* we start at time 0 */
 #define INITIAL_JIFFIES 0
@@ -226,21 +223,10 @@
 #define atomic_set(v,i)         (((v)->counter) = (i))
 #define atomic_add(v,i)         (((v)->counter) += (i))
 
-static __inline__ void atomic_inc(atomic_t *v)
-{
-	v->counter++;
-}
+void atomic_inc(atomic_t *v);
+void atomic_dec(atomic_t *v);
+int atomic_dec_and_test(atomic_t *v);
 
-static __inline__ void atomic_dec(atomic_t *v)
-{
-	v->counter--;
-}
-
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
-	return (--(v->counter) == 0);
-}
-
 /* rc_update.h  */
 struct rcu_head {
 	struct list_head list;
@@ -446,14 +432,8 @@
 	unsigned int tso_size;
 };
 
-static inline int skb_cloned(const struct sk_buff *skb)
-{
-	return skb->cloned;
-}
-static inline int skb_shared(const struct sk_buff *skb)
-{
-	return atomic_read(&skb->users) != 1;
-}
+int skb_cloned(const struct sk_buff *skb);
+int skb_shared(const struct sk_buff *skb);
 
 struct nf_conntrack {
 	atomic_t use;
@@ -525,18 +505,9 @@
 #define __skb_put  skb_put
 #define __skb_push skb_push
 
-static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
-{
-	skb->len  = len;
-	skb->tail = skb->data + len;
-}
+void __skb_trim(struct sk_buff *skb, unsigned int len);
+void skb_trim(struct sk_buff *skb, unsigned int len);
 
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
-{
-	if (skb->len > len)
-		__skb_trim(skb, len);
-}
-
 #define SKB_LINEAR_ASSERT(x)
 
 void skb_reserve(struct sk_buff *skb, unsigned int len);
@@ -548,20 +519,11 @@
 				int newheadroom, int newtailroom,
 				int gfp_mask);
 
-static inline void skb_orphan(struct sk_buff *skb)
-{
-        if (skb->destructor)
-                skb->destructor(skb);
-        skb->destructor = NULL;
-        skb->sk         = NULL;
-}
+void skb_orphan(struct sk_buff *skb);
 
-static inline int skb_is_nonlinear(struct sk_buff *skb)
-{
-	return 0;
-}
+int skb_is_nonlinear(struct sk_buff *skb);
+
 #define skb_linearize(skb, len) 0
-#define skb_cloned(skb) 0
 #define skb_copy(skb, gfp) \
 	skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb), gfp)
 
@@ -575,16 +537,9 @@
 
 #define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))
 
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
-				       int len, void *buffer)
-{
-	/* We're always linear. */
-	if (offset + len > skb->len)
-		return NULL;
+void *skb_header_pointer(const struct sk_buff *skb, int offset,
+			 int len, void *buffer);
 
-	return skb->data + offset;
-}
-
 /* net/sock.h */
 #define sk_for_each(__sk, node, list) \
 	hlist_for_each_entry(__sk, node, list, sk_node)
@@ -628,27 +583,10 @@
 	void			(*sk_data_ready)(struct sock *sk, int bytes);
 };
 
-static inline void sock_hold(struct sock *sk)
-{
-	atomic_inc(&sk->sk_refcnt);
-}
+void sock_hold(struct sock *sk);
+void sock_put(struct sock *sk);
+void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
 
-static inline void sock_put(struct sock *sk)
-{
-	if (atomic_dec_and_test(&sk->sk_refcnt))
-		free(sk);
-}
-
-static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
-	/*
-	sock_hold(sk);
-	skb->sk = sk;
-	skb->destructor = sock_wfree;
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
-	*/
-}
-
 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
 /* net.h */
 #define net_ratelimit() 1
@@ -830,43 +768,16 @@
 
 };
 
-static inline u32
-dst_path_metric(struct dst_entry *dst, int metric)
-{
-	return 1500;
-/*	return dst->path->metrics[metric-1]; */
-}
+u32 dst_path_metric(struct dst_entry *dst, int metric);
+u32 dst_pmtu(struct dst_entry *dst);
 
-
-static inline u32
-dst_pmtu(struct dst_entry *dst)
-{
-	u32 mtu = dst_path_metric(dst, RTAX_MTU);
-	/* Yes, _exactly_. This is paranoia. */
-	barrier();
-	return mtu;
-}
-
 #define dst_release(x)
 #define dst_hold(x)
 
-static inline int dst_output(struct sk_buff *skb)
-{
-	assert(skb);
-	assert(skb->dst);
-	assert(skb->dst->output);
-	return skb->dst->output(skb);
-}
+int dst_output(struct sk_buff *skb);
+int dst_input(struct sk_buff *skb);
 
-static inline int dst_input(struct sk_buff *skb)
-{
-	assert(skb);
-	assert(skb->dst);
-	assert(skb->dst->input);
-	return skb->dst->input(skb);
-}
 
-
 /* spinlock.h */
 
 /* no difference between spin and rw locks at present */
@@ -956,11 +867,7 @@
 
 int down_trylock(struct semaphore *sem);
 
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	sem->count = val;
-	sem->limit = val;
-}
+void sema_init(struct semaphore *sem, int val);
 
 /* sched.h */
 
@@ -1017,7 +924,8 @@
 int timer_pending(const struct timer_list * timer);
 void check_timer(struct timer_list *timer);
 
-int del_timer(struct timer_list *timer);
+#define del_timer(timer) __del_timer((timer), __location__)
+int __del_timer(struct timer_list *timer, const char *location);
 void check_timer_failed(struct timer_list *timer);
 
 void add_timer(struct timer_list * timer);
@@ -1025,44 +933,11 @@
 
 
 /* asm/bitops.h */
-static __inline__ int test_bit(int nr, long * addr)
-{
-	int	mask;
-
-	addr += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	return ((mask & *addr) != 0);
-}
-
-static __inline__ int set_bit(int nr,long * addr)
-{
-	int	mask, retval;
-
-	addr += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	cli();
-	retval = (mask & *addr) != 0;
-	*addr |= mask;
-	sti();
-	return retval;
-}
-
+int test_bit(int nr, long * addr);
+int set_bit(int nr,long * addr);
 #define __set_bit set_bit
+int clear_bit(int nr, long * addr);
 
-static __inline__ int clear_bit(int nr, long * addr)
-{
-	int     mask, retval;
-
-	addr += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	cli();
-	retval = (mask & *addr) != 0;
-	*addr &= ~mask;
-	sti();
-        return retval;
-}
-
-
 /* random */
 void get_random_bytes(void *buf, int nbytes);
 
@@ -1279,9 +1154,5 @@
 #define SEQ_START_TOKEN ((void *)1)
 
 /* if_ether.h */
-static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
-{
-	return (struct ethhdr *)skb->mac.raw;
-}
-
+struct ethhdr *eth_hdr(const struct sk_buff *skb);
 #endif /* __HAVE_SIMULATOR_H */

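Also worth noting in the kernelenv.h hunk above: del_timer() is now a macro that captures the caller's location via __location__ (talloc's "file:line" macro, which nfsim already uses) and forwards it to __del_timer(), so timer misuse can be reported against the offending call site. The following is only a minimal, self-contained sketch of that pattern; the _demo names are illustrative rather than nfsim code, and __location__ is re-derived here solely so the sketch compiles on its own:

#include <stdio.h>

/* talloc defines __location__ as "file:line" at the point of expansion;
 * provide a stand-in only if it is not already available. */
#ifndef __location__
#define __STRINGIFY(x) #x
#define __STRINGIFY2(x) __STRINGIFY(x)
#define __location__ __FILE__ ":" __STRINGIFY2(__LINE__)
#endif

struct timer_list { int pending; };

/* The real work happens in the double-underscore function, which now knows
 * where it was called from and can include that in any error report. */
static int __del_timer_demo(struct timer_list *timer, const char *location)
{
	printf("del_timer on %p called from %s\n", (void *)timer, location);
	return timer->pending;
}

/* Callers keep writing del_timer(t); the macro records their location. */
#define del_timer_demo(timer) __del_timer_demo((timer), __location__)

int main(void)
{
	struct timer_list t = { .pending = 0 };

	del_timer_demo(&t);
	return 0;
}
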
Modified: trunk/nfsim/kernelenv/kernelenv.c
===================================================================
--- trunk/nfsim/kernelenv/kernelenv.c	2004-12-16 01:34:50 UTC (rev 3400)
+++ trunk/nfsim/kernelenv/kernelenv.c	2004-12-16 01:37:15 UTC (rev 3401)
@@ -265,6 +265,119 @@
 }
 #endif
 
+int skb_cloned(const struct sk_buff *skb)
+{
+	return skb->cloned;
+}
+
+int skb_shared(const struct sk_buff *skb)
+{
+	return atomic_read(&skb->users) != 1;
+}
+
+unsigned int skb_checksum(const struct sk_buff *skb, int offset,
+			  int len, unsigned int csum)
+{
+	assert(offset + len <= skb->len);
+
+	csum = csum_partial(skb->data + offset, len, csum);
+
+	return csum;
+}
+
+void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+	skb->len  = len;
+	skb->tail = skb->data + len;
+}
+
+void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+	if (skb->len > len)
+		__skb_trim(skb, len);
+}
+
+void skb_orphan(struct sk_buff *skb)
+{
+        if (skb->destructor)
+                skb->destructor(skb);
+        skb->destructor = NULL;
+        skb->sk         = NULL;
+}
+
+/* FIXME: nonlinear skb support would be good. */
+int skb_is_nonlinear(struct sk_buff *skb)
+{
+	return 0;
+}
+
+void *skb_header_pointer(const struct sk_buff *skb, int offset,
+			 int len, void *buffer)
+{
+	/* We're always linear. */
+	if (offset + len > skb->len)
+		return NULL;
+
+	return skb->data + offset;
+}
+
+void sock_hold(struct sock *sk)
+{
+	atomic_inc(&sk->sk_refcnt);
+}
+
+void sock_put(struct sock *sk)
+{
+	if (atomic_dec_and_test(&sk->sk_refcnt))
+		free(sk);
+}
+
+void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+	/*
+	sock_hold(sk);
+	skb->sk = sk;
+	skb->destructor = sock_wfree;
+	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	*/
+}
+
+u32 dst_path_metric(struct dst_entry *dst, int metric)
+{
+	return 1500;
+/*	return dst->path->metrics[metric-1]; */
+}
+
+
+u32 dst_pmtu(struct dst_entry *dst)
+{
+	u32 mtu = dst_path_metric(dst, RTAX_MTU);
+	/* Yes, _exactly_. This is paranoia. */
+	barrier();
+	return mtu;
+}
+
+int dst_output(struct sk_buff *skb)
+{
+	assert(skb);
+	assert(skb->dst);
+	assert(skb->dst->output);
+	return skb->dst->output(skb);
+}
+
+int dst_input(struct sk_buff *skb)
+{
+	assert(skb);
+	assert(skb->dst);
+	assert(skb->dst->input);
+	return skb->dst->input(skb);
+}
+
+struct ethhdr *eth_hdr(const struct sk_buff *skb)
+{
+	return (struct ethhdr *)skb->mac.raw;
+}
+
 /*
 {
 	unsigned int csum;
@@ -360,6 +473,48 @@
 	return 1;
 }
 
+void sema_init(struct semaphore *sem, int val)
+{
+	sem->count = val;
+	sem->limit = val;
+}
+
+/* bitops.h */
+int test_bit(int nr, long * addr)
+{
+	int	mask;
+
+	addr += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	return ((mask & *addr) != 0);
+}
+
+int set_bit(int nr,long * addr)
+{
+	int	mask, retval;
+
+	addr += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	cli();
+	retval = (mask & *addr) != 0;
+	*addr |= mask;
+	sti();
+	return retval;
+}
+
+int clear_bit(int nr, long * addr)
+{
+	int     mask, retval;
+
+	addr += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	cli();
+	retval = (mask & *addr) != 0;
+	*addr &= ~mask;
+	sti();
+        return retval;
+}
+
 /* timer */
 LIST_HEAD(__timers);
 LIST_HEAD(__running_timers);
@@ -634,3 +789,23 @@
 	if (fail)
 		barf("Memory leak");
 }
+
+int IS_ERR(const void *ptr)
+{
+         return (unsigned long)ptr > (unsigned long)-1000L;
+}
+
+void atomic_inc(atomic_t *v)
+{
+	v->counter++;
+}
+
+void atomic_dec(atomic_t *v)
+{
+	v->counter--;
+}
+
+int atomic_dec_and_test(atomic_t *v)
+{
+	return (--(v->counter) == 0);
+}



