patch-2.4.11-dontuse linux/net/ipv4/tcp_input.c


diff -u --recursive --new-file v2.4.10/linux/net/ipv4/tcp_input.c linux/net/ipv4/tcp_input.c
@@ -5,7 +5,7 @@
  *
  *		Implementation of the Transmission Control Protocol(TCP).
  *
- * Version:	$Id: tcp_input.c,v 1.236 2001/09/18 22:29:09 davem Exp $
+ * Version:	$Id: tcp_input.c,v 1.237 2001/09/21 21:27:34 davem Exp $
  *
  * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -168,7 +168,7 @@
 	if (quickacks==0)
 		quickacks=2;
 	if (quickacks > tp->ack.quick)
-		tp->ack.quick = min_t(unsigned int, quickacks, TCP_MAX_QUICKACKS);
+		tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
 }
 
 void tcp_enter_quickack_mode(struct tcp_opt *tp)
@@ -198,7 +198,7 @@
 	int sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
 
 	if (sk->sndbuf < 3*sndmem)
-		sk->sndbuf = min_t(int, 3*sndmem, sysctl_tcp_wmem[2]);
+		sk->sndbuf = min(3*sndmem, sysctl_tcp_wmem[2]);
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -262,7 +262,7 @@
 			incr = __tcp_grow_window(sk, tp, skb);
 
 		if (incr) {
-			tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh + incr, tp->window_clamp);
+			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
 			tp->ack.quick |= 1;
 		}
 	}
@@ -282,7 +282,7 @@
 	while (tcp_win_from_space(rcvmem) < tp->advmss)
 		rcvmem += 128;
 	if (sk->rcvbuf < 4*rcvmem)
-		sk->rcvbuf = min_t(int, 4*rcvmem, sysctl_tcp_rmem[2]);
+		sk->rcvbuf = min(4*rcvmem, sysctl_tcp_rmem[2]);
 }
 
 /* 4. Try to fixup all. It is made iimediately after connection enters
@@ -304,16 +304,16 @@
 		tp->window_clamp = maxwin;
 
 		if (sysctl_tcp_app_win && maxwin>4*tp->advmss)
-			tp->window_clamp = max_t(u32, maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
+			tp->window_clamp = max(maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
 	}
 
 	/* Force reservation of one segment. */
 	if (sysctl_tcp_app_win &&
 	    tp->window_clamp > 2*tp->advmss &&
 	    tp->window_clamp + tp->advmss > maxwin)
-		tp->window_clamp = max_t(u32, 2*tp->advmss, maxwin-tp->advmss);
+		tp->window_clamp = max(2*tp->advmss, maxwin-tp->advmss);
 
-	tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh, tp->window_clamp);
+	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -321,7 +321,7 @@
 static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
 {
 	struct sk_buff *skb;
-	int app_win = tp->rcv_nxt - tp->copied_seq;
+	unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
 	int ofo_win = 0;
 
 	tp->ack.quick = 0;
@@ -338,7 +338,7 @@
 		    !(sk->userlocks&SOCK_RCVBUF_LOCK) &&
 		    !tcp_memory_pressure &&
 		    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
-			sk->rcvbuf = min_t(int, atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
+			sk->rcvbuf = min(atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
 	}
 	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
 		app_win += ofo_win;
@@ -346,11 +346,11 @@
 			app_win >>= 1;
 		if (app_win > tp->ack.rcv_mss)
 			app_win -= tp->ack.rcv_mss;
-		app_win = max_t(unsigned int, app_win, 2*tp->advmss);
+		app_win = max(app_win, 2U*tp->advmss);
 
 		if (!ofo_win)
-			tp->window_clamp = min_t(u32, tp->window_clamp, app_win);
-		tp->rcv_ssthresh = min_t(u32, tp->window_clamp, 2*tp->advmss);
+			tp->window_clamp = min(tp->window_clamp, app_win);
+		tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
 	}
 }
 
@@ -472,7 +472,7 @@
 		/* no previous measure. */
 		tp->srtt = m<<3;	/* take the measured time to be rtt */
 		tp->mdev = m<<2;	/* make sure rto = 3*rtt */
-		tp->mdev_max = tp->rttvar = max_t(u32, tp->mdev, TCP_RTO_MIN);
+		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
 		tp->rtt_seq = tp->snd_nxt;
 	}
 }
@@ -575,7 +575,7 @@
 			   tp->ca_state == TCP_CA_Open) {
 			/* Cong. avoidance phase, cwnd is reliable. */
 			if (!(dst->mxlock&(1<<RTAX_SSTHRESH)))
-				dst->ssthresh = max_t(u32, tp->snd_cwnd>>1, tp->snd_ssthresh);
+				dst->ssthresh = max(tp->snd_cwnd>>1, tp->snd_ssthresh);
 			if (!(dst->mxlock&(1<<RTAX_CWND)))
 				dst->cwnd = (dst->cwnd + tp->snd_cwnd)>>1;
 		} else {
@@ -617,7 +617,7 @@
 	else if (cwnd > tp->snd_ssthresh)
 		cwnd = tp->snd_ssthresh;
 
-	return min_t(u32, cwnd, tp->snd_cwnd_clamp);
+	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
 /* Initialize metrics on socket. */
@@ -668,7 +668,7 @@
 		tp->srtt = dst->rtt;
 	if (dst->rttvar > tp->mdev) {
 		tp->mdev = dst->rttvar;
-		tp->mdev_max = tp->rttvar = max_t(u32, tp->mdev, TCP_RTO_MIN);
+		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
 	}
 	tcp_set_rto(tp);
 	tcp_bound_rto(tp);
@@ -693,7 +693,7 @@
 static void tcp_update_reordering(struct tcp_opt *tp, int metric, int ts)
 {
 	if (metric > tp->reordering) {
-		tp->reordering = min_t(unsigned int, TCP_MAX_REORDERING, metric);
+		tp->reordering = min(TCP_MAX_REORDERING, metric);
 
 		/* This exciting event is worth to be remembered. 8) */
 		if (ts)
@@ -848,12 +848,12 @@
 				if (sacked&TCPCB_RETRANS) {
 					if ((dup_sack && in_sack) &&
 					    (sacked&TCPCB_SACKED_ACKED))
-						reord = min_t(int, fack_count, reord);
+						reord = min(fack_count, reord);
 				} else {
 					/* If it was in a hole, we detected reordering. */
 					if (fack_count < prior_fackets &&
 					    !(sacked&TCPCB_SACKED_ACKED))
-						reord = min_t(int, fack_count, reord);
+						reord = min(fack_count, reord);
 				}
 
 				/* Nothing to do; acked frame is about to be dropped. */
@@ -885,7 +885,7 @@
 					 */
 					if (!(sacked & TCPCB_RETRANS) &&
 					    fack_count < prior_fackets)
-						reord = min_t(int, fack_count, reord);
+						reord = min(fack_count, reord);
 
 					if (sacked & TCPCB_LOST) {
 						TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
@@ -901,7 +901,7 @@
 					tp->fackets_out = fack_count;
 			} else {
 				if (dup_sack && (sacked&TCPCB_RETRANS))
-					reord = min_t(int, fack_count, reord);
+					reord = min(fack_count, reord);
 			}
 
 			/* D-SACK. We can detect redundant retransmission
@@ -1177,7 +1177,7 @@
 	 * recovery more?
 	 */
 	if (tp->packets_out <= tp->reordering &&
-	    tp->sacked_out >= max_t(u32, tp->packets_out/2, sysctl_tcp_reordering) &&
+	    tp->sacked_out >= max_t(__u32, tp->packets_out/2, sysctl_tcp_reordering) &&
 	    !tcp_may_send_now(sk, tp)) {
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
@@ -1194,9 +1194,10 @@
  */
 static void tcp_check_reno_reordering(struct tcp_opt *tp, int addend)
 {
-	u32 holes = min_t(unsigned int,
-			max_t(unsigned int, tp->lost_out, 1),
-			tp->packets_out);
+	u32 holes;
+
+	holes = max(tp->lost_out, 1U);
+	holes = min(holes, tp->packets_out);
 
 	if (tp->sacked_out + holes > tp->packets_out) {
 		tp->sacked_out = tp->packets_out - holes;
@@ -1291,7 +1292,7 @@
  */
 static __inline__ void tcp_moderate_cwnd(struct tcp_opt *tp)
 {
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd,
+	tp->snd_cwnd = min(tp->snd_cwnd,
 			   tcp_packets_in_flight(tp)+tcp_max_burst(tp));
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
@@ -1308,7 +1309,7 @@
 	if (decr && tp->snd_cwnd > tp->snd_ssthresh/2)
 		tp->snd_cwnd -= decr;
 
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -1340,15 +1341,14 @@
 static void tcp_undo_cwr(struct tcp_opt *tp, int undo)
 {
 	if (tp->prior_ssthresh) {
-		tp->snd_cwnd = max_t(unsigned int,
-				   tp->snd_cwnd, tp->snd_ssthresh<<1);
+		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
 
 		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
 			TCP_ECN_withdraw_cwr(tp);
 		}
 	} else {
-		tp->snd_cwnd = max_t(unsigned int, tp->snd_cwnd, tp->snd_ssthresh);
+		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
 	}
 	tcp_moderate_cwnd(tp);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -1450,7 +1450,7 @@
 
 static __inline__ void tcp_complete_cwr(struct tcp_opt *tp)
 {
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_ssthresh);
+	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -1836,7 +1836,7 @@
 		 */
 	} else {
 		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
-				     min_t(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
+				     min(tp->rto << tp->backoff, TCP_RTO_MAX));
 	}
 }
 
@@ -2323,7 +2323,7 @@
 		tp->dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
 		tp->duplicate_sack[0].end_seq = end_seq;
-		tp->eff_sacks = min_t(unsigned int, tp->num_sacks+1, 4-tp->tstamp_ok);
+		tp->eff_sacks = min(tp->num_sacks+1, 4-tp->tstamp_ok);
 	}
 }
 
@@ -2376,7 +2376,7 @@
 			 * Decrease num_sacks.
 			 */
 			tp->num_sacks--;
-			tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+			tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
 			for(i=this_sack; i < tp->num_sacks; i++)
 				sp[i] = sp[i+1];
 			continue;
@@ -2438,7 +2438,7 @@
 	sp->start_seq = seq;
 	sp->end_seq = end_seq;
 	tp->num_sacks++;
-	tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+	tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -2475,7 +2475,7 @@
 	}
 	if (num_sacks != tp->num_sacks) {
 		tp->num_sacks = num_sacks;
-		tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+		tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
 	}
 }
 
@@ -2807,7 +2807,7 @@
 
 			if (offset < 0) BUG();
 			if (size > 0) {
-				size = min_t(int, copy, size);
+				size = min(copy, size);
 				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
 					BUG();
 				TCP_SKB_CB(nskb)->end_seq += size;
@@ -2886,7 +2886,7 @@
 	if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
 		tcp_clamp_window(sk, tp);
 	else if (tcp_memory_pressure)
-		tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh, 4*tp->advmss);
+		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
 	tcp_collapse(sk, sk->receive_queue.next,
@@ -2941,7 +2941,7 @@
 	if (tp->ca_state == TCP_CA_Open &&
 	    sk->socket && !test_bit(SOCK_NOSPACE, &sk->socket->flags)) {
 		/* Limited by application or receiver window. */
-		u32 win_used = max_t(u32, tp->snd_cwnd_used, 2);
+		u32 win_used = max(tp->snd_cwnd_used, 2U);
 		if (win_used < tp->snd_cwnd) {
 			tp->snd_ssthresh = tcp_current_ssthresh(tp);
 			tp->snd_cwnd = (tp->snd_cwnd+win_used)>>1;
@@ -2970,7 +2970,7 @@
 		demanded = max_t(unsigned int, tp->snd_cwnd, tp->reordering+1);
 		sndmem *= 2*demanded;
 		if (sndmem > sk->sndbuf)
-			sk->sndbuf = min_t(int, sndmem, sysctl_tcp_wmem[2]);
+			sk->sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
 		tp->snd_cwnd_stamp = tcp_time_stamp;
 	}
 
@@ -3520,7 +3520,7 @@
 
 		if (tp->wscale_ok == 0) {
 			tp->snd_wscale = tp->rcv_wscale = 0;
-			tp->window_clamp = min_t(u32, tp->window_clamp, 65535);
+			tp->window_clamp = min(tp->window_clamp, 65535U);
 		}
 
 		if (tp->saw_tstamp) {

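Every hunk above follows the same pattern: an explicitly typed min_t()/max_t() call is replaced by the type-checked min()/max() macro, with an unsigned suffix (2U, 4U, 65535U) added to a constant wherever that is needed to give both operands the same type. The standalone sketch below is only an approximation of the 2.4-era macros from <linux/kernel.h>, and the field types it uses (u32 rcv_ssthresh, 16-bit advmss, as in struct tcp_opt) are assumptions made for illustration; it shows why min(tp->rcv_ssthresh, 4*tp->advmss) would draw a distinct-pointer-types warning from gcc while min(tp->rcv_ssthresh, 4U*tp->advmss) does not.

	#include <stdio.h>

	typedef unsigned int u32;
	typedef unsigned short u16;

	/* Approximation of the type-checked min() from the 2.4-era
	 * <linux/kernel.h>: the (void)(&_x == &_y) pointer comparison makes
	 * gcc warn whenever the two operands have different types. */
	#define min(x, y) ({			\
		const typeof(x) _x = (x);	\
		const typeof(y) _y = (y);	\
		(void) (&_x == &_y);		\
		_x < _y ? _x : _y; })

	/* min_t() sidesteps the check by casting both operands to an
	 * explicit type. */
	#define min_t(type, x, y) ({		\
		type __x = (x);			\
		type __y = (y);			\
		__x < __y ? __x : __y; })

	int main(void)
	{
		u32 rcv_ssthresh = 100000;	/* assumed u32, as in struct tcp_opt */
		u16 advmss = 1460;		/* assumed u16, as in struct tcp_opt */

		/* 4*advmss promotes to (signed) int, so min(u32, int) would
		 * trigger "comparison of distinct pointer types" inside the
		 * macro.  The 4U suffix keeps the product unsigned int,
		 * matching rcv_ssthresh. */
		u32 clamped = min(rcv_ssthresh, 4U * advmss);

		/* The old form spelled the common type out instead. */
		u32 clamped_old = min_t(u32, rcv_ssthresh, 4 * advmss);

		printf("%u %u\n", clamped, clamped_old);
		return 0;
	}

Seen this way, the patch prefers the stricter checking of min()/max() wherever both operands can be given the same type, and retains the explicitly typed form only in the two hunks that merely change u32 to __u32 inside min_t()/max_t().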