patch-2.4.9 linux/net/ipv4/tcp_input.c
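
Almost every hunk in this file is the same mechanical substitution: as of 2.4.9 the kernel's min() and max() macros take the comparison type as an explicit first argument, and every call site in tcp_input.c is updated to name the type it wants the comparison performed in. The typed macros in include/linux/kernel.h take roughly this shape (a sketch, not a verbatim copy; the kernel relies on GCC's statement-expression extension):

	/* Evaluate each argument once, force both values into the
	 * explicitly named type, and compare in that type. */
	#define min(type, x, y) \
		({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })
	#define max(type, x, y) \
		({ type __x = (x); type __y = (y); __x > __y ? __x : __y; })

Naming the type at each call site makes mixed signed/unsigned comparisons a deliberate choice rather than an accident of C's arithmetic conversions; a short demonstration of the trap follows the diff.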

diff -u --recursive --new-file v2.4.8/linux/net/ipv4/tcp_input.c linux/net/ipv4/tcp_input.c
@@ -5,7 +5,7 @@
  *
  *		Implementation of the Transmission Control Protocol(TCP).
  *
- * Version:	$Id: tcp_input.c,v 1.232 2001/05/24 22:32:49 davem Exp $
+ * Version:	$Id: tcp_input.c,v 1.235 2001/08/13 18:56:12 davem Exp $
  *
  * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -168,7 +168,7 @@
 	if (quickacks==0)
 		quickacks=2;
 	if (quickacks > tp->ack.quick)
-		tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+		tp->ack.quick = min(unsigned int, quickacks, TCP_MAX_QUICKACKS);
 }
 
 void tcp_enter_quickack_mode(struct tcp_opt *tp)
@@ -198,7 +198,7 @@
 	int sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
 
 	if (sk->sndbuf < 3*sndmem)
-		sk->sndbuf = min(3*sndmem, sysctl_tcp_wmem[2]);
+		sk->sndbuf = min(int, 3*sndmem, sysctl_tcp_wmem[2]);
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -262,7 +262,7 @@
 			incr = __tcp_grow_window(sk, tp, skb);
 
 		if (incr) {
-			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
+			tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh + incr, tp->window_clamp);
 			tp->ack.quick |= 1;
 		}
 	}
@@ -282,7 +282,7 @@
 	while (tcp_win_from_space(rcvmem) < tp->advmss)
 		rcvmem += 128;
 	if (sk->rcvbuf < 4*rcvmem)
-		sk->rcvbuf = min(4*rcvmem, sysctl_tcp_rmem[2]);
+		sk->rcvbuf = min(int, 4*rcvmem, sysctl_tcp_rmem[2]);
 }
 
 /* 4. Try to fixup all. It is made iimediately after connection enters
@@ -304,16 +304,16 @@
 		tp->window_clamp = maxwin;
 
 		if (sysctl_tcp_app_win && maxwin>4*tp->advmss)
-			tp->window_clamp = max(maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
+			tp->window_clamp = max(u32, maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
 	}
 
 	/* Force reservation of one segment. */
 	if (sysctl_tcp_app_win &&
 	    tp->window_clamp > 2*tp->advmss &&
 	    tp->window_clamp + tp->advmss > maxwin)
-		tp->window_clamp = max(2*tp->advmss, maxwin-tp->advmss);
+		tp->window_clamp = max(u32, 2*tp->advmss, maxwin-tp->advmss);
 
-	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
+	tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh, tp->window_clamp);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -338,7 +338,7 @@
 		    !(sk->userlocks&SOCK_RCVBUF_LOCK) &&
 		    !tcp_memory_pressure &&
 		    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
-			sk->rcvbuf = min(atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
+			sk->rcvbuf = min(int, atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
 	}
 	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
 		app_win += ofo_win;
@@ -346,11 +346,11 @@
 			app_win >>= 1;
 		if (app_win > tp->ack.rcv_mss)
 			app_win -= tp->ack.rcv_mss;
-		app_win = max(app_win, 2*tp->advmss);
+		app_win = max(unsigned int, app_win, 2*tp->advmss);
 
 		if (!ofo_win)
-			tp->window_clamp = min(tp->window_clamp, app_win);
-		tp->rcv_ssthresh = min(tp->window_clamp, 2*tp->advmss);
+			tp->window_clamp = min(u32, tp->window_clamp, app_win);
+		tp->rcv_ssthresh = min(u32, tp->window_clamp, 2*tp->advmss);
 	}
 }
 
@@ -472,7 +472,7 @@
 		/* no previous measure. */
 		tp->srtt = m<<3;	/* take the measured time to be rtt */
 		tp->mdev = m<<2;	/* make sure rto = 3*rtt */
-		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+		tp->mdev_max = tp->rttvar = max(u32, tp->mdev, TCP_RTO_MIN);
 		tp->rtt_seq = tp->snd_nxt;
 	}
 }
@@ -575,7 +575,7 @@
 			   tp->ca_state == TCP_CA_Open) {
 			/* Cong. avoidance phase, cwnd is reliable. */
 			if (!(dst->mxlock&(1<<RTAX_SSTHRESH)))
-				dst->ssthresh = max(tp->snd_cwnd>>1, tp->snd_ssthresh);
+				dst->ssthresh = max(u32, tp->snd_cwnd>>1, tp->snd_ssthresh);
 			if (!(dst->mxlock&(1<<RTAX_CWND)))
 				dst->cwnd = (dst->cwnd + tp->snd_cwnd)>>1;
 		} else {
@@ -617,7 +617,7 @@
 	else if (cwnd > tp->snd_ssthresh)
 		cwnd = tp->snd_ssthresh;
 
-	return min(cwnd, tp->snd_cwnd_clamp);
+	return min(u32, cwnd, tp->snd_cwnd_clamp);
 }
 
 /* Initialize metrics on socket. */
@@ -668,7 +668,7 @@
 		tp->srtt = dst->rtt;
 	if (dst->rttvar > tp->mdev) {
 		tp->mdev = dst->rttvar;
-		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+		tp->mdev_max = tp->rttvar = max(u32, tp->mdev, TCP_RTO_MIN);
 	}
 	tcp_set_rto(tp);
 	tcp_bound_rto(tp);
@@ -693,7 +693,7 @@
 static void tcp_update_reordering(struct tcp_opt *tp, int metric, int ts)
 {
 	if (metric > tp->reordering) {
-		tp->reordering = min(TCP_MAX_REORDERING, metric);
+		tp->reordering = min(unsigned int, TCP_MAX_REORDERING, metric);
 
 		/* This exciting event is worth to be remembered. 8) */
 		if (ts)
@@ -848,12 +848,12 @@
 				if (sacked&TCPCB_RETRANS) {
 					if ((dup_sack && in_sack) &&
 					    (sacked&TCPCB_SACKED_ACKED))
-						reord = min(fack_count, reord);
+						reord = min(int, fack_count, reord);
 				} else {
 					/* If it was in a hole, we detected reordering. */
 					if (fack_count < prior_fackets &&
 					    !(sacked&TCPCB_SACKED_ACKED))
-						reord = min(fack_count, reord);
+						reord = min(int, fack_count, reord);
 				}
 
 				/* Nothing to do; acked frame is about to be dropped. */
@@ -885,7 +885,7 @@
 					 */
 					if (!(sacked & TCPCB_RETRANS) &&
 					    fack_count < prior_fackets)
-						reord = min(fack_count, reord);
+						reord = min(int, fack_count, reord);
 
 					if (sacked & TCPCB_LOST) {
 						TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
@@ -901,7 +901,7 @@
 					tp->fackets_out = fack_count;
 			} else {
 				if (dup_sack && (sacked&TCPCB_RETRANS))
-					reord = min(fack_count, reord);
+					reord = min(int, fack_count, reord);
 			}
 
 			/* D-SACK. We can detect redundant retransmission
@@ -1019,7 +1019,7 @@
 	}
 	tcp_sync_left_out(tp);
 
-	tp->reordering = min(tp->reordering, sysctl_tcp_reordering);
+	tp->reordering = min(unsigned int, tp->reordering, sysctl_tcp_reordering);
 	tp->ca_state = TCP_CA_Loss;
 	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
@@ -1177,7 +1177,7 @@
 	 * recovery more?
 	 */
 	if (tp->packets_out <= tp->reordering &&
-	    tp->sacked_out >= max(tp->packets_out/2, sysctl_tcp_reordering) &&
+	    tp->sacked_out >= max(u32, tp->packets_out/2, sysctl_tcp_reordering) &&
 	    !tcp_may_send_now(sk, tp)) {
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
@@ -1194,7 +1194,9 @@
  */
 static void tcp_check_reno_reordering(struct tcp_opt *tp, int addend)
 {
-	int holes = min(max(tp->lost_out, 1), tp->packets_out);
+	u32 holes = min(unsigned int,
+			max(unsigned int, tp->lost_out, 1),
+			tp->packets_out);
 
 	if (tp->sacked_out + holes > tp->packets_out) {
 		tp->sacked_out = tp->packets_out - holes;
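
(Aside: this is the only hunk in the file that is more than a one-line substitution; holes is also widened from int to u32 so that the declared type agrees with the unsigned int the nested min()/max() calls now compute in.)
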
@@ -1289,7 +1291,7 @@
  */
 static __inline__ void tcp_moderate_cwnd(struct tcp_opt *tp)
 {
-	tp->snd_cwnd = min(tp->snd_cwnd,
+	tp->snd_cwnd = min(u32, tp->snd_cwnd,
 			   tcp_packets_in_flight(tp)+tcp_max_burst(tp));
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
@@ -1306,7 +1308,7 @@
 	if (decr && tp->snd_cwnd > tp->snd_ssthresh/2)
 		tp->snd_cwnd -= decr;
 
-	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+	tp->snd_cwnd = min(u32, tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -1338,13 +1340,15 @@
 static void tcp_undo_cwr(struct tcp_opt *tp, int undo)
 {
 	if (tp->prior_ssthresh) {
-		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
+		tp->snd_cwnd = max(unsigned int,
+				   tp->snd_cwnd, tp->snd_ssthresh<<1);
+
 		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
 			TCP_ECN_withdraw_cwr(tp);
 		}
 	} else {
-		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
+		tp->snd_cwnd = max(unsigned int, tp->snd_cwnd, tp->snd_ssthresh);
 	}
 	tcp_moderate_cwnd(tp);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -1446,7 +1450,7 @@
 
 static __inline__ void tcp_complete_cwr(struct tcp_opt *tp)
 {
-	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+	tp->snd_cwnd = min(u32, tp->snd_cwnd, tp->snd_ssthresh);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -1832,7 +1836,7 @@
 		 */
 	} else {
 		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
-				     min(tp->rto << tp->backoff, TCP_RTO_MAX));
+				     min(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
 	}
 }
 
@@ -2319,7 +2323,7 @@
 		tp->dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
 		tp->duplicate_sack[0].end_seq = end_seq;
-		tp->eff_sacks = min(tp->num_sacks+1, 4-tp->tstamp_ok);
+		tp->eff_sacks = min(unsigned int, tp->num_sacks+1, 4-tp->tstamp_ok);
 	}
 }
 
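
(The 4-tp->tstamp_ok bound is TCP option-space arithmetic: of the 40 bytes available for TCP options, a SACK option costs 2 bytes of kind/length plus 8 per block, so (40-2)/8 allows 4 blocks; with the 12 bytes the timestamp option occupies, (40-12-2)/8 leaves room for only 3.)
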
@@ -2372,7 +2376,7 @@
 			 * Decrease num_sacks.
 			 */
 			tp->num_sacks--;
-			tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+			tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
 			for(i=this_sack; i < tp->num_sacks; i++)
 				sp[i] = sp[i+1];
 			continue;
@@ -2434,7 +2438,7 @@
 	sp->start_seq = seq;
 	sp->end_seq = end_seq;
 	tp->num_sacks++;
-	tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+	tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -2471,7 +2475,7 @@
 	}
 	if (num_sacks != tp->num_sacks) {
 		tp->num_sacks = num_sacks;
-		tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+		tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
 	}
 }
 
@@ -2537,7 +2541,7 @@
 
 	if (tp->dsack) {
 		tp->dsack = 0;
-		tp->eff_sacks = min(tp->num_sacks, 4-tp->tstamp_ok);
+		tp->eff_sacks = min(unsigned int, tp->num_sacks, 4-tp->tstamp_ok);
 	}
 
 	/*  Queue data for delivery to the user.
@@ -2554,7 +2558,7 @@
 		    tp->ucopy.len &&
 		    sk->lock.users &&
 		    !tp->urg_data) {
-			int chunk = min(skb->len, tp->ucopy.len);
+			int chunk = min(unsigned int, skb->len, tp->ucopy.len);
 
 			__set_current_state(TASK_RUNNING);
 
@@ -2803,7 +2807,7 @@
 
 			if (offset < 0) BUG();
 			if (size > 0) {
-				size = min(copy, size);
+				size = min(int, copy, size);
 				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
 					BUG();
 				TCP_SKB_CB(nskb)->end_seq += size;
@@ -2882,7 +2886,7 @@
 	if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
 		tcp_clamp_window(sk, tp);
 	else if (tcp_memory_pressure)
-		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4*tp->advmss);
+		tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh, 4*tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
 	tcp_collapse(sk, sk->receive_queue.next,
@@ -2937,7 +2941,7 @@
 	if (tp->ca_state == TCP_CA_Open &&
 	    sk->socket && !test_bit(SOCK_NOSPACE, &sk->socket->flags)) {
 		/* Limited by application or receiver window. */
-		u32 win_used = max(tp->snd_cwnd_used, 2);
+		u32 win_used = max(u32, tp->snd_cwnd_used, 2);
 		if (win_used < tp->snd_cwnd) {
 			tp->snd_ssthresh = tcp_current_ssthresh(tp);
 			tp->snd_cwnd = (tp->snd_cwnd+win_used)>>1;
@@ -2963,10 +2967,10 @@
 		int sndmem, demanded;
 
 		sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
-		demanded = max(tp->snd_cwnd, tp->reordering+1);
+		demanded = max(unsigned int, tp->snd_cwnd, tp->reordering+1);
 		sndmem *= 2*demanded;
 		if (sndmem > sk->sndbuf)
-			sk->sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
+			sk->sndbuf = min(int, sndmem, sysctl_tcp_wmem[2]);
 		tp->snd_cwnd_stamp = tcp_time_stamp;
 	}
 
@@ -3516,7 +3520,7 @@
 
 		if (tp->wscale_ok == 0) {
 			tp->snd_wscale = tp->rcv_wscale = 0;
-			tp->window_clamp = min(tp->window_clamp,65535);
+			tp->window_clamp = min(u32, tp->window_clamp, 65535);
 		}
 
 		if (tp->saw_tstamp) {
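
The explicit type argument guards against the classic trap in C's comparison conversions: with the old two-argument macros, comparing a signed value against an unsigned one silently promoted the signed side to unsigned. A small user-space illustration (hypothetical demo code, not from the kernel; old_min mirrors the pre-2.4.9 macro shape, new_min the 2.4.9 one, and the statement expression requires GCC):

	#include <stdio.h>

	/* Pre-2.4.9 shape: compares in whatever type C's usual
	 * arithmetic conversions choose. */
	#define old_min(x, y)	((x) < (y) ? (x) : (y))

	/* 2.4.9 shape: both operands are forced into an explicitly
	 * named type before the comparison. */
	#define new_min(type, x, y) \
		({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })

	int main(void)
	{
		int delta = -1;		/* signed, e.g. a difference gone negative */
		unsigned int clamp = 1;

		/* -1 is converted to UINT_MAX before the comparison,
		 * so the "minimum" of -1 and 1 comes out as 1. */
		printf("old_min(-1, 1u) = %u\n", old_min(delta, clamp));

		/* Forcing a signed comparison gives the intended -1. */
		printf("new_min(int, -1, 1u) = %d\n", new_min(int, delta, clamp));
		return 0;
	}

Compiled with gcc, this prints 1 for the old macro and -1 for the typed one, which is exactly the class of bug the extra argument makes visible at every call site in the hunks above.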
