Page Menu · Home · FreeBSD

D34205.diff
No One · Temporary

D34205.diff

diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -330,22 +330,9 @@
/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
-counter_u64_t rack_badfr;
-counter_u64_t rack_badfr_bytes;
-counter_u64_t rack_rtm_prr_retran;
-counter_u64_t rack_rtm_prr_newdata;
-counter_u64_t rack_timestamp_mismatch;
-counter_u64_t rack_reorder_seen;
-counter_u64_t rack_paced_segments;
-counter_u64_t rack_unpaced_segments;
-counter_u64_t rack_calc_zero;
-counter_u64_t rack_calc_nonzero;
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
-counter_u64_t rack_per_timer_hole;
-counter_u64_t rack_large_ackcmp;
-counter_u64_t rack_small_ackcmp;
counter_u64_t rack_persists_sends;
counter_u64_t rack_persists_acks;
counter_u64_t rack_persists_loss;
@@ -358,10 +345,7 @@
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
-counter_u64_t rack_tlp_retran_fail;
counter_u64_t rack_to_tot;
-counter_u64_t rack_to_arm_rack;
-counter_u64_t rack_to_arm_tlp;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
@@ -370,8 +354,6 @@
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;
-#define MAX_NUM_OF_CNTS 13
-counter_u64_t rack_proc_comp_ack[MAX_NUM_OF_CNTS];
counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;
@@ -396,22 +378,12 @@
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;
-counter_u64_t rack_used_tlpmethod;
-counter_u64_t rack_used_tlpmethod2;
-counter_u64_t rack_enter_tlp_calc;
counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
-counter_u64_t rack_tlp_does_nada;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;
-counter_u64_t rack_sbsndptr_right;
-counter_u64_t rack_sbsndptr_wrong;
-/* Temp CPU counters */
-counter_u64_t rack_find_high;
-
-counter_u64_t rack_progress_drops;
counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
@@ -737,7 +709,6 @@
{
uint32_t stat;
int32_t error;
- int i;
error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
if (error || req->newptr == NULL)
@@ -750,30 +721,14 @@
#ifdef INVARIANTS
printf("Clearing RACK counters\n");
#endif
- counter_u64_zero(rack_badfr);
- counter_u64_zero(rack_badfr_bytes);
- counter_u64_zero(rack_rtm_prr_retran);
- counter_u64_zero(rack_rtm_prr_newdata);
- counter_u64_zero(rack_timestamp_mismatch);
- counter_u64_zero(rack_reorder_seen);
counter_u64_zero(rack_tlp_tot);
counter_u64_zero(rack_tlp_newdata);
counter_u64_zero(rack_tlp_retran);
counter_u64_zero(rack_tlp_retran_bytes);
- counter_u64_zero(rack_tlp_retran_fail);
counter_u64_zero(rack_to_tot);
- counter_u64_zero(rack_to_arm_rack);
- counter_u64_zero(rack_to_arm_tlp);
- counter_u64_zero(rack_paced_segments);
- counter_u64_zero(rack_calc_zero);
- counter_u64_zero(rack_calc_nonzero);
- counter_u64_zero(rack_unpaced_segments);
counter_u64_zero(rack_saw_enobuf);
counter_u64_zero(rack_saw_enobuf_hw);
counter_u64_zero(rack_saw_enetunreach);
- counter_u64_zero(rack_per_timer_hole);
- counter_u64_zero(rack_large_ackcmp);
- counter_u64_zero(rack_small_ackcmp);
counter_u64_zero(rack_persists_sends);
counter_u64_zero(rack_persists_acks);
counter_u64_zero(rack_persists_loss);
@@ -789,8 +744,6 @@
counter_u64_zero(rack_extended_rfo);
counter_u64_zero(rack_hw_pace_init_fail);
counter_u64_zero(rack_hw_pace_lost);
- counter_u64_zero(rack_sbsndptr_wrong);
- counter_u64_zero(rack_sbsndptr_right);
counter_u64_zero(rack_non_fto_send);
counter_u64_zero(rack_nfto_resend);
counter_u64_zero(rack_sack_proc_short);
@@ -799,12 +752,8 @@
counter_u64_zero(rack_to_alloc_limited);
counter_u64_zero(rack_alloc_limited_conns);
counter_u64_zero(rack_split_limited);
- for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
- counter_u64_zero(rack_proc_comp_ack[i]);
- }
counter_u64_zero(rack_multi_single_eq);
counter_u64_zero(rack_proc_non_comp_ack);
- counter_u64_zero(rack_find_high);
counter_u64_zero(rack_sack_attacks_detected);
counter_u64_zero(rack_sack_attacks_reversed);
counter_u64_zero(rack_sack_used_next_merge);
@@ -816,11 +765,6 @@
counter_u64_zero(rack_sack_total);
counter_u64_zero(rack_move_none);
counter_u64_zero(rack_move_some);
- counter_u64_zero(rack_used_tlpmethod);
- counter_u64_zero(rack_used_tlpmethod2);
- counter_u64_zero(rack_enter_tlp_calc);
- counter_u64_zero(rack_progress_drops);
- counter_u64_zero(rack_tlp_does_nada);
counter_u64_zero(rack_try_scwnd);
counter_u64_zero(rack_collapsed_win);
}
@@ -831,7 +775,6 @@
static void
rack_init_sysctls(void)
{
- int i;
struct sysctl_oid *rack_counters;
struct sysctl_oid *rack_attack;
struct sysctl_oid *rack_pacing;
@@ -1596,46 +1539,6 @@
SYSCTL_CHILDREN(rack_counters),
OID_AUTO, "hwpace_lost", CTLFLAG_RD,
&rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing");
- rack_badfr = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "badfr", CTLFLAG_RD,
- &rack_badfr, "Total number of bad FRs");
- rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "badfr_bytes", CTLFLAG_RD,
- &rack_badfr_bytes, "Total number of bad FRs");
- rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "prrsndret", CTLFLAG_RD,
- &rack_rtm_prr_retran,
- "Total number of prr based retransmits");
- rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "prrsndnew", CTLFLAG_RD,
- &rack_rtm_prr_newdata,
- "Total number of prr based new transmits");
- rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "tsnf", CTLFLAG_RD,
- &rack_timestamp_mismatch,
- "Total number of timestamps that we could not find the reported ts");
- rack_find_high = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "findhigh", CTLFLAG_RD,
- &rack_find_high,
- "Total number of FIN causing find-high");
- rack_reorder_seen = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "reordering", CTLFLAG_RD,
- &rack_reorder_seen,
- "Total number of times we added delay due to reordering");
rack_tlp_tot = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_counters),
@@ -1660,54 +1563,12 @@
OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
&rack_tlp_retran_bytes,
"Total bytes of tail loss probe sending retransmitted data");
- rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
- &rack_tlp_retran_fail,
- "Total number of tail loss probe sending retransmitted data that failed (wait for t3)");
rack_to_tot = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_counters),
OID_AUTO, "rack_to_tot", CTLFLAG_RD,
&rack_to_tot,
"Total number of times the rack to expired");
- rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "arm_rack", CTLFLAG_RD,
- &rack_to_arm_rack,
- "Total number of times the rack timer armed");
- rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "arm_tlp", CTLFLAG_RD,
- &rack_to_arm_tlp,
- "Total number of times the tlp timer armed");
- rack_calc_zero = counter_u64_alloc(M_WAITOK);
- rack_calc_nonzero = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "calc_zero", CTLFLAG_RD,
- &rack_calc_zero,
- "Total number of times pacing time worked out to zero");
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "calc_nonzero", CTLFLAG_RD,
- &rack_calc_nonzero,
- "Total number of times pacing time worked out to non-zero");
- rack_paced_segments = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "paced", CTLFLAG_RD,
- &rack_paced_segments,
- "Total number of times a segment send caused hptsi");
- rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "unpaced", CTLFLAG_RD,
- &rack_unpaced_segments,
- "Total number of times a segment did not cause hptsi");
rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_counters),
@@ -1768,23 +1629,6 @@
OID_AUTO, "split_limited", CTLFLAG_RD,
&rack_split_limited,
"Split allocations dropped due to limit");
-
- for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
- char name[32];
- sprintf(name, "cmp_ack_cnt_%d", i);
- rack_proc_comp_ack[i] = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, name, CTLFLAG_RD,
- &rack_proc_comp_ack[i],
- "Number of compressed acks we processed");
- }
- rack_large_ackcmp = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "cmp_large_mbufs", CTLFLAG_RD,
- &rack_large_ackcmp,
- "Number of TCP connections with large mbuf's for compressed acks");
rack_persists_sends = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_counters),
@@ -1809,12 +1653,6 @@
OID_AUTO, "persist_loss_ends", CTLFLAG_RD,
&rack_persists_lost_ends,
"Number of lost persist probe (no ack) that the run ended with a PERSIST abort");
- rack_small_ackcmp = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "cmp_small_mbufs", CTLFLAG_RD,
- &rack_small_ackcmp,
- "Number of TCP connections with small mbuf's for compressed acks");
#ifdef INVARIANTS
rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
@@ -1855,24 +1693,6 @@
OID_AUTO, "sack_short", CTLFLAG_RD,
&rack_sack_proc_short,
"Total times we took shortcut for sack processing");
- rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
- &rack_enter_tlp_calc,
- "Total times we called calc-tlp");
- rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
- &rack_used_tlpmethod,
- "Total number of runt sacks");
- rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
- &rack_used_tlpmethod2,
- "Total number of times we hit TLP method 2");
rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_attack),
@@ -1885,12 +1705,6 @@
OID_AUTO, "ofsplit", CTLFLAG_RD,
&rack_sack_splits,
"Total number of times we did the old fashion tree split");
- rack_progress_drops = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "prog_drops", CTLFLAG_RD,
- &rack_progress_drops,
- "Total number of progress drops");
rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_counters),
@@ -1903,37 +1717,12 @@
OID_AUTO, "collapsed_win", CTLFLAG_RD,
&rack_collapsed_win,
"Total number of collapsed windows");
- rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "tlp_nada", CTLFLAG_RD,
- &rack_tlp_does_nada,
- "Total number of nada tlp calls");
rack_try_scwnd = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_counters),
OID_AUTO, "tried_scwnd", CTLFLAG_RD,
&rack_try_scwnd,
"Total number of scwnd attempts");
-
- rack_per_timer_hole = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "timer_hole", CTLFLAG_RD,
- &rack_per_timer_hole,
- "Total persists start in timer hole");
-
- rack_sbsndptr_wrong = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "sndptr_wrong", CTLFLAG_RD,
- &rack_sbsndptr_wrong, "Total number of times the saved sbsndptr was incorrect");
- rack_sbsndptr_right = counter_u64_alloc(M_WAITOK);
- SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
- SYSCTL_CHILDREN(rack_counters),
- OID_AUTO, "sndptr_right", CTLFLAG_RD,
- &rack_sbsndptr_right, "Total number of times the saved sbsndptr was correct");
-
COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
OID_AUTO, "outsize", CTLFLAG_RD,
@@ -2894,8 +2683,6 @@
static void
rack_counter_destroy(void)
{
- int i;
-
counter_u64_free(rack_fto_send);
counter_u64_free(rack_fto_rsm_send);
counter_u64_free(rack_nfto_resend);
@@ -2912,25 +2699,11 @@
counter_u64_free(rack_sack_attacks_reversed);
counter_u64_free(rack_sack_used_next_merge);
counter_u64_free(rack_sack_used_prev_merge);
- counter_u64_free(rack_badfr);
- counter_u64_free(rack_badfr_bytes);
- counter_u64_free(rack_rtm_prr_retran);
- counter_u64_free(rack_rtm_prr_newdata);
- counter_u64_free(rack_timestamp_mismatch);
- counter_u64_free(rack_find_high);
- counter_u64_free(rack_reorder_seen);
counter_u64_free(rack_tlp_tot);
counter_u64_free(rack_tlp_newdata);
counter_u64_free(rack_tlp_retran);
counter_u64_free(rack_tlp_retran_bytes);
- counter_u64_free(rack_tlp_retran_fail);
counter_u64_free(rack_to_tot);
- counter_u64_free(rack_to_arm_rack);
- counter_u64_free(rack_to_arm_tlp);
- counter_u64_free(rack_calc_zero);
- counter_u64_free(rack_calc_nonzero);
- counter_u64_free(rack_paced_segments);
- counter_u64_free(rack_unpaced_segments);
counter_u64_free(rack_saw_enobuf);
counter_u64_free(rack_saw_enobuf_hw);
counter_u64_free(rack_saw_enetunreach);
@@ -2941,27 +2714,16 @@
counter_u64_free(rack_to_alloc_limited);
counter_u64_free(rack_alloc_limited_conns);
counter_u64_free(rack_split_limited);
- for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
- counter_u64_free(rack_proc_comp_ack[i]);
- }
counter_u64_free(rack_multi_single_eq);
counter_u64_free(rack_proc_non_comp_ack);
counter_u64_free(rack_sack_proc_all);
counter_u64_free(rack_sack_proc_restart);
counter_u64_free(rack_sack_proc_short);
- counter_u64_free(rack_enter_tlp_calc);
- counter_u64_free(rack_used_tlpmethod);
- counter_u64_free(rack_used_tlpmethod2);
counter_u64_free(rack_sack_skipped_acked);
counter_u64_free(rack_sack_splits);
- counter_u64_free(rack_progress_drops);
counter_u64_free(rack_input_idle_reduces);
counter_u64_free(rack_collapsed_win);
- counter_u64_free(rack_tlp_does_nada);
counter_u64_free(rack_try_scwnd);
- counter_u64_free(rack_per_timer_hole);
- counter_u64_free(rack_large_ackcmp);
- counter_u64_free(rack_small_ackcmp);
counter_u64_free(rack_persists_sends);
counter_u64_free(rack_persists_acks);
counter_u64_free(rack_persists_loss);
@@ -3085,8 +2847,6 @@
}
if (rsm == rack->r_ctl.rc_resend)
rack->r_ctl.rc_resend = NULL;
- if (rsm == rack->r_ctl.rc_rsm_at_retran)
- rack->r_ctl.rc_rsm_at_retran = NULL;
if (rsm == rack->r_ctl.rc_end_appl)
rack->r_ctl.rc_end_appl = NULL;
if (rack->r_ctl.rc_tlpsend == rsm)
@@ -5126,7 +4886,6 @@
* the highest seq not acked. In theory when this is called it
* should be the last segment (which it was not).
*/
- counter_u64_add(rack_find_high, 1);
prsm = rsm;
RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
@@ -5240,7 +4999,6 @@
/* Get the previous sent packet, if any */
segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
- counter_u64_add(rack_enter_tlp_calc, 1);
len = rsm->r_end - rsm->r_start;
if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
/* Exactly like the ID */
@@ -5291,7 +5049,6 @@
/*
* Compensate for delayed-ack with the d-ack time.
*/
- counter_u64_add(rack_used_tlpmethod, 1);
alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
if (alt_thresh > thresh)
thresh = alt_thresh;
@@ -5366,15 +5123,6 @@
return (NULL);
}
/* Ok if we reach here we are over-due and this guy can be sent */
- if (IN_RECOVERY(tp->t_flags) == 0) {
- /*
- * For the one that enters us into recovery record undo
- * info.
- */
- rack->r_ctl.rc_rsm_start = rsm->r_start;
- rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
- rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
- }
rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
return (rsm);
}
@@ -5966,7 +5714,6 @@
*/
slot = hpts_timeout;
}
- rack->r_ctl.last_pacing_time = slot;
/**
* Turn off all the flags for queuing by default. The
* flags have important meanings to what happens when
@@ -6409,7 +6156,6 @@
}
}
if (rsm == NULL) {
- counter_u64_add(rack_tlp_does_nada, 1);
#ifdef TCP_BLACKBOX
tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
#endif
@@ -6430,7 +6176,6 @@
/* None? if so send the first */
rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
if (rsm == NULL) {
- counter_u64_add(rack_tlp_does_nada, 1);
#ifdef TCP_BLACKBOX
tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
#endif
@@ -6450,7 +6195,6 @@
* No memory to split, we will just exit and punt
* off to the RXT timer.
*/
- counter_u64_add(rack_tlp_does_nada, 1);
goto out;
}
rack_clone_rsm(rack, nrsm, rsm,
@@ -7502,7 +7246,6 @@
/* We don't log zero window probes */
return;
}
- rack->r_ctl.rc_time_last_sent = cts;
if (IN_FASTRECOVERY(tp->t_flags)) {
rack->r_ctl.rc_prr_out += len;
}
@@ -7571,9 +7314,7 @@
__func__, rack, s_moff, s_mb, rsm->soff));
}
rsm->m = lm;
- counter_u64_add(rack_sbsndptr_wrong, 1);
- } else
- counter_u64_add(rack_sbsndptr_right, 1);
+ }
rsm->orig_m_len = rsm->m->m_len;
} else
rsm->orig_m_len = 0;
@@ -7953,12 +7694,6 @@
old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
us_rtt, us_cts);
- if (rack->r_ctl.last_pacing_time &&
- rack->rc_gp_dyn_mul &&
- (rack->r_ctl.last_pacing_time > us_rtt))
- rack->pacing_longer_than_rtt = 1;
- else
- rack->pacing_longer_than_rtt = 0;
if (old_rtt > us_rtt) {
/* We just hit a new lower rtt time */
rack_log_rtt_shrinks(rack, us_cts, old_rtt,
@@ -8095,9 +7830,6 @@
(!IN_FASTRECOVERY(tp->t_flags))) {
/* Segment was a TLP and our retrans matched */
if (rack->r_ctl.rc_tlp_cwnd_reduce) {
- rack->r_ctl.rc_rsm_start = tp->snd_max;
- rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
- rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
}
}
@@ -8539,7 +8271,6 @@
changed += (nrsm->r_end - nrsm->r_start);
rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
if (nrsm->r_flags & RACK_SACK_PASSED) {
- counter_u64_add(rack_reorder_seen, 1);
rack->r_ctl.rc_reorder_ts = cts;
}
/*
@@ -8703,7 +8434,6 @@
/* Is Reordering occuring? */
if (rsm->r_flags & RACK_SACK_PASSED) {
rsm->r_flags &= ~RACK_SACK_PASSED;
- counter_u64_add(rack_reorder_seen, 1);
rack->r_ctl.rc_reorder_ts = cts;
}
if (rack->app_limited_needs_set)
@@ -8827,7 +8557,6 @@
changed += (nrsm->r_end - nrsm->r_start);
rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
if (nrsm->r_flags & RACK_SACK_PASSED) {
- counter_u64_add(rack_reorder_seen, 1);
rack->r_ctl.rc_reorder_ts = cts;
}
rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
@@ -8924,7 +8653,6 @@
/* Is Reordering occuring? */
if (rsm->r_flags & RACK_SACK_PASSED) {
rsm->r_flags &= ~RACK_SACK_PASSED;
- counter_u64_add(rack_reorder_seen, 1);
rack->r_ctl.rc_reorder_ts = cts;
}
if (rack->app_limited_needs_set)
@@ -9265,7 +8993,6 @@
* reordering.
*/
rsm->r_flags &= ~RACK_SACK_PASSED;
- counter_u64_add(rack_reorder_seen, 1);
rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
rsm->r_flags |= RACK_ACKED;
rack->r_ctl.rc_reorder_ts = cts;
@@ -9408,7 +9135,6 @@
rack->r_ent_rec_ns = 0;
orig_cwnd = tp->snd_cwnd;
- tp->snd_cwnd = rack->r_ctl.rc_cwnd_at_erec;
tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
tp->snd_recover = tp->snd_una;
rack_log_to_prr(rack, 14, orig_cwnd);
@@ -9955,9 +9681,6 @@
if ((!IN_FASTRECOVERY(tp->t_flags)) &&
rsm) {
/* Enter recovery */
- rack->r_ctl.rc_rsm_start = rsm->r_start;
- rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
- rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
entered_recovery = 1;
rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
/*
@@ -10331,7 +10054,6 @@
* less than and we have not closed our window.
*/
if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
- counter_u64_add(rack_reorder_seen, 1);
rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
}
return (0);
@@ -12736,8 +12458,6 @@
rack->r_ctl.rc_min_to = rack_min_to;
microuptime(&rack->r_ctl.act_rcv_time);
rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
- rack->r_running_late = 0;
- rack->r_running_early = 0;
rack->rc_init_win = rack_default_init_window;
rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
if (rack_hw_up_only)
@@ -12859,9 +12579,9 @@
tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
if (rack_do_hystart) {
tp->ccv->flags |= CCF_HYSTART_ALLOWED;
- if (rack_do_hystart > 1)
+ if (rack_do_hystart > 1)
tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND;
- if (rack_do_hystart > 2)
+ if (rack_do_hystart > 2)
tp->ccv->flags |= CCF_HYSTART_CONS_SSTH;
}
if (rack_def_profile)
@@ -12953,8 +12673,6 @@
static void
rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
{
- int ack_cmp = 0;
-
if (tp->t_fb_ptr) {
struct tcp_rack *rack;
struct rack_sendmap *rsm, *nrsm;
@@ -12983,16 +12701,6 @@
m_freem(m);
m = save;
}
- if ((tp->t_inpcb) &&
- (tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP))
- ack_cmp = 1;
- if (ack_cmp) {
- /* Total if we used large or small (if ack-cmp was used). */
- if (rack->rc_inp->inp_flags2 & INP_MBUF_L_ACKS)
- counter_u64_add(rack_large_ackcmp, 1);
- else
- counter_u64_add(rack_small_ackcmp, 1);
- }
}
tp->t_flags &= ~TF_FORCEDATA;
#ifdef NETFLIX_SHARED_CWND
@@ -13545,7 +13253,6 @@
int nsegs = 0;
int under_pacing = 1;
int recovery = 0;
- int idx;
#ifdef TCP_ACCOUNTING
sched_pin();
#endif
@@ -13589,10 +13296,6 @@
KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
cnt = m->m_len / sizeof(struct tcp_ackent);
- idx = cnt / 5;
- if (idx >= MAX_NUM_OF_CNTS)
- idx = MAX_NUM_OF_CNTS - 1;
- counter_u64_add(rack_proc_comp_ack[idx], 1);
counter_u64_add(rack_multi_single_eq, cnt);
high_seq = tp->snd_una;
the_win = tp->snd_wnd;
@@ -13600,6 +13303,7 @@
win_upd_ack = tp->snd_wl2;
cts = tcp_tv_to_usectick(tv);
ms_cts = tcp_tv_to_mssectick(tv);
+ rack->r_ctl.rc_rcvtime = cts;
segsiz = ctf_fixed_maxseg(tp);
if ((rack->rc_gp_dyn_mul) &&
(rack->use_fixed_rate == 0) &&
@@ -13615,6 +13319,8 @@
ae = ((mtod(m, struct tcp_ackent *)) + i);
/* Setup the window */
tiwin = ae->win << tp->snd_scale;
+ if (tiwin > rack->r_ctl.rc_high_rwnd)
+ rack->r_ctl.rc_high_rwnd = tiwin;
/* figure out the type of ack */
if (SEQ_LT(ae->ack, high_seq)) {
/* Case B*/
@@ -13704,7 +13410,6 @@
* or it could be a keep-alive or persists
*/
if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
- counter_u64_add(rack_reorder_seen, 1);
rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
}
} else if (ae->ack_val_set == ACK_DUPACK) {
@@ -13775,7 +13480,7 @@
tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
0, &log, false, NULL, NULL, 0, &tv);
}
- /*
+ /*
* The draft (v3) calls for us to use SEQ_GEQ, but that
* causes issues when we are just going app limited. Lets
* instead use SEQ_GT <or> where its equal but more data
@@ -14659,7 +14364,7 @@
tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
0, &log, false, NULL, NULL, 0, &tv);
}
- /*
+ /*
* The draft (v3) calls for us to use SEQ_GEQ, but that
* causes issues when we are just going app limited. Lets
* instead use SEQ_GT <or> where its equal but more data
@@ -15355,10 +15060,6 @@
hw_boost_delay = rack_enobuf_hw_min;
slot += hw_boost_delay;
}
- if (slot)
- counter_u64_add(rack_calc_nonzero, 1);
- else
- counter_u64_add(rack_calc_zero, 1);
return (slot);
}
@@ -17105,9 +16806,6 @@
if ((!IN_FASTRECOVERY(tp->t_flags)) &&
((tp->t_flags & TF_WASFRECOVERY) == 0)) {
/* Enter recovery if not induced by a time-out */
- rack->r_ctl.rc_rsm_start = rsm->r_start;
- rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
- rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
}
#ifdef INVARIANTS
@@ -17130,7 +16828,6 @@
KMOD_TCPSTAT_INC(tcps_sack_rexmits);
KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
min(len, segsiz));
- counter_u64_add(rack_rtm_prr_retran, 1);
}
} else if (rack->r_ctl.rc_tlpsend) {
/* Tail loss probe */
@@ -17239,10 +16936,6 @@
flags &= ~TH_FIN;
}
}
-#ifdef INVARIANTS
- /* For debugging */
- rack->r_ctl.rc_rsm_at_retran = rsm;
-#endif
if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo &&
((rsm->r_flags & RACK_HAS_FIN) == 0)) {
int ret;
@@ -17400,7 +17093,6 @@
}
if (len > 0) {
sub_from_prr = 1;
- counter_u64_add(rack_rtm_prr_newdata, 1);
}
}
if (len > segsiz) {
@@ -18040,12 +17732,6 @@
tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0);
}
}
- if (slot) {
- /* set the rack tcb into the slot N */
- counter_u64_add(rack_paced_segments, 1);
- } else if (tot_len_this_send) {
- counter_u64_add(rack_unpaced_segments, 1);
- }
/* Check if we need to go into persists or not */
if ((tp->snd_max == tp->snd_una) &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
@@ -19310,7 +18996,6 @@
}
if (slot) {
/* set the rack tcb into the slot N */
- counter_u64_add(rack_paced_segments, 1);
if ((error == 0) &&
rack_use_rfo &&
((flags & (TH_SYN|TH_FIN)) == 0) &&
@@ -19358,8 +19043,6 @@
} else if (sendalot) {
int ret;
- if (len)
- counter_u64_add(rack_unpaced_segments, 1);
sack_rxmit = 0;
if ((error == 0) &&
rack_use_rfo &&
@@ -19413,8 +19096,6 @@
}
}
goto again;
- } else if (len) {
- counter_u64_add(rack_unpaced_segments, 1);
}
/* Assure when we leave that snd_nxt will point to top */
if (SEQ_GT(tp->snd_max, tp->snd_nxt))
@@ -20245,9 +19926,9 @@
{
if (optval) {
tp->ccv->flags |= CCF_HYSTART_ALLOWED;
- if (rack_do_hystart > RACK_HYSTART_ON)
+ if (rack_do_hystart > RACK_HYSTART_ON)
tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND;
- if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
+ if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
tp->ccv->flags |= CCF_HYSTART_CONS_SSTH;
} else {
tp->ccv->flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
diff --git a/sys/netinet/tcp_stacks/tcp_rack.h b/sys/netinet/tcp_stacks/tcp_rack.h
--- a/sys/netinet/tcp_stacks/tcp_rack.h
+++ b/sys/netinet/tcp_stacks/tcp_rack.h
@@ -158,23 +158,6 @@
#define RACK_LOG_TYPE_ALLOC 0x04
#define RACK_LOG_TYPE_FREE 0x05
-struct rack_log {
- union {
- struct rack_sendmap *rsm; /* For alloc/free */
- uint64_t sb_acc;/* For out/ack or t-o */
- };
- uint32_t th_seq;
- uint32_t th_ack;
- uint32_t snd_una;
- uint32_t snd_nxt; /* th_win for TYPE_ACK */
- uint32_t snd_max;
- uint32_t blk_start[4];
- uint32_t blk_end[4];
- uint8_t type;
- uint8_t n_sackblks;
- uint16_t len; /* Timeout T3=1, TLP=2, RACK=3 */
-};
-
/*
* Magic numbers for logging timeout events if the
* logging is enabled.
@@ -373,8 +356,6 @@
uint64_t last_hw_bw_req;
uint64_t crte_prev_rate;
uint64_t bw_rate_cap;
- uint32_t rc_time_last_sent; /* Time we last sent some data and
- * logged it Lock(a). */
uint32_t rc_reorder_ts; /* Last time we saw reordering Lock(a) */
uint32_t rc_tlp_new_data; /* we need to send new-data on a TLP
@@ -402,11 +383,6 @@
uint32_t rc_rack_tmit_time; /* Rack transmit time Lock(a) */
uint32_t rc_holes_rxt; /* Tot retraned from scoreboard Lock(a) */
- /* Variables to track bad retransmits and recover */
- uint32_t rc_rsm_start; /* RSM seq number we retransmitted Lock(a) */
- uint32_t rc_cwnd_at; /* cwnd at the retransmit Lock(a) */
-
- uint32_t rc_ssthresh_at;/* ssthresh at the retransmit Lock(a) */
uint32_t rc_num_maps_alloced; /* Number of map blocks (sacks) we
* have allocated */
uint32_t rc_rcvtime; /* When we last received data */
@@ -418,16 +394,12 @@
struct rack_sendmap *rc_sacklast; /* sack remembered place
* Lock(a) */
- struct rack_sendmap *rc_rsm_at_retran; /* Debug variable kept for
- * cache line alignment
- * Lock(a) */
struct rack_sendmap *rc_first_appl; /* Pointer to first app limited */
struct rack_sendmap *rc_end_appl; /* Pointer to last app limited */
/* Cache line split 0x100 */
struct sack_filter rack_sf;
/* Cache line split 0x140 */
/* Flags for various things */
- uint32_t last_pacing_time;
uint32_t rc_pace_max_segs;
uint32_t rc_pace_min_segs;
uint32_t rc_app_limited_cnt;
@@ -518,7 +490,6 @@
uint8_t rc_tlp_cwnd_reduce; /* Socket option value Lock(a) */
uint8_t rc_prr_sendalot;/* Socket option value Lock(a) */
uint8_t rc_rate_sample_method;
- uint8_t rc_gp_hist_idx;
};
#endif
@@ -529,8 +500,8 @@
#define RACK_HYSTART_ON 1 /* hystart++ on */
#define RACK_HYSTART_ON_W_SC 2 /* hystart++ on +Slam Cwnd */
#define RACK_HYSTART_ON_W_SC_C 3 /* hystart++ on,
- * Conservative ssthresh and
- * +Slam cwnd
+ * Conservative ssthresh and
+ * +Slam cwnd
*/
#ifdef _KERNEL
@@ -605,8 +576,8 @@
rc_dragged_bottom: 1,
rc_dack_mode : 1, /* Mac O/S emulation of d-ack */
rc_dack_toggle : 1, /* For Mac O/S emulation of d-ack */
- pacing_longer_than_rtt : 1,
- rc_gp_filled : 1;
+ rc_gp_filled : 1,
+ rc_is_spare : 1;
uint8_t r_state; /* Current rack state Lock(a) */
uint8_t rc_tmr_stopped : 7,
t_timers_stopped : 1;
@@ -642,13 +613,11 @@
sack_attack_disable : 1,
do_detection : 1,
rc_force_max_seg : 1;
- uint8_t rack_cwnd_limited : 1,
- r_early : 1,
+ uint8_t r_early : 1,
r_late : 1,
- r_running_early : 1,
- r_running_late : 1,
r_wanted_output: 1,
- r_rr_config : 2;
+ r_rr_config : 2,
+ rc_avail_bit : 3;
uint16_t rc_init_win : 8,
rc_gp_rtt_set : 1,
rc_gp_dyn_mul : 1,

File Metadata

Mime Type
text/plain
Expires
Sun, Sep 29, 3:28 PM (22 h, 11 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
13137842
Default Alt Text
D34205.diff (31 KB)

Event Timeline