Line data Source code
1 : /*-
2 : * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 : * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 : * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 : *
6 : * Redistribution and use in source and binary forms, with or without
7 : * modification, are permitted provided that the following conditions are met:
8 : *
9 : * a) Redistributions of source code must retain the above copyright notice,
10 : * this list of conditions and the following disclaimer.
11 : *
12 : * b) Redistributions in binary form must reproduce the above copyright
13 : * notice, this list of conditions and the following disclaimer in
14 : * the documentation and/or other materials provided with the distribution.
15 : *
16 : * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 : * contributors may be used to endorse or promote products derived
18 : * from this software without specific prior written permission.
19 : *
20 : * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 : * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 : * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 : * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 : * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 : * THE POSSIBILITY OF SUCH DAMAGE.
31 : */
32 :
33 : #ifdef __FreeBSD__
34 : #include <sys/cdefs.h>
35 : __FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 280440 2015-03-24 15:05:36Z tuexen $");
36 : #endif
37 :
38 : #include <netinet/sctp_os.h>
39 : #include <netinet/sctp_var.h>
40 : #include <netinet/sctp_sysctl.h>
41 : #include <netinet/sctp_pcb.h>
42 : #include <netinet/sctp_header.h>
43 : #include <netinet/sctputil.h>
44 : #include <netinet/sctp_output.h>
45 : #include <netinet/sctp_input.h>
46 : #include <netinet/sctp_indata.h>
47 : #include <netinet/sctp_uio.h>
48 : #include <netinet/sctp_timer.h>
49 :
50 :
51 : /*
52 : * NOTES: On the outbound side of things I need to check the sack timer to
53 : * see if I should generate a sack into the chunk queue (if I have data to
54 : * send that is and will be sending it .. for bundling.
55 : *
56 : * The callback in sctp_usrreq.c will get called when the socket is read from.
57 : * This will cause sctp_service_queues() to get called on the top entry in
58 : * the list.
59 : */
60 :
 : /* Recompute the receive window (a_rwnd) we advertise for this
 :  * association and cache it in asoc->my_rwnd. Thin wrapper around
 :  * sctp_calc_rwnd(). */
61 : void
62 0 : sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
63 : {
64 0 : asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 0 : }
66 :
67 : /* Calculate what the rwnd would be */
 : /*
 : * Compute the receive window to advertise:
 : * - nothing if there is no socket;
 : * - the full socket receive limit (at least SCTP_MINIMAL_RWND) when
 : * nothing is buffered on the socket, reassembly queue, or streams;
 : * - otherwise, socket-buffer space minus the data (plus per-chunk
 : * MSIZE mbuf overhead) still held on the reassembly and stream
 : * queues, minus control-chunk overhead (my_rwnd_control_len).
 : * Returns 0 when completely out of space; otherwise clamps to 1 when
 : * control overhead dominates (silly-window avoidance, see below).
 : */
68 : uint32_t
69 0 : sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
70 : {
71 0 : uint32_t calc = 0;
72 :
73 : /*
74 : * This is really set wrong with respect to a 1-2-m socket. Since
75 : * the sb_cc is the count that everyone as put up. When we re-write
76 : * sctp_soreceive then we will fix this so that ONLY this
77 : * associations data is taken into account.
78 : */
79 0 : if (stcb->sctp_socket == NULL)
80 0 : return (calc);
81 :
82 0 : if (stcb->asoc.sb_cc == 0 &&
83 0 : asoc->size_on_reasm_queue == 0 &&
84 0 : asoc->size_on_all_streams == 0) {
85 : /* Full rwnd granted */
86 0 : calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 0 : return (calc);
88 : }
89 : /* get actual space */
90 0 : calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 :
92 : /*
93 : * take out what has NOT been put on socket queue and we yet hold
94 : * for putting up.
95 : */
 : /* sctp_sbspace_sub() saturates at 0 rather than wrapping. */
96 0 : calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
97 : asoc->cnt_on_reasm_queue * MSIZE));
98 0 : calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
99 : asoc->cnt_on_all_streams * MSIZE));
100 :
101 0 : if (calc == 0) {
102 : /* out of space */
103 0 : return (calc);
104 : }
105 :
106 : /* what is the overhead of all these rwnd's */
107 0 : calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
108 : /* If the window gets too small due to ctrl-stuff, reduce it
109 : * to 1, even it is 0. SWS engaged
110 : */
111 0 : if (calc < stcb->asoc.my_rwnd_control_len) {
112 0 : calc = 1;
113 : }
114 0 : return (calc);
115 : }
116 :
117 :
118 :
119 : /*
120 : * Build out our readq entry based on the incoming packet.
 : * Allocates a sctp_queued_to_read, fills its sinfo_* fields from the
 : * supplied DATA-chunk attributes, takes a reference on 'net' (stored
 : * as whoFrom) and attaches 'dm' as the data chain without copying.
 : * Returns NULL if the readq allocation fails; in that case no
 : * reference on 'net' has been taken and 'dm' is untouched.
121 : */
122 : struct sctp_queued_to_read *
123 0 : sctp_build_readq_entry(struct sctp_tcb *stcb,
124 : struct sctp_nets *net,
125 : uint32_t tsn, uint32_t ppid,
126 : uint32_t context, uint16_t stream_no,
127 : uint16_t stream_seq, uint8_t flags,
128 : struct mbuf *dm)
129 : {
130 0 : struct sctp_queued_to_read *read_queue_e = NULL;
131 :
132 0 : sctp_alloc_a_readq(stcb, read_queue_e);
133 0 : if (read_queue_e == NULL) {
134 0 : goto failed_build;
135 : }
136 0 : read_queue_e->sinfo_stream = stream_no;
137 0 : read_queue_e->sinfo_ssn = stream_seq;
 : /* Chunk flags are carried in the upper byte of sinfo_flags. */
138 0 : read_queue_e->sinfo_flags = (flags << 8);
139 0 : read_queue_e->sinfo_ppid = ppid;
140 0 : read_queue_e->sinfo_context = context;
141 0 : read_queue_e->sinfo_timetolive = 0;
142 0 : read_queue_e->sinfo_tsn = tsn;
143 0 : read_queue_e->sinfo_cumtsn = tsn;
144 0 : read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
145 0 : read_queue_e->whoFrom = net;
146 0 : read_queue_e->length = 0;
 : /* Hold the source net while this entry sits on the read queue. */
147 0 : atomic_add_int(&net->ref_count, 1);
148 0 : read_queue_e->data = dm;
149 0 : read_queue_e->spec_flags = 0;
150 0 : read_queue_e->tail_mbuf = NULL;
151 0 : read_queue_e->aux_data = NULL;
152 0 : read_queue_e->stcb = stcb;
153 0 : read_queue_e->port_from = stcb->rport;
154 0 : read_queue_e->do_not_ref_stcb = 0;
155 0 : read_queue_e->end_added = 0;
156 0 : read_queue_e->some_taken = 0;
157 0 : read_queue_e->pdapi_aborted = 0;
158 : failed_build:
159 0 : return (read_queue_e);
160 : }
161 :
162 :
163 : /*
164 : * Build out our readq entry based on the incoming packet.
 : * Variant of sctp_build_readq_entry() that pulls all the DATA-chunk
 : * attributes from an existing sctp_tmit_chunk: sinfo_* fields come
 : * from chk->rec.data, a reference is taken on chk->whoTo, and
 : * chk->data is referenced (not copied). Returns NULL if the readq
 : * allocation fails; then no net reference has been taken.
165 : */
166 : static struct sctp_queued_to_read *
167 0 : sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
168 : struct sctp_tmit_chunk *chk)
169 : {
170 0 : struct sctp_queued_to_read *read_queue_e = NULL;
171 :
172 0 : sctp_alloc_a_readq(stcb, read_queue_e);
173 0 : if (read_queue_e == NULL) {
174 0 : goto failed_build;
175 : }
176 0 : read_queue_e->sinfo_stream = chk->rec.data.stream_number;
177 0 : read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
 : /* Chunk flags are carried in the upper byte of sinfo_flags. */
178 0 : read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
179 0 : read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
180 0 : read_queue_e->sinfo_context = stcb->asoc.context;
181 0 : read_queue_e->sinfo_timetolive = 0;
182 0 : read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
183 0 : read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
184 0 : read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
185 0 : read_queue_e->whoFrom = chk->whoTo;
186 0 : read_queue_e->aux_data = NULL;
187 0 : read_queue_e->length = 0;
 : /* Hold the source net while this entry sits on the read queue. */
188 0 : atomic_add_int(&chk->whoTo->ref_count, 1);
189 0 : read_queue_e->data = chk->data;
190 0 : read_queue_e->tail_mbuf = NULL;
191 0 : read_queue_e->stcb = stcb;
192 0 : read_queue_e->port_from = stcb->rport;
193 0 : read_queue_e->spec_flags = 0;
194 0 : read_queue_e->do_not_ref_stcb = 0;
195 0 : read_queue_e->end_added = 0;
196 0 : read_queue_e->some_taken = 0;
197 0 : read_queue_e->pdapi_aborted = 0;
198 : failed_build:
199 0 : return (read_queue_e);
200 : }
201 :
202 :
203 : struct mbuf *
204 0 : sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
205 : {
206 : struct sctp_extrcvinfo *seinfo;
207 : struct sctp_sndrcvinfo *outinfo;
208 : struct sctp_rcvinfo *rcvinfo;
209 : struct sctp_nxtinfo *nxtinfo;
210 : #if defined(__Userspace_os_Windows)
211 : WSACMSGHDR *cmh;
212 : #else
213 : struct cmsghdr *cmh;
214 : #endif
215 : struct mbuf *ret;
216 : int len;
217 : int use_extended;
218 : int provide_nxt;
219 :
220 0 : if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
221 0 : sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
222 0 : sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
223 : /* user does not want any ancillary data */
224 0 : return (NULL);
225 : }
226 :
227 0 : len = 0;
228 0 : if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
229 0 : len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
230 : }
231 0 : seinfo = (struct sctp_extrcvinfo *)sinfo;
232 0 : if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
233 0 : (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
234 0 : provide_nxt = 1;
235 0 : len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
236 : } else {
237 0 : provide_nxt = 0;
238 : }
239 0 : if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
240 0 : if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
241 0 : use_extended = 1;
242 0 : len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
243 : } else {
244 0 : use_extended = 0;
245 0 : len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
246 : }
247 : } else {
248 0 : use_extended = 0;
249 : }
250 :
251 0 : ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
252 0 : if (ret == NULL) {
253 : /* No space */
254 0 : return (ret);
255 : }
256 0 : SCTP_BUF_LEN(ret) = 0;
257 :
258 : /* We need a CMSG header followed by the struct */
259 : #if defined(__Userspace_os_Windows)
260 : cmh = mtod(ret, WSACMSGHDR *);
261 : #else
262 0 : cmh = mtod(ret, struct cmsghdr *);
263 : #endif
264 : /*
265 : * Make sure that there is no un-initialized padding between
266 : * the cmsg header and cmsg data and after the cmsg data.
267 : */
268 0 : memset(cmh, 0, len);
269 0 : if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
270 0 : cmh->cmsg_level = IPPROTO_SCTP;
271 0 : cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
272 0 : cmh->cmsg_type = SCTP_RCVINFO;
273 0 : rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
274 0 : rcvinfo->rcv_sid = sinfo->sinfo_stream;
275 0 : rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
276 0 : rcvinfo->rcv_flags = sinfo->sinfo_flags;
277 0 : rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
278 0 : rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
279 0 : rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
280 0 : rcvinfo->rcv_context = sinfo->sinfo_context;
281 0 : rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
282 : #if defined(__Userspace_os_Windows)
283 : cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
284 : #else
285 0 : cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
286 : #endif
287 0 : SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
288 : }
289 0 : if (provide_nxt) {
290 0 : cmh->cmsg_level = IPPROTO_SCTP;
291 0 : cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
292 0 : cmh->cmsg_type = SCTP_NXTINFO;
293 0 : nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
294 0 : nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
295 0 : nxtinfo->nxt_flags = 0;
296 0 : if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
297 0 : nxtinfo->nxt_flags |= SCTP_UNORDERED;
298 : }
299 0 : if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
300 0 : nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
301 : }
302 0 : if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
303 0 : nxtinfo->nxt_flags |= SCTP_COMPLETE;
304 : }
305 0 : nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
306 0 : nxtinfo->nxt_length = seinfo->sreinfo_next_length;
307 0 : nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
308 : #if defined(__Userspace_os_Windows)
309 : cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
310 : #else
311 0 : cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
312 : #endif
313 0 : SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
314 : }
315 0 : if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
316 0 : cmh->cmsg_level = IPPROTO_SCTP;
317 0 : outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
318 0 : if (use_extended) {
319 0 : cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
320 0 : cmh->cmsg_type = SCTP_EXTRCV;
321 0 : memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
322 0 : SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
323 : } else {
324 0 : cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
325 0 : cmh->cmsg_type = SCTP_SNDRCV;
326 0 : *outinfo = *sinfo;
327 0 : SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
328 : }
329 : }
330 0 : return (ret);
331 : }
332 :
333 :
 : /*
 : * When the sctp_do_drain sysctl is enabled, move 'tsn' from the
 : * renegable mapping_array into the non-renegable nr_mapping_array so
 : * the TSN can no longer be revoked, and keep the highest-TSN trackers
 : * consistent. No-op for TSNs at or below the cumulative ack. The TSN
 : * is expected to already be marked present; under INVARIANTS a
 : * missing bit panics, otherwise it is only logged.
 : */
334 : static void
335 0 : sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
336 : {
337 : uint32_t gap, i, cumackp1;
338 0 : int fnd = 0;
339 :
340 0 : if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
341 0 : return;
342 : }
343 0 : cumackp1 = asoc->cumulative_tsn + 1;
344 0 : if (SCTP_TSN_GT(cumackp1, tsn)) {
345 : /* this tsn is behind the cum ack and thus we don't
346 : * need to worry about it being moved from one to the other.
347 : */
348 0 : return;
349 : }
350 0 : SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
351 0 : if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
352 0 : SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
353 0 : sctp_print_mapping_array(asoc);
354 : #ifdef INVARIANTS
355 : panic("Things are really messed up now!!");
356 : #endif
357 : }
 : /* Flip the bit: present in nr_mapping_array, absent in mapping_array. */
358 0 : SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
359 0 : SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
360 0 : if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
361 0 : asoc->highest_tsn_inside_nr_map = tsn;
362 : }
363 0 : if (tsn == asoc->highest_tsn_inside_map) {
364 : /* We must back down to see what the new highest is */
365 0 : for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
366 0 : SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
367 0 : if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
368 0 : asoc->highest_tsn_inside_map = i;
369 0 : fnd = 1;
370 0 : break;
371 : }
372 : }
 : /* Nothing left in the renegable map below base; mark it empty. */
373 0 : if (!fnd) {
374 0 : asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
375 : }
376 : }
377 : }
378 :
379 :
380 : /*
381 : * We are delivering currently from the reassembly queue. We must continue to
382 : * deliver until we either: 1) run out of space. 2) run out of sequential
383 : * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 : *
 : * Caller must hold the TCB lock (asserted below). If the socket is
 : * gone or the association is being freed, the whole reassembly queue
 : * is flushed instead (see the 'abandon' label).
384 : */
385 : static void
386 0 : sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
387 : {
388 : struct sctp_tmit_chunk *chk, *nchk;
389 : uint16_t nxt_todel;
390 : uint16_t stream_no;
391 0 : int end = 0;
392 : int cntDel;
393 : struct sctp_queued_to_read *control, *ctl, *nctl;
394 :
395 0 : if (stcb == NULL)
396 0 : return;
397 :
398 0 : cntDel = stream_no = 0;
399 0 : if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
400 0 : (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
401 0 : (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
402 : /* socket above is long gone or going.. */
403 : abandon:
 : /* Drop everything queued for reassembly; no reader remains. */
404 0 : asoc->fragmented_delivery_inprogress = 0;
405 0 : TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
406 0 : TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
407 0 : asoc->size_on_reasm_queue -= chk->send_size;
408 0 : sctp_ucount_decr(asoc->cnt_on_reasm_queue);
409 : /*
410 : * Lose the data pointer, since its in the socket
411 : * buffer
412 : */
413 0 : if (chk->data) {
414 0 : sctp_m_freem(chk->data);
415 0 : chk->data = NULL;
416 : }
417 : /* Now free the address and data */
418 0 : sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
419 : /*sa_ignore FREED_MEMORY*/
420 : }
421 0 : return;
422 : }
423 : SCTP_TCB_LOCK_ASSERT(stcb);
 : /* Deliver strictly sequential TSNs off the reassembly queue. */
424 0 : TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
425 0 : if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
426 : /* Can't deliver more :< */
427 0 : return;
428 : }
429 0 : stream_no = chk->rec.data.stream_number;
430 0 : nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
431 0 : if (nxt_todel != chk->rec.data.stream_seq &&
432 0 : (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
433 : /*
434 : * Not the next sequence to deliver in its stream OR
435 : * unordered
436 : */
437 0 : return;
438 : }
439 0 : if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
440 :
 : /* First fragment: create a new readq entry and remember
 : * it in control_pdapi for the following fragments. */
441 0 : control = sctp_build_readq_entry_chk(stcb, chk);
442 0 : if (control == NULL) {
443 : /* out of memory? */
444 0 : return;
445 : }
446 : /* save it off for our future deliveries */
447 0 : stcb->asoc.control_pdapi = control;
448 0 : if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
449 0 : end = 1;
450 : else
451 0 : end = 0;
452 0 : sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
453 0 : sctp_add_to_readq(stcb->sctp_ep,
454 0 : stcb, control, &stcb->sctp_socket->so_rcv, end,
455 : SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
456 0 : cntDel++;
457 : } else {
 : /* Middle/last fragment: append onto the in-progress
 : * control_pdapi entry. */
458 0 : if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
459 0 : end = 1;
460 : else
461 0 : end = 0;
462 0 : sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
463 0 : if (sctp_append_to_readq(stcb->sctp_ep, stcb,
464 : stcb->asoc.control_pdapi,
465 0 : chk->data, end, chk->rec.data.TSN_seq,
466 0 : &stcb->sctp_socket->so_rcv)) {
467 : /*
468 : * something is very wrong, either
469 : * control_pdapi is NULL, or the tail_mbuf
470 : * is corrupt, or there is a EOM already on
471 : * the mbuf chain.
472 : */
473 0 : if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
474 0 : goto abandon;
475 : } else {
476 : #ifdef INVARIANTS
477 : if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
478 : panic("This should not happen control_pdapi NULL?");
479 : }
480 : /* if we did not panic, it was a EOM */
481 : panic("Bad chunking ??");
482 : #else
483 0 : if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
484 0 : SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
485 : }
486 0 : SCTP_PRINTF("Bad chunking ??\n");
487 0 : SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
488 :
489 : #endif
490 0 : goto abandon;
491 : }
492 : }
493 0 : cntDel++;
494 : }
495 : /* pull it we did it */
496 0 : TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
497 0 : if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
498 0 : asoc->fragmented_delivery_inprogress = 0;
499 0 : if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
500 0 : asoc->strmin[stream_no].last_sequence_delivered++;
501 : }
502 0 : if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
503 0 : SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
504 : }
505 0 : } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
506 : /*
507 : * turn the flag back on since we just delivered
508 : * yet another one.
509 : */
510 0 : asoc->fragmented_delivery_inprogress = 1;
511 : }
 : /* Record what we just handed up (PD-API bookkeeping). */
512 0 : asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
513 0 : asoc->last_flags_delivered = chk->rec.data.rcv_flags;
514 0 : asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
515 0 : asoc->last_strm_no_delivered = chk->rec.data.stream_number;
516 :
517 0 : asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
518 0 : asoc->size_on_reasm_queue -= chk->send_size;
519 0 : sctp_ucount_decr(asoc->cnt_on_reasm_queue);
520 : /* free up the chk */
521 0 : chk->data = NULL;
522 0 : sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
523 :
524 0 : if (asoc->fragmented_delivery_inprogress == 0) {
525 : /*
526 : * Now lets see if we can deliver the next one on
527 : * the stream
528 : */
529 : struct sctp_stream_in *strm;
530 :
531 0 : strm = &asoc->strmin[stream_no];
532 0 : nxt_todel = strm->last_sequence_delivered + 1;
533 0 : TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
534 : /* Deliver more if we can. */
535 0 : if (nxt_todel == ctl->sinfo_ssn) {
536 0 : TAILQ_REMOVE(&strm->inqueue, ctl, next);
537 0 : asoc->size_on_all_streams -= ctl->length;
538 0 : sctp_ucount_decr(asoc->cnt_on_all_streams);
539 0 : strm->last_sequence_delivered++;
540 0 : sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
541 0 : sctp_add_to_readq(stcb->sctp_ep, stcb,
542 : ctl,
543 0 : &stcb->sctp_socket->so_rcv, 1,
544 : SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
545 : } else {
546 0 : break;
547 : }
548 0 : nxt_todel = strm->last_sequence_delivered + 1;
549 : }
 : /* Message complete; stop walking the reassembly queue. */
550 0 : break;
551 : }
552 : }
553 : }
554 :
555 : /*
556 : * Queue the chunk either right into the socket buffer if it is the next one
557 : * to go OR put it in the correct place in the delivery queue. If we do
558 : * append to the so_buf, keep doing so until we are out of order. One big
559 : * question still remains, what to do when the socket buffer is FULL??
 : *
 : * On a protocol violation (SSN at or behind the last delivered SSN,
 : * or TSN at or behind the cumulative ack) the association is aborted
 : * and *abort_flag is set to 1 so the caller can stop processing.
560 : */
561 : static void
562 0 : sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
563 : struct sctp_queued_to_read *control, int *abort_flag)
564 : {
565 : /*
566 : * FIX-ME maybe? What happens when the ssn wraps? If we are getting
567 : * all the data in one stream this could happen quite rapidly. One
568 : * could use the TSN to keep track of things, but this scheme breaks
569 : * down in the other type of stream useage that could occur. Send a
570 : * single msg to stream 0, send 4Billion messages to stream 1, now
571 : * send a message to stream 0. You have a situation where the TSN
572 : * has wrapped but not in the stream. Is this worth worrying about
573 : * or should we just change our queue sort at the bottom to be by
574 : * TSN.
575 : *
576 : * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
577 : * with TSN 1? If the peer is doing some sort of funky TSN/SSN
578 : * assignment this could happen... and I don't see how this would be
579 : * a violation. So for now I am undecided an will leave the sort by
580 : * SSN alone. Maybe a hybred approach is the answer
581 : *
582 : */
583 : struct sctp_stream_in *strm;
584 : struct sctp_queued_to_read *at;
585 : int queue_needed;
586 : uint16_t nxt_todel;
587 : struct mbuf *op_err;
588 : char msg[SCTP_DIAG_INFO_LEN];
589 :
 : /* Tentatively account the chunk on the stream queues; it is
 : * backed out again below if it is delivered immediately. */
590 0 : queue_needed = 1;
591 0 : asoc->size_on_all_streams += control->length;
592 0 : sctp_ucount_incr(asoc->cnt_on_all_streams);
593 0 : strm = &asoc->strmin[control->sinfo_stream];
594 0 : nxt_todel = strm->last_sequence_delivered + 1;
595 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
596 0 : sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
597 : }
598 0 : SCTPDBG(SCTP_DEBUG_INDATA1,
599 : "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
600 : (uint32_t) control->sinfo_stream,
601 : (uint32_t) strm->last_sequence_delivered,
602 : (uint32_t) nxt_todel);
603 0 : if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
604 : /* The incoming sseq is behind where we last delivered? */
605 0 : SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
606 : control->sinfo_ssn, strm->last_sequence_delivered);
607 : protocol_error:
608 : /*
609 : * throw it in the stream so it gets cleaned up in
610 : * association destruction
611 : */
612 0 : TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
613 0 : snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
614 0 : strm->last_sequence_delivered, control->sinfo_tsn,
615 0 : control->sinfo_stream, control->sinfo_ssn);
616 0 : op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
617 0 : stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_1;
618 0 : sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
619 0 : *abort_flag = 1;
620 0 : return;
621 :
622 : }
623 0 : if (nxt_todel == control->sinfo_ssn) {
624 : /* can be delivered right away? */
625 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
626 0 : sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
627 : }
628 : /* EY it wont be queued if it could be delivered directly*/
629 0 : queue_needed = 0;
630 0 : asoc->size_on_all_streams -= control->length;
631 0 : sctp_ucount_decr(asoc->cnt_on_all_streams);
632 0 : strm->last_sequence_delivered++;
633 :
634 0 : sctp_mark_non_revokable(asoc, control->sinfo_tsn);
635 0 : sctp_add_to_readq(stcb->sctp_ep, stcb,
636 : control,
637 0 : &stcb->sctp_socket->so_rcv, 1,
638 : SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
 : /* Drain any now-in-order entries queued behind this one. */
639 0 : TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
640 : /* all delivered */
641 0 : nxt_todel = strm->last_sequence_delivered + 1;
642 0 : if (nxt_todel == control->sinfo_ssn) {
643 0 : TAILQ_REMOVE(&strm->inqueue, control, next);
644 0 : asoc->size_on_all_streams -= control->length;
645 0 : sctp_ucount_decr(asoc->cnt_on_all_streams);
646 0 : strm->last_sequence_delivered++;
647 : /*
648 : * We ignore the return of deliver_data here
649 : * since we always can hold the chunk on the
650 : * d-queue. And we have a finite number that
651 : * can be delivered from the strq.
652 : */
653 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
654 0 : sctp_log_strm_del(control, NULL,
655 : SCTP_STR_LOG_FROM_IMMED_DEL);
656 : }
657 0 : sctp_mark_non_revokable(asoc, control->sinfo_tsn);
658 0 : sctp_add_to_readq(stcb->sctp_ep, stcb,
659 : control,
660 0 : &stcb->sctp_socket->so_rcv, 1,
661 : SCTP_READ_LOCK_NOT_HELD,
662 : SCTP_SO_NOT_LOCKED);
663 0 : continue;
664 : }
665 0 : break;
666 : }
667 : }
668 0 : if (queue_needed) {
669 : /*
670 : * Ok, we did not deliver this guy, find the correct place
671 : * to put it on the queue.
672 : */
673 0 : if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
674 : goto protocol_error;
675 : }
676 0 : if (TAILQ_EMPTY(&strm->inqueue)) {
677 : /* Empty queue */
678 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
679 0 : sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
680 : }
681 0 : TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
682 : } else {
 : /* Keep the queue sorted by ascending SSN. */
683 0 : TAILQ_FOREACH(at, &strm->inqueue, next) {
684 0 : if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
685 : /*
686 : * one in queue is bigger than the
687 : * new one, insert before this one
688 : */
689 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
690 0 : sctp_log_strm_del(control, at,
691 : SCTP_STR_LOG_FROM_INSERT_MD);
692 : }
693 0 : TAILQ_INSERT_BEFORE(at, control, next);
694 0 : break;
695 0 : } else if (at->sinfo_ssn == control->sinfo_ssn) {
696 : /*
697 : * Gak, He sent me a duplicate str
698 : * seq number
699 : */
700 : /*
701 : * foo bar, I guess I will just free
702 : * this new guy, should we abort
703 : * too? FIX ME MAYBE? Or it COULD be
704 : * that the SSN's have wrapped.
705 : * Maybe I should compare to TSN
706 : * somehow... sigh for now just blow
707 : * away the chunk!
708 : */
709 :
710 0 : if (control->data)
711 0 : sctp_m_freem(control->data);
712 0 : control->data = NULL;
713 0 : asoc->size_on_all_streams -= control->length;
714 0 : sctp_ucount_decr(asoc->cnt_on_all_streams);
715 0 : if (control->whoFrom) {
716 0 : sctp_free_remote_addr(control->whoFrom);
717 0 : control->whoFrom = NULL;
718 : }
719 0 : sctp_free_a_readq(stcb, control);
720 0 : return;
721 : } else {
722 0 : if (TAILQ_NEXT(at, next) == NULL) {
723 : /*
724 : * We are at the end, insert
725 : * it after this one
726 : */
727 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
728 0 : sctp_log_strm_del(control, at,
729 : SCTP_STR_LOG_FROM_INSERT_TL);
730 : }
731 0 : TAILQ_INSERT_AFTER(&strm->inqueue,
732 : at, control, next);
733 0 : break;
734 : }
735 : }
736 : }
737 : }
738 : }
739 : }
740 :
741 : /*
742 : * Returns two things: You get the total size of the deliverable parts of the
743 : * first fragmented message on the reassembly queue. And you get a 1 back if
744 : * all of the message is ready or a 0 back if the message is still incomplete
 : *
 : * "Deliverable" means the run of strictly consecutive TSNs starting
 : * at the head of the queue; *t_size accumulates their send_size until
 : * a gap or the LAST_FRAG chunk is reached.
745 : */
746 : static int
747 0 : sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
748 : {
749 : struct sctp_tmit_chunk *chk;
750 : uint32_t tsn;
751 :
752 0 : *t_size = 0;
753 0 : chk = TAILQ_FIRST(&asoc->reasmqueue);
754 0 : if (chk == NULL) {
755 : /* nothing on the queue */
756 0 : return (0);
757 : }
758 0 : if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
759 : /* Not a first on the queue */
760 0 : return (0);
761 : }
762 0 : tsn = chk->rec.data.TSN_seq;
 : /* Walk consecutive TSNs; stop at the first gap or LAST_FRAG. */
763 0 : TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
764 0 : if (tsn != chk->rec.data.TSN_seq) {
765 0 : return (0);
766 : }
767 0 : *t_size += chk->send_size;
768 0 : if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
769 0 : return (1);
770 : }
771 0 : tsn++;
772 : }
773 0 : return (0);
774 : }
775 :
 : /*
 : * Decide whether (and how) to deliver from the reassembly queue.
 : * If no fragmented delivery is in progress, start one when the head
 : * of the queue is a FIRST_FRAG that is in order (or unordered) and
 : * either the whole message is present or the deliverable part has
 : * reached the partial-delivery point. If a delivery is already in
 : * progress, keep servicing it and loop back (doit_again) once it
 : * completes, in case another message became deliverable.
 : */
776 : static void
777 0 : sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
778 : {
779 : struct sctp_tmit_chunk *chk;
780 : uint16_t nxt_todel;
781 : uint32_t tsize, pd_point;
782 :
783 : doit_again:
784 0 : chk = TAILQ_FIRST(&asoc->reasmqueue);
785 0 : if (chk == NULL) {
786 : /* Huh? */
 : /* Empty queue: re-zero the counters to keep them consistent. */
787 0 : asoc->size_on_reasm_queue = 0;
788 0 : asoc->cnt_on_reasm_queue = 0;
789 0 : return;
790 : }
791 0 : if (asoc->fragmented_delivery_inprogress == 0) {
792 0 : nxt_todel =
793 0 : asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
794 0 : if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
795 0 : (nxt_todel == chk->rec.data.stream_seq ||
796 0 : (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
797 : /*
798 : * Yep the first one is here and its ok to deliver
799 : * but should we?
800 : */
 : /* pd_point: the smaller of a fraction of the receive
 : * buffer and the endpoint's configured PD point. */
801 0 : if (stcb->sctp_socket) {
802 0 : pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
803 : stcb->sctp_ep->partial_delivery_point);
804 : } else {
805 0 : pd_point = stcb->sctp_ep->partial_delivery_point;
806 : }
807 0 : if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
808 : /*
809 : * Yes, we setup to start reception, by
810 : * backing down the TSN just in case we
811 : * can't deliver. If we
812 : */
813 0 : asoc->fragmented_delivery_inprogress = 1;
814 0 : asoc->tsn_last_delivered =
815 0 : chk->rec.data.TSN_seq - 1;
816 0 : asoc->str_of_pdapi =
817 0 : chk->rec.data.stream_number;
818 0 : asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
819 0 : asoc->pdapi_ppid = chk->rec.data.payloadtype;
820 0 : asoc->fragment_flags = chk->rec.data.rcv_flags;
821 0 : sctp_service_reassembly(stcb, asoc);
822 : }
823 : }
824 : } else {
825 : /* Service re-assembly will deliver stream data queued
826 : * at the end of fragmented delivery.. but it wont know
827 : * to go back and call itself again... we do that here
828 : * with the got doit_again
829 : */
830 0 : sctp_service_reassembly(stcb, asoc);
831 0 : if (asoc->fragmented_delivery_inprogress == 0) {
832 : /* finished our Fragmented delivery, could be
833 : * more waiting?
834 : */
835 0 : goto doit_again;
836 : }
837 : }
838 : }
839 :
840 : /*
841 : * Dump onto the re-assembly queue, in its proper place. After dumping on the
842 : * queue, see if anthing can be delivered. If so pull it off (or as much as
843 : * we can. If we run out of space then we must dump what we can and set the
844 : * appropriate flag to say we queued what we could.
845 : */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	/* cum_ackp1 is the TSN we would deliver next if nothing were missing. */
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			/*
			 * New fragment is exactly the next expected TSN, so
			 * its B-bit and stream identifiers must agree with
			 * the current partial-delivery state; any mismatch
			 * below is a protocol violation and aborts the
			 * association (setting *abort_flag for the caller).
			 */
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				         "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				         chk->rec.data.TSN_seq,
				         chk->rec.data.stream_number,
				         chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				         "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				         chk->rec.data.TSN_seq,
				         chk->rec.data.stream_number,
				         chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					        chk->rec.data.stream_number,
					        asoc->str_of_pdapi);
					snprintf(msg, sizeof(msg),
					         "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         asoc->str_of_pdapi,
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/*
					 * Got to be the right STR Seq
					 * (SSN only matters for ordered data).
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					        chk->rec.data.stream_seq,
					        asoc->ssn_of_pdapi);
					snprintf(msg, sizeof(msg),
					         "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         asoc->ssn_of_pdapi,
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place (queue is kept in ascending TSN order) */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, He sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/*
	 * Now the audits: if the new chunk sits TSN-adjacent to its queue
	 * neighbours, cross-check fragment bits, SID, SSN and U-bit for
	 * consistency. Any mismatch is a protocol violation -> abort.
	 */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					snprintf(msg, sizeof(msg),
					         "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
					        chk->rec.data.stream_number,
					        prev->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					         "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         prev->rec.data.stream_number,
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					         "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					         (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					         chk->rec.data.TSN_seq,
					         (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					/*
					 * NOTE(review): SCTP_LOC_7 is also used by
					 * the SID check above -- looks like a
					 * copy-paste; TODO confirm whether a
					 * distinct location code was intended.
					 */
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					        chk->rec.data.stream_seq,
					        prev->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					         "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         prev->rec.data.stream_seq,
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					snprintf(msg, sizeof(msg),
					         "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					snprintf(msg, sizeof(msg),
					         "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					snprintf(msg, sizeof(msg),
					         "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					        chk->rec.data.stream_number,
					        next->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					         "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         next->rec.data.stream_number,
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					         "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					         (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					         chk->rec.data.TSN_seq,
					         (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					/*
					 * NOTE(review): SCTP_LOC_12 is also used by
					 * the SID check above -- looks like a
					 * copy-paste; TODO confirm whether a
					 * distinct location code was intended.
					 */
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					        chk->rec.data.stream_seq,
					        next->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					         "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					         next->rec.data.stream_seq,
					         chk->rec.data.TSN_seq,
					         chk->rec.data.stream_number,
					         chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
1214 :
1215 : /*
1216 : * This is an unfortunate routine. It checks to make sure a evil guy is not
1217 : * stuffing us full of bad packet fragments. A broken peer could also do this
1218 : * but this is doubtful. It is to bad I must worry about evil crackers sigh
1219 : * :< more cycles.
1220 : */
1221 : static int
1222 0 : sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1223 : uint32_t TSN_seq)
1224 : {
1225 : struct sctp_tmit_chunk *at;
1226 : uint32_t tsn_est;
1227 :
1228 0 : TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1229 0 : if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1230 : /* is it one bigger? */
1231 0 : tsn_est = at->rec.data.TSN_seq + 1;
1232 0 : if (tsn_est == TSN_seq) {
1233 : /* yep. It better be a last then */
1234 0 : if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1235 : SCTP_DATA_LAST_FRAG) {
1236 : /*
1237 : * Ok this guy belongs next to a guy
1238 : * that is NOT last, it should be a
1239 : * middle/last, not a complete
1240 : * chunk.
1241 : */
1242 0 : return (1);
1243 : } else {
1244 : /*
1245 : * This guy is ok since its a LAST
1246 : * and the new chunk is a fully
1247 : * self- contained one.
1248 : */
1249 0 : return (0);
1250 : }
1251 : }
1252 0 : } else if (TSN_seq == at->rec.data.TSN_seq) {
1253 : /* Software error since I have a dup? */
1254 0 : return (1);
1255 : } else {
1256 : /*
1257 : * Ok, 'at' is larger than new chunk but does it
1258 : * need to be right before it.
1259 : */
1260 0 : tsn_est = TSN_seq + 1;
1261 0 : if (tsn_est == at->rec.data.TSN_seq) {
1262 : /* Yep, It better be a first */
1263 0 : if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1264 : SCTP_DATA_FIRST_FRAG) {
1265 0 : return (1);
1266 : } else {
1267 0 : return (0);
1268 : }
1269 : }
1270 : }
1271 : }
1272 0 : return (0);
1273 : }
1274 :
1275 : static int
1276 0 : sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1277 : struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1278 : struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1279 : int *break_flag, int last_chunk)
1280 : {
1281 : /* Process a data chunk */
1282 : /* struct sctp_tmit_chunk *chk; */
1283 : struct sctp_tmit_chunk *chk;
1284 : uint32_t tsn, gap;
1285 : struct mbuf *dmbuf;
1286 : int the_len;
1287 0 : int need_reasm_check = 0;
1288 : uint16_t strmno, strmseq;
1289 : struct mbuf *op_err;
1290 : char msg[SCTP_DIAG_INFO_LEN];
1291 : struct sctp_queued_to_read *control;
1292 : int ordered;
1293 : uint32_t protocol_id;
1294 : uint8_t chunk_flags;
1295 : struct sctp_stream_reset_list *liste;
1296 :
1297 0 : chk = NULL;
1298 0 : tsn = ntohl(ch->dp.tsn);
1299 0 : chunk_flags = ch->ch.chunk_flags;
1300 0 : if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1301 0 : asoc->send_sack = 1;
1302 : }
1303 0 : protocol_id = ch->dp.protocol_id;
1304 0 : ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1305 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1306 0 : sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1307 : }
1308 0 : if (stcb == NULL) {
1309 0 : return (0);
1310 : }
1311 : SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1312 0 : if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1313 : /* It is a duplicate */
1314 0 : SCTP_STAT_INCR(sctps_recvdupdata);
1315 0 : if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1316 : /* Record a dup for the next outbound sack */
1317 0 : asoc->dup_tsns[asoc->numduptsns] = tsn;
1318 0 : asoc->numduptsns++;
1319 : }
1320 0 : asoc->send_sack = 1;
1321 0 : return (0);
1322 : }
1323 : /* Calculate the number of TSN's between the base and this TSN */
1324 0 : SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1325 0 : if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1326 : /* Can't hold the bit in the mapping at max array, toss it */
1327 0 : return (0);
1328 : }
1329 0 : if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1330 : SCTP_TCB_LOCK_ASSERT(stcb);
1331 0 : if (sctp_expand_mapping_array(asoc, gap)) {
1332 : /* Can't expand, drop it */
1333 0 : return (0);
1334 : }
1335 : }
1336 0 : if (SCTP_TSN_GT(tsn, *high_tsn)) {
1337 0 : *high_tsn = tsn;
1338 : }
1339 : /* See if we have received this one already */
1340 0 : if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1341 0 : SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1342 0 : SCTP_STAT_INCR(sctps_recvdupdata);
1343 0 : if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1344 : /* Record a dup for the next outbound sack */
1345 0 : asoc->dup_tsns[asoc->numduptsns] = tsn;
1346 0 : asoc->numduptsns++;
1347 : }
1348 0 : asoc->send_sack = 1;
1349 0 : return (0);
1350 : }
1351 : /*
1352 : * Check to see about the GONE flag, duplicates would cause a sack
1353 : * to be sent up above
1354 : */
1355 0 : if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1356 0 : (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1357 0 : (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1358 : /*
1359 : * wait a minute, this guy is gone, there is no longer a
1360 : * receiver. Send peer an ABORT!
1361 : */
1362 0 : op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1363 0 : sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1364 0 : *abort_flag = 1;
1365 0 : return (0);
1366 : }
1367 : /*
1368 : * Now before going further we see if there is room. If NOT then we
1369 : * MAY let one through only IF this TSN is the one we are waiting
1370 : * for on a partial delivery API.
1371 : */
1372 :
1373 : /* now do the tests */
1374 0 : if (((asoc->cnt_on_all_streams +
1375 0 : asoc->cnt_on_reasm_queue +
1376 0 : asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1377 0 : (((int)asoc->my_rwnd) <= 0)) {
1378 : /*
1379 : * When we have NO room in the rwnd we check to make sure
1380 : * the reader is doing its job...
1381 : */
1382 0 : if (stcb->sctp_socket->so_rcv.sb_cc) {
1383 : /* some to read, wake-up */
1384 : #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1385 : struct socket *so;
1386 :
1387 : so = SCTP_INP_SO(stcb->sctp_ep);
1388 : atomic_add_int(&stcb->asoc.refcnt, 1);
1389 : SCTP_TCB_UNLOCK(stcb);
1390 : SCTP_SOCKET_LOCK(so, 1);
1391 : SCTP_TCB_LOCK(stcb);
1392 : atomic_subtract_int(&stcb->asoc.refcnt, 1);
1393 : if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1394 : /* assoc was freed while we were unlocked */
1395 : SCTP_SOCKET_UNLOCK(so, 1);
1396 : return (0);
1397 : }
1398 : #endif
1399 0 : sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1400 : #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1401 : SCTP_SOCKET_UNLOCK(so, 1);
1402 : #endif
1403 : }
1404 : /* now is it in the mapping array of what we have accepted? */
1405 0 : if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1406 0 : SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1407 : /* Nope not in the valid range dump it */
1408 0 : sctp_set_rwnd(stcb, asoc);
1409 0 : if ((asoc->cnt_on_all_streams +
1410 0 : asoc->cnt_on_reasm_queue +
1411 0 : asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1412 0 : SCTP_STAT_INCR(sctps_datadropchklmt);
1413 : } else {
1414 0 : SCTP_STAT_INCR(sctps_datadroprwnd);
1415 : }
1416 0 : *break_flag = 1;
1417 0 : return (0);
1418 : }
1419 : }
1420 0 : strmno = ntohs(ch->dp.stream_id);
1421 0 : if (strmno >= asoc->streamincnt) {
1422 : struct sctp_paramhdr *phdr;
1423 : struct mbuf *mb;
1424 :
1425 0 : mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1426 : 0, M_NOWAIT, 1, MT_DATA);
1427 0 : if (mb != NULL) {
1428 : /* add some space up front so prepend will work well */
1429 0 : SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1430 0 : phdr = mtod(mb, struct sctp_paramhdr *);
1431 : /*
1432 : * Error causes are just param's and this one has
1433 : * two back to back phdr, one with the error type
1434 : * and size, the other with the streamid and a rsvd
1435 : */
1436 0 : SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1437 0 : phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1438 0 : phdr->param_length =
1439 0 : htons(sizeof(struct sctp_paramhdr) * 2);
1440 0 : phdr++;
1441 : /* We insert the stream in the type field */
1442 0 : phdr->param_type = ch->dp.stream_id;
1443 : /* And set the length to 0 for the rsvd field */
1444 0 : phdr->param_length = 0;
1445 0 : sctp_queue_op_err(stcb, mb);
1446 : }
1447 0 : SCTP_STAT_INCR(sctps_badsid);
1448 : SCTP_TCB_LOCK_ASSERT(stcb);
1449 0 : SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1450 0 : if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1451 0 : asoc->highest_tsn_inside_nr_map = tsn;
1452 : }
1453 0 : if (tsn == (asoc->cumulative_tsn + 1)) {
1454 : /* Update cum-ack */
1455 0 : asoc->cumulative_tsn = tsn;
1456 : }
1457 0 : return (0);
1458 : }
1459 : /*
1460 : * Before we continue lets validate that we are not being fooled by
1461 : * an evil attacker. We can only have 4k chunks based on our TSN
1462 : * spread allowed by the mapping array 512 * 8 bits, so there is no
1463 : * way our stream sequence numbers could have wrapped. We of course
1464 : * only validate the FIRST fragment so the bit must be set.
1465 : */
1466 0 : strmseq = ntohs(ch->dp.stream_sequence);
1467 : #ifdef SCTP_ASOCLOG_OF_TSNS
1468 : SCTP_TCB_LOCK_ASSERT(stcb);
1469 : if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1470 : asoc->tsn_in_at = 0;
1471 : asoc->tsn_in_wrapped = 1;
1472 : }
1473 : asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1474 : asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1475 : asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1476 : asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1477 : asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1478 : asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1479 : asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1480 : asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1481 : asoc->tsn_in_at++;
1482 : #endif
1483 0 : if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1484 0 : (TAILQ_EMPTY(&asoc->resetHead)) &&
1485 0 : (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1486 0 : SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1487 : /* The incoming sseq is behind where we last delivered? */
1488 0 : SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1489 : strmseq, asoc->strmin[strmno].last_sequence_delivered);
1490 :
1491 0 : snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1492 0 : asoc->strmin[strmno].last_sequence_delivered,
1493 : tsn, strmno, strmseq);
1494 0 : op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1495 0 : stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_14;
1496 0 : sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1497 0 : *abort_flag = 1;
1498 0 : return (0);
1499 : }
1500 : /************************************
1501 : * From here down we may find ch-> invalid
1502 : * so its a good idea NOT to use it.
1503 : *************************************/
1504 :
1505 0 : the_len = (chk_length - sizeof(struct sctp_data_chunk));
1506 0 : if (last_chunk == 0) {
1507 0 : dmbuf = SCTP_M_COPYM(*m,
1508 0 : (offset + sizeof(struct sctp_data_chunk)),
1509 : the_len, M_NOWAIT);
1510 : #ifdef SCTP_MBUF_LOGGING
1511 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1512 : sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1513 : }
1514 : #endif
1515 : } else {
1516 : /* We can steal the last chunk */
1517 : int l_len;
1518 0 : dmbuf = *m;
1519 : /* lop off the top part */
1520 0 : m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1521 0 : if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1522 0 : l_len = SCTP_BUF_LEN(dmbuf);
1523 : } else {
1524 : /* need to count up the size hopefully
1525 : * does not hit this to often :-0
1526 : */
1527 : struct mbuf *lat;
1528 :
1529 0 : l_len = 0;
1530 0 : for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1531 0 : l_len += SCTP_BUF_LEN(lat);
1532 : }
1533 : }
1534 0 : if (l_len > the_len) {
1535 : /* Trim the end round bytes off too */
1536 0 : m_adj(dmbuf, -(l_len - the_len));
1537 : }
1538 : }
1539 0 : if (dmbuf == NULL) {
1540 0 : SCTP_STAT_INCR(sctps_nomem);
1541 0 : return (0);
1542 : }
1543 0 : if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1544 0 : asoc->fragmented_delivery_inprogress == 0 &&
1545 0 : TAILQ_EMPTY(&asoc->resetHead) &&
1546 0 : ((ordered == 0) ||
1547 0 : ((uint16_t)(asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1548 0 : TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1549 : /* Candidate for express delivery */
1550 : /*
1551 : * Its not fragmented, No PD-API is up, Nothing in the
1552 : * delivery queue, Its un-ordered OR ordered and the next to
1553 : * deliver AND nothing else is stuck on the stream queue,
1554 : * And there is room for it in the socket buffer. Lets just
1555 : * stuff it up the buffer....
1556 : */
1557 :
1558 : /* It would be nice to avoid this copy if we could :< */
1559 0 : sctp_alloc_a_readq(stcb, control);
1560 0 : sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1561 : protocol_id,
1562 : strmno, strmseq,
1563 : chunk_flags,
1564 : dmbuf);
1565 0 : if (control == NULL) {
1566 0 : goto failed_express_del;
1567 : }
1568 0 : SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1569 0 : if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1570 0 : asoc->highest_tsn_inside_nr_map = tsn;
1571 : }
1572 0 : sctp_add_to_readq(stcb->sctp_ep, stcb,
1573 0 : control, &stcb->sctp_socket->so_rcv,
1574 : 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1575 :
1576 0 : if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1577 : /* for ordered, bump what we delivered */
1578 0 : asoc->strmin[strmno].last_sequence_delivered++;
1579 : }
1580 0 : SCTP_STAT_INCR(sctps_recvexpress);
1581 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1582 0 : sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1583 : SCTP_STR_LOG_FROM_EXPRS_DEL);
1584 : }
1585 0 : control = NULL;
1586 :
1587 0 : goto finish_express_del;
1588 : }
1589 : failed_express_del:
1590 : /* If we reach here this is a new chunk */
1591 0 : chk = NULL;
1592 0 : control = NULL;
1593 : /* Express for fragmented delivery? */
1594 0 : if ((asoc->fragmented_delivery_inprogress) &&
1595 0 : (stcb->asoc.control_pdapi) &&
1596 0 : (asoc->str_of_pdapi == strmno) &&
1597 0 : (asoc->ssn_of_pdapi == strmseq)
1598 : ) {
1599 0 : control = stcb->asoc.control_pdapi;
1600 0 : if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1601 : /* Can't be another first? */
1602 0 : goto failed_pdapi_express_del;
1603 : }
1604 0 : if (tsn == (control->sinfo_tsn + 1)) {
1605 : /* Yep, we can add it on */
1606 0 : int end = 0;
1607 :
1608 0 : if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1609 0 : end = 1;
1610 : }
1611 0 : if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1612 : tsn,
1613 0 : &stcb->sctp_socket->so_rcv)) {
1614 0 : SCTP_PRINTF("Append fails end:%d\n", end);
1615 0 : goto failed_pdapi_express_del;
1616 : }
1617 :
1618 0 : SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1619 0 : if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1620 0 : asoc->highest_tsn_inside_nr_map = tsn;
1621 : }
1622 0 : SCTP_STAT_INCR(sctps_recvexpressm);
1623 0 : asoc->tsn_last_delivered = tsn;
1624 0 : asoc->fragment_flags = chunk_flags;
1625 0 : asoc->tsn_of_pdapi_last_delivered = tsn;
1626 0 : asoc->last_flags_delivered = chunk_flags;
1627 0 : asoc->last_strm_seq_delivered = strmseq;
1628 0 : asoc->last_strm_no_delivered = strmno;
1629 0 : if (end) {
1630 : /* clean up the flags and such */
1631 0 : asoc->fragmented_delivery_inprogress = 0;
1632 0 : if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1633 0 : asoc->strmin[strmno].last_sequence_delivered++;
1634 : }
1635 0 : stcb->asoc.control_pdapi = NULL;
1636 0 : if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1637 : /* There could be another message ready */
1638 0 : need_reasm_check = 1;
1639 : }
1640 : }
1641 0 : control = NULL;
1642 0 : goto finish_express_del;
1643 : }
1644 : }
1645 : failed_pdapi_express_del:
1646 0 : control = NULL;
1647 0 : if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1648 0 : SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1649 0 : if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1650 0 : asoc->highest_tsn_inside_nr_map = tsn;
1651 : }
1652 : } else {
1653 0 : SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1654 0 : if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1655 0 : asoc->highest_tsn_inside_map = tsn;
1656 : }
1657 : }
1658 0 : if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1659 0 : sctp_alloc_a_chunk(stcb, chk);
1660 0 : if (chk == NULL) {
1661 : /* No memory so we drop the chunk */
1662 0 : SCTP_STAT_INCR(sctps_nomem);
1663 0 : if (last_chunk == 0) {
1664 : /* we copied it, free the copy */
1665 0 : sctp_m_freem(dmbuf);
1666 : }
1667 0 : return (0);
1668 : }
1669 0 : chk->rec.data.TSN_seq = tsn;
1670 0 : chk->no_fr_allowed = 0;
1671 0 : chk->rec.data.stream_seq = strmseq;
1672 0 : chk->rec.data.stream_number = strmno;
1673 0 : chk->rec.data.payloadtype = protocol_id;
1674 0 : chk->rec.data.context = stcb->asoc.context;
1675 0 : chk->rec.data.doing_fast_retransmit = 0;
1676 0 : chk->rec.data.rcv_flags = chunk_flags;
1677 0 : chk->asoc = asoc;
1678 0 : chk->send_size = the_len;
1679 0 : chk->whoTo = net;
1680 0 : atomic_add_int(&net->ref_count, 1);
1681 0 : chk->data = dmbuf;
1682 : } else {
1683 0 : sctp_alloc_a_readq(stcb, control);
1684 0 : sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1685 : protocol_id,
1686 : strmno, strmseq,
1687 : chunk_flags,
1688 : dmbuf);
1689 0 : if (control == NULL) {
1690 : /* No memory so we drop the chunk */
1691 0 : SCTP_STAT_INCR(sctps_nomem);
1692 0 : if (last_chunk == 0) {
1693 : /* we copied it, free the copy */
1694 0 : sctp_m_freem(dmbuf);
1695 : }
1696 0 : return (0);
1697 : }
1698 0 : control->length = the_len;
1699 : }
1700 :
1701 : /* Mark it as received */
1702 : /* Now queue it where it belongs */
1703 0 : if (control != NULL) {
1704 : /* First a sanity check */
1705 0 : if (asoc->fragmented_delivery_inprogress) {
1706 : /*
1707 : * Ok, we have a fragmented delivery in progress if
1708 : * this chunk is next to deliver OR belongs in our
1709 : * view to the reassembly, the peer is evil or
1710 : * broken.
1711 : */
1712 : uint32_t estimate_tsn;
1713 :
1714 0 : estimate_tsn = asoc->tsn_last_delivered + 1;
1715 0 : if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1716 0 : (estimate_tsn == control->sinfo_tsn)) {
1717 : /* Evil/Broke peer */
1718 0 : sctp_m_freem(control->data);
1719 0 : control->data = NULL;
1720 0 : if (control->whoFrom) {
1721 0 : sctp_free_remote_addr(control->whoFrom);
1722 0 : control->whoFrom = NULL;
1723 : }
1724 0 : sctp_free_a_readq(stcb, control);
1725 0 : snprintf(msg, sizeof(msg), "Reas. queue emtpy, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1726 : tsn, strmno, strmseq);
1727 0 : op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1728 0 : stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_15;
1729 0 : sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1730 0 : *abort_flag = 1;
1731 0 : if (last_chunk) {
1732 0 : *m = NULL;
1733 : }
1734 0 : return (0);
1735 : } else {
1736 0 : if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1737 0 : sctp_m_freem(control->data);
1738 0 : control->data = NULL;
1739 0 : if (control->whoFrom) {
1740 0 : sctp_free_remote_addr(control->whoFrom);
1741 0 : control->whoFrom = NULL;
1742 : }
1743 0 : sctp_free_a_readq(stcb, control);
1744 0 : snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1745 : tsn, strmno, strmseq);
1746 0 : op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1747 0 : stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_16;
1748 0 : sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1749 0 : *abort_flag = 1;
1750 0 : if (last_chunk) {
1751 0 : *m = NULL;
1752 : }
1753 0 : return (0);
1754 : }
1755 : }
1756 : } else {
1757 : /* No PDAPI running */
1758 0 : if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1759 : /*
1760 : * Reassembly queue is NOT empty validate
1761 : * that this tsn does not need to be in
1762 : * reasembly queue. If it does then our peer
1763 : * is broken or evil.
1764 : */
1765 0 : if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1766 0 : sctp_m_freem(control->data);
1767 0 : control->data = NULL;
1768 0 : if (control->whoFrom) {
1769 0 : sctp_free_remote_addr(control->whoFrom);
1770 0 : control->whoFrom = NULL;
1771 : }
1772 0 : sctp_free_a_readq(stcb, control);
1773 0 : snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1774 : tsn, strmno, strmseq);
1775 0 : op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1776 0 : stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_17;
1777 0 : sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1778 0 : *abort_flag = 1;
1779 0 : if (last_chunk) {
1780 0 : *m = NULL;
1781 : }
1782 0 : return (0);
1783 : }
1784 : }
1785 : }
1786 : /* ok, if we reach here we have passed the sanity checks */
1787 0 : if (chunk_flags & SCTP_DATA_UNORDERED) {
1788 : /* queue directly into socket buffer */
1789 0 : sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1790 0 : sctp_add_to_readq(stcb->sctp_ep, stcb,
1791 : control,
1792 0 : &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1793 : } else {
1794 : /*
1795 : * Special check for when streams are resetting. We
1796 : * could be more smart about this and check the
1797 : * actual stream to see if it is not being reset..
1798 : * that way we would not create a HOLB when amongst
1799 : * streams being reset and those not being reset.
1800 : *
1801 : * We take complete messages that have a stream reset
1802 : * intervening (aka the TSN is after where our
1803 : * cum-ack needs to be) off and put them on a
1804 : * pending_reply_queue. The reassembly ones we do
1805 : * not have to worry about since they are all sorted
1806 : * and proceessed by TSN order. It is only the
1807 : * singletons I must worry about.
1808 : */
1809 0 : if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1810 0 : SCTP_TSN_GT(tsn, liste->tsn)) {
1811 : /*
1812 : * yep its past where we need to reset... go
1813 : * ahead and queue it.
1814 : */
1815 0 : if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1816 : /* first one on */
1817 0 : TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1818 : } else {
1819 : struct sctp_queued_to_read *ctlOn, *nctlOn;
1820 0 : unsigned char inserted = 0;
1821 :
1822 0 : TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1823 0 : if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1824 0 : continue;
1825 : } else {
1826 : /* found it */
1827 0 : TAILQ_INSERT_BEFORE(ctlOn, control, next);
1828 0 : inserted = 1;
1829 0 : break;
1830 : }
1831 : }
1832 0 : if (inserted == 0) {
1833 : /*
1834 : * must be put at end, use
1835 : * prevP (all setup from
1836 : * loop) to setup nextP.
1837 : */
1838 0 : TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1839 : }
1840 : }
1841 : } else {
1842 0 : sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1843 0 : if (*abort_flag) {
1844 0 : if (last_chunk) {
1845 0 : *m = NULL;
1846 : }
1847 0 : return (0);
1848 : }
1849 : }
1850 : }
1851 : } else {
1852 : /* Into the re-assembly queue */
1853 0 : sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1854 0 : if (*abort_flag) {
1855 : /*
1856 : * the assoc is now gone and chk was put onto the
1857 : * reasm queue, which has all been freed.
1858 : */
1859 0 : if (last_chunk) {
1860 0 : *m = NULL;
1861 : }
1862 0 : return (0);
1863 : }
1864 : }
1865 : finish_express_del:
1866 0 : if (tsn == (asoc->cumulative_tsn + 1)) {
1867 : /* Update cum-ack */
1868 0 : asoc->cumulative_tsn = tsn;
1869 : }
1870 0 : if (last_chunk) {
1871 0 : *m = NULL;
1872 : }
1873 0 : if (ordered) {
1874 0 : SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1875 : } else {
1876 0 : SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1877 : }
1878 0 : SCTP_STAT_INCR(sctps_recvdata);
1879 : /* Set it present please */
1880 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1881 0 : sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1882 : }
1883 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1884 0 : sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1885 : asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1886 : }
1887 : /* check the special flag for stream resets */
1888 0 : if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1889 0 : SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1890 : /*
1891 : * we have finished working through the backlogged TSN's now
1892 : * time to reset streams. 1: call reset function. 2: free
1893 : * pending_reply space 3: distribute any chunks in
1894 : * pending_reply_queue.
1895 : */
1896 : struct sctp_queued_to_read *ctl, *nctl;
1897 :
1898 0 : sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1899 0 : TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1900 0 : SCTP_FREE(liste, SCTP_M_STRESET);
1901 : /*sa_ignore FREED_MEMORY*/
1902 0 : liste = TAILQ_FIRST(&asoc->resetHead);
1903 0 : if (TAILQ_EMPTY(&asoc->resetHead)) {
1904 : /* All can be removed */
1905 0 : TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1906 0 : TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1907 0 : sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1908 0 : if (*abort_flag) {
1909 0 : return (0);
1910 : }
1911 : }
1912 : } else {
1913 0 : TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1914 0 : if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1915 : break;
1916 : }
1917 : /*
1918 : * if ctl->sinfo_tsn is <= liste->tsn we can
1919 : * process it which is the NOT of
1920 : * ctl->sinfo_tsn > liste->tsn
1921 : */
1922 0 : TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1923 0 : sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1924 0 : if (*abort_flag) {
1925 0 : return (0);
1926 : }
1927 : }
1928 : }
1929 : /*
1930 : * Now service re-assembly to pick up anything that has been
1931 : * held on reassembly queue?
1932 : */
1933 0 : sctp_deliver_reasm_check(stcb, asoc);
1934 0 : need_reasm_check = 0;
1935 : }
1936 :
1937 0 : if (need_reasm_check) {
1938 : /* Another one waits ? */
1939 0 : sctp_deliver_reasm_check(stcb, asoc);
1940 : }
1941 0 : return (1);
1942 : }
1943 :
/*
 * Lookup table used when sliding the mapping arrays: for a byte value
 * 'val', sctp_map_lookup_tab[val] is the number of consecutive 1-bits
 * starting at the least significant bit (e.g. 0x01 -> 1, 0x03 -> 2,
 * 0x07 -> 3, 0xff -> 8, while any value with bit 0 clear -> 0).
 * sctp_slide_mapping_arrays() uses this to count how many in-sequence
 * TSNs are present in the first partially-filled byte of the combined
 * mapping/nr-mapping arrays.
 */
int8_t sctp_map_lookup_tab[256] = {
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 7,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 8
};
1977 : };
1978 :
1979 :
/*
 * Recompute the cumulative TSN from the combined (OR-ed) mapping and
 * nr-mapping arrays, and, when enough leading bytes are fully acked,
 * either clear both arrays outright or slide them down so that
 * mapping_array_base_tsn advances. Called with the TCB locked.
 */
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 *
	 * When you first glance at this you might think that all entries
	 * that make up the position of the cum-ack would be in the
	 * nr-mapping array only.. i.e. things up to the cum-ack are always
	 * deliverable. That's true with one exception: when it's a
	 * fragmented message we may not deliver the data until some
	 * threshold (or all of it) is in place. So we must OR the
	 * nr_mapping_array and mapping_array to get a true picture of the
	 * cum-ack.
	 */
	struct sctp_association *asoc;
	int at;
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;

	/* Snapshot the old values for map logging below. */
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	/*
	 * Count 'at' = number of consecutive TSNs present starting at the
	 * array base; full bytes add 8, the first partial byte is resolved
	 * via sctp_map_lookup_tab (count of low-order 1-bits).
	 */
	at = 0;
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);

	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
		/* Internal inconsistency: cum-ack beyond both highest TSNs. */
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		/* Non-INVARIANTS build: log, then repair by clamping. */
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
		int clr;
#ifdef INVARIANTS
		unsigned int i;
#endif

		/* clear the array: 'clr' bytes, rounded up, capped at size */
		clr = ((at+7) >> 3);
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first NON 0xff byte */

		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);
		if (slide_end < slide_from) {
			/* Should be impossible: highest TSN before cum-ack byte. */
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
				    lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
				    asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
				     SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
				     (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
					     (uint32_t) asoc->mapping_array_size,
					     SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			/* Shift both arrays down by slide_from bytes. */
			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];

			}
			/* Zero the vacated tail of both arrays. */
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			/*
			 * Keep the "empty map" sentinel (highest == base - 1)
			 * consistent as the base advances.
			 */
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
					     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
					     SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}
2148 :
/*
 * Decide whether to send a SACK immediately or (re)start the delayed-ack
 * timer after processing incoming DATA.
 *
 * 'was_a_gap' indicates whether a gap existed before the just-processed
 * packet; a gap that has now closed forces an immediate SACK. In the
 * SHUTDOWN-SENT state a SHUTDOWN plus SACK is sent right away instead.
 */
void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
{
	struct sctp_association *asoc;
	uint32_t highest_tsn;

	asoc = &stcb->asoc;
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}

	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * Ok special case, in SHUTDOWN-SENT case. Here we make
		 * sure the SACK timer is off and instead send a SHUTDOWN
		 * and a SACK.
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA+SCTP_LOC_18);
		}
		/* SHUTDOWN goes to the alternate path if one is set. */
		sctp_send_shutdown(stcb,
		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
	} else {
		int is_a_gap;

		/* is there a gap now ? */
		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);

		/*
		 * CMT DAC algorithm: increase number of packets
		 * received since last ack
		 */
		stcb->asoc.cmt_dac_pkts_rcvd++;

		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a SACK */
		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
							 * longer is one */
		    (stcb->asoc.numduptsns) ||		/* we have dup's */
		    (is_a_gap) ||			/* is still a gap */
		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
		    ) {

			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
			    (stcb->asoc.send_sack == 0) &&
			    (stcb->asoc.numduptsns == 0) &&
			    (stcb->asoc.delayed_ack) &&
			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {

				/*
				 * CMT DAC algorithm: With CMT, delay acks
				 * even in the face of reordering.
				 * Therefore, acks that do not have to be
				 * sent because of the above reasons will
				 * be delayed. That is, acks that would
				 * have been sent due to gap reports will
				 * be delayed with DAC. Start the delayed
				 * ack timer.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				/*
				 * Ok we must build a SACK since the
				 * timer is pending, we got our
				 * first packet OR there are gaps or
				 * duplicates.
				 */
				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* No immediate SACK needed; arm the delayed-ack timer. */
			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			}
		}
	}
}
2239 :
2240 : void
2241 0 : sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2242 : {
2243 : struct sctp_tmit_chunk *chk;
2244 : uint32_t tsize, pd_point;
2245 : uint16_t nxt_todel;
2246 :
2247 0 : if (asoc->fragmented_delivery_inprogress) {
2248 0 : sctp_service_reassembly(stcb, asoc);
2249 : }
2250 : /* Can we proceed further, i.e. the PD-API is complete */
2251 0 : if (asoc->fragmented_delivery_inprogress) {
2252 : /* no */
2253 0 : return;
2254 : }
2255 : /*
2256 : * Now is there some other chunk I can deliver from the reassembly
2257 : * queue.
2258 : */
2259 : doit_again:
2260 0 : chk = TAILQ_FIRST(&asoc->reasmqueue);
2261 0 : if (chk == NULL) {
2262 0 : asoc->size_on_reasm_queue = 0;
2263 0 : asoc->cnt_on_reasm_queue = 0;
2264 0 : return;
2265 : }
2266 0 : nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2267 0 : if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2268 0 : ((nxt_todel == chk->rec.data.stream_seq) ||
2269 0 : (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2270 : /*
2271 : * Yep the first one is here. We setup to start reception,
2272 : * by backing down the TSN just in case we can't deliver.
2273 : */
2274 :
2275 : /*
2276 : * Before we start though either all of the message should
2277 : * be here or the socket buffer max or nothing on the
2278 : * delivery queue and something can be delivered.
2279 : */
2280 0 : if (stcb->sctp_socket) {
2281 0 : pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2282 : stcb->sctp_ep->partial_delivery_point);
2283 : } else {
2284 0 : pd_point = stcb->sctp_ep->partial_delivery_point;
2285 : }
2286 0 : if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2287 0 : asoc->fragmented_delivery_inprogress = 1;
2288 0 : asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2289 0 : asoc->str_of_pdapi = chk->rec.data.stream_number;
2290 0 : asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2291 0 : asoc->pdapi_ppid = chk->rec.data.payloadtype;
2292 0 : asoc->fragment_flags = chk->rec.data.rcv_flags;
2293 0 : sctp_service_reassembly(stcb, asoc);
2294 0 : if (asoc->fragmented_delivery_inprogress == 0) {
2295 0 : goto doit_again;
2296 : }
2297 : }
2298 : }
2299 : }
2300 :
/*
 * Walk all chunks in the DATA region of a received packet, handing each
 * DATA chunk to sctp_process_a_data_chunk(), then service the reassembly
 * queue and schedule/send a SACK.
 *
 * Returns 0 on normal completion, 1 if the first chunk header cannot be
 * read, and 2 if the association was aborted while processing. On abort
 * paths the caller must not touch *mm again.
 */
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
                  struct sockaddr *src, struct sockaddr *dst,
                  struct sctphdr *sh, struct sctp_inpcb *inp,
                  struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t *high_tsn,
#if defined(__FreeBSD__)
                  uint8_t mflowtype, uint32_t mflowid,
#endif
                  uint32_t vrf_id, uint16_t port)
{
	struct sctp_data_chunk *ch, chunk_buf;
	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of control chunks processed */
	int stop_proc = 0;
	int chk_length, break_flag, last_chunk;
	int abort_flag = 0, was_a_gap;
	struct mbuf *m;
	uint32_t highest_tsn;

	/* set the rwnd */
	sctp_set_rwnd(stcb, &stcb->asoc);

	m = *mm;
	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	/* Remember whether a gap existed before this packet (for the SACK check). */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
	/*
	 * setup where we got the last DATA packet from for any SACK that
	 * may need to go out. Don't bump the net. This is done ONLY when a
	 * chunk is assigned.
	 */
	asoc->last_data_chunk_from = net;

#ifndef __Panda__
	/*-
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster... i.e. it is a small packet sent in and yet the driver
	 * underneath allocated a full cluster for it. If so we must copy it
	 * to a smaller mbuf and free up the cluster mbuf. This will help
	 * with cluster starvation. Note for __Panda__ we don't do this
	 * since it has clusters all the way down to 64 bytes.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons.. not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
		if (m) {
			/* ok lets see if we can copy the data up */
			caddr_t *from, *to;
			/* get the pointers and copy */
			to = mtod(m, caddr_t *);
			from = mtod((*mm), caddr_t *);
			memcpy(to, from, SCTP_BUF_LEN((*mm)));
			/* copy the length and free up the old */
			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
			sctp_m_freem(*mm);
			/* success, back copy */
			*mm = m;
		} else {
			/* We are in trouble in the mbuf world .. yikes */
			m = *mm;
		}
	}
#endif
	/* get pointer to the first chunk header */
	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
	     sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
	if (ch == NULL) {
		return (1);
	}
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->ch.chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			continue;
		}
		if (ch->ch.chunk_type == SCTP_DATA) {
			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
				/*
				 * Need to send an abort since we had an
				 * invalid data chunk.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
				         chk_length);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
				sctp_abort_association(inp, stcb, m, iphlen,
				                       src, dst, sh, op_err,
#if defined(__FreeBSD__)
				                       mflowtype, mflowid,
#endif
				                       vrf_id, port);
				return (2);
			}
			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
				/*
				 * Need to send an abort since we had an
				 * empty data chunk.
				 */
				struct mbuf *op_err;

				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
				/*
				 * NOTE(review): this reuses SCTP_LOC_19,
				 * the same location code as the short-chunk
				 * abort above, making the two abort sites
				 * indistinguishable in last_abort_code.
				 * Presumably a distinct code was intended —
				 * confirm against sctp_constants.h before
				 * changing.
				 */
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
				sctp_abort_association(inp, stcb, m, iphlen,
				                       src, dst, sh, op_err,
#if defined(__FreeBSD__)
				                       mflowtype, mflowid,
#endif
				                       vrf_id, port);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			/* Is this the final chunk in the packet? */
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
			    chk_length, net, high_tsn, &abort_flag, &break_flag,
			    last_chunk)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because of out of rwnd space and no
				 * drop rep space left.
				 */
				stop_proc = 1;
				continue;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->ch.chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_NR_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
				/*
				 * Now, what do we do with KNOWN chunks that
				 * are NOT in the right place?
				 *
				 * For now, I do nothing but ignore them. We
				 * may later want to add sysctl stuff to
				 * switch out and do either an ABORT() or
				 * possibly process them.
				 */
				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
					struct mbuf *op_err;

					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
					sctp_abort_association(inp, stcb,
					                       m, iphlen,
					                       src, dst,
					                       sh, op_err,
#if defined(__FreeBSD__)
					                       mflowtype, mflowid,
#endif
					                       vrf_id, port);
					return (2);
				}
				break;
			default:
				/* unknown chunk type, use bit rules */
				if (ch->ch.chunk_type & 0x40) {
					/* Add an error report to the queue */
					struct mbuf *merr;
					struct sctp_paramhdr *phd;

					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
					if (merr) {
						phd = mtod(merr, struct sctp_paramhdr *);
						/*
						 * We cheat and use param
						 * type since we did not
						 * bother to define a error
						 * cause struct. They are
						 * the same basic format
						 * with different names.
						 */
						phd->param_type =
							htons(SCTP_CAUSE_UNRECOG_CHUNK);
						phd->param_length =
							htons(chk_length + sizeof(*phd));
						SCTP_BUF_LEN(merr) = sizeof(*phd);
						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
						if (SCTP_BUF_NEXT(merr)) {
							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
								sctp_m_freem(merr);
							} else {
								sctp_queue_op_err(stcb, merr);
							}
						} else {
							sctp_m_freem(merr);
						}
					}
				}
				if ((ch->ch.chunk_type & 0x80) == 0) {
					/* discard the rest of this packet */
					stop_proc = 1;
				}	/* else skip this bad chunk and
					 * continue... */
				break;
			}	/* switch of chunk type */
		}
		*offset += SCTP_SIZE32(chk_length);
		if ((*offset >= length) || stop_proc) {
			/* no more data left in the mbuf chain */
			stop_proc = 1;
			continue;
		}
		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
		     sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
		if (ch == NULL) {
			*offset = length;
			stop_proc = 1;
			continue;
		}
	}
	if (break_flag) {
		/*
		 * we need to report rwnd overrun drops.
		 */
		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
	}
	if (num_chunks) {
		/*
		 * Did we get data, if so update the time for auto-close and
		 * give peer credit for being alive.
		 */
		SCTP_STAT_INCR(sctps_recvpktwithdata);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			               stcb->asoc.overall_error_count,
			               0,
			               SCTP_FROM_SCTP_INDATA,
			               __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
	}
	/* now service all of the reassm queue if needed */
	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
		sctp_service_queues(stcb, asoc);

	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/* Assure that we ack right away */
		stcb->asoc.send_sack = 1;
	}
	/* Start a sack timer or QUEUE a SACK for sending */
	sctp_sack_check(stcb, was_a_gap);
	return (0);
}
2590 :
/*
 * Process one SACK Gap Ack Block against the sent-queue.
 *
 * Marks every chunk whose TSN lies in [last_tsn + frag_strt,
 * last_tsn + frag_end] as gap-acked: chunks still in flight are taken
 * out of the flight size, RTO may be sampled from the first
 * non-retransmitted chunk, and CMT pseudo-cumack / SFR / DAC tracking
 * state on the destination (tp1->whoTo) is updated.  When nr_sacking
 * is nonzero the chunk is promoted to SCTP_DATAGRAM_NR_ACKED and its
 * data mbufs are freed immediately; otherwise it is only MARKED and
 * must be held until the cumulative ack passes it.
 *
 * *p_tp1 is a cursor into the sent-queue carried across successive
 * calls (the caller feeds gap blocks in order); it is updated on exit.
 * *num_frs, *biggest_newly_acked_tsn, *this_sack_lowest_newack and
 * *rto_ok are accumulators shared with the caller.
 *
 * Returns nonzero iff at least one chunk's data was freed (wake_him);
 * the caller only uses this in the nr-sack case to wake the sender.
 */
static int
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
    int *num_frs,
    uint32_t *biggest_newly_acked_tsn,
    uint32_t *this_sack_lowest_newack,
    int *rto_ok)
{
	struct sctp_tmit_chunk *tp1;
	unsigned int theTSN;
	int j, wake_him = 0, circled = 0;

	/* Recover the tp1 we last saw */
	tp1 = *p_tp1;
	if (tp1 == NULL) {
		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	}
	for (j = frag_strt; j <= frag_end; j++) {
		theTSN = j + last_tsn;
		while (tp1) {
			if (tp1->rec.data.doing_fast_retransmit)
				(*num_frs) += 1;

			/*-
			 * CMT: CUCv2 algorithm. For each TSN being
			 * processed from the sent queue, track the
			 * next expected pseudo-cumack, or
			 * rtx_pseudo_cumack, if required. Separate
			 * cumack trackers for first transmissions,
			 * and retransmissions.
			 */
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_pseudo_cumack == 1) &&
			    (tp1->snd_count == 1)) {
				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
				tp1->whoTo->find_pseudo_cumack = 0;
			}
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
			    (tp1->snd_count > 1)) {
				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
				tp1->whoTo->find_rtx_pseudo_cumack = 0;
			}
			if (tp1->rec.data.TSN_seq == theTSN) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*-
					 * must be held until
					 * cum-ack passes
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						/*-
						 * If it is less than RESEND, it is
						 * now no-longer in flight.
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						                *biggest_newly_acked_tsn)) {
							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked. update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;

						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						                tp1->whoTo->this_sack_highest_newack)) {
							tp1->whoTo->this_sack_highest_newack =
								tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
								              last_tsn,
								              tp1->rec.data.TSN_seq,
								              0,
								              0,
								              SCTP_LOG_TSN_ACKED);
							}
							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
						}
						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
							              last_tsn,
							              tp1->rec.data.TSN_seq,
							              frag_strt,
							              frag_end,
							              SCTP_LOG_TSN_ACKED);
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							               tp1->whoTo->flight_size,
							               tp1->book_size,
							               (uintptr_t)tp1->whoTo,
							               tp1->rec.data.TSN_seq);
						}
						/* Chunk leaves the flight: shrink per-net and total flight. */
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
							                                                             tp1);
						}
						sctp_total_flight_decrease(stcb, tp1);

						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*-
							 * True non-retransmited chunk
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;

							/*-
							 * update RTO too ?
							 */
							if (tp1->do_rtt) {
								/* Only one RTT sample per SACK (*rto_ok gate). */
								if (*rto_ok) {
									tp1->whoTo->RTO =
										sctp_calculate_rto(stcb,
										                   &stcb->asoc,
										                   tp1->whoTo,
										                   &tp1->sent_rcv_time,
										                   sctp_align_safe_nocopy,
										                   SCTP_RTT_FROM_DATA);
									*rto_ok = 0;
								}
								if (tp1->whoTo->rto_needed == 0) {
									tp1->whoTo->rto_needed = 1;
								}
								tp1->do_rtt = 0;
							}
						}

					}
					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						                stcb->asoc.this_sack_highest_gap)) {
							stcb->asoc.this_sack_highest_gap =
								tp1->rec.data.TSN_seq;
						}
						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
							/* Gap-acked chunk no longer needs retransmission. */
							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
							sctp_audit_log(0xB2,
							               (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
#endif
						}
					}
					/*-
					 * All chunks NOT UNSENT fall through here and are marked
					 * (leave PR-SCTP ones that are to skip alone though)
					 */
					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						tp1->sent = SCTP_DATAGRAM_MARKED;
					}
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					}
					/* NR Sack code here */
					if (nr_sacking &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
						} else {
							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
#endif
						}
						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
						if (tp1->data) {
							/* sa_ignore NO_NULL_CHK */
							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
							sctp_m_freem(tp1->data);
							tp1->data = NULL;
						}
						wake_him++;
					}
				}
				break;
			} /* if (tp1->TSN_seq == theTSN) */
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
				/* Queue is TSN-ordered; theTSN is not on it. */
				break;
			}
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			/* Wrap to the head once, in case blocks arrived out of order. */
			if ((tp1 == NULL) && (circled == 0)) {
				circled++;
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			}
		} /* end while (tp1) */
		if (tp1 == NULL) {
			circled = 0;
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		}
		/* In case the fragments were not in order we must reset */
	} /* end for (j = fragStart */
	*p_tp1 = tp1;
	return (wake_him);	/* Return value only used for nr-sack */
}
2823 :
2824 :
2825 : static int
2826 0 : sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2827 : uint32_t last_tsn, uint32_t *biggest_tsn_acked,
2828 : uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
2829 : int num_seg, int num_nr_seg, int *rto_ok)
2830 : {
2831 : struct sctp_gap_ack_block *frag, block;
2832 : struct sctp_tmit_chunk *tp1;
2833 : int i;
2834 0 : int num_frs = 0;
2835 : int chunk_freed;
2836 : int non_revocable;
2837 : uint16_t frag_strt, frag_end, prev_frag_end;
2838 :
2839 0 : tp1 = TAILQ_FIRST(&asoc->sent_queue);
2840 0 : prev_frag_end = 0;
2841 0 : chunk_freed = 0;
2842 :
2843 0 : for (i = 0; i < (num_seg + num_nr_seg); i++) {
2844 0 : if (i == num_seg) {
2845 0 : prev_frag_end = 0;
2846 0 : tp1 = TAILQ_FIRST(&asoc->sent_queue);
2847 : }
2848 0 : frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2849 : sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
2850 0 : *offset += sizeof(block);
2851 0 : if (frag == NULL) {
2852 0 : return (chunk_freed);
2853 : }
2854 0 : frag_strt = ntohs(frag->start);
2855 0 : frag_end = ntohs(frag->end);
2856 :
2857 0 : if (frag_strt > frag_end) {
2858 : /* This gap report is malformed, skip it. */
2859 0 : continue;
2860 : }
2861 0 : if (frag_strt <= prev_frag_end) {
2862 : /* This gap report is not in order, so restart. */
2863 0 : tp1 = TAILQ_FIRST(&asoc->sent_queue);
2864 : }
2865 0 : if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2866 0 : *biggest_tsn_acked = last_tsn + frag_end;
2867 : }
2868 0 : if (i < num_seg) {
2869 0 : non_revocable = 0;
2870 : } else {
2871 0 : non_revocable = 1;
2872 : }
2873 0 : if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2874 : non_revocable, &num_frs, biggest_newly_acked_tsn,
2875 : this_sack_lowest_newack, rto_ok)) {
2876 0 : chunk_freed = 1;
2877 : }
2878 0 : prev_frag_end = frag_end;
2879 : }
2880 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2881 0 : if (num_frs)
2882 0 : sctp_log_fr(*biggest_tsn_acked,
2883 : *biggest_newly_acked_tsn,
2884 : last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2885 : }
2886 0 : return (chunk_freed);
2887 : }
2888 :
2889 : static void
2890 0 : sctp_check_for_revoked(struct sctp_tcb *stcb,
2891 : struct sctp_association *asoc, uint32_t cumack,
2892 : uint32_t biggest_tsn_acked)
2893 : {
2894 : struct sctp_tmit_chunk *tp1;
2895 :
2896 0 : TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2897 0 : if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2898 : /*
2899 : * ok this guy is either ACK or MARKED. If it is
2900 : * ACKED it has been previously acked but not this
2901 : * time i.e. revoked. If it is MARKED it was ACK'ed
2902 : * again.
2903 : */
2904 0 : if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2905 : break;
2906 : }
2907 0 : if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2908 : /* it has been revoked */
2909 0 : tp1->sent = SCTP_DATAGRAM_SENT;
2910 0 : tp1->rec.data.chunk_was_revoked = 1;
2911 : /* We must add this stuff back in to
2912 : * assure timers and such get started.
2913 : */
2914 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2915 0 : sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2916 0 : tp1->whoTo->flight_size,
2917 0 : tp1->book_size,
2918 0 : (uintptr_t)tp1->whoTo,
2919 : tp1->rec.data.TSN_seq);
2920 : }
2921 0 : sctp_flight_size_increase(tp1);
2922 0 : sctp_total_flight_increase(stcb, tp1);
2923 : /* We inflate the cwnd to compensate for our
2924 : * artificial inflation of the flight_size.
2925 : */
2926 0 : tp1->whoTo->cwnd += tp1->book_size;
2927 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2928 0 : sctp_log_sack(asoc->last_acked_seq,
2929 : cumack,
2930 : tp1->rec.data.TSN_seq,
2931 : 0,
2932 : 0,
2933 : SCTP_LOG_TSN_REVOKED);
2934 : }
2935 0 : } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2936 : /* it has been re-acked in this SACK */
2937 0 : tp1->sent = SCTP_DATAGRAM_ACKED;
2938 : }
2939 : }
2940 0 : if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2941 0 : break;
2942 : }
2943 0 : }
2944 :
2945 :
/*
 * Strike (increment the miss count of) every sent chunk reported
 * missing by this SACK, per RFC 4960 fast-retransmit rules extended
 * with CMT SFR/DAC/HTNA heuristics.  A chunk whose tp1->sent counter
 * reaches SCTP_DATAGRAM_RESEND is queued for fast retransmission:
 * its flight accounting is undone, PR-SCTP lifetimes are enforced,
 * and an (alternate) destination is selected.
 *
 * biggest_tsn_acked       highest TSN acked by this SACK (cum + gaps).
 * biggest_tsn_newly_acked highest TSN newly acked by this SACK.
 * this_sack_lowest_newack lowest newly acked TSN (CMT DAC).
 * accum_moved             nonzero if the cumulative ack advanced.
 */
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
{
	struct sctp_tmit_chunk *tp1;
	int strike_flag = 0;
	struct timeval now;
	int tot_retrans = 0;	/* counted but not consumed here */
	uint32_t sending_seq;
	struct sctp_nets *net;
	int num_dests_sacked = 0;

	/*
	 * select the sending_seq, this is either the next thing ready to be
	 * sent but not transmitted, OR, the next seq we assign.
	 */
	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (tp1 == NULL) {
		sending_seq = asoc->sending_seq;
	} else {
		sending_seq = tp1->rec.data.TSN_seq;
	}

	/* CMT DAC algo: finding out if SACK is a mixed SACK */
	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->saw_newack)
				num_dests_sacked++;
		}
	}
	if (stcb->asoc.prsctp_supported) {
		/* Sample "now" once; used for PR-SCTP TTL expiry below. */
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		strike_flag = 0;
		if (tp1->no_fr_allowed) {
			/* this one had a timeout or something */
			continue;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND)
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_CHECK_STRIKE);
		}
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* done */
			break;
		}
		if (stcb->asoc.prsctp_supported) {
			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
				/* Is it expired? */
#ifndef __FreeBSD__
				if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
					/* Yes so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
						    SCTP_SO_NOT_LOCKED);
					}
					continue;
				}
			}

		}
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
			/* we are beyond the tsn in the sack */
			break;
		}
		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
			/* either a RESEND, ACKED, or MARKED */
			/* skip */
			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
				/* Continue strikin FWD-TSN chunks */
				tp1->rec.data.fwd_tsn_cnt++;
			}
			continue;
		}
		/*
		 * CMT : SFR algo (covers part of DAC and HTNA as well)
		 */
		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
			/*
			 * No new acks were receieved for data sent to this
			 * dest. Therefore, according to the SFR algo for
			 * CMT, no data sent to this dest can be marked for
			 * FR using this SACK.
			 */
			continue;
		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
		                                     tp1->whoTo->this_sack_highest_newack)) {
			/*
			 * CMT: New acks were receieved for data sent to
			 * this dest. But no new acks were seen for data
			 * sent after tp1. Therefore, according to the SFR
			 * algo for CMT, tp1 cannot be marked for FR using
			 * this SACK. This step covers part of the DAC algo
			 * and the HTNA algo as well.
			 */
			continue;
		}
		/*
		 * Here we check to see if we were have already done a FR
		 * and if so we see if the biggest TSN we saw in the sack is
		 * smaller than the recovery point. If so we don't strike
		 * the tsn... otherwise we CAN strike the TSN.
		 */
		/*
		 * @@@ JRI: Check for CMT
		 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
		 */
		if (accum_moved && asoc->fast_retran_loss_recovery) {
			/*
			 * Strike the TSN if in fast-recovery and cum-ack
			 * moved.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more.
				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
				 * two packets have been received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(16 + num_dests_sacked,
						    tp1->rec.data.TSN_seq,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		} else if ((tp1->rec.data.doing_fast_retransmit) &&
		           (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * For those that have done a FR we must take
			 * special consideration if we strike. I.e the
			 * biggest_newly_acked must be higher than the
			 * sending_seq at the time we did the FR.
			 */
			if (
#ifdef SCTP_FR_TO_ALTERNATE
				/*
				 * If FR's go to new networks, then we must only do
				 * this for singly homed asoc's. However if the FR's
				 * go to the same network (Armando's work) then its
				 * ok to FR multiple times.
				 */
				(asoc->numnets < 2)
#else
				(1)
#endif
				) {

				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
				                tp1->rec.data.fast_retran_tsn)) {
					/*
					 * Strike the TSN, since this ack is
					 * beyond where things were when we
					 * did a FR.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(biggest_tsn_newly_acked,
						    tp1->rec.data.TSN_seq,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						tp1->sent++;
					}
					strike_flag = 1;
					if ((asoc->sctp_cmt_on_off > 0) &&
					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
						/*
						 * CMT DAC algorithm: If
						 * SACK flag is set to 0,
						 * then lowest_newack test
						 * will not pass because it
						 * would have been set to
						 * the cumack earlier. If
						 * not already to be rtx'd,
						 * If not a mixed sack and
						 * if tp1 is not between two
						 * sacked TSNs, then mark by
						 * one more.
						 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
						 * two packets have been received after this missing TSN.
						 */
						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
						    (num_dests_sacked == 1) &&
						    SCTP_TSN_GT(this_sack_lowest_newack,
						                tp1->rec.data.TSN_seq)) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
								sctp_log_fr(32 + num_dests_sacked,
								    tp1->rec.data.TSN_seq,
								    tp1->sent,
								    SCTP_FR_LOG_STRIKE_CHUNK);
							}
							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
								tp1->sent++;
							}
						}
					}
				}
			}
			/*
			 * JRI: TODO: remove code for HTNA algo. CMT's
			 * SFR algo covers HTNA.
			 */
		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
		                       biggest_tsn_newly_acked)) {
			/*
			 * We don't strike these: This is the HTNA
			 * algorithm i.e. we don't strike If our TSN is
			 * larger than the Highest TSN Newly Acked.
			 */
			;
		} else {
			/* Strike the TSN */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more.
				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
				 * two packets have been received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(48 + num_dests_sacked,
						    tp1->rec.data.TSN_seq,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		}
		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
			struct sctp_nets *alt;

			/* fix counts and things */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
				    tp1->book_size,
				    (uintptr_t)tp1->whoTo,
				    tp1->rec.data.TSN_seq);
			}
			if (tp1->whoTo) {
				tp1->whoTo->net_ack++;
				sctp_flight_size_decrease(tp1);
				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
					                                                             tp1);
				}
			}

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
			}
			/* add back to the rwnd */
			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));

			/* remove from the total flight */
			sctp_total_flight_decrease(stcb, tp1);

			if ((stcb->asoc.prsctp_supported) &&
			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
				/* Has it been retransmitted tv_sec times? - we store the retran count there. */
				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
					/* Yes, so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
						    SCTP_SO_NOT_LOCKED);
					}
					/* Make sure to flag we had a FR */
					tp1->whoTo->net_ack++;
					continue;
				}
			}
			/* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
				    0, SCTP_FR_MARKED);
			}
			if (strike_flag) {
				/* This is a subsequent FR */
				SCTP_STAT_INCR(sctps_sendmultfastretrans);
			}
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			if (asoc->sctp_cmt_on_off > 0) {
				/*
				 * CMT: Using RTX_SSTHRESH policy for CMT.
				 * If CMT is being used, then pick dest with
				 * largest ssthresh for any retransmission.
				 */
				tp1->no_fr_allowed = 1;
				alt = tp1->whoTo;
				/*sa_ignore NO_NULL_CHK*/
				if (asoc->sctp_cmt_pf > 0) {
					/* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
					alt = sctp_find_alternate_net(stcb, alt, 2);
				} else {
					/* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
					/*sa_ignore NO_NULL_CHK*/
					alt = sctp_find_alternate_net(stcb, alt, 1);
				}
				if (alt == NULL) {
					alt = tp1->whoTo;
				}
				/*
				 * CUCv2: If a different dest is picked for
				 * the retransmission, then new
				 * (rtx-)pseudo_cumack needs to be tracked
				 * for orig dest. Let CUCv2 track new (rtx-)
				 * pseudo-cumack always.
				 */
				if (tp1->whoTo) {
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;
				}

			} else {/* CMT is OFF */

#ifdef SCTP_FR_TO_ALTERNATE
				/* Can we find an alternate? */
				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
				/*
				 * default behavior is to NOT retransmit
				 * FR's to an alternate. Armando Caro's
				 * paper details why.
				 */
				alt = tp1->whoTo;
#endif
			}

			tp1->rec.data.doing_fast_retransmit = 1;
			tot_retrans++;
			/* mark the sending seq for possible subsequent FR's */
			/*
			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
			 * (uint32_t)tpi->rec.data.TSN_seq);
			 */
			if (TAILQ_EMPTY(&asoc->send_queue)) {
				/*
				 * If the queue of send is empty then its
				 * the next sequence number that will be
				 * assigned so we subtract one from this to
				 * get the one we last sent.
				 */
				tp1->rec.data.fast_retran_tsn = sending_seq;
			} else {
				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door, we
				 * take the first one (which will have the
				 * lowest TSN) and subtract one to get the
				 * one we last sent.
				 */
				struct sctp_tmit_chunk *ttt;

				ttt = TAILQ_FIRST(&asoc->send_queue);
				tp1->rec.data.fast_retran_tsn =
					ttt->rec.data.TSN_seq;
			}

			if (tp1->do_rtt) {
				/*
				 * this guy had a RTO calculation pending on
				 * it, cancel it
				 */
				if ((tp1->whoTo != NULL) &&
				    (tp1->whoTo->rto_needed == 0)) {
					tp1->whoTo->rto_needed = 1;
				}
				tp1->do_rtt = 0;
			}
			if (alt != tp1->whoTo) {
				/* yes, there is an alternate. */
				sctp_free_remote_addr(tp1->whoTo);
				/*sa_ignore FREED_MEMORY*/
				tp1->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
}
3374 :
/*
 * PR-SCTP: try to move asoc->advanced_peer_ack_point forward over
 * chunks at the head of the sent-queue that may be abandoned
 * (FORWARD_TSN_SKIP, NR_ACKED, or expired RESEND chunks).  The scan
 * stops at the first chunk that must still be delivered reliably.
 *
 * Returns the last chunk the ack point was advanced over (the
 * candidate for a FORWARD-TSN), or NULL if no advance was possible or
 * PR-SCTP is not supported on this association.
 */
struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;	/* lazily sample the clock at most once */

	if (asoc->prsctp_supported == 0) {
		return (NULL);
	}
	/* SAFE variant: sctp_release_pr_sctp_chunk() may unlink tp1. */
	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND &&
		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* no chance to advance, out of here */
			break;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    asoc->advanced_peer_ack_point,
				    tp1->rec.data.TSN_seq, 0, 0);
			}
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any that are reliable aka
			 * retransmitted until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			(void)SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		/*
		 * now we got a chunk which is marked for another
		 * retransmission to a PR-stream but has run out its chances
		 * already maybe OR has been marked to skip now. Can we skip
		 * it if its a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
#ifndef __FreeBSD__
			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
				/* Yes so drop it */
				if (tp1->data) {
					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
					    1, SCTP_SO_NOT_LOCKED);
				}
			} else {
				/*
				 * No, we are done when hit one for resend
				 * whos time as not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean up
		 * the chunk, advance our peer ack point and we can check
		 * the next chunk.
		 */
		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
			/* advance PeerAckPoint goes forward */
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
				a_adv = tp1;
			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
				/* No update but we do save the chk */
				a_adv = tp1;
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
	}
	return (a_adv);
}
3467 :
3468 : static int
3469 0 : sctp_fs_audit(struct sctp_association *asoc)
3470 : {
3471 : struct sctp_tmit_chunk *chk;
3472 0 : int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3473 : int entry_flight, entry_cnt, ret;
3474 :
3475 0 : entry_flight = asoc->total_flight;
3476 0 : entry_cnt = asoc->total_flight_count;
3477 0 : ret = 0;
3478 :
3479 0 : if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3480 0 : return (0);
3481 :
3482 0 : TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3483 0 : if (chk->sent < SCTP_DATAGRAM_RESEND) {
3484 0 : SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3485 : chk->rec.data.TSN_seq,
3486 : chk->send_size,
3487 : chk->snd_count);
3488 0 : inflight++;
3489 0 : } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3490 0 : resend++;
3491 0 : } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3492 0 : inbetween++;
3493 0 : } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3494 0 : above++;
3495 : } else {
3496 0 : acked++;
3497 : }
3498 : }
3499 :
3500 0 : if ((inflight > 0) || (inbetween > 0)) {
3501 : #ifdef INVARIANTS
3502 : panic("Flight size-express incorrect? \n");
3503 : #else
3504 0 : SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3505 : entry_flight, entry_cnt);
3506 :
3507 0 : SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3508 : inflight, inbetween, resend, above, acked);
3509 0 : ret = 1;
3510 : #endif
3511 : }
3512 0 : return (ret);
3513 : }
3514 :
3515 :
3516 : static void
3517 0 : sctp_window_probe_recovery(struct sctp_tcb *stcb,
3518 : struct sctp_association *asoc,
3519 : struct sctp_tmit_chunk *tp1)
3520 : {
3521 0 : tp1->window_probe = 0;
3522 0 : if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3523 : /* TSN's skipped we do NOT move back. */
3524 0 : sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3525 0 : tp1->whoTo ? tp1->whoTo->flight_size : 0,
3526 0 : tp1->book_size,
3527 0 : (uintptr_t)tp1->whoTo,
3528 : tp1->rec.data.TSN_seq);
3529 0 : return;
3530 : }
3531 : /* First setup this by shrinking flight */
3532 0 : if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3533 0 : (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3534 : tp1);
3535 : }
3536 0 : sctp_flight_size_decrease(tp1);
3537 0 : sctp_total_flight_decrease(stcb, tp1);
3538 : /* Now mark for resend */
3539 0 : tp1->sent = SCTP_DATAGRAM_RESEND;
3540 0 : sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3541 :
3542 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3543 0 : sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3544 0 : tp1->whoTo->flight_size,
3545 0 : tp1->book_size,
3546 0 : (uintptr_t)tp1->whoTo,
3547 : tp1->rec.data.TSN_seq);
3548 : }
3549 : }
3550 :
/*
 * Express (fast-path) handling of a cumulative-ack-only SACK: no gap-ack
 * blocks are processed here, only the cum-ack and the advertised rwnd.
 *
 * Caller must hold the TCB lock (asserted below).  On a protocol
 * violation (cum-ack at or beyond anything we ever sent) the association
 * is aborted and *abort_now is set so the caller stops processing.
 * ecne_seen suppresses the CC module's after-sack update.
 */
void
sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
                         uint32_t rwnd, int *abort_now, int ecne_seen)
{
	struct sctp_nets *net;
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *tp1, *tp2;
	uint32_t old_rwnd;
	int win_probe_recovery = 0;
	int win_probe_recovered = 0;
	int j, done_once = 0;
	/* Only the first qualifying chunk per SACK updates the RTO. */
	int rto_ok = 1;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_ASOCLOG_OF_TSNS
	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
	stcb->asoc.cumack_log_at++;
	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
		stcb->asoc.cumack_log_at = 0;
	}
#endif
	asoc = &stcb->asoc;
	old_rwnd = asoc->peers_rwnd;
	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
		/* old ack */
		return;
	} else if (asoc->last_acked_seq == cumack) {
		/* Window update sack */
		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		if (asoc->peers_rwnd > old_rwnd) {
			/* Window opened: re-arm timers / probe recovery below. */
			goto again;
		}
		return;
	}

	/* First setup for CC stuff */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
			/* Drag along the window_tsn for cwr's */
			net->cwr_window_tsn = cumack;
		}
		net->prev_cwnd = net->cwnd;
		net->net_ack = 0;
		net->net_ack2 = 0;

		/*
		 * CMT: Reset CUC and Fast recovery algo variables before
		 * SACK processing
		 */
		net->new_pseudo_cumack = 0;
		net->will_exit_fast_recovery = 0;
		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
		/* Sanity check: the cum-ack must be below everything sent. */
		uint32_t send_s;

		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
			tp1 = TAILQ_LAST(&asoc->sent_queue,
			                 sctpchunk_listhead);
			send_s = tp1->rec.data.TSN_seq + 1;
		} else {
			send_s = asoc->sending_seq;
		}
		if (SCTP_TSN_GE(cumack, send_s)) {
#ifndef INVARIANTS
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

#endif
#ifdef INVARIANTS
			panic("Impossible sack 1");
#else

			*abort_now = 1;
			/* XXX */
			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x",
			         cumack, send_s);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
#endif
		}
	}
	asoc->this_sack_highest_gap = cumack;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		               stcb->asoc.overall_error_count,
		               0,
		               SCTP_FROM_SCTP_INDATA,
		               __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
		/* process the new consecutive TSN first */
		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
					SCTP_PRINTF("Warning, an unsent is now acked?\n");
				}
				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
					/*
					 * If it is less than ACKED, it is
					 * now no-longer in flight. Higher
					 * values may occur during marking
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
							               tp1->whoTo->flight_size,
							               tp1->book_size,
							               (uintptr_t)tp1->whoTo,
							               tp1->rec.data.TSN_seq);
						}
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
							    tp1);
						}
						/* sa_ignore NO_NULL_CHK */
						sctp_total_flight_decrease(stcb, tp1);
					}
					tp1->whoTo->net_ack += tp1->send_size;
					if (tp1->snd_count < 2) {
						/*
						 * True non-retransmited
						 * chunk
						 */
						tp1->whoTo->net_ack2 +=
						    tp1->send_size;

						/* update RTO too? */
						if (tp1->do_rtt) {
							if (rto_ok) {
								tp1->whoTo->RTO =
								/*
								 * sa_ignore
								 * NO_NULL_CHK
								 */
								    sctp_calculate_rto(stcb,
								        asoc, tp1->whoTo,
								        &tp1->sent_rcv_time,
								        sctp_align_safe_nocopy,
								        SCTP_RTT_FROM_DATA);
								rto_ok = 0;
							}
							if (tp1->whoTo->rto_needed == 0) {
								tp1->whoTo->rto_needed = 1;
							}
							tp1->do_rtt = 0;
						}
					}
					/*
					 * CMT: CUCv2 algorithm. From the
					 * cumack'd TSNs, for each TSN being
					 * acked for the first time, set the
					 * following variables for the
					 * corresp destination.
					 * new_pseudo_cumack will trigger a
					 * cwnd update.
					 * find_(rtx_)pseudo_cumack will
					 * trigger search for the next
					 * expected (rtx-)pseudo-cumack.
					 */
					tp1->whoTo->new_pseudo_cumack = 1;
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;

					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						/* sa_ignore NO_NULL_CHK */
						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
					}
				}
				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
				}
				if (tp1->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					tp1->whoTo->cwnd -= tp1->book_size;
					tp1->rec.data.chunk_was_revoked = 0;
				}
				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
					} else {
						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
#endif
					}
				}
				/* Fully acked: release the chunk and its data. */
				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
				if (tp1->data) {
					/* sa_ignore NO_NULL_CHK */
					sctp_free_bufspace(stcb, asoc, tp1, 1);
					sctp_m_freem(tp1->data);
					tp1->data = NULL;
				}
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
					sctp_log_sack(asoc->last_acked_seq,
					              cumack,
					              tp1->rec.data.TSN_seq,
					              0,
					              0,
					              SCTP_LOG_FREE_SENT);
				}
				asoc->sent_queue_cnt--;
				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
			} else {
				break;
			}
		}

	}
	/* Wake the sender: userspace callback path or kernel sowwakeup. */
#if defined(__Userspace__)
	if (stcb->sctp_ep->recv_callback) {
		if (stcb->sctp_socket) {
			uint32_t inqueue_bytes, sb_free_now;
			struct sctp_inpcb *inp;

			inp = stcb->sctp_ep;
			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);

			/* check if the amount free in the send socket buffer crossed the threshold */
			if (inp->send_callback &&
			    (((inp->send_sb_threshold > 0) &&
			      (sb_free_now >= inp->send_sb_threshold) &&
			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
			     (inp->send_sb_threshold == 0))) {
				/* TCB lock is dropped around the callback; hold a ref. */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				inp->send_callback(stcb->sctp_socket, sb_free_now);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
	} else if (stcb->sctp_socket) {
#else
	/* sa_ignore NO_NULL_CHK */
	if (stcb->sctp_socket) {
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

#endif
		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			/* sa_ignore NO_NULL_CHK */
			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
		}
	}

	/* JRS - Use the congestion control given in the CC module */
	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->net_ack2 > 0) {
				/*
				 * Karn's rule applies to clearing error count, this
				 * is optional.
				 */
				net->error_count = 0;
				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
					/* addr came good */
					net->dest_state |= SCTP_ADDR_REACHABLE;
					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
					                0, (void *)net, SCTP_SO_NOT_LOCKED);
				}
				if (net == stcb->asoc.primary_destination) {
					if (stcb->asoc.alternate) {
						/* release the alternate, primary is good */
						sctp_free_remote_addr(stcb->asoc.alternate);
						stcb->asoc.alternate = NULL;
					}
				}
				if (net->dest_state & SCTP_ADDR_PF) {
					net->dest_state &= ~SCTP_ADDR_PF;
					/*
					 * NOTE(review): location code uses SCTP_FROM_SCTP_INPUT
					 * in an INDATA routine — looks like a copy/paste of the
					 * diagnostic constant; confirm against upstream.
					 */
					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
					/* Done with this net */
					net->net_ack = 0;
				}
				/* restore any doubled timers */
				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
				if (net->RTO < stcb->asoc.minrto) {
					net->RTO = stcb->asoc.minrto;
				}
				if (net->RTO > stcb->asoc.maxrto) {
					net->RTO = stcb->asoc.maxrto;
				}
			}
		}
		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
	}
	asoc->last_acked_seq = cumack;

	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left in-flight */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
			net->partial_bytes_acked = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
	}

	/* RWND update */
	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
	/* Now assure a timer where data is queued at */
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		int to_ticks;
		if (win_probe_recovery && (net->window_probe)) {
			win_probe_recovered = 1;
			/*
			 * Find first chunk that was used with window probe
			 * and clear the sent
			 */
			/* sa_ignore FREED_MEMORY */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					/* move back to data send queue */
					sctp_window_probe_recovery(stcb, asoc, tp1);
					break;
				}
			}
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		if (net->flight_size) {
			j++;
			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
			                          sctp_timeout_handler, &net->rxt_timer);
			if (net->window_probe) {
				net->window_probe = 0;
			}
		} else {
			if (net->window_probe) {
				/* In window probes we must assure a timer is still running there */
				net->window_probe = 0;
				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
					                    sctp_timeout_handler, &net->rxt_timer);
				}
			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				                stcb, net,
				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
			}
		}
	}
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen unless all packets
		 * are PR-SCTP and marked to skip of course.
		 */
		if (sctp_fs_audit(asoc)) {
			/* Accounting drifted: rebuild flight from the queue. */
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				net->flight_size = 0;
			}
			asoc->total_flight = 0;
			asoc->total_flight_count = 0;
			asoc->sent_queue_retran_cnt = 0;
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_increase(tp1);
					sctp_total_flight_increase(stcb, tp1);
				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
				}
			}
		}
		done_once = 1;
		goto again;
	}
	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		     (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->locked_on_sending)
		    ) {
			struct sctp_stream_queue_pending *sp;
			/* I may be in a state where we got
			 * all across.. but cannot write more due
			 * to a shutdown... we abort since the
			 * user did not indicate EOR in this case. The
			 * sp will be cleaned during free of the asoc.
			 */
			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
			                sctp_streamhead);
			if ((sp) && (sp->length == 0)) {
				/* Let cleanup code purge it */
				if (sp->msg_is_complete) {
					asoc->stream_queue_cnt--;
				} else {
					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					asoc->locked_on_sending = NULL;
					asoc->stream_queue_cnt--;
				}
			}
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				/* Need to abort here */
				struct mbuf *op_err;

		abort_out_now:
				*abort_now = 1;
				/* XXX */
				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			} else {
				struct sctp_nets *netp;

				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
				sctp_stop_timers_for_shutdown(stcb);
				if (asoc->alternate) {
					netp = asoc->alternate;
				} else {
					netp = asoc->primary_destination;
				}
				sctp_send_shutdown(stcb, netp);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				                 stcb->sctp_ep, stcb, netp);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				                 stcb->sctp_ep, stcb, netp);
			}
		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		           (asoc->stream_queue_cnt == 0)) {
			struct sctp_nets *netp;

			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				goto abort_out_now;
			}
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			sctp_stop_timers_for_shutdown(stcb);
			if (asoc->alternate) {
				netp = asoc->alternate;
			} else {
				netp = asoc->primary_destination;
			}
			sctp_send_shutdown_ack(stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			                 stcb->sctp_ep, stcb, netp);
		}
	}
	/*********************************************/
	/* Here we perform PR-SCTP procedures        */
	/* (section 4.2)                             */
	/*********************************************/
	/* C1. update advancedPeerAckPoint */
	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
		asoc->advanced_peer_ack_point = cumack;
	}
	/* PR-Sctp issues need to be addressed too */
	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;
		uint32_t old_adv_peer_ack_point;

		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing.
			 */
			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
				send_forward_tsn(stcb, asoc);
			} else if (lchk) {
				/* try to FR fwd-tsn's that get lost too */
				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
					send_forward_tsn(stcb, asoc);
				}
			}
		}
		if (lchk) {
			/* Assure a timer is up */
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			                 stcb->sctp_ep, stcb, lchk->whoTo);
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
		               rwnd,
		               stcb->asoc.peers_rwnd,
		               stcb->asoc.total_flight,
		               stcb->asoc.total_output_queue_size);
	}
}
4101 :
4102 : void
4103 0 : sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4104 : struct sctp_tcb *stcb,
4105 : uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4106 : int *abort_now, uint8_t flags,
4107 : uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4108 : {
4109 : struct sctp_association *asoc;
4110 : struct sctp_tmit_chunk *tp1, *tp2;
4111 : uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4112 0 : uint16_t wake_him = 0;
4113 0 : uint32_t send_s = 0;
4114 : long j;
4115 0 : int accum_moved = 0;
4116 0 : int will_exit_fast_recovery = 0;
4117 : uint32_t a_rwnd, old_rwnd;
4118 0 : int win_probe_recovery = 0;
4119 0 : int win_probe_recovered = 0;
4120 0 : struct sctp_nets *net = NULL;
4121 : int done_once;
4122 0 : int rto_ok = 1;
4123 0 : uint8_t reneged_all = 0;
4124 : uint8_t cmt_dac_flag;
4125 : /*
4126 : * we take any chance we can to service our queues since we cannot
4127 : * get awoken when the socket is read from :<
4128 : */
4129 : /*
4130 : * Now perform the actual SACK handling: 1) Verify that it is not an
4131 : * old sack, if so discard. 2) If there is nothing left in the send
4132 : * queue (cum-ack is equal to last acked) then you have a duplicate
4133 : * too, update any rwnd change and verify no timers are running.
4134 : * then return. 3) Process any new consequtive data i.e. cum-ack
4135 : * moved process these first and note that it moved. 4) Process any
4136 : * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4137 : * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4138 : * sync up flightsizes and things, stop all timers and also check
4139 : * for shutdown_pending state. If so then go ahead and send off the
4140 : * shutdown. If in shutdown recv, send off the shutdown-ack and
4141 : * start that timer, Ret. 9) Strike any non-acked things and do FR
4142 : * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4143 : * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4144 : * if in shutdown_recv state.
4145 : */
4146 : SCTP_TCB_LOCK_ASSERT(stcb);
4147 : /* CMT DAC algo */
4148 0 : this_sack_lowest_newack = 0;
4149 0 : SCTP_STAT_INCR(sctps_slowpath_sack);
4150 0 : last_tsn = cum_ack;
4151 0 : cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4152 : #ifdef SCTP_ASOCLOG_OF_TSNS
4153 : stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4154 : stcb->asoc.cumack_log_at++;
4155 : if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4156 : stcb->asoc.cumack_log_at = 0;
4157 : }
4158 : #endif
4159 0 : a_rwnd = rwnd;
4160 :
4161 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4162 0 : sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4163 : rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4164 : }
4165 :
4166 0 : old_rwnd = stcb->asoc.peers_rwnd;
4167 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4168 0 : sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4169 : stcb->asoc.overall_error_count,
4170 : 0,
4171 : SCTP_FROM_SCTP_INDATA,
4172 : __LINE__);
4173 : }
4174 0 : stcb->asoc.overall_error_count = 0;
4175 0 : asoc = &stcb->asoc;
4176 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4177 0 : sctp_log_sack(asoc->last_acked_seq,
4178 : cum_ack,
4179 : 0,
4180 : num_seg,
4181 : num_dup,
4182 : SCTP_LOG_NEW_SACK);
4183 : }
4184 0 : if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4185 : uint16_t i;
4186 : uint32_t *dupdata, dblock;
4187 :
4188 0 : for (i = 0; i < num_dup; i++) {
4189 0 : dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4190 : sizeof(uint32_t), (uint8_t *)&dblock);
4191 0 : if (dupdata == NULL) {
4192 0 : break;
4193 : }
4194 0 : sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4195 : }
4196 : }
4197 0 : if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4198 : /* reality check */
4199 0 : if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4200 0 : tp1 = TAILQ_LAST(&asoc->sent_queue,
4201 : sctpchunk_listhead);
4202 0 : send_s = tp1->rec.data.TSN_seq + 1;
4203 : } else {
4204 0 : tp1 = NULL;
4205 0 : send_s = asoc->sending_seq;
4206 : }
4207 0 : if (SCTP_TSN_GE(cum_ack, send_s)) {
4208 : struct mbuf *op_err;
4209 : char msg[SCTP_DIAG_INFO_LEN];
4210 :
4211 : /*
4212 : * no way, we have not even sent this TSN out yet.
4213 : * Peer is hopelessly messed up with us.
4214 : */
4215 0 : SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4216 : cum_ack, send_s);
4217 0 : if (tp1) {
4218 0 : SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4219 : tp1->rec.data.TSN_seq, (void *)tp1);
4220 : }
4221 : hopeless_peer:
4222 0 : *abort_now = 1;
4223 : /* XXX */
4224 0 : snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x",
4225 : cum_ack, send_s);
4226 0 : op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4227 0 : stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4228 0 : sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4229 0 : return;
4230 : }
4231 : }
4232 : /**********************/
4233 : /* 1) check the range */
4234 : /**********************/
4235 0 : if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4236 : /* acking something behind */
4237 0 : return;
4238 : }
4239 :
4240 : /* update the Rwnd of the peer */
4241 0 : if (TAILQ_EMPTY(&asoc->sent_queue) &&
4242 0 : TAILQ_EMPTY(&asoc->send_queue) &&
4243 0 : (asoc->stream_queue_cnt == 0)) {
4244 : /* nothing left on send/sent and strmq */
4245 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4246 0 : sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4247 : asoc->peers_rwnd, 0, 0, a_rwnd);
4248 : }
4249 0 : asoc->peers_rwnd = a_rwnd;
4250 0 : if (asoc->sent_queue_retran_cnt) {
4251 0 : asoc->sent_queue_retran_cnt = 0;
4252 : }
4253 0 : if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4254 : /* SWS sender side engages */
4255 0 : asoc->peers_rwnd = 0;
4256 : }
4257 : /* stop any timers */
4258 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4259 0 : sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4260 : stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4261 0 : net->partial_bytes_acked = 0;
4262 0 : net->flight_size = 0;
4263 : }
4264 0 : asoc->total_flight = 0;
4265 0 : asoc->total_flight_count = 0;
4266 0 : return;
4267 : }
4268 : /*
4269 : * We init netAckSz and netAckSz2 to 0. These are used to track 2
4270 : * things. The total byte count acked is tracked in netAckSz AND
4271 : * netAck2 is used to track the total bytes acked that are un-
4272 : * amibguious and were never retransmitted. We track these on a per
4273 : * destination address basis.
4274 : */
4275 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4276 0 : if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4277 : /* Drag along the window_tsn for cwr's */
4278 0 : net->cwr_window_tsn = cum_ack;
4279 : }
4280 0 : net->prev_cwnd = net->cwnd;
4281 0 : net->net_ack = 0;
4282 0 : net->net_ack2 = 0;
4283 :
4284 : /*
4285 : * CMT: Reset CUC and Fast recovery algo variables before
4286 : * SACK processing
4287 : */
4288 0 : net->new_pseudo_cumack = 0;
4289 0 : net->will_exit_fast_recovery = 0;
4290 0 : if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4291 0 : (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4292 : }
4293 : }
4294 : /* process the new consecutive TSN first */
4295 0 : TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4296 0 : if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4297 0 : if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4298 0 : accum_moved = 1;
4299 0 : if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4300 : /*
4301 : * If it is less than ACKED, it is
4302 : * now no-longer in flight. Higher
4303 : * values may occur during marking
4304 : */
4305 0 : if ((tp1->whoTo->dest_state &
4306 0 : SCTP_ADDR_UNCONFIRMED) &&
4307 0 : (tp1->snd_count < 2)) {
4308 : /*
4309 : * If there was no retran
4310 : * and the address is
4311 : * un-confirmed and we sent
4312 : * there and are now
4313 : * sacked.. its confirmed,
4314 : * mark it so.
4315 : */
4316 0 : tp1->whoTo->dest_state &=
4317 : ~SCTP_ADDR_UNCONFIRMED;
4318 : }
4319 0 : if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4320 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4321 0 : sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4322 0 : tp1->whoTo->flight_size,
4323 0 : tp1->book_size,
4324 0 : (uintptr_t)tp1->whoTo,
4325 : tp1->rec.data.TSN_seq);
4326 : }
4327 0 : sctp_flight_size_decrease(tp1);
4328 0 : sctp_total_flight_decrease(stcb, tp1);
4329 0 : if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4330 0 : (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4331 : tp1);
4332 : }
4333 : }
4334 0 : tp1->whoTo->net_ack += tp1->send_size;
4335 :
4336 : /* CMT SFR and DAC algos */
4337 0 : this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4338 0 : tp1->whoTo->saw_newack = 1;
4339 :
4340 0 : if (tp1->snd_count < 2) {
4341 : /*
4342 : * True non-retransmited
4343 : * chunk
4344 : */
4345 0 : tp1->whoTo->net_ack2 +=
4346 0 : tp1->send_size;
4347 :
4348 : /* update RTO too? */
4349 0 : if (tp1->do_rtt) {
4350 0 : if (rto_ok) {
4351 0 : tp1->whoTo->RTO =
4352 0 : sctp_calculate_rto(stcb,
4353 : asoc, tp1->whoTo,
4354 : &tp1->sent_rcv_time,
4355 : sctp_align_safe_nocopy,
4356 : SCTP_RTT_FROM_DATA);
4357 0 : rto_ok = 0;
4358 : }
4359 0 : if (tp1->whoTo->rto_needed == 0) {
4360 0 : tp1->whoTo->rto_needed = 1;
4361 : }
4362 0 : tp1->do_rtt = 0;
4363 : }
4364 : }
4365 : /*
4366 : * CMT: CUCv2 algorithm. From the
4367 : * cumack'd TSNs, for each TSN being
4368 : * acked for the first time, set the
4369 : * following variables for the
4370 : * corresp destination.
4371 : * new_pseudo_cumack will trigger a
4372 : * cwnd update.
4373 : * find_(rtx_)pseudo_cumack will
4374 : * trigger search for the next
4375 : * expected (rtx-)pseudo-cumack.
4376 : */
4377 0 : tp1->whoTo->new_pseudo_cumack = 1;
4378 0 : tp1->whoTo->find_pseudo_cumack = 1;
4379 0 : tp1->whoTo->find_rtx_pseudo_cumack = 1;
4380 :
4381 :
4382 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4383 0 : sctp_log_sack(asoc->last_acked_seq,
4384 : cum_ack,
4385 : tp1->rec.data.TSN_seq,
4386 : 0,
4387 : 0,
4388 : SCTP_LOG_TSN_ACKED);
4389 : }
4390 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4391 0 : sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4392 : }
4393 : }
4394 0 : if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4395 0 : sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4396 : #ifdef SCTP_AUDITING_ENABLED
4397 : sctp_audit_log(0xB3,
4398 : (asoc->sent_queue_retran_cnt & 0x000000ff));
4399 : #endif
4400 : }
4401 0 : if (tp1->rec.data.chunk_was_revoked) {
4402 : /* deflate the cwnd */
4403 0 : tp1->whoTo->cwnd -= tp1->book_size;
4404 0 : tp1->rec.data.chunk_was_revoked = 0;
4405 : }
4406 0 : if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4407 0 : tp1->sent = SCTP_DATAGRAM_ACKED;
4408 : }
4409 : }
4410 : } else {
4411 : break;
4412 : }
4413 : }
4414 0 : biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4415 : /* always set this up to cum-ack */
4416 0 : asoc->this_sack_highest_gap = last_tsn;
4417 :
4418 0 : if ((num_seg > 0) || (num_nr_seg > 0)) {
4419 :
4420 : /*
4421 : * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4422 : * to be greater than the cumack. Also reset saw_newack to 0
4423 : * for all dests.
4424 : */
4425 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4426 0 : net->saw_newack = 0;
4427 0 : net->this_sack_highest_newack = last_tsn;
4428 : }
4429 :
4430 : /*
4431 : * thisSackHighestGap will increase while handling NEW
4432 : * segments this_sack_highest_newack will increase while
4433 : * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4434 : * used for CMT DAC algo. saw_newack will also change.
4435 : */
4436 0 : if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4437 : &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4438 : num_seg, num_nr_seg, &rto_ok)) {
4439 0 : wake_him++;
4440 : }
4441 0 : if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4442 : /*
4443 : * validate the biggest_tsn_acked in the gap acks if
4444 : * strict adherence is wanted.
4445 : */
4446 0 : if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4447 : /*
4448 : * peer is either confused or we are under
4449 : * attack. We must abort.
4450 : */
4451 0 : SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4452 : biggest_tsn_acked, send_s);
4453 0 : goto hopeless_peer;
4454 : }
4455 : }
4456 : }
4457 : /*******************************************/
4458 : /* cancel ALL T3-send timer if accum moved */
4459 : /*******************************************/
4460 0 : if (asoc->sctp_cmt_on_off > 0) {
4461 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4462 0 : if (net->new_pseudo_cumack)
4463 0 : sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4464 : stcb, net,
4465 : SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4466 :
4467 : }
4468 : } else {
4469 0 : if (accum_moved) {
4470 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4471 0 : sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4472 : stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4473 : }
4474 : }
4475 : }
4476 : /********************************************/
4477 : /* drop the acked chunks from the sentqueue */
4478 : /********************************************/
4479 0 : asoc->last_acked_seq = cum_ack;
4480 :
4481 0 : TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4482 0 : if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4483 : break;
4484 : }
4485 0 : if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4486 0 : if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4487 0 : asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4488 : #ifdef INVARIANTS
4489 : } else {
4490 : panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4491 : #endif
4492 : }
4493 : }
4494 0 : TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4495 0 : if (PR_SCTP_ENABLED(tp1->flags)) {
4496 0 : if (asoc->pr_sctp_cnt != 0)
4497 0 : asoc->pr_sctp_cnt--;
4498 : }
4499 0 : asoc->sent_queue_cnt--;
4500 0 : if (tp1->data) {
4501 : /* sa_ignore NO_NULL_CHK */
4502 0 : sctp_free_bufspace(stcb, asoc, tp1, 1);
4503 0 : sctp_m_freem(tp1->data);
4504 0 : tp1->data = NULL;
4505 0 : if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4506 0 : asoc->sent_queue_cnt_removeable--;
4507 : }
4508 : }
4509 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4510 0 : sctp_log_sack(asoc->last_acked_seq,
4511 : cum_ack,
4512 : tp1->rec.data.TSN_seq,
4513 : 0,
4514 : 0,
4515 : SCTP_LOG_FREE_SENT);
4516 : }
4517 0 : sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4518 0 : wake_him++;
4519 : }
4520 0 : if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4521 : #ifdef INVARIANTS
4522 : panic("Warning flight size is postive and should be 0");
4523 : #else
4524 0 : SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4525 : asoc->total_flight);
4526 : #endif
4527 0 : asoc->total_flight = 0;
4528 : }
4529 :
4530 : #if defined(__Userspace__)
4531 0 : if (stcb->sctp_ep->recv_callback) {
4532 0 : if (stcb->sctp_socket) {
4533 : uint32_t inqueue_bytes, sb_free_now;
4534 : struct sctp_inpcb *inp;
4535 :
4536 0 : inp = stcb->sctp_ep;
4537 0 : inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4538 0 : sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4539 :
4540 : /* check if the amount free in the send socket buffer crossed the threshold */
4541 0 : if (inp->send_callback &&
4542 0 : (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4543 0 : (inp->send_sb_threshold == 0))) {
4544 0 : atomic_add_int(&stcb->asoc.refcnt, 1);
4545 0 : SCTP_TCB_UNLOCK(stcb);
4546 0 : inp->send_callback(stcb->sctp_socket, sb_free_now);
4547 0 : SCTP_TCB_LOCK(stcb);
4548 0 : atomic_subtract_int(&stcb->asoc.refcnt, 1);
4549 : }
4550 : }
4551 0 : } else if ((wake_him) && (stcb->sctp_socket)) {
4552 : #else
4553 : /* sa_ignore NO_NULL_CHK */
4554 : if ((wake_him) && (stcb->sctp_socket)) {
4555 : #endif
4556 : #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4557 : struct socket *so;
4558 :
4559 : #endif
4560 0 : SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4561 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4562 0 : sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4563 : }
4564 : #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4565 : so = SCTP_INP_SO(stcb->sctp_ep);
4566 : atomic_add_int(&stcb->asoc.refcnt, 1);
4567 : SCTP_TCB_UNLOCK(stcb);
4568 : SCTP_SOCKET_LOCK(so, 1);
4569 : SCTP_TCB_LOCK(stcb);
4570 : atomic_subtract_int(&stcb->asoc.refcnt, 1);
4571 : if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4572 : /* assoc was freed while we were unlocked */
4573 : SCTP_SOCKET_UNLOCK(so, 1);
4574 : return;
4575 : }
4576 : #endif
4577 0 : sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4578 : #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4579 : SCTP_SOCKET_UNLOCK(so, 1);
4580 : #endif
4581 : } else {
4582 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4583 0 : sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4584 : }
4585 : }
4586 :
4587 0 : if (asoc->fast_retran_loss_recovery && accum_moved) {
4588 0 : if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4589 : /* Setup so we will exit RFC2582 fast recovery */
4590 0 : will_exit_fast_recovery = 1;
4591 : }
4592 : }
4593 : /*
4594 : * Check for revoked fragments:
4595 : *
4596 : * if Previous sack - Had no frags then we can't have any revoked if
4597 : * Previous sack - Had frag's then - If we now have frags aka
4598 : * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4599 : * some of them. else - The peer revoked all ACKED fragments, since
4600 : * we had some before and now we have NONE.
4601 : */
4602 :
4603 0 : if (num_seg) {
4604 0 : sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4605 0 : asoc->saw_sack_with_frags = 1;
4606 0 : } else if (asoc->saw_sack_with_frags) {
4607 0 : int cnt_revoked = 0;
4608 :
4609 : /* Peer revoked all dg's marked or acked */
4610 0 : TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4611 0 : if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4612 0 : tp1->sent = SCTP_DATAGRAM_SENT;
4613 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4614 0 : sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4615 0 : tp1->whoTo->flight_size,
4616 0 : tp1->book_size,
4617 0 : (uintptr_t)tp1->whoTo,
4618 : tp1->rec.data.TSN_seq);
4619 : }
4620 0 : sctp_flight_size_increase(tp1);
4621 0 : sctp_total_flight_increase(stcb, tp1);
4622 0 : tp1->rec.data.chunk_was_revoked = 1;
4623 : /*
4624 : * To ensure that this increase in
4625 : * flightsize, which is artificial,
4626 : * does not throttle the sender, we
4627 : * also increase the cwnd
4628 : * artificially.
4629 : */
4630 0 : tp1->whoTo->cwnd += tp1->book_size;
4631 0 : cnt_revoked++;
4632 : }
4633 : }
4634 0 : if (cnt_revoked) {
4635 0 : reneged_all = 1;
4636 : }
4637 0 : asoc->saw_sack_with_frags = 0;
4638 : }
4639 0 : if (num_nr_seg > 0)
4640 0 : asoc->saw_sack_with_nr_frags = 1;
4641 : else
4642 0 : asoc->saw_sack_with_nr_frags = 0;
4643 :
4644 : /* JRS - Use the congestion control given in the CC module */
4645 0 : if (ecne_seen == 0) {
4646 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4647 0 : if (net->net_ack2 > 0) {
4648 : /*
4649 : * Karn's rule applies to clearing error count, this
4650 : * is optional.
4651 : */
4652 0 : net->error_count = 0;
4653 0 : if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4654 : /* addr came good */
4655 0 : net->dest_state |= SCTP_ADDR_REACHABLE;
4656 0 : sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4657 : 0, (void *)net, SCTP_SO_NOT_LOCKED);
4658 : }
4659 :
4660 0 : if (net == stcb->asoc.primary_destination) {
4661 0 : if (stcb->asoc.alternate) {
4662 : /* release the alternate, primary is good */
4663 0 : sctp_free_remote_addr(stcb->asoc.alternate);
4664 0 : stcb->asoc.alternate = NULL;
4665 : }
4666 : }
4667 :
4668 0 : if (net->dest_state & SCTP_ADDR_PF) {
4669 0 : net->dest_state &= ~SCTP_ADDR_PF;
4670 0 : sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4671 0 : sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4672 0 : asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4673 : /* Done with this net */
4674 0 : net->net_ack = 0;
4675 : }
4676 : /* restore any doubled timers */
4677 0 : net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4678 0 : if (net->RTO < stcb->asoc.minrto) {
4679 0 : net->RTO = stcb->asoc.minrto;
4680 : }
4681 0 : if (net->RTO > stcb->asoc.maxrto) {
4682 0 : net->RTO = stcb->asoc.maxrto;
4683 : }
4684 : }
4685 : }
4686 0 : asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4687 : }
4688 :
4689 0 : if (TAILQ_EMPTY(&asoc->sent_queue)) {
4690 : /* nothing left in-flight */
4691 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4692 : /* stop all timers */
4693 0 : sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4694 : stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4695 0 : net->flight_size = 0;
4696 0 : net->partial_bytes_acked = 0;
4697 : }
4698 0 : asoc->total_flight = 0;
4699 0 : asoc->total_flight_count = 0;
4700 : }
4701 :
4702 : /**********************************/
4703 : /* Now what about shutdown issues */
4704 : /**********************************/
4705 0 : if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4706 : /* nothing left on sendqueue.. consider done */
4707 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4708 0 : sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4709 : asoc->peers_rwnd, 0, 0, a_rwnd);
4710 : }
4711 0 : asoc->peers_rwnd = a_rwnd;
4712 0 : if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4713 : /* SWS sender side engages */
4714 0 : asoc->peers_rwnd = 0;
4715 : }
4716 : /* clean up */
4717 0 : if ((asoc->stream_queue_cnt == 1) &&
4718 0 : ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4719 0 : (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4720 0 : (asoc->locked_on_sending)
4721 : ) {
4722 : struct sctp_stream_queue_pending *sp;
4723 : /* I may be in a state where we got
4724 : * all across.. but cannot write more due
4725 : * to a shutdown... we abort since the
4726 : * user did not indicate EOR in this case.
4727 : */
4728 0 : sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4729 : sctp_streamhead);
4730 0 : if ((sp) && (sp->length == 0)) {
4731 0 : asoc->locked_on_sending = NULL;
4732 0 : if (sp->msg_is_complete) {
4733 0 : asoc->stream_queue_cnt--;
4734 : } else {
4735 0 : asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4736 0 : asoc->stream_queue_cnt--;
4737 : }
4738 : }
4739 : }
4740 0 : if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4741 0 : (asoc->stream_queue_cnt == 0)) {
4742 0 : if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4743 : /* Need to abort here */
4744 : struct mbuf *op_err;
4745 :
4746 : abort_out_now:
4747 0 : *abort_now = 1;
4748 : /* XXX */
4749 0 : op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4750 0 : stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4751 0 : sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4752 0 : return;
4753 : } else {
4754 : struct sctp_nets *netp;
4755 :
4756 0 : if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4757 0 : (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4758 0 : SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4759 : }
4760 0 : SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4761 0 : SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4762 0 : sctp_stop_timers_for_shutdown(stcb);
4763 0 : if (asoc->alternate) {
4764 0 : netp = asoc->alternate;
4765 : } else {
4766 0 : netp = asoc->primary_destination;
4767 : }
4768 0 : sctp_send_shutdown(stcb, netp);
4769 0 : sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4770 : stcb->sctp_ep, stcb, netp);
4771 0 : sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4772 : stcb->sctp_ep, stcb, netp);
4773 : }
4774 0 : return;
4775 0 : } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4776 0 : (asoc->stream_queue_cnt == 0)) {
4777 : struct sctp_nets *netp;
4778 :
4779 0 : if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4780 0 : goto abort_out_now;
4781 : }
4782 0 : SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4783 0 : SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4784 0 : SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4785 0 : sctp_stop_timers_for_shutdown(stcb);
4786 0 : if (asoc->alternate) {
4787 0 : netp = asoc->alternate;
4788 : } else {
4789 0 : netp = asoc->primary_destination;
4790 : }
4791 0 : sctp_send_shutdown_ack(stcb, netp);
4792 0 : sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4793 : stcb->sctp_ep, stcb, netp);
4794 0 : return;
4795 : }
4796 : }
4797 : /*
4798 : * Now here we are going to recycle net_ack for a different use...
4799 : * HEADS UP.
4800 : */
4801 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4802 0 : net->net_ack = 0;
4803 : }
4804 :
4805 : /*
4806 : * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4807 : * to be done. Setting this_sack_lowest_newack to the cum_ack will
4808 : * automatically ensure that.
4809 : */
4810 0 : if ((asoc->sctp_cmt_on_off > 0) &&
4811 0 : SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4812 : (cmt_dac_flag == 0)) {
4813 0 : this_sack_lowest_newack = cum_ack;
4814 : }
4815 0 : if ((num_seg > 0) || (num_nr_seg > 0)) {
4816 0 : sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4817 : biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4818 : }
4819 : /* JRS - Use the congestion control given in the CC module */
4820 0 : asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4821 :
4822 : /* Now are we exiting loss recovery ? */
4823 0 : if (will_exit_fast_recovery) {
4824 : /* Ok, we must exit fast recovery */
4825 0 : asoc->fast_retran_loss_recovery = 0;
4826 : }
4827 0 : if ((asoc->sat_t3_loss_recovery) &&
4828 0 : SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4829 : /* end satellite t3 loss recovery */
4830 0 : asoc->sat_t3_loss_recovery = 0;
4831 : }
4832 : /*
4833 : * CMT Fast recovery
4834 : */
4835 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4836 0 : if (net->will_exit_fast_recovery) {
4837 : /* Ok, we must exit fast recovery */
4838 0 : net->fast_retran_loss_recovery = 0;
4839 : }
4840 : }
4841 :
4842 : /* Adjust and set the new rwnd value */
4843 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4844 0 : sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4845 0 : asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4846 : }
4847 0 : asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4848 : (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4849 0 : if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4850 : /* SWS sender side engages */
4851 0 : asoc->peers_rwnd = 0;
4852 : }
4853 0 : if (asoc->peers_rwnd > old_rwnd) {
4854 0 : win_probe_recovery = 1;
4855 : }
4856 :
4857 : /*
4858 : * Now we must setup so we have a timer up for anyone with
4859 : * outstanding data.
4860 : */
4861 0 : done_once = 0;
4862 : again:
4863 0 : j = 0;
4864 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4865 0 : if (win_probe_recovery && (net->window_probe)) {
4866 0 : win_probe_recovered = 1;
4867 : /*-
4868 : * Find first chunk that was used with
4869 : * window probe and clear the event. Put
4870 : * it back into the send queue as if has
4871 : * not been sent.
4872 : */
4873 0 : TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4874 0 : if (tp1->window_probe) {
4875 0 : sctp_window_probe_recovery(stcb, asoc, tp1);
4876 0 : break;
4877 : }
4878 : }
4879 : }
4880 0 : if (net->flight_size) {
4881 0 : j++;
4882 0 : if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4883 0 : sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4884 : stcb->sctp_ep, stcb, net);
4885 : }
4886 0 : if (net->window_probe) {
4887 0 : net->window_probe = 0;
4888 : }
4889 : } else {
4890 0 : if (net->window_probe) {
4891 : /* In window probes we must assure a timer is still running there */
4892 0 : if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4893 0 : sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4894 : stcb->sctp_ep, stcb, net);
4895 :
4896 : }
4897 0 : } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4898 0 : sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4899 : stcb, net,
4900 : SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4901 : }
4902 : }
4903 : }
4904 0 : if ((j == 0) &&
4905 0 : (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4906 0 : (asoc->sent_queue_retran_cnt == 0) &&
4907 0 : (win_probe_recovered == 0) &&
4908 : (done_once == 0)) {
4909 : /* huh, this should not happen unless all packets
4910 : * are PR-SCTP and marked to skip of course.
4911 : */
4912 0 : if (sctp_fs_audit(asoc)) {
4913 0 : TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4914 0 : net->flight_size = 0;
4915 : }
4916 0 : asoc->total_flight = 0;
4917 0 : asoc->total_flight_count = 0;
4918 0 : asoc->sent_queue_retran_cnt = 0;
4919 0 : TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4920 0 : if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4921 0 : sctp_flight_size_increase(tp1);
4922 0 : sctp_total_flight_increase(stcb, tp1);
4923 0 : } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4924 0 : sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4925 : }
4926 : }
4927 : }
4928 0 : done_once = 1;
4929 0 : goto again;
4930 : }
4931 : /*********************************************/
4932 : /* Here we perform PR-SCTP procedures */
4933 : /* (section 4.2) */
4934 : /*********************************************/
4935 : /* C1. update advancedPeerAckPoint */
4936 0 : if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4937 0 : asoc->advanced_peer_ack_point = cum_ack;
4938 : }
4939 : /* C2. try to further move advancedPeerAckPoint ahead */
4940 0 : if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4941 : struct sctp_tmit_chunk *lchk;
4942 : uint32_t old_adv_peer_ack_point;
4943 :
4944 0 : old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4945 0 : lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4946 : /* C3. See if we need to send a Fwd-TSN */
4947 0 : if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4948 : /*
4949 : * ISSUE with ECN, see FWD-TSN processing.
4950 : */
4951 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4952 0 : sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4953 : 0xee, cum_ack, asoc->advanced_peer_ack_point,
4954 : old_adv_peer_ack_point);
4955 : }
4956 0 : if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4957 0 : send_forward_tsn(stcb, asoc);
4958 0 : } else if (lchk) {
4959 : /* try to FR fwd-tsn's that get lost too */
4960 0 : if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4961 0 : send_forward_tsn(stcb, asoc);
4962 : }
4963 : }
4964 : }
4965 0 : if (lchk) {
4966 : /* Assure a timer is up */
4967 0 : sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4968 : stcb->sctp_ep, stcb, lchk->whoTo);
4969 : }
4970 : }
4971 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4972 0 : sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4973 : a_rwnd,
4974 : stcb->asoc.peers_rwnd,
4975 : stcb->asoc.total_flight,
4976 : stcb->asoc.total_output_queue_size);
4977 : }
4978 : }
4979 :
4980 : void
4981 0 : sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4982 : {
4983 : /* Copy cum-ack */
4984 : uint32_t cum_ack, a_rwnd;
4985 :
4986 0 : cum_ack = ntohl(cp->cumulative_tsn_ack);
4987 : /* Arrange so a_rwnd does NOT change */
4988 0 : a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4989 :
4990 : /* Now call the express sack handling */
4991 0 : sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4992 0 : }
4993 :
4994 : static void
4995 0 : sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4996 : struct sctp_stream_in *strmin)
4997 : {
4998 : struct sctp_queued_to_read *ctl, *nctl;
4999 : struct sctp_association *asoc;
5000 : uint16_t tt;
5001 :
5002 0 : asoc = &stcb->asoc;
5003 0 : tt = strmin->last_sequence_delivered;
5004 : /*
5005 : * First deliver anything prior to and including the stream no that
5006 : * came in
5007 : */
5008 0 : TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5009 0 : if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5010 : /* this is deliverable now */
5011 0 : TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5012 : /* subtract pending on streams */
5013 0 : asoc->size_on_all_streams -= ctl->length;
5014 0 : sctp_ucount_decr(asoc->cnt_on_all_streams);
5015 : /* deliver it to at least the delivery-q */
5016 0 : if (stcb->sctp_socket) {
5017 0 : sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5018 0 : sctp_add_to_readq(stcb->sctp_ep, stcb,
5019 : ctl,
5020 0 : &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5021 : }
5022 : } else {
5023 : /* no more delivery now. */
5024 : break;
5025 : }
5026 : }
5027 : /*
5028 : * now we must deliver things in queue the normal way if any are
5029 : * now ready.
5030 : */
5031 0 : tt = strmin->last_sequence_delivered + 1;
5032 0 : TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5033 0 : if (tt == ctl->sinfo_ssn) {
5034 : /* this is deliverable now */
5035 0 : TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5036 : /* subtract pending on streams */
5037 0 : asoc->size_on_all_streams -= ctl->length;
5038 0 : sctp_ucount_decr(asoc->cnt_on_all_streams);
5039 : /* deliver it to at least the delivery-q */
5040 0 : strmin->last_sequence_delivered = ctl->sinfo_ssn;
5041 0 : if (stcb->sctp_socket) {
5042 0 : sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5043 0 : sctp_add_to_readq(stcb->sctp_ep, stcb,
5044 : ctl,
5045 0 : &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5046 :
5047 : }
5048 0 : tt = strmin->last_sequence_delivered + 1;
5049 : } else {
5050 0 : break;
5051 : }
5052 : }
5053 0 : }
5054 :
/*
 * Purge all fragments for the given (stream, seq) pair from the
 * association's reassembly queue.  Invoked from FWD-TSN processing when
 * the peer has abandoned a message: any fragments still queued for that
 * ordered stream sequence number must be tossed, and the stream's
 * delivery state pulled forward past the abandoned message.
 */
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint16_t seq)
{
	struct sctp_tmit_chunk *chk, *nchk;

	/* For each one on here see if we need to toss it */
	/*
	 * For now large messages held on the reasmqueue that are
	 * complete will be tossed too. We could in theory do more
	 * work to spin through and stop after dumping one msg aka
	 * seeing the start of a new msg at the head, and call the
	 * delivery function... to see if it can be delivered... But
	 * for now we just dump everything on the queue.
	 */
	/* Safe traversal: entries matching (stream, seq) are removed in-loop. */
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		/* Do not toss it if on a different stream or
		 * marked for unordered delivery in which case
		 * the stream sequence number has no meaning.
		 */
		if ((chk->rec.data.stream_number != stream) ||
		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
			continue;
		}
		if (chk->rec.data.stream_seq == seq) {
			/* It needs to be tossed */
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			/*
			 * If this fragment is the newest TSN we have tossed,
			 * record it as the partial-delivery-API (PD-API)
			 * cursor so later processing knows where we stopped.
			 */
			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
				asoc->str_of_pdapi = chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
			}
			/* Account for the bytes leaving the reassembly queue. */
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);

			/* Clear up any stream problem */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
				/*
				 * We must dump forward this streams
				 * sequence number if the chunk is
				 * not unordered that is being
				 * skipped. There is a chance that
				 * if the peer does not include the
				 * last fragment in its FWD-TSN we
				 * WILL have a problem here since
				 * you would have a partial chunk in
				 * queue that may not be
				 * deliverable. Also if a Partial
				 * delivery API as started the user
				 * may get a partial chunk. The next
				 * read returning a new chunk...
				 * really ugly but I see no way
				 * around it! Maybe a notify??
				 */
				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
			}
			/* Release the fragment's data and the chunk itself. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
			/* If the stream_seq is > than the purging one, we are done */
			break;
		}
	}
}
5125 :
5126 :
5127 : void
5128 0 : sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5129 : struct sctp_forward_tsn_chunk *fwd,
5130 : int *abort_flag, struct mbuf *m ,int offset)
5131 : {
5132 : /* The pr-sctp fwd tsn */
5133 : /*
5134 : * here we will perform all the data receiver side steps for
5135 : * processing FwdTSN, as required in by pr-sctp draft:
5136 : *
5137 : * Assume we get FwdTSN(x):
5138 : *
5139 : * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5140 : * others we have 3) examine and update re-ordering queue on
5141 : * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5142 : * report where we are.
5143 : */
5144 : struct sctp_association *asoc;
5145 : uint32_t new_cum_tsn, gap;
5146 : unsigned int i, fwd_sz, m_size;
5147 : uint32_t str_seq;
5148 : struct sctp_stream_in *strm;
5149 : struct sctp_tmit_chunk *chk, *nchk;
5150 : struct sctp_queued_to_read *ctl, *sv;
5151 :
5152 0 : asoc = &stcb->asoc;
5153 0 : if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5154 0 : SCTPDBG(SCTP_DEBUG_INDATA1,
5155 : "Bad size too small/big fwd-tsn\n");
5156 0 : return;
5157 : }
5158 0 : m_size = (stcb->asoc.mapping_array_size << 3);
5159 : /*************************************************************/
5160 : /* 1. Here we update local cumTSN and shift the bitmap array */
5161 : /*************************************************************/
5162 0 : new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5163 :
5164 0 : if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5165 : /* Already got there ... */
5166 0 : return;
5167 : }
5168 : /*
5169 : * now we know the new TSN is more advanced, let's find the actual
5170 : * gap
5171 : */
5172 0 : SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5173 0 : asoc->cumulative_tsn = new_cum_tsn;
5174 0 : if (gap >= m_size) {
5175 0 : if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5176 : struct mbuf *op_err;
5177 : char msg[SCTP_DIAG_INFO_LEN];
5178 :
5179 : /*
5180 : * out of range (of single byte chunks in the rwnd I
5181 : * give out). This must be an attacker.
5182 : */
5183 0 : *abort_flag = 1;
5184 0 : snprintf(msg, sizeof(msg),
5185 : "New cum ack %8.8x too high, highest TSN %8.8x",
5186 : new_cum_tsn, asoc->highest_tsn_inside_map);
5187 0 : op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5188 0 : stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_33;
5189 0 : sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5190 0 : return;
5191 : }
5192 0 : SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5193 :
5194 0 : memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5195 0 : asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5196 0 : asoc->highest_tsn_inside_map = new_cum_tsn;
5197 :
5198 0 : memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5199 0 : asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5200 :
5201 0 : if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5202 0 : sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5203 : }
5204 : } else {
5205 : SCTP_TCB_LOCK_ASSERT(stcb);
5206 0 : for (i = 0; i <= gap; i++) {
5207 0 : if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5208 0 : !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5209 0 : SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5210 0 : if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5211 0 : asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5212 : }
5213 : }
5214 : }
5215 : }
5216 : /*************************************************************/
5217 : /* 2. Clear up re-assembly queue */
5218 : /*************************************************************/
5219 : /*
5220 : * First service it if pd-api is up, just in case we can progress it
5221 : * forward
5222 : */
5223 0 : if (asoc->fragmented_delivery_inprogress) {
5224 0 : sctp_service_reassembly(stcb, asoc);
5225 : }
5226 : /* For each one on here see if we need to toss it */
5227 : /*
5228 : * For now large messages held on the reasmqueue that are
5229 : * complete will be tossed too. We could in theory do more
5230 : * work to spin through and stop after dumping one msg aka
5231 : * seeing the start of a new msg at the head, and call the
5232 : * delivery function... to see if it can be delivered... But
5233 : * for now we just dump everything on the queue.
5234 : */
5235 0 : TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5236 0 : if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5237 : /* It needs to be tossed */
5238 0 : TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5239 0 : if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5240 0 : asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5241 0 : asoc->str_of_pdapi = chk->rec.data.stream_number;
5242 0 : asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5243 0 : asoc->fragment_flags = chk->rec.data.rcv_flags;
5244 : }
5245 0 : asoc->size_on_reasm_queue -= chk->send_size;
5246 0 : sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5247 :
5248 : /* Clear up any stream problem */
5249 0 : if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5250 0 : SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5251 : /*
5252 : * We must dump forward this streams
5253 : * sequence number if the chunk is
5254 : * not unordered that is being
5255 : * skipped. There is a chance that
5256 : * if the peer does not include the
5257 : * last fragment in its FWD-TSN we
5258 : * WILL have a problem here since
5259 : * you would have a partial chunk in
5260 : * queue that may not be
5261 : * deliverable. Also if a Partial
5262 : * delivery API as started the user
5263 : * may get a partial chunk. The next
5264 : * read returning a new chunk...
5265 : * really ugly but I see no way
5266 : * around it! Maybe a notify??
5267 : */
5268 0 : asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5269 : }
5270 0 : if (chk->data) {
5271 0 : sctp_m_freem(chk->data);
5272 0 : chk->data = NULL;
5273 : }
5274 0 : sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5275 : } else {
5276 : /*
5277 : * Ok we have gone beyond the end of the
5278 : * fwd-tsn's mark.
5279 : */
5280 : break;
5281 : }
5282 : }
5283 : /*******************************************************/
5284 : /* 3. Update the PR-stream re-ordering queues and fix */
5285 : /* delivery issues as needed. */
5286 : /*******************************************************/
5287 0 : fwd_sz -= sizeof(*fwd);
5288 0 : if (m && fwd_sz) {
5289 : /* New method. */
5290 : unsigned int num_str;
5291 : struct sctp_strseq *stseq, strseqbuf;
5292 0 : offset += sizeof(*fwd);
5293 :
5294 0 : SCTP_INP_READ_LOCK(stcb->sctp_ep);
5295 0 : num_str = fwd_sz / sizeof(struct sctp_strseq);
5296 0 : for (i = 0; i < num_str; i++) {
5297 : uint16_t st;
5298 0 : stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5299 : sizeof(struct sctp_strseq),
5300 : (uint8_t *)&strseqbuf);
5301 0 : offset += sizeof(struct sctp_strseq);
5302 0 : if (stseq == NULL) {
5303 0 : break;
5304 : }
5305 : /* Convert */
5306 0 : st = ntohs(stseq->stream);
5307 0 : stseq->stream = st;
5308 0 : st = ntohs(stseq->sequence);
5309 0 : stseq->sequence = st;
5310 :
5311 : /* now process */
5312 :
5313 : /*
5314 : * Ok we now look for the stream/seq on the read queue
5315 :			 * where it's not all delivered. If we find it we transmute the
5316 : * read entry into a PDI_ABORTED.
5317 : */
5318 0 : if (stseq->stream >= asoc->streamincnt) {
5319 : /* screwed up streams, stop! */
5320 0 : break;
5321 : }
5322 0 : if ((asoc->str_of_pdapi == stseq->stream) &&
5323 0 : (asoc->ssn_of_pdapi == stseq->sequence)) {
5324 : /* If this is the one we were partially delivering
5325 : * now then we no longer are. Note this will change
5326 : * with the reassembly re-write.
5327 : */
5328 0 : asoc->fragmented_delivery_inprogress = 0;
5329 : }
5330 0 : sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5331 0 : TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5332 0 : if ((ctl->sinfo_stream == stseq->stream) &&
5333 0 : (ctl->sinfo_ssn == stseq->sequence)) {
5334 0 : str_seq = (stseq->stream << 16) | stseq->sequence;
5335 0 : ctl->end_added = 1;
5336 0 : ctl->pdapi_aborted = 1;
5337 0 : sv = stcb->asoc.control_pdapi;
5338 0 : stcb->asoc.control_pdapi = ctl;
5339 0 : sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5340 : stcb,
5341 : SCTP_PARTIAL_DELIVERY_ABORTED,
5342 : (void *)&str_seq,
5343 : SCTP_SO_NOT_LOCKED);
5344 0 : stcb->asoc.control_pdapi = sv;
5345 0 : break;
5346 0 : } else if ((ctl->sinfo_stream == stseq->stream) &&
5347 0 : SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5348 : /* We are past our victim SSN */
5349 : break;
5350 : }
5351 : }
5352 0 : strm = &asoc->strmin[stseq->stream];
5353 0 : if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5354 : /* Update the sequence number */
5355 0 : strm->last_sequence_delivered = stseq->sequence;
5356 : }
5357 : /* now kick the stream the new way */
5358 : /*sa_ignore NO_NULL_CHK*/
5359 0 : sctp_kick_prsctp_reorder_queue(stcb, strm);
5360 : }
5361 0 : SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5362 : }
5363 : /*
5364 :	 * Now slide things forward.
5365 : */
5366 0 : sctp_slide_mapping_arrays(stcb);
5367 :
5368 0 : if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5369 : /* now lets kick out and check for more fragmented delivery */
5370 : /*sa_ignore NO_NULL_CHK*/
5371 0 : sctp_deliver_reasm_check(stcb, &stcb->asoc);
5372 : }
5373 : }
|