Line data Source code
1 : /*-
2 : * Copyright (c) 1982, 1986, 1988, 1993
3 : * The Regents of the University of California.
4 : * All rights reserved.
5 : *
6 : * Redistribution and use in source and binary forms, with or without
7 : * modification, are permitted provided that the following conditions
8 : * are met:
9 : * 1. Redistributions of source code must retain the above copyright
10 : * notice, this list of conditions and the following disclaimer.
11 : * 2. Redistributions in binary form must reproduce the above copyright
12 : * notice, this list of conditions and the following disclaimer in the
13 : * documentation and/or other materials provided with the distribution.
14 : * 3. Neither the name of the University nor the names of its contributors
15 : * may be used to endorse or promote products derived from this software
16 : * without specific prior written permission.
17 : *
18 : * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 : * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 : * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 : * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 : * SUCH DAMAGE.
29 : *
30 : */
31 :
32 : /*
33 : * __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
34 : * We initialize zones for mbufs and clusters, plus a zone for the external reference counts.
35 : *
36 : */
37 :
38 : #include <stdio.h>
39 : #include <string.h>
40 : /* #include <sys/param.h> This defines MSIZE 256 */
41 : #if !defined(SCTP_SIMPLE_ALLOCATOR)
42 : #include "umem.h"
43 : #endif
44 : #include "user_mbuf.h"
45 : #include "user_environment.h"
46 : #include "user_atomic.h"
47 : #include "netinet/sctp_pcb.h"
48 :
49 : struct mbstat mbstat;
50 : #define KIPC_MAX_LINKHDR 4 /* int: max length of link header (see sys/sysctl.h) */
51 : #define KIPC_MAX_PROTOHDR 5 /* int: max length of network header (see sys/sysctl.h)*/
52 : int max_linkhdr = KIPC_MAX_LINKHDR;
53 : int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */
54 :
55 : /*
56 : * Zones from which we allocate.
57 : */
58 : sctp_zone_t zone_mbuf;
59 : sctp_zone_t zone_clust;
60 : sctp_zone_t zone_ext_refcnt;
61 :
62 : /* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust
63 : * and mb_dtor_clust.
64 : * Note: I had to use struct clust_args as an encapsulation for an mbuf pointer.
65 : * struct mbuf * clust_mb_args; does not work.
66 : */
67 : struct clust_args clust_mb_args;
68 :
69 :
70 : /* __Userspace__
71 : * Local prototypes.
72 : */
73 : static int mb_ctor_mbuf(void *, void *, int);
74 : static int mb_ctor_clust(void *, void *, int);
75 : static void mb_dtor_mbuf(void *, void *);
76 : static void mb_dtor_clust(void *, void *);
77 :
78 :
79 : /***************** Functions taken from user_mbuf.h *************/
80 :
81 0 : static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
82 : {
83 0 : int flags = pkthdr;
84 0 : if (type == MT_NOINIT)
85 0 : return (0);
86 :
87 0 : m->m_next = NULL;
88 0 : m->m_nextpkt = NULL;
89 0 : m->m_len = 0;
90 0 : m->m_flags = flags;
91 0 : m->m_type = type;
92 0 : if (flags & M_PKTHDR) {
93 0 : m->m_data = m->m_pktdat;
94 0 : m->m_pkthdr.rcvif = NULL;
95 0 : m->m_pkthdr.len = 0;
96 0 : m->m_pkthdr.header = NULL;
97 0 : m->m_pkthdr.csum_flags = 0;
98 0 : m->m_pkthdr.csum_data = 0;
99 0 : m->m_pkthdr.tso_segsz = 0;
100 0 : m->m_pkthdr.ether_vtag = 0;
101 0 : SLIST_INIT(&m->m_pkthdr.tags);
102 : } else
103 0 : m->m_data = m->m_dat;
104 :
105 0 : return (0);
106 : }
107 :
108 : /* __Userspace__ */
109 : struct mbuf *
110 0 : m_get(int how, short type)
111 : {
112 : struct mbuf *mret;
113 : #if defined(SCTP_SIMPLE_ALLOCATOR)
114 : struct mb_args mbuf_mb_args;
115 :
116 : /* These argument assignments are not yet enclosed within
117 : * #if USING_MBUF_CONSTRUCTOR - #endif until mb_dtor_mbuf has been
118 : * thoroughly tested. See the comment there.
119 : */
120 0 : mbuf_mb_args.flags = 0;
121 0 : mbuf_mb_args.type = type;
122 : #endif
123 : /* Mbuf master zone, zone_mbuf, has already been
124 : * created in mbuf_init() */
125 0 : mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
126 : #if defined(SCTP_SIMPLE_ALLOCATOR)
127 0 : mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
128 : #endif
129 : /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
130 :
131 : /* There are cases when an object is available in the current CPU's
132 : * loaded magazine; in those cases the object's constructor is not applied.
133 : * If that is the case, we duplicate the constructor initialization here,
134 : * so that the mbuf is properly constructed before returning it.
135 : */
136 0 : if (mret) {
137 : #if USING_MBUF_CONSTRUCTOR
138 : if (! (mret->m_type == type) ) {
139 : mbuf_constructor_dup(mret, 0, type);
140 : }
141 : #else
142 0 : mbuf_constructor_dup(mret, 0, type);
143 : #endif
144 :
145 : }
146 0 : return mret;
147 : }
148 :
149 :
150 : /* __Userspace__ */
151 : struct mbuf *
152 0 : m_gethdr(int how, short type)
153 : {
154 : struct mbuf *mret;
155 : #if defined(SCTP_SIMPLE_ALLOCATOR)
156 : struct mb_args mbuf_mb_args;
157 :
158 : /* These argument assignments are not yet enclosed within
159 : * #if USING_MBUF_CONSTRUCTOR - #endif until mb_dtor_mbuf has been
160 : * thoroughly tested. See the comment there.
161 : */
162 0 : mbuf_mb_args.flags = M_PKTHDR;
163 0 : mbuf_mb_args.type = type;
164 : #endif
165 0 : mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
166 : #if defined(SCTP_SIMPLE_ALLOCATOR)
167 0 : mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
168 : #endif
169 : /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
170 : /* There are cases when an object is available in the current CPU's
171 : * loaded magazine; in those cases the object's constructor is not applied.
172 : * If that is the case, we duplicate the constructor initialization here,
173 : * so that the mbuf is properly constructed before returning it.
174 : */
175 0 : if (mret) {
176 : #if USING_MBUF_CONSTRUCTOR
177 : if (! ((mret->m_flags & M_PKTHDR) && (mret->m_type == type)) ) {
178 : mbuf_constructor_dup(mret, M_PKTHDR, type);
179 : }
180 : #else
181 0 : mbuf_constructor_dup(mret, M_PKTHDR, type);
182 : #endif
183 : }
184 0 : return mret;
185 : }
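
/* __Userspace__ Illustrative sketch (not compiled): m_gethdr() returns an
 * mbuf with M_PKTHDR set and data starting at m_pktdat, while m_get()
 * returns a plain mbuf with data at m_dat. Building a two-mbuf chain:
 */
#if 0
static struct mbuf *
example_chain(void)
{
	struct mbuf *hdr, *body;

	hdr = m_gethdr(M_NOWAIT, MT_DATA);
	body = m_get(M_NOWAIT, MT_DATA);
	if (hdr == NULL || body == NULL) {
		if (hdr != NULL)
			(void)m_free(hdr);
		if (body != NULL)
			(void)m_free(body);
		return (NULL);
	}
	hdr->m_next = body;
	hdr->m_pkthdr.len = hdr->m_len + body->m_len;	/* both 0 here */
	return (hdr);
}
#endif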
186 :
187 : /* __Userspace__ */
188 : struct mbuf *
189 0 : m_free(struct mbuf *m)
190 : {
191 :
192 0 : struct mbuf *n = m->m_next;
193 :
194 0 : if (m->m_flags & M_EXT)
195 0 : mb_free_ext(m);
196 0 : else if ((m->m_flags & M_NOFREE) == 0) {
197 : #if defined(SCTP_SIMPLE_ALLOCATOR)
198 0 : mb_dtor_mbuf(m, NULL);
199 : #endif
200 0 : SCTP_ZONE_FREE(zone_mbuf, m);
201 : }
202 : /*umem_cache_free(zone_mbuf, m);*/
203 0 : return (n);
204 : }
205 :
206 :
207 0 : static int clust_constructor_dup(caddr_t m_clust, struct mbuf* m)
208 : {
209 : u_int *refcnt;
210 : int type, size;
211 :
212 : /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
213 0 : type = EXT_CLUSTER;
214 0 : size = MCLBYTES;
215 :
216 0 : refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
217 : /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
218 0 : if (refcnt == NULL) {
219 : #if !defined(SCTP_SIMPLE_ALLOCATOR)
220 : umem_reap();
221 : #endif
222 0 : refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
223 : /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
224 : }
225 0 : *refcnt = 1;
226 0 : if (m != NULL) {
227 0 : m->m_ext.ext_buf = (caddr_t)m_clust;
228 0 : m->m_data = m->m_ext.ext_buf;
229 0 : m->m_flags |= M_EXT;
230 0 : m->m_ext.ext_free = NULL;
231 0 : m->m_ext.ext_args = NULL;
232 0 : m->m_ext.ext_size = size;
233 0 : m->m_ext.ext_type = type;
234 0 : m->m_ext.ref_cnt = refcnt;
235 : }
236 :
237 0 : return (0);
238 : }
239 :
240 :
241 :
242 : /* __Userspace__ */
243 : void
244 0 : m_clget(struct mbuf *m, int how)
245 : {
246 : caddr_t mclust_ret;
247 : #if defined(SCTP_SIMPLE_ALLOCATOR)
248 : struct clust_args clust_mb_args;
249 : #endif
250 0 : if (m->m_flags & M_EXT) {
251 0 : SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
252 : }
253 0 : m->m_ext.ext_buf = (char *)NULL;
254 : #if defined(SCTP_SIMPLE_ALLOCATOR)
255 0 : clust_mb_args.parent_mbuf = m;
256 : #endif
257 0 : mclust_ret = SCTP_ZONE_GET(zone_clust, char);
258 : #if defined(SCTP_SIMPLE_ALLOCATOR)
259 0 : mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
260 : #endif
261 : /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
262 : /*
263 : On a cluster allocation failure, call umem_reap() and retry.
264 : */
265 :
266 0 : if (mclust_ret == NULL) {
267 : #if !defined(SCTP_SIMPLE_ALLOCATOR)
271 : umem_reap();
272 : mclust_ret = SCTP_ZONE_GET(zone_clust, char);
273 : #endif
274 : /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
275 0 : if (NULL == mclust_ret) {
276 0 : SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
277 : }
278 : }
279 :
280 : #if USING_MBUF_CONSTRUCTOR
281 : if (m->m_ext.ext_buf == NULL) {
282 : clust_constructor_dup(mclust_ret, m);
283 : }
284 : #else
285 0 : clust_constructor_dup(mclust_ret, m);
286 : #endif
287 0 : }
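
/* __Userspace__ Illustrative sketch (not compiled): after a successful
 * m_clget(), the mbuf's data pointer aliases the cluster and the external
 * reference count starts at 1.
 */
#if 0
static void
clget_example(struct mbuf *m)
{
	m_clget(m, M_NOWAIT);
	if (m->m_flags & M_EXT) {
		KASSERT(m->m_data == m->m_ext.ext_buf, ("data not in cluster"));
		KASSERT(*(m->m_ext.ref_cnt) == 1, ("fresh cluster already shared"));
		KASSERT(m->m_ext.ext_size == MCLBYTES, ("unexpected cluster size"));
	}
}
#endif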
288 :
289 : /*
290 : * Unlink a tag from the list of tags associated with an mbuf.
291 : */
292 : static __inline void
293 0 : m_tag_unlink(struct mbuf *m, struct m_tag *t)
294 : {
295 :
296 0 : SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
297 0 : }
298 :
299 : /*
300 : * Reclaim resources associated with a tag.
301 : */
302 : static __inline void
303 0 : m_tag_free(struct m_tag *t)
304 : {
305 :
306 0 : (*t->m_tag_free)(t);
307 0 : }
308 :
309 : /*
310 : * Set up the contents of a tag. Note that this does not fill in the free
311 : * method; the caller is expected to do that.
312 : *
313 : * XXX probably should be called m_tag_init, but that was already taken.
314 : */
315 : static __inline void
316 0 : m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
317 : {
318 :
319 0 : t->m_tag_id = type;
320 0 : t->m_tag_len = len;
321 0 : t->m_tag_cookie = cookie;
322 0 : }
323 :
324 : /************ End functions from user_mbuf.h ******************/
325 :
326 :
327 :
328 : /************ End functions to substitute umem_cache_alloc and umem_cache_free **************/
329 :
330 : /* __Userspace__
331 : * TODO: mbuf_init must be called in the initialization routines
332 : * of the userspace stack.
333 : */
334 : void
335 0 : mbuf_init(void *dummy)
336 : {
337 :
338 : /*
339 : * __Userspace__ Configure UMA zones for Mbufs and Clusters.
340 : * (TODO: m_getcl() - using packet secondary zone).
341 : * There is no provision for trash_init and trash_fini in umem.
342 : *
343 : */
344 : /* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
345 : mb_ctor_mbuf, mb_dtor_mbuf, NULL,
346 : &mbuf_mb_args,
347 : NULL, 0);
348 : zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
349 : #if defined(SCTP_SIMPLE_ALLOCATOR)
350 0 : SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
351 : #else
352 : zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
353 : mb_ctor_mbuf, mb_dtor_mbuf, NULL,
354 : NULL,
355 : NULL, 0);
356 : #endif
357 : /*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
358 : NULL, NULL, NULL,
359 : NULL,
360 : NULL, 0);*/
361 0 : SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);
362 :
363 : /*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
364 : mb_ctor_clust, mb_dtor_clust, NULL,
365 : &clust_mb_args,
366 : NULL, 0);
367 : zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL,0);*/
368 : #if defined(SCTP_SIMPLE_ALLOCATOR)
369 0 : SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
370 : #else
371 : zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
372 : mb_ctor_clust, mb_dtor_clust, NULL,
373 : &clust_mb_args,
374 : NULL, 0);
375 : #endif
376 :
377 : /* uma_prealloc() goes here... */
378 :
379 : /* __Userspace__ Add umem_reap here for low memory situation?
380 : *
381 : */
382 :
383 :
384 : /*
385 : * [Re]set counters and local statistics knobs.
386 : *
387 : */
388 :
389 0 : mbstat.m_mbufs = 0;
390 0 : mbstat.m_mclusts = 0;
391 0 : mbstat.m_drain = 0;
392 0 : mbstat.m_msize = MSIZE;
393 0 : mbstat.m_mclbytes = MCLBYTES;
394 0 : mbstat.m_minclsize = MINCLSIZE;
395 0 : mbstat.m_mlen = MLEN;
396 0 : mbstat.m_mhlen = MHLEN;
397 0 : mbstat.m_numtypes = MT_NTYPES;
398 :
399 0 : mbstat.m_mcfail = mbstat.m_mpfail = 0;
400 0 : mbstat.sf_iocnt = 0;
401 0 : mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
402 :
403 0 : }
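
/* __Userspace__ Illustrative sketch (not compiled): once mbuf_init() has
 * created the zones above, a typical allocate/attach/release cycle looks
 * like this.
 */
#if 0
static void
mbuf_zones_example(void)
{
	struct mbuf *m;

	mbuf_init(NULL);                 /* sets up zone_mbuf, zone_clust, zone_ext_refcnt */
	m = m_gethdr(M_NOWAIT, MT_DATA); /* packet-header mbuf from zone_mbuf */
	if (m == NULL)
		return;
	m_clget(m, M_NOWAIT);            /* attach an MCLBYTES cluster from zone_clust */
	if (m->m_flags & M_EXT)
		memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
	m_freem(m);                      /* returns cluster and mbuf to their zones */
}
#endif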
404 :
405 :
406 :
407 : /*
408 : * __Userspace__
409 : *
410 : * Constructor for Mbuf master zone. We have a different constructor
411 : * for allocating the cluster.
412 : *
413 : * The 'arg' pointer points to a mb_args structure which
414 : * contains call-specific information required to support the
415 : * mbuf allocation API. See user_mbuf.h.
416 : *
417 : * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what
418 : * was passed when umem_cache_alloc was called.
419 : * TODO: Use UMEM_NOFAIL in umem_cache_alloc and also define a failure handler
420 : * and call umem_nofail_callback(my_failure_handler) in the stack initialization routines
421 : * The advantage of using UMEM_NOFAIL is that we don't have to check if umem_cache_alloc
422 : * was successful or not. The failure handler would take care of it, if we use the UMEM_NOFAIL
423 : * flag.
424 : *
425 : * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc)
426 : * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback.
427 : * It also mentions that umem_nofail_callback is Evolving.
428 : *
429 : */
430 : static int
431 0 : mb_ctor_mbuf(void *mem, void *arg, int flgs)
432 : {
433 : #if USING_MBUF_CONSTRUCTOR
434 : struct mbuf *m;
435 : struct mb_args *args;
436 :
437 : int flags;
438 : short type;
439 :
440 : m = (struct mbuf *)mem;
441 : args = (struct mb_args *)arg;
442 : flags = args->flags;
443 : type = args->type;
444 :
445 : /*
446 : * The mbuf is initialized later.
447 : *
448 : */
449 : if (type == MT_NOINIT)
450 : return (0);
451 :
452 : m->m_next = NULL;
453 : m->m_nextpkt = NULL;
454 : m->m_len = 0;
455 : m->m_flags = flags;
456 : m->m_type = type;
457 : if (flags & M_PKTHDR) {
458 : m->m_data = m->m_pktdat;
459 : m->m_pkthdr.rcvif = NULL;
460 : m->m_pkthdr.len = 0;
461 : m->m_pkthdr.header = NULL;
462 : m->m_pkthdr.csum_flags = 0;
463 : m->m_pkthdr.csum_data = 0;
464 : m->m_pkthdr.tso_segsz = 0;
465 : m->m_pkthdr.ether_vtag = 0;
466 : SLIST_INIT(&m->m_pkthdr.tags);
467 : } else
468 : m->m_data = m->m_dat;
469 : #endif
470 0 : return (0);
471 : }
472 :
473 :
474 : /*
475 : * __Userspace__
476 : * The Mbuf master zone destructor.
477 : * This would be called in response to umem_cache_destroy().
478 : * TODO: Recheck if this is what we want to do in this destructor.
479 : * (Note: the number of times mb_dtor_mbuf is called is equal to the
480 : * number of individual mbufs allocated from zone_mbuf.)
481 : */
482 : static void
483 0 : mb_dtor_mbuf(void *mem, void *arg)
484 : {
485 : struct mbuf *m;
486 :
487 0 : m = (struct mbuf *)mem;
488 0 : if ((m->m_flags & M_PKTHDR) != 0) {
489 0 : m_tag_delete_chain(m, NULL);
490 : }
491 0 : }
492 :
493 :
494 : /* __Userspace__
495 : * The Cluster zone constructor.
496 : *
497 : * Here the 'arg' pointer points to the Mbuf which we
498 : * are configuring cluster storage for. If 'arg' is
499 : * empty we allocate just the cluster without setting
500 : * the mbuf to it. See mbuf.h.
501 : */
502 : static int
503 0 : mb_ctor_clust(void *mem, void *arg, int flgs)
504 : {
505 :
506 : #if USING_MBUF_CONSTRUCTOR
507 : struct mbuf *m;
508 : struct clust_args * cla;
509 : u_int *refcnt;
510 : int type, size;
511 : sctp_zone_t zone;
512 :
513 : /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
514 : type = EXT_CLUSTER;
515 : zone = zone_clust;
516 : size = MCLBYTES;
517 :
518 : cla = (struct clust_args *)arg;
519 : m = cla->parent_mbuf;
520 :
521 : refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
522 : /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
523 : *refcnt = 1;
524 :
525 : if (m != NULL) {
526 : m->m_ext.ext_buf = (caddr_t)mem;
527 : m->m_data = m->m_ext.ext_buf;
528 : m->m_flags |= M_EXT;
529 : m->m_ext.ext_free = NULL;
530 : m->m_ext.ext_args = NULL;
531 : m->m_ext.ext_size = size;
532 : m->m_ext.ext_type = type;
533 : m->m_ext.ref_cnt = refcnt;
534 : }
535 : #endif
536 0 : return (0);
537 : }
538 :
539 : /* __Userspace__ */
540 : static void
541 0 : mb_dtor_clust(void *mem, void *arg)
542 : {
543 :
544 : /* mem is of type caddr_t. In sys/types.h we have typedef char * caddr_t; */
545 : /* mb_dtor_clust is called at time of umem_cache_destroy() (the number of times
546 : * mb_dtor_clust is called is equal to the number of individual mbufs allocated
547 : * from zone_clust. Similarly for mb_dtor_mbuf).
548 : * At this point the following:
549 : * struct mbuf *m;
550 : * m = (struct mbuf *)arg;
551 : * assert (*(m->m_ext.ref_cnt) == 0); is not meaningful since m->m_ext.ref_cnt = NULL;
552 : * has been done in mb_free_ext().
553 : */
554 :
555 0 : }
556 :
557 :
558 :
559 :
560 : /* Unlink and free a packet tag. */
561 : void
562 0 : m_tag_delete(struct mbuf *m, struct m_tag *t)
563 : {
564 : KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));
565 0 : m_tag_unlink(m, t);
566 0 : m_tag_free(t);
567 0 : }
568 :
569 :
570 : /* Unlink and free a packet tag chain, starting from given tag. */
571 : void
572 0 : m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
573 : {
574 :
575 : struct m_tag *p, *q;
576 :
577 : KASSERT(m, ("m_tag_delete_chain: null mbuf"));
578 0 : if (t != NULL)
579 0 : p = t;
580 : else
581 0 : p = SLIST_FIRST(&m->m_pkthdr.tags);
582 0 : if (p == NULL)
583 0 : return;
584 0 : while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
585 0 : m_tag_delete(m, q);
586 0 : m_tag_delete(m, p);
587 : }
588 :
589 : #if 0
590 : static void
591 : sctp_print_mbuf_chain(struct mbuf *m)
592 : {
593 : SCTPDBG(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
594 : for (; m; m = m->m_next) {
595 : SCTPDBG(SCTP_DEBUG_USR, "%p: m_len = %ld, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
596 : if (m->m_flags & M_EXT)
597 : SCTPDBG(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
598 : }
599 : }
600 : #endif
601 :
602 : /*
603 : * Free an entire chain of mbufs and associated external buffers, if
604 : * applicable.
605 : */
606 : void
607 0 : m_freem(struct mbuf *mb)
608 : {
609 0 : while (mb != NULL)
610 0 : mb = m_free(mb);
611 0 : }
612 :
613 : /*
614 : * __Userspace__
615 : * clean mbufs with M_EXT storage attached to them
616 : * if the reference count hits 1.
617 : */
618 : void
619 0 : mb_free_ext(struct mbuf *m)
620 : {
621 :
622 : int skipmbuf;
623 :
624 : KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
625 : KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
626 :
627 : /*
628 : * check if the header is embedded in the cluster
629 : */
630 0 : skipmbuf = (m->m_flags & M_NOFREE);
631 :
632 : /* Free the external attached storage if this
633 : * mbuf is the only reference to it.
634 : *__Userspace__ TODO: jumbo frames
635 : *
636 : */
637 : /* NOTE: We used to have the code that SCTP_DECREMENT_AND_CHECK_REFCOUNT
638 : reduces to here, but the IPHONE malloc commit changed it to compare
639 : against 0 instead of 1 (see next line). Why? That comparison caused
640 : a huge memory leak on Linux.
641 : */
642 : #ifdef IPHONE
643 : if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
644 : #else
645 0 : if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
646 : #endif
647 : {
648 0 : if (m->m_ext.ext_type == EXT_CLUSTER){
649 : #if defined(SCTP_SIMPLE_ALLOCATOR)
650 0 : mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
651 : #endif
652 0 : SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
653 0 : SCTP_ZONE_FREE(zone_ext_refcnt, (u_int*)m->m_ext.ref_cnt);
654 0 : m->m_ext.ref_cnt = NULL;
655 : }
656 : }
657 :
658 0 : if (skipmbuf)
659 0 : return;
660 :
661 :
662 : /* __Userspace__ Also freeing the storage for ref_cnt
663 : * Free this mbuf back to the mbuf zone with all m_ext
664 : * information purged.
665 : */
666 0 : m->m_ext.ext_buf = NULL;
667 0 : m->m_ext.ext_free = NULL;
668 0 : m->m_ext.ext_args = NULL;
669 0 : m->m_ext.ref_cnt = NULL;
670 0 : m->m_ext.ext_size = 0;
671 0 : m->m_ext.ext_type = 0;
672 0 : m->m_flags &= ~M_EXT;
673 : #if defined(SCTP_SIMPLE_ALLOCATOR)
674 0 : mb_dtor_mbuf(m, NULL);
675 : #endif
676 0 : SCTP_ZONE_FREE(zone_mbuf, m);
677 :
678 : /*umem_cache_free(zone_mbuf, m);*/
679 : }
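
/* __Userspace__ Illustrative sketch (not compiled): two mbufs sharing one
 * cluster. mb_free_ext() returns the cluster to zone_clust only when the
 * last reference is dropped; m_copym() (defined below, assumed declared in
 * user_mbuf.h) bumps the reference count instead of copying cluster data.
 */
#if 0
static void
shared_cluster_example(struct mbuf *m)	/* m is assumed to carry M_EXT */
{
	struct mbuf *share;

	share = m_copym(m, 0, m->m_len, M_NOWAIT);	/* *ref_cnt becomes 2 */
	if (share != NULL)
		m_freem(share);				/* back to 1, cluster kept */
	m_freem(m);					/* reaches 0, cluster freed */
}
#endif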
680 :
681 : /*
682 : * "Move" mbuf pkthdr from "from" to "to".
683 : * "from" must have M_PKTHDR set, and "to" must be empty.
684 : */
685 : void
686 0 : m_move_pkthdr(struct mbuf *to, struct mbuf *from)
687 : {
688 :
689 0 : to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
690 0 : if ((to->m_flags & M_EXT) == 0)
691 0 : to->m_data = to->m_pktdat;
692 0 : to->m_pkthdr = from->m_pkthdr; /* especially tags */
693 0 : SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
694 0 : from->m_flags &= ~M_PKTHDR;
695 0 : }
696 :
697 :
698 : /*
699 : * Rearrange an mbuf chain so that len bytes are contiguous
700 : * and in the data area of an mbuf (so that mtod and dtom
701 : * will work for a structure of size len). Returns the resulting
702 : * mbuf chain on success, frees it and returns null on failure.
703 : * If there is room, it will add up to max_protohdr-len extra bytes to the
704 : * contiguous region in an attempt to avoid being called next time.
705 : */
706 : struct mbuf *
707 0 : m_pullup(struct mbuf *n, int len)
708 : {
709 : struct mbuf *m;
710 : int count;
711 : int space;
712 :
713 : /*
714 : * If first mbuf has no cluster, and has room for len bytes
715 : * without shifting current data, pullup into it,
716 : * otherwise allocate a new mbuf to prepend to the chain.
717 : */
718 0 : if ((n->m_flags & M_EXT) == 0 &&
719 0 : n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
720 0 : if (n->m_len >= len)
721 0 : return (n);
722 0 : m = n;
723 0 : n = n->m_next;
724 0 : len -= m->m_len;
725 : } else {
726 0 : if (len > MHLEN)
727 0 : goto bad;
728 0 : MGET(m, M_NOWAIT, n->m_type);
729 0 : if (m == NULL)
730 0 : goto bad;
731 0 : m->m_len = 0;
732 0 : if (n->m_flags & M_PKTHDR)
733 0 : M_MOVE_PKTHDR(m, n);
734 : }
735 0 : space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
736 : do {
737 0 : count = min(min(max(len, max_protohdr), space), n->m_len);
738 0 : bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
739 0 : (u_int)count);
740 0 : len -= count;
741 0 : m->m_len += count;
742 0 : n->m_len -= count;
743 0 : space -= count;
744 0 : if (n->m_len)
745 0 : n->m_data += count;
746 : else
747 0 : n = m_free(n);
748 0 : } while (len > 0 && n);
749 0 : if (len > 0) {
750 0 : (void) m_free(m);
751 0 : goto bad;
752 : }
753 0 : m->m_next = n;
754 0 : return (m);
755 : bad:
756 0 : m_freem(n);
757 0 : mbstat.m_mpfail++; /* XXX: No consistency. */
758 0 : return (NULL);
759 : }
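
/* __Userspace__ Illustrative sketch (not compiled): the usual m_pullup()
 * idiom before casting with mtod(). "struct hdr_example" is a hypothetical
 * header type.
 */
#if 0
struct hdr_example {			/* hypothetical 8-byte header */
	u_int32_t field1, field2;
};

static void
pullup_example(struct mbuf *m)
{
	struct hdr_example *hp;

	if (m->m_len < (int)sizeof(struct hdr_example)) {
		m = m_pullup(m, sizeof(struct hdr_example));
		if (m == NULL)
			return;		/* chain was already freed by m_pullup() */
	}
	hp = mtod(m, struct hdr_example *);
	(void)hp;
}
#endif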
760 :
761 :
762 : static struct mbuf *
763 0 : m_dup1(struct mbuf *m, int off, int len, int wait)
764 : {
765 0 : struct mbuf *n = NULL;
766 : int copyhdr;
767 :
768 0 : if (len > MCLBYTES)
769 0 : return NULL;
770 0 : if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
771 0 : copyhdr = 1;
772 : else
773 0 : copyhdr = 0;
774 0 : if (len >= MINCLSIZE) {
775 : /* allocate the mbuf first; m_clget() must not be passed NULL;
776 : * the packet header, if any, is duplicated further below */
777 0 : n = (copyhdr == 1) ? m_gethdr(wait, m->m_type) : m_get(wait, m->m_type);
778 0 : if (n != NULL)
779 0 : m_clget(n, wait);
780 : } else {
781 0 : if (copyhdr == 1)
782 0 : n = m_gethdr(wait, m->m_type);
783 : else
784 0 : n = m_get(wait, m->m_type);
785 : }
786 0 : if (!n)
787 0 : return NULL; /* ENOBUFS */
788 :
789 0 : if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
790 0 : m_free(n);
791 0 : return NULL;
792 : }
793 0 : m_copydata(m, off, len, mtod(n, caddr_t));
794 0 : n->m_len = len;
795 0 : return n;
796 : }
797 :
798 :
799 : /* Taken from sys/kern/uipc_mbuf2.c */
800 : struct mbuf *
801 0 : m_pulldown(struct mbuf *m, int off, int len, int *offp)
802 : {
803 : struct mbuf *n, *o;
804 : int hlen, tlen, olen;
805 : int writable;
806 :
807 : /* check invalid arguments. */
808 : KASSERT(m, ("m == NULL in m_pulldown()"));
809 0 : if (len > MCLBYTES) {
810 0 : m_freem(m);
811 0 : return NULL; /* impossible */
812 : }
813 :
814 : #ifdef PULLDOWN_DEBUG
815 : {
816 : struct mbuf *t;
817 : SCTPDBG(SCTP_DEBUG_USR, "before:");
818 : for (t = m; t; t = t->m_next)
819 : SCTPDBG(SCTP_DEBUG_USR, " %d", t->m_len);
820 : SCTPDBG(SCTP_DEBUG_USR, "\n");
821 : }
822 : #endif
823 0 : n = m;
824 0 : while (n != NULL && off > 0) {
825 0 : if (n->m_len > off)
826 0 : break;
827 0 : off -= n->m_len;
828 0 : n = n->m_next;
829 : }
830 : /* be sure to point non-empty mbuf */
831 0 : while (n != NULL && n->m_len == 0)
832 0 : n = n->m_next;
833 0 : if (!n) {
834 0 : m_freem(m);
835 0 : return NULL; /* mbuf chain too short */
836 : }
837 :
838 0 : writable = 0;
839 0 : if ((n->m_flags & M_EXT) == 0 ||
840 0 : (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
841 0 : writable = 1;
842 :
843 : /*
844 : * the target data is on <n, off>.
845 : * if we got enough data on the mbuf "n", we're done.
846 : */
847 0 : if ((off == 0 || offp) && len <= n->m_len - off && writable)
848 0 : goto ok;
849 :
850 : /*
851 : * when len <= n->m_len - off and off != 0, it is a special case.
852 : * len bytes from <n, off> sits in single mbuf, but the caller does
853 : * not like the starting position (off).
854 : * chop the current mbuf into two pieces, set off to 0.
855 : */
856 0 : if (len <= n->m_len - off) {
857 0 : o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
858 0 : if (o == NULL) {
859 0 : m_freem(m);
860 0 : return NULL; /* ENOBUFS */
861 : }
862 0 : n->m_len = off;
863 0 : o->m_next = n->m_next;
864 0 : n->m_next = o;
865 0 : n = n->m_next;
866 0 : off = 0;
867 0 : goto ok;
868 : }
869 : /*
870 : * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
871 : * and construct contiguous mbuf with m_len == len.
872 : * note that hlen + tlen == len, and tlen > 0.
873 : */
874 0 : hlen = n->m_len - off;
875 0 : tlen = len - hlen;
876 :
877 : /*
878 : * ensure that we have enough trailing data on mbuf chain.
879 : * if not, we can do nothing about the chain.
880 : */
881 0 : olen = 0;
882 0 : for (o = n->m_next; o != NULL; o = o->m_next)
883 0 : olen += o->m_len;
884 0 : if (hlen + olen < len) {
885 0 : m_freem(m);
886 0 : return NULL; /* mbuf chain too short */
887 : }
888 :
889 : /*
890 : * easy cases first.
891 : * we need to use m_copydata() to get data from <n->m_next, 0>.
892 : */
893 0 : if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
894 0 : && writable) {
895 0 : m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
896 0 : n->m_len += tlen;
897 0 : m_adj(n->m_next, tlen);
898 0 : goto ok;
899 : }
900 :
901 0 : if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen
902 0 : && writable) {
903 0 : n->m_next->m_data -= hlen;
904 0 : n->m_next->m_len += hlen;
905 0 : bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
906 0 : n->m_len -= hlen;
907 0 : n = n->m_next;
908 0 : off = 0;
909 0 : goto ok;
910 : }
911 :
912 : /*
913 : * now, we need to do the hard way. don't m_copy as there's no room
914 : * on both end.
915 : */
916 : /* o = m_getcl(M_NOWAIT, m->m_type, 0); -- m_getcl() is not
917 : * implemented here, so allocate an mbuf and attach a cluster */
918 0 : o = m_get(M_NOWAIT, m->m_type);
919 0 : if (o != NULL && len > MLEN)
920 0 : m_clget(o, M_NOWAIT);
921 0 : if (!o) {
922 0 : m_freem(m);
923 0 : return NULL; /* ENOBUFS */
924 : }
925 : /* get hlen from <n, off> into <o, 0> */
926 0 : o->m_len = hlen;
927 0 : bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
928 0 : n->m_len -= hlen;
929 : /* get tlen from <n->m_next, 0> into <o, hlen> */
930 0 : m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
931 0 : o->m_len += tlen;
932 0 : m_adj(n->m_next, tlen);
933 0 : o->m_next = n->m_next;
934 0 : n->m_next = o;
935 0 : n = o;
936 0 : off = 0;
937 : ok:
938 : #ifdef PULLDOWN_DEBUG
939 : {
940 : struct mbuf *t;
941 : SCTPDBG(SCTP_DEBUG_USR, "after:");
942 : for (t = m; t; t = t->m_next)
943 : SCTPDBG(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
944 : SCTPDBG(SCTP_DEBUG_USR, " (off=%d)\n", off);
945 : }
946 : #endif
947 0 : if (offp)
948 0 : *offp = off;
949 0 : return n;
950 : }
951 :
952 : /*
953 : * Attach the cluster from *m to *n, set up m_ext in *n
954 : * and bump the refcount of the cluster.
955 : */
956 : static void
957 0 : mb_dupcl(struct mbuf *n, struct mbuf *m)
958 : {
959 : KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
960 : KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
961 : KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
962 :
963 0 : if (*(m->m_ext.ref_cnt) == 1)
964 0 : *(m->m_ext.ref_cnt) += 1;
965 : else
966 0 : atomic_add_int(m->m_ext.ref_cnt, 1);
967 0 : n->m_ext.ext_buf = m->m_ext.ext_buf;
968 0 : n->m_ext.ext_free = m->m_ext.ext_free;
969 0 : n->m_ext.ext_args = m->m_ext.ext_args;
970 0 : n->m_ext.ext_size = m->m_ext.ext_size;
971 0 : n->m_ext.ref_cnt = m->m_ext.ref_cnt;
972 0 : n->m_ext.ext_type = m->m_ext.ext_type;
973 0 : n->m_flags |= M_EXT;
974 0 : }
975 :
976 :
977 : /*
978 : * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
979 : * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
980 : * The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from caller.
981 : * Note that the copy is read-only, because clusters are not copied,
982 : * only their reference counts are incremented.
983 : */
984 :
985 : struct mbuf *
986 0 : m_copym(struct mbuf *m, int off0, int len, int wait)
987 : {
988 : struct mbuf *n, **np;
989 0 : int off = off0;
990 : struct mbuf *top;
991 0 : int copyhdr = 0;
992 :
993 : KASSERT(off >= 0, ("m_copym, negative off %d", off));
994 : KASSERT(len >= 0, ("m_copym, negative len %d", len));
995 :
996 0 : if (off == 0 && m->m_flags & M_PKTHDR)
997 0 : copyhdr = 1;
998 0 : while (off > 0) {
999 : KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1000 0 : if (off < m->m_len)
1001 0 : break;
1002 0 : off -= m->m_len;
1003 0 : m = m->m_next;
1004 : }
1005 0 : np = &top;
1006 0 : top = 0;
1007 0 : while (len > 0) {
1008 0 : if (m == NULL) {
1009 : KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
1010 0 : break;
1011 : }
1012 0 : if (copyhdr)
1013 0 : MGETHDR(n, wait, m->m_type);
1014 : else
1015 0 : MGET(n, wait, m->m_type);
1016 0 : *np = n;
1017 0 : if (n == NULL)
1018 0 : goto nospace;
1019 0 : if (copyhdr) {
1020 0 : if (!m_dup_pkthdr(n, m, wait))
1021 0 : goto nospace;
1022 0 : if (len == M_COPYALL)
1023 0 : n->m_pkthdr.len -= off0;
1024 : else
1025 0 : n->m_pkthdr.len = len;
1026 0 : copyhdr = 0;
1027 : }
1028 0 : n->m_len = min(len, m->m_len - off);
1029 0 : if (m->m_flags & M_EXT) {
1030 0 : n->m_data = m->m_data + off;
1031 0 : mb_dupcl(n, m);
1032 : } else
1033 0 : bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1034 0 : (u_int)n->m_len);
1035 0 : if (len != M_COPYALL)
1036 0 : len -= n->m_len;
1037 0 : off = 0;
1038 0 : m = m->m_next;
1039 0 : np = &n->m_next;
1040 : }
1041 0 : if (top == NULL)
1042 0 : mbstat.m_mcfail++; /* XXX: No consistency. */
1043 :
1044 0 : return (top);
1045 : nospace:
1046 0 : m_freem(top);
1047 0 : mbstat.m_mcfail++; /* XXX: No consistency. */
1048 0 : return (NULL);
1049 : }
1050 :
1051 :
1052 : int
1053 0 : m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
1054 : {
1055 0 : struct m_tag *p, *t, *tprev = NULL;
1056 :
1057 : KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
1058 0 : m_tag_delete_chain(to, NULL);
1059 0 : SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
1060 0 : t = m_tag_copy(p, how);
1061 0 : if (t == NULL) {
1062 0 : m_tag_delete_chain(to, NULL);
1063 0 : return 0;
1064 : }
1065 0 : if (tprev == NULL)
1066 0 : SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
1067 : else
1068 0 : SLIST_INSERT_AFTER(tprev, t, m_tag_link);
1069 0 : tprev = t;
1070 : }
1071 0 : return 1;
1072 : }
1073 :
1074 : /*
1075 : * Duplicate "from"'s mbuf pkthdr in "to".
1076 : * "from" must have M_PKTHDR set, and "to" must be empty.
1077 : * In particular, this does a deep copy of the packet tags.
1078 : */
1079 : int
1080 0 : m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
1081 : {
1082 :
1083 0 : to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
1084 0 : if ((to->m_flags & M_EXT) == 0)
1085 0 : to->m_data = to->m_pktdat;
1086 0 : to->m_pkthdr = from->m_pkthdr;
1087 0 : SLIST_INIT(&to->m_pkthdr.tags);
1088 0 : return (m_tag_copy_chain(to, from, MBTOM(how)));
1089 : }
1090 :
1091 : /* Copy a single tag. */
1092 : struct m_tag *
1093 0 : m_tag_copy(struct m_tag *t, int how)
1094 : {
1095 : struct m_tag *p;
1096 :
1097 : KASSERT(t, ("m_tag_copy: null tag"));
1098 0 : p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
1099 0 : if (p == NULL)
1100 0 : return (NULL);
1101 0 : bcopy(t + 1, p + 1, t->m_tag_len); /* Copy the data */
1102 0 : return p;
1103 : }
1104 :
1105 : /* Get a packet tag structure along with specified data following. */
1106 : struct m_tag *
1107 0 : m_tag_alloc(u_int32_t cookie, int type, int len, int wait)
1108 : {
1109 : struct m_tag *t;
1110 :
1111 0 : if (len < 0)
1112 0 : return NULL;
1113 0 : t = malloc(len + sizeof(struct m_tag));
1114 0 : if (t == NULL)
1115 0 : return NULL;
1116 0 : m_tag_setup(t, cookie, type, len);
1117 0 : t->m_tag_free = m_tag_free_default;
1118 0 : return t;
1119 : }
1120 :
1121 : /* Free a packet tag. */
1122 : void
1123 0 : m_tag_free_default(struct m_tag *t)
1124 : {
1125 0 : free(t);
1126 0 : }
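
/* __Userspace__ Illustrative sketch (not compiled): allocate a tag with a
 * 4-byte payload, attach it to a packet-header mbuf, and let
 * m_tag_delete_chain() reclaim it. The cookie and type values are
 * arbitrary for the example.
 */
#if 0
static void
tag_example(struct mbuf *m)	/* m is assumed to carry M_PKTHDR */
{
	struct m_tag *t;

	t = m_tag_alloc(0xdeadbeef, 1, sizeof(u_int32_t), M_NOWAIT);
	if (t == NULL)
		return;
	*(u_int32_t *)(t + 1) = 42;	/* payload follows the tag header */
	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
	m_tag_delete_chain(m, NULL);	/* unlinks and frees every tag */
}
#endif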
1127 :
1128 : /*
1129 : * Copy data from a buffer back into the indicated mbuf chain,
1130 : * starting "off" bytes from the beginning, extending the mbuf
1131 : * chain if necessary.
1132 : */
1133 : void
1134 0 : m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
1135 : {
1136 : int mlen;
1137 0 : struct mbuf *m = m0, *n;
1138 0 : int totlen = 0;
1139 :
1140 0 : if (m0 == NULL)
1141 0 : return;
1142 0 : while (off > (mlen = m->m_len)) {
1143 0 : off -= mlen;
1144 0 : totlen += mlen;
1145 0 : if (m->m_next == NULL) {
1146 0 : n = m_get(M_NOWAIT, m->m_type);
1147 0 : if (n == NULL)
1148 0 : goto out;
1149 0 : bzero(mtod(n, caddr_t), MLEN);
1150 0 : n->m_len = min(MLEN, len + off);
1151 0 : m->m_next = n;
1152 : }
1153 0 : m = m->m_next;
1154 : }
1155 0 : while (len > 0) {
1156 0 : mlen = min (m->m_len - off, len);
1157 0 : bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
1158 0 : cp += mlen;
1159 0 : len -= mlen;
1160 0 : mlen += off;
1161 0 : off = 0;
1162 0 : totlen += mlen;
1163 0 : if (len == 0)
1164 0 : break;
1165 0 : if (m->m_next == NULL) {
1166 0 : n = m_get(M_NOWAIT, m->m_type);
1167 0 : if (n == NULL)
1168 0 : break;
1169 0 : n->m_len = min(MLEN, len);
1170 0 : m->m_next = n;
1171 : }
1172 0 : m = m->m_next;
1173 : }
1174 0 : out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1175 0 : m->m_pkthdr.len = totlen;
1176 : }
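
/* __Userspace__ Illustrative sketch (not compiled): m_copyback() writes
 * through the chain starting at the given offset and, when the data runs
 * past the existing mbufs, extends the chain with fresh ones.
 */
#if 0
static void
copyback_example(struct mbuf *m0)
{
	char patch[4] = { 1, 2, 3, 4 };

	m_copyback(m0, 10, sizeof(patch), patch);	/* overwrite bytes 10..13 */
}
#endif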
1177 :
1178 :
1179 : /*
1180 : * Lesser-used path for M_PREPEND:
1181 : * allocate new mbuf to prepend to chain,
1182 : * copy junk along.
1183 : */
1184 : struct mbuf *
1185 0 : m_prepend(struct mbuf *m, int len, int how)
1186 : {
1187 : struct mbuf *mn;
1188 :
1189 0 : if (m->m_flags & M_PKTHDR)
1190 0 : MGETHDR(mn, how, m->m_type);
1191 : else
1192 0 : MGET(mn, how, m->m_type);
1193 0 : if (mn == NULL) {
1194 0 : m_freem(m);
1195 0 : return (NULL);
1196 : }
1197 0 : if (m->m_flags & M_PKTHDR)
1198 0 : M_MOVE_PKTHDR(mn, m);
1199 0 : mn->m_next = m;
1200 0 : m = mn;
1201 0 : if(m->m_flags & M_PKTHDR) {
1202 0 : if (len < MHLEN)
1203 0 : MH_ALIGN(m, len);
1204 : } else {
1205 0 : if (len < MLEN)
1206 0 : M_ALIGN(m, len);
1207 : }
1208 0 : m->m_len = len;
1209 0 : return (m);
1210 : }
1211 :
1212 : /*
1213 : * Copy data from an mbuf chain starting "off" bytes from the beginning,
1214 : * continuing for "len" bytes, into the indicated buffer.
1215 : */
1216 : void
1217 0 : m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1218 : {
1219 : u_int count;
1220 :
1221 : KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1222 : KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1223 0 : while (off > 0) {
1224 : KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1225 0 : if (off < m->m_len)
1226 0 : break;
1227 0 : off -= m->m_len;
1228 0 : m = m->m_next;
1229 : }
1230 0 : while (len > 0) {
1231 : KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1232 0 : count = min(m->m_len - off, len);
1233 0 : bcopy(mtod(m, caddr_t) + off, cp, count);
1234 0 : len -= count;
1235 0 : cp += count;
1236 0 : off = 0;
1237 0 : m = m->m_next;
1238 : }
1239 0 : }
1240 :
1241 :
1242 : /*
1243 : * Concatenate mbuf chain n to m.
1244 : * Both chains must be of the same type (e.g. MT_DATA).
1245 : * Any m_pkthdr is not updated.
1246 : */
1247 : void
1248 0 : m_cat(struct mbuf *m, struct mbuf *n)
1249 : {
1250 0 : while (m->m_next)
1251 0 : m = m->m_next;
1252 0 : while (n) {
1253 0 : if (m->m_flags & M_EXT ||
1254 0 : m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1255 : /* just join the two chains */
1256 0 : m->m_next = n;
1257 0 : return;
1258 : }
1259 : /* splat the data from one into the other */
1260 0 : bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, (u_int)n->m_len);
1261 0 : m->m_len += n->m_len;
1262 0 : n = m_free(n);
1263 : }
1264 : }
1265 :
1266 :
1267 : void
1268 0 : m_adj(struct mbuf *mp, int req_len)
1269 : {
1270 0 : int len = req_len;
1271 : struct mbuf *m;
1272 : int count;
1273 :
1274 0 : if ((m = mp) == NULL)
1275 0 : return;
1276 0 : if (len >= 0) {
1277 : /*
1278 : * Trim from head.
1279 : */
1280 0 : while (m != NULL && len > 0) {
1281 0 : if (m->m_len <= len) {
1282 0 : len -= m->m_len;
1283 0 : m->m_len = 0;
1284 0 : m = m->m_next;
1285 : } else {
1286 0 : m->m_len -= len;
1287 0 : m->m_data += len;
1288 0 : len = 0;
1289 : }
1290 : }
1291 0 : m = mp;
1292 0 : if (mp->m_flags & M_PKTHDR)
1293 0 : m->m_pkthdr.len -= (req_len - len);
1294 : } else {
1295 : /*
1296 : * Trim from tail. Scan the mbuf chain,
1297 : * calculating its length and finding the last mbuf.
1298 : * If the adjustment only affects this mbuf, then just
1299 : * adjust and return. Otherwise, rescan and truncate
1300 : * after the remaining size.
1301 : */
1302 0 : len = -len;
1303 0 : count = 0;
1304 : for (;;) {
1305 0 : count += m->m_len;
1306 0 : if (m->m_next == (struct mbuf *)0)
1307 0 : break;
1308 0 : m = m->m_next;
1309 : }
1310 0 : if (m->m_len >= len) {
1311 0 : m->m_len -= len;
1312 0 : if (mp->m_flags & M_PKTHDR)
1313 0 : mp->m_pkthdr.len -= len;
1314 0 : return;
1315 : }
1316 0 : count -= len;
1317 0 : if (count < 0)
1318 0 : count = 0;
1319 : /*
1320 : * Correct length for chain is "count".
1321 : * Find the mbuf with last data, adjust its length,
1322 : * and toss data from remaining mbufs on chain.
1323 : */
1324 0 : m = mp;
1325 0 : if (m->m_flags & M_PKTHDR)
1326 0 : m->m_pkthdr.len = count;
1327 0 : for (; m; m = m->m_next) {
1328 0 : if (m->m_len >= count) {
1329 0 : m->m_len = count;
1330 0 : if (m->m_next != NULL) {
1331 0 : m_freem(m->m_next);
1332 0 : m->m_next = NULL;
1333 : }
1334 0 : break;
1335 : }
1336 0 : count -= m->m_len;
1337 : }
1338 : }
1339 : }
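
/* __Userspace__ Illustrative sketch (not compiled): a positive req_len
 * trims from the head of the chain, a negative one from the tail.
 */
#if 0
static void
adj_example(struct mbuf *m)
{
	m_adj(m, 12);	/* drop a 12-byte header from the front */
	m_adj(m, -4);	/* drop a 4-byte trailer from the end */
}
#endif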
1340 :
1341 :
1342 : /* m_split is used within sctp_handle_cookie_echo. */
1343 :
1344 : /*
1345 : * Partition an mbuf chain in two pieces, returning the tail --
1346 : * all but the first len0 bytes. In case of failure, it returns NULL and
1347 : * attempts to restore the chain to its original state.
1348 : *
1349 : * Note that the resulting mbufs might be read-only, because the new
1350 : * mbuf can end up sharing an mbuf cluster with the original mbuf if
1351 : * the "breaking point" happens to lie within a cluster mbuf. Use the
1352 : * M_WRITABLE() macro to check for this case.
1353 : */
1354 : struct mbuf *
1355 0 : m_split(struct mbuf *m0, int len0, int wait)
1356 : {
1357 : struct mbuf *m, *n;
1358 0 : u_int len = len0, remain;
1359 :
1360 : /* MBUF_CHECKSLEEP(wait); */
1361 0 : for (m = m0; m && (int)len > m->m_len; m = m->m_next)
1362 0 : len -= m->m_len;
1363 0 : if (m == NULL)
1364 0 : return (NULL);
1365 0 : remain = m->m_len - len;
1366 0 : if (m0->m_flags & M_PKTHDR) {
1367 0 : MGETHDR(n, wait, m0->m_type);
1368 0 : if (n == NULL)
1369 0 : return (NULL);
1370 0 : n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1371 0 : n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1372 0 : m0->m_pkthdr.len = len0;
1373 0 : if (m->m_flags & M_EXT)
1374 0 : goto extpacket;
1375 0 : if (remain > MHLEN) {
1376 : /* m can't be the lead packet */
1377 0 : MH_ALIGN(n, 0);
1378 0 : n->m_next = m_split(m, len, wait);
1379 0 : if (n->m_next == NULL) {
1380 0 : (void) m_free(n);
1381 0 : return (NULL);
1382 : } else {
1383 0 : n->m_len = 0;
1384 0 : return (n);
1385 : }
1386 : } else
1387 0 : MH_ALIGN(n, remain);
1388 0 : } else if (remain == 0) {
1389 0 : n = m->m_next;
1390 0 : m->m_next = NULL;
1391 0 : return (n);
1392 : } else {
1393 0 : MGET(n, wait, m->m_type);
1394 0 : if (n == NULL)
1395 0 : return (NULL);
1396 0 : M_ALIGN(n, remain);
1397 : }
1398 : extpacket:
1399 0 : if (m->m_flags & M_EXT) {
1400 0 : n->m_data = m->m_data + len;
1401 0 : mb_dupcl(n, m);
1402 : } else {
1403 0 : bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1404 : }
1405 0 : n->m_len = remain;
1406 0 : m->m_len = len;
1407 0 : n->m_next = m->m_next;
1408 0 : m->m_next = NULL;
1409 0 : return (n);
1410 : }
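
/* __Userspace__ Illustrative sketch (not compiled): split a chain after
 * 100 bytes. On success the tail holds everything past that point and the
 * original chain has been truncated in place.
 */
#if 0
static struct mbuf *
split_example(struct mbuf *m0)
{
	struct mbuf *tail;

	tail = m_split(m0, 100, M_NOWAIT);
	if (tail == NULL)
		return (NULL);		/* m0 restored on failure */
	return (tail);			/* caller now owns two chains */
}
#endif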
1411 :
1412 :
1413 :
1414 :
1415 : int
1416 0 : pack_send_buffer(caddr_t buffer, struct mbuf *mb) {
1417 :
1418 : int count_to_copy;
1419 0 : int total_count_copied = 0;
1420 0 : int offset = 0;
1421 :
1422 : do {
1423 0 : count_to_copy = mb->m_len;
1424 0 : bcopy(mtod(mb, caddr_t), buffer+offset, count_to_copy);
1425 0 : offset += count_to_copy;
1426 0 : total_count_copied += count_to_copy;
1427 0 : mb = mb->m_next;
1428 0 : } while(mb);
1429 :
1430 0 : return (total_count_copied);
1431 : }
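
/* __Userspace__ Illustrative sketch (not compiled): pack_send_buffer()
 * performs no bounds checking, so the destination buffer must be sized to
 * the whole chain first.
 */
#if 0
static caddr_t
flatten_example(struct mbuf *mb)
{
	struct mbuf *m;
	int total = 0;
	caddr_t buf;

	for (m = mb; m != NULL; m = m->m_next)
		total += m->m_len;
	buf = malloc(total);
	if (buf != NULL)
		(void)pack_send_buffer(buf, mb);
	return (buf);
}
#endif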
|