source: trunk/third/openssh/monitor_mm.c @ 18759

Revision 18759, 8.4 KB checked in by zacheiss, 22 years ago (diff)
This commit was generated by cvs2svn to compensate for changes in r18758, which included commits to RCS files with non-trunk default branches.
Line 
1/*
2 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "includes.h"
27RCSID("$OpenBSD: monitor_mm.c,v 1.8 2002/08/02 14:43:15 millert Exp $");
28
29#ifdef HAVE_SYS_MMAN_H
30#include <sys/mman.h>
31#endif
32
33#include "openbsd-compat/xmmap.h"
34#include "ssh.h"
35#include "xmalloc.h"
36#include "log.h"
37#include "monitor_mm.h"
38
39static int
40mm_compare(struct mm_share *a, struct mm_share *b)
41{
42        long diff = (char *)a->address - (char *)b->address;
43
44        if (diff == 0)
45                return (0);
46        else if (diff < 0)
47                return (-1);
48        else
49                return (1);
50}
51
/* Generate the red-black tree routines for mmtree, keyed on address. */
RB_GENERATE(mmtree, mm_share, next, mm_compare)

54static struct mm_share *
55mm_make_entry(struct mm_master *mm, struct mmtree *head,
56    void *address, size_t size)
57{
58        struct mm_share *tmp, *tmp2;
59
60        if (mm->mmalloc == NULL)
61                tmp = xmalloc(sizeof(struct mm_share));
62        else
63                tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
64        tmp->address = address;
65        tmp->size = size;
66
67        tmp2 = RB_INSERT(mmtree, head, tmp);
68        if (tmp2 != NULL)
69                fatal("mm_make_entry(%p): double address %p->%p(%lu)",
70                    mm, tmp2, address, (u_long)size);
71
72        return (tmp);
73}
74
75/* Creates a shared memory area of a certain size */
76
77struct mm_master *
78mm_create(struct mm_master *mmalloc, size_t size)
79{
80        void *address;
81        struct mm_master *mm;
82
83        if (mmalloc == NULL)
84                mm = xmalloc(sizeof(struct mm_master));
85        else
86                mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
87
88        /*
89         * If the memory map has a mm_master it can be completely
90         * shared including authentication between the child
91         * and the client.
92         */
93        mm->mmalloc = mmalloc;
94
95        address = xmmap(size);
96        if (address == MAP_FAILED)
97                fatal("mmap(%lu): %s", (u_long)size, strerror(errno));
98
99        mm->address = address;
100        mm->size = size;
101
102        RB_INIT(&mm->rb_free);
103        RB_INIT(&mm->rb_allocated);
104
105        mm_make_entry(mm, &mm->rb_free, address, size);
106
107        return (mm);
108}
109
110/* Frees either the allocated or the free list */
111
112static void
113mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
114{
115        struct mm_share *mms, *next;
116
117        for (mms = RB_ROOT(head); mms; mms = next) {
118                next = RB_NEXT(mmtree, head, mms);
119                RB_REMOVE(mmtree, head, mms);
120                if (mmalloc == NULL)
121                        xfree(mms);
122                else
123                        mm_free(mmalloc, mms);
124        }
125}
126
/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	/* Release all bookkeeping records before unmapping the area. */
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
#else
	/* No mmap(): this build cannot have created a shared area. */
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	/* Free the master record from whichever allocator owns it. */
	if (mm->mmalloc == NULL)
		xfree(mm);
	else
		mm_free(mm->mmalloc, mm);
}
148
149void *
150mm_xmalloc(struct mm_master *mm, size_t size)
151{
152        void *address;
153
154        address = mm_malloc(mm, size);
155        if (address == NULL)
156                fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
157        return (address);
158}
159
160
/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	/* Reject sizes whose round-up below would overflow size_t. */
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

	/* Round the request up to the allocation granularity. */
	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	/* First fit; note the free tree is ordered by address, not size. */
	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/*
	 * Shrink the free chunk from its front.
	 * Does not change order in RB tree: the remainder's address only
	 * grows, staying below the next entry's start.
	 */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	/* Fully consumed: drop the now-empty free record. */
	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}
202
/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	/* Look up the chunk by its start address. */
	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/*
	 * Find previous entry: open-coded in-order predecessor walk —
	 * rightmost node of the left subtree if one exists, otherwise
	 * the nearest ancestor of which we descend through a right child.
	 * Leaves prev == NULL when mms is the leftmost entry.
	 */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		/* Recursive call releases the record into the parent map. */
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	/* Adjacent: fold the successor into prev and drop its record. */
	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}
279
280static void
281mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
282    struct mm_master *mm, struct mm_master *mmold)
283{
284        struct mm_master *mmalloc = mm->mmalloc;
285        struct mm_share *mms, *new;
286
287        /* Sync free list */
288        RB_FOREACH(mms, mmtree, oldtree) {
289                /* Check the values */
290                mm_memvalid(mmold, mms, sizeof(struct mm_share));
291                mm_memvalid(mm, mms->address, mms->size);
292
293                new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
294                memcpy(new, mms, sizeof(struct mm_share));
295                RB_INSERT(mmtree, newtree, new);
296        }
297}
298
/*
 * Rebuilds the map `*pmm' so that its bookkeeping metadata lives in a
 * freshly created shared map.  On return *pmm points at the relocated
 * master and *pmmalloc at the new map that owns its metadata; the old
 * metadata map is destroyed.
 */
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	/* The old master record must lie inside the old metadata map. */
	mm_memvalid(mmold, mm, sizeof(*mm));

	/* New shared map of the same size to hold the metadata. */
	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	/* Save the old tree roots; mm_sync_list() re-links fresh nodes. */
	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}
334
335void
336mm_memvalid(struct mm_master *mm, void *address, size_t size)
337{
338        void *end = (u_char *)address + size;
339
340        if (address < mm->address)
341                fatal("mm_memvalid: address too small: %p", address);
342        if (end < address)
343                fatal("mm_memvalid: end < address: %p < %p", end, address);
344        if (end > (void *)((u_char *)mm->address + mm->size))
345                fatal("mm_memvalid: address too large: %p", address);
346}
Note: See TracBrowser for help on using the repository browser.