/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vm_mem.c	7.1 (Berkeley) 6/5/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "cmap.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "text.h"
#include "vm.h"
#include "file.h"
#include "inode.h"
#include "buf.h"
#include "mount.h"
#include "trace.h"
#include "map.h"
#include "kernel.h"

/*
 * Allocate memory, always succeeding
 * by jolting the page-out daemon
 * as needed to obtain page frames.
 * To be used in conjunction with vmemfree().
 */
vmemall(pte, size, p, type)
	register struct pte *pte;
	int size;
	struct proc *p;
	int type;
{
	register int m;

	if (size <= 0 || size > maxmem)
		panic("vmemall size");
	while (size > 0) {
		if (freemem < desfree)
			outofmem();
		while (freemem == 0)
			sleep((caddr_t)&freemem, PSWP+2);
		m = imin(size, freemem);
		(void) memall(pte, m, p, type);
		size -= m;
		pte += m;
	}
	if (freemem < desfree)
		outofmem();
	/*
	 * Always succeeds, but returns success explicitly for
	 * the benefit of callers such as vgetu and vgetpt,
	 * which call either memall or vmemall depending on context.
	 */
	return (1);
}
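
/*
 * Illustrative sketch (not compiled): how a caller might pair
 * vmemall() with vmemfree().  The function and its arguments are
 * hypothetical; real callers are paths such as vgetu and vgetpt,
 * which hold page table entries for the range being filled.
 * npg must be a multiple of CLSIZE, or vmemfree() will panic.
 */
#ifdef notdef
examplegrow(p, pte, npg)
	struct proc *p;
	register struct pte *pte;
	int npg;
{

	(void) vmemall(pte, npg, p, CDATA);	/* cannot fail; may sleep */
	/* ... fill in or page in the new frames here ... */
	(void) vmemfree(pte, npg);		/* release the frames */
}
#endif /* notdef */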

/*
 * Free valid and reclaimable page frames belonging to the
 * count pages starting at pte.  If a page is valid
 * or reclaimable and locked (but not a system page), then
 * we simply mark the page as c_gone and let the pageout
 * daemon free the page when it is through with it.
 * If a page is reclaimable, and already in the free list, then
 * we mark the page as c_gone, and (of course) don't free it.
 *
 * Determines the largest contiguous cluster of
 * valid pages and frees them in one call to memfree.
 */
vmemfree(pte, count)
	register struct pte *pte;
	register int count;
{
	register struct cmap *c;
	register struct pte *spte;
	register int j;
	int size, pcnt;
#ifdef notdef
	int fileno;
#endif

	if (count % CLSIZE)
		panic("vmemfree");
	for (size = 0, pcnt = 0; count > 0; pte += CLSIZE, count -= CLSIZE) {
		if (pte->pg_fod == 0 && pte->pg_pfnum) {
			c = &cmap[pgtocm(pte->pg_pfnum)];
			pcnt += CLSIZE;
			if (c->c_lock && c->c_type != CSYS) {
				for (j = 0; j < CLSIZE; j++)
					*(int *)(pte+j) &= PG_PROT;
				c->c_gone = 1;
				goto free;
			}
			if (c->c_free) {
				pcnt -= CLSIZE;
				for (j = 0; j < CLSIZE; j++)
					*(int *)(pte+j) &= PG_PROT;
				if (c->c_type == CTEXT)
					distpte(&text[c->c_ndx], c->c_page,
					    pte);
				c->c_gone = 1;
				goto free;
			}
			if (size == 0)
				spte = pte;
			size += CLSIZE;
			continue;
		}
#ifdef notdef
		/* Don't do anything with mapped ptes */
		if (pte->pg_fod && pte->pg_v)
			goto free;
#endif
		if (pte->pg_fod) {
#ifdef notdef
			fileno = ((struct fpte *)pte)->pg_fileno;
			if (fileno < NOFILE)
				panic("vmemfree vread");
#endif
			for (j = 0; j < CLSIZE; j++)
				*(int *)(pte+j) &= PG_PROT;
		}
free:
		if (size) {
			memfree(spte, size, 1);
			size = 0;
		}
	}
	if (size)
		memfree(spte, size, 1);
	return (pcnt);
}

/*
 * Unlink a page frame from the free list -
 *
 * Performed if the page being reclaimed
 * is in the free list.
 */
munlink(c)
	register struct cmap *c;
{
	register int next, prev;

	next = c->c_next;
	prev = c->c_prev;
	cmap[prev].c_next = next;
	cmap[next].c_prev = prev;
	c->c_free = 0;
	if (freemem < minfree)
		outofmem();
	freemem -= CLSIZE;
}
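
/*
 * Illustrative sketch (not compiled): walking the free list that
 * memall() and memfree() maintain.  Links are cmap[] indices rather
 * than pointers, with cmap[CMHEAD] serving as the header of the
 * doubly linked ring.  The function name is hypothetical; a real
 * caller would hold splimp() across the walk.
 */
#ifdef notdef
countfree()
{
	register int i, n;

	n = 0;
	for (i = cmap[CMHEAD].c_next; i != CMHEAD; i = cmap[i].c_next)
		n += CLSIZE;		/* each entry covers one cluster */
	return (n);			/* should equal freemem */
}
#endif /* notdef */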

/*
 * Allocate memory -
 *
 * The free list appears as a doubly linked list
 * in the core map with cmap[0] serving as a header.
 */
memall(pte, size, p, type)
	register struct pte *pte;
	int size;
	struct proc *p;
	int type;
{
	register struct cmap *c;
	register struct pte *rpte;
	register struct proc *rp;
	int i, j, next, curpos;
	unsigned pf;
	struct cmap *c1, *c2;
	int s;

	if (size % CLSIZE)
		panic("memall");
	s = splimp();
	if (size > freemem) {
		splx(s);
		return (0);
	}
	trace(TR_MALL, size, u.u_procp->p_pid);
	for (i = size; i > 0; i -= CLSIZE) {
		curpos = cmap[CMHEAD].c_next;
		c = &cmap[curpos];
		freemem -= CLSIZE;
		next = c->c_next;
		cmap[CMHEAD].c_next = next;
		cmap[next].c_prev = CMHEAD;
		if (c->c_free == 0)
			panic("dup mem alloc");
		if (cmtopg(curpos) > maxfree)
			panic("bad mem alloc");
		if (c->c_gone == 0 && c->c_type != CSYS) {
			if (c->c_type == CTEXT)
				rp = text[c->c_ndx].x_caddr;
			else
				rp = &proc[c->c_ndx];
			while (rp->p_flag & SNOVM)
				rp = rp->p_xlink;
			switch (c->c_type) {

			case CTEXT:
				rpte = tptopte(rp, c->c_page);
				break;

			case CDATA:
				rpte = dptopte(rp, c->c_page);
				break;

			case CSTACK:
				rpte = sptopte(rp, c->c_page);
				break;
			}
			zapcl(rpte, pg_pfnum) = 0;
			if (c->c_type == CTEXT)
				distpte(&text[c->c_ndx], c->c_page, rpte);
		}
		switch (type) {

		case CSYS:
			c->c_ndx = p->p_ndx;
			break;

		case CTEXT:
			c->c_page = vtotp(p, ptetov(p, pte));
			c->c_ndx = p->p_textp - &text[0];
			break;

		case CDATA:
			c->c_page = vtodp(p, ptetov(p, pte));
			c->c_ndx = p->p_ndx;
			break;

		case CSTACK:
			c->c_page = vtosp(p, ptetov(p, pte));
			c->c_ndx = p->p_ndx;
			break;
		}
		if (c->c_blkno) {
			/*
			 * This is very like munhash(), except
			 * that we really don't want to bother
			 * to calculate a dev to pass to it.
			 */
			j = CMHASH(c->c_blkno);
			c1 = &cmap[cmhash[j]];
			if (c1 == c)
				cmhash[j] = c1->c_hlink;
			else {
				for (;;) {
					if (c1 == ecmap)
						panic("memall ecmap");
					c2 = c1;
					c1 = &cmap[c2->c_hlink];
					if (c1 == c)
						break;
				}
				c2->c_hlink = c1->c_hlink;
			}
			if (mfind(c->c_mdev == MSWAPX ?
			    swapdev : mount[c->c_mdev].m_dev,
			    (daddr_t)(u_long)c->c_blkno))
				panic("memall mfind");
			c1->c_mdev = 0;
			c1->c_blkno = 0;
			c1->c_hlink = 0;
		}
		pf = cmtopg(curpos);
		for (j = 0; j < CLSIZE; j++)
			*(int *)pte++ = pf++;
		c->c_free = 0;
		c->c_gone = 0;
		if (c->c_intrans || c->c_want)
			panic("memall intrans|want");
		c->c_lock = 1;
		c->c_type = type;
	}
	splx(s);
	return (size);
}
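
/*
 * Illustrative sketch (not compiled): the reverse mapping memall()
 * performs inline when it reclaims a frame that still belongs to a
 * process, distilled into a hypothetical helper.  The c_type, c_ndx
 * and c_page fields identify the owner and the page within its
 * text, data or stack segment; SNOVM owners are skipped in favor of
 * a sharing process, as in memall() above.
 */
#ifdef notdef
struct pte *
cmtopte(c)
	register struct cmap *c;
{
	register struct proc *rp;

	if (c->c_type == CTEXT)
		rp = text[c->c_ndx].x_caddr;
	else
		rp = &proc[c->c_ndx];
	while (rp->p_flag & SNOVM)
		rp = rp->p_xlink;
	switch (c->c_type) {

	case CTEXT:
		return (tptopte(rp, c->c_page));

	case CDATA:
		return (dptopte(rp, c->c_page));

	case CSTACK:
		return (sptopte(rp, c->c_page));
	}
	return ((struct pte *)0);
}
#endif /* notdef */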

/*
 * Free memory -
 *
 * The page frames being returned are inserted
 * at the head or tail of the free list depending
 * on whether there is any possible future use of them.
 *
 * If the freemem count is low (less than CLSIZE * KLMAX
 * pages), processes sleeping for memory are awakened.
 */
memfree(pte, size, detach)
	register struct pte *pte;
	register int size;
	int detach;
{
	register int i, j, prev, next;
	register struct cmap *c;
	int s;

	if (size % CLSIZE)
		panic("memfree");
	if (freemem < CLSIZE * KLMAX)
		wakeup((caddr_t)&freemem);
	while (size > 0) {
		size -= CLSIZE;
		i = pte->pg_pfnum;
		if (i < firstfree || i > maxfree)
			panic("bad mem free");
		i = pgtocm(i);
		c = &cmap[i];
		if (c->c_free)
			panic("dup mem free");
		if (detach && c->c_type != CSYS) {
			for (j = 0; j < CLSIZE; j++)
				*(int *)(pte+j) &= PG_PROT;
			c->c_gone = 1;
		}
		s = splimp();
		if (detach && c->c_blkno == 0) {
			next = cmap[CMHEAD].c_next;
			cmap[next].c_prev = i;
			c->c_prev = CMHEAD;
			c->c_next = next;
			cmap[CMHEAD].c_next = i;
		} else {
			prev = cmap[CMHEAD].c_prev;
			cmap[prev].c_next = i;
			c->c_next = CMHEAD;
			c->c_prev = prev;
			cmap[CMHEAD].c_prev = i;
		}
		c->c_free = 1;
		freemem += CLSIZE;
		splx(s);
		pte += CLSIZE;
	}
}

/*
 * Allocate wired-down (non-paged) pages in kernel virtual memory.
 */
caddr_t
wmemall(pmemall, n)
	int (*pmemall)(), n;
{
	register int npg;
	register caddr_t va;
	register int a;

	npg = clrnd(btoc(n));
	a = rmalloc(kernelmap, (long)npg);
	if (a == 0)
		return (0);
	if ((*pmemall)(&Usrptmap[a], npg, &proc[0], CSYS) == 0) {
		rmfree(kernelmap, (long)npg, (long)a);
		return (0);
	}
	va = (caddr_t) kmxtob(a);
	vmaccess(&Usrptmap[a], va, npg);
	return (va);
}

/*
 * Allocate wired-down (non-paged) pages in kernel virtual memory
 * (and clear them).
 */
caddr_t
zmemall(pmemall, n)
	int (*pmemall)(), n;
{
	register int npg;
	register caddr_t va;
	register int a;

	npg = clrnd(btoc(n));
	a = rmalloc(kernelmap, (long)npg);
	if (a == 0)
		return (0);
	if ((*pmemall)(&Usrptmap[a], npg, &proc[0], CSYS) == 0) {
		rmfree(kernelmap, (long)npg, (long)a);
		return (0);
	}
	va = (caddr_t) kmxtob(a);
	vmaccess(&Usrptmap[a], va, npg);
	while (--npg >= 0)
		clearseg((unsigned)(PG_PFNUM & *(int *)&Usrptmap[a++]));
	return (va);
}

wmemfree(va, n)
	caddr_t va;
	int n;
{
	register int a, npg;

	a = btokmx((struct pte *) va);
	npg = clrnd(btoc(n));
	memfree(&Usrptmap[a], npg, 0);
	rmfree(kernelmap, (long)npg, (long)a);
}

/*
 * Enter core map entry c on the hash chains.
 * It contains file system block bn from device dev.
 * Dev must be either a mounted file system or the swap device,
 * so we panic if getfsx() cannot find it.
 */
mhash(c, dev, bn)
	register struct cmap *c;
	dev_t dev;
	daddr_t bn;
{
	register int i = CMHASH(bn);

	c->c_hlink = cmhash[i];
	cmhash[i] = c - cmap;
	c->c_blkno = bn;
	i = getfsx(dev);
	if (i == -1)
		panic("mhash");
	c->c_mdev = i;
}
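
/*
 * Illustrative sketch (not compiled): the life cycle of a hash
 * entry.  After a pageout writes a page to disk, the frame is
 * entered with mhash(); a later pagein may find it still in the
 * free pool with mfind() and pull it off the free list; the entry
 * is removed with munhash() (or the inline copy in memall() when
 * the frame is reallocated).  The function name is hypothetical;
 * the caller is assumed to be at splimp.
 */
#ifdef notdef
struct cmap *
reclaimblk(dev, bn)
	dev_t dev;
	daddr_t bn;
{
	register struct cmap *c;

	c = mfind(dev, bn);
	if (c)
		munlink(c);	/* frame is ours again; still hashed */
	return (c);
}
#endif /* notdef */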

/*
 * Pull the core map entry for <dev,bn> off the hash chains.
 * We have checked before calling (using mfind) that the
 * entry really needs to be unhashed, so panic if we can't
 * find it (can't happen).
 * Must be called at splimp.
 */
munhash(dev, bn)
	dev_t dev;
	daddr_t bn;
{
	register int i = CMHASH(bn);
	register struct cmap *c1, *c2;

	c1 = &cmap[cmhash[i]];
	if (c1 == ecmap)
		panic("munhash");
	if (c1->c_blkno == bn && getfsx(dev) == c1->c_mdev)
		cmhash[i] = c1->c_hlink;
	else {
		for (;;) {
			c2 = c1;
			c1 = &cmap[c2->c_hlink];
			if (c1 == ecmap)
				panic("munhash");
			if (c1->c_blkno == bn && getfsx(dev) == c1->c_mdev)
				break;
		}
		c2->c_hlink = c1->c_hlink;
	}
	if (mfind(dev, bn))
		panic("munhash mfind");
	c1->c_mdev = 0;
	c1->c_blkno = 0;
	c1->c_hlink = 0;
}

/*
 * Look for block bn of device dev in the free pool.
 * Currently it should not be possible to find it unless it is
 * c_free and c_gone, although this may later not be true.
 * (This is because active texts are locked against file system
 * writes by the system.)
 */
struct cmap *
mfind(dev, bn)
	dev_t dev;
	daddr_t bn;
{
	register struct cmap *c1 = &cmap[cmhash[CMHASH(bn)]];
	int si = splimp();

	while (c1 != ecmap) {
		if (c1->c_blkno == bn && c1->c_mdev == getfsx(dev)) {
			splx(si);
			return (c1);
		}
		c1 = &cmap[c1->c_hlink];
	}
	splx(si);
	return ((struct cmap *)0);
}

/*
 * Purge blocks of file system mdev from the incore
 * cache before umount().
 */
mpurge(mdev)
	int mdev;
{
	register struct cmap *c1, *c2;
	register int i;
	int si = splimp();

	for (i = 0; i < CMHSIZ; i++) {
more:
		c1 = &cmap[cmhash[i]];
		if (c1 == ecmap)
			continue;
		if (c1->c_mdev == mdev)
			cmhash[i] = c1->c_hlink;
		else {
			for (;;) {
				c2 = c1;
				c1 = &cmap[c1->c_hlink];
				if (c1 == ecmap)
					goto cont;
				if (c1->c_mdev == mdev)
					break;
			}
			c2->c_hlink = c1->c_hlink;
		}
		c1->c_mdev = 0;
		c1->c_blkno = 0;
		c1->c_hlink = 0;
		goto more;
cont:
		;
	}
	splx(si);
}

/*
 * Initialize the core map.
 */
meminit(first, last)
	int first, last;
{
	register int i;
	register struct cmap *c;

	firstfree = clrnd(first);
	maxfree = clrnd(last - (CLSIZE - 1));
	freemem = maxfree - firstfree;
	ecmx = ecmap - cmap;
	if (ecmx < freemem / CLSIZE)
		freemem = ecmx * CLSIZE;
	for (i = 1; i <= freemem / CLSIZE; i++) {
		cmap[i-1].c_next = i;
		c = &cmap[i];
		c->c_prev = i-1;
		c->c_free = 1;
		c->c_gone = 1;
		c->c_type = CSYS;
		c->c_mdev = 0;
		c->c_blkno = 0;
	}
	cmap[freemem / CLSIZE].c_next = CMHEAD;
	for (i = 0; i < CMHSIZ; i++)
		cmhash[i] = ecmx;
	cmap[CMHEAD].c_prev = freemem / CLSIZE;
	cmap[CMHEAD].c_type = CSYS;
	avefree = freemem;
}
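
/*
 * Illustrative sketch (not compiled): the state meminit() leaves
 * behind, expressed as a hypothetical consistency check.  Every
 * core map entry on the free ring starts out free and gone, and
 * every hash chain is empty (cmhash[] slots hold the ecmx
 * sentinel, which corresponds to ecmap).
 */
#ifdef notdef
meminitchk()
{
	register int i;

	for (i = 1; i <= freemem / CLSIZE; i++)
		if (cmap[i].c_free == 0 || cmap[i].c_gone == 0)
			panic("meminitchk cmap");
	for (i = 0; i < CMHSIZ; i++)
		if (cmhash[i] != ecmx)
			panic("meminitchk cmhash");
}
#endif /* notdef */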

#ifdef notdef
/*
 * Wait for frame c to become unlocked
 * if it is currently locked.
 */
mwait(c)
	struct cmap *c;
{

	mlock(c);
	munlock(c);
}

/*
 * Lock a page frame.
 */
mlock(c)
	register struct cmap *c;
{

	while (c->c_lock) {
		c->c_want = 1;
		sleep((caddr_t)c, PSWP+1);
	}
	c->c_lock = 1;
}

/*
 * Unlock a page frame.
 */
munlock(c)
	register struct cmap *c;
{

	if (c->c_lock == 0)
		panic("dup page unlock");
	if (c->c_want) {
		wakeup((caddr_t)c);
		c->c_want = 0;
	}
	c->c_lock = 0;
}
#endif /* notdef */

/*
 * Lock a virtual segment.
 *
 * For each cluster of pages, if the cluster is not valid,
 * touch it to fault it in; otherwise just lock the page frame.
 * Called from physio to ensure that the pages
 * participating in raw i/o are valid and locked.
 */
vslock(base, count)
	caddr_t base;
	int count;
{
	register unsigned v;
	register int npf;
	register struct pte *pte;
	register struct cmap *c;

	v = btop(base);
	pte = vtopte(u.u_procp, v);
	npf = btoc(count + ((int)base & CLOFSET));
	while (npf > 0) {
		if (pte->pg_v) {
			c = &cmap[pgtocm(pte->pg_pfnum)];
			if (c->c_lock) {
				MLOCK(c);
				MUNLOCK(c);
				continue;
			}
			MLOCK(c);
		} else
			pagein(ctob(v), 1);	/* return it locked */
		pte += CLSIZE;
		v += CLSIZE;
		npf -= CLSIZE;
	}
}

/*
 * Unlock a virtual segment.
 */
vsunlock(base, count, rw)
	caddr_t base;
	int count, rw;
{
	register struct pte *pte;
	register struct cmap *c;
	int npf;

	pte = vtopte(u.u_procp, btop(base));
	npf = btoc(count + ((int)base & CLOFSET));
	while (npf > 0) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		MUNLOCK(c);
		if (rw == B_READ)	/* reading from device writes memory */
			pte->pg_m = 1;
		pte += CLSIZE;
		npf -= CLSIZE;
	}
}
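
/*
 * Illustrative sketch (not compiled): the physio()-style pairing of
 * vslock() and vsunlock() around a raw transfer.  The function and
 * its arguments are hypothetical; the real caller is the raw i/o
 * path, which wires the user's buffer for the duration of the
 * device operation.
 */
#ifdef notdef
rawxfer(base, count, rw)
	caddr_t base;
	int count, rw;
{

	vslock(base, count);		/* fault in and wire the pages */
	/* ... start the device transfer and wait for it ... */
	vsunlock(base, count, rw);	/* unwire; B_READ dirtied memory */
}
#endif /* notdef */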