/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vm_drum.c	7.1 (Berkeley) 6/5/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "buf.h"
#include "text.h"
#include "map.h"
#include "vm.h"
#include "cmap.h"
#include "kernel.h"

/*
 * Expand the swap area for both the data and stack segments.
 * If space is not available for both, retract and return 0.
 */
swpexpand(ds, ss, dmp, smp)
	size_t ds, ss;
	register struct dmap *dmp, *smp;
{
	register struct dmap *tmp;
	register int ts;
	size_t ods;

	/*
	 * If dmap isn't growing, do smap first.
	 * This avoids anomalies if smap will try to grow and
	 * fail, which otherwise would shrink ds without expanding
	 * ss, a rather curious side effect!
	 */
	if (dmp->dm_alloc > ds) {
		tmp = dmp; ts = ds;
		dmp = smp; ds = ss;
		smp = tmp; ss = ts;
	}
	ods = dmp->dm_size;
	if (vsexpand(ds, dmp, 0) == 0)
		goto bad;
	if (vsexpand(ss, smp, 0) == 0) {
		(void) vsexpand(ods, dmp, 1);
		goto bad;
	}
	return (1);

bad:
	u.u_error = ENOMEM;
	return (0);
}

/*
 * Expand or contract the virtual swap segment mapped
 * by the argument diskmap so as to just allow the given size.
 *
 * FOR NOW, CAN'T RELEASE UNLESS SHRINKING TO ZERO, SINCE PAGEOUTS MAY
 * BE IN PROGRESS... WE TYPICALLY NEVER SHRINK ANYWAY, SO IT DOESN'T
 * MATTER MUCH.
 */
vsexpand(vssize, dmp, canshrink)
	register size_t vssize;
	register struct dmap *dmp;
	int canshrink;
{
	register long blk = dmmin;
	register int vsbase = 0;
	register swblk_t *ip = dmp->dm_map;
	size_t oldsize = dmp->dm_size;
	size_t oldalloc = dmp->dm_alloc;

	vssize = ctod(vssize);
	while (vsbase < oldalloc || vsbase < vssize) {
		if (ip - dmp->dm_map >= NDMAP)
			panic("vmdrum NDMAP");
		if (vsbase >= oldalloc) {
			/* Growing: allocate another swap chunk. */
			*ip = rmalloc(swapmap, blk);
			if (*ip == 0) {
				/*
				 * Out of swap; retract to the old size,
				 * freeing what was just allocated.
				 */
				dmp->dm_size = vsbase;
				if (vsexpand(dtoc(oldsize), dmp, 1) == 0)
					panic("vsexpand");
				return (0);
			}
			dmp->dm_alloc += blk;
		} else if (vssize == 0 ||
		    (vsbase >= vssize && canshrink)) {
			/* Shrinking: release a no-longer-needed chunk. */
			rmfree(swapmap, blk, *ip);
			*ip = 0;
			dmp->dm_alloc -= blk;
		}
		vsbase += blk;
		if (blk < dmmax)
			blk *= 2;
		ip++;
	}
	dmp->dm_size = vssize;
	return (1);
}
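/*
 * Illustrative sketch, not part of the original file: the i-th dm_map
 * entry covers min(2^i * dmmin, dmmax) disk blocks, matching the
 * doubling walk in vsexpand above.  The helper name dmchunk is
 * hypothetical and the code is not compiled into the kernel.
 */
#ifdef notdef
static long
dmchunk(i)
	register int i;
{
	register long blk = dmmin;

	while (i-- > 0 && blk < dmmax)
		blk *= 2;
	return (blk);
}
#endif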
/*
 * Allocate swap space for a text segment,
 * in chunks of at most dmtext disk blocks.
 */
vsxalloc(xp)
	struct text *xp;
{
	register long blk;
	register swblk_t *dp;
	swblk_t vsbase;

	if (ctod(xp->x_size) > NXDAD * dmtext)
		return (0);
	dp = xp->x_daddr;
	for (vsbase = 0; vsbase < ctod(xp->x_size); vsbase += dmtext) {
		blk = ctod(xp->x_size) - vsbase;
		if (blk > dmtext)
			blk = dmtext;
		if ((*dp++ = rmalloc(swapmap, blk)) == 0) {
			/*
			 * vsbase counts disk blocks here; vsxfree
			 * takes pages, so convert before retracting.
			 */
			vsxfree(xp, (long)dtoc(vsbase));
			return (0);
		}
	}
	if (xp->x_flag & XPAGI) {
		xp->x_ptdaddr =
		    rmalloc(swapmap, (long)ctod(clrnd(ctopt(xp->x_size))));
		if (xp->x_ptdaddr == 0) {
			vsxfree(xp, (long)xp->x_size);
			return (0);
		}
	}
	return (1);
}

/*
 * Free the swap space of a text segment which
 * has been allocated ts pages.
 */
vsxfree(xp, ts)
	struct text *xp;
	long ts;
{
	register long blk;
	register swblk_t *dp;
	swblk_t vsbase;

	ts = ctod(ts);
	dp = xp->x_daddr;
	for (vsbase = 0; vsbase < ts; vsbase += dmtext) {
		blk = ts - vsbase;
		if (blk > dmtext)
			blk = dmtext;
		rmfree(swapmap, blk, *dp);
		*dp++ = 0;
	}
	if ((xp->x_flag&XPAGI) && xp->x_ptdaddr) {
		rmfree(swapmap, (long)ctod(clrnd(ctopt(xp->x_size))),
		    xp->x_ptdaddr);
		xp->x_ptdaddr = 0;
	}
}

/*
 * Swap a segment of virtual memory to disk,
 * by locating the contiguous dirty pte's
 * and calling vschunk with each chunk.
 * We ignore swap errors here because swap()
 * will panic on an error when writing to disk.
 */
vsswap(p, pte, type, vsbase, vscount, dmp)
	struct proc *p;
	register struct pte *pte;
	int type;
	register int vsbase, vscount;
	struct dmap *dmp;
{
	register int size = 0;
	register struct cmap *c;

	if (vscount % CLSIZE)
		panic("vsswap");
	for (;;) {
		if (vscount == 0 || !dirtycl(pte)) {
			/* End of a dirty run; push it out. */
			if (size) {
				vschunk(p, vsbase, size, type, dmp);
				vsbase += size;
				size = 0;
			}
			if (vscount == 0)
				return;
			vsbase += CLSIZE;
			/* Free clean, resident clusters outright. */
			if (pte->pg_fod == 0 && pte->pg_pfnum)
				if (type == CTEXT)
					p->p_textp->x_rssize -=
					    vmemfree(pte, CLSIZE);
				else
					p->p_rssize -= vmemfree(pte, CLSIZE);
		} else {
			size += CLSIZE;
			/*
			 * Lock and immediately unlock the core map
			 * entry, thus waiting out any I/O in progress
			 * on the page.
			 */
			c = &cmap[pgtocm(pte->pg_pfnum)];
			MLOCK(c);
			MUNLOCK(c);
		}
		vscount -= CLSIZE;
		if (type == CSTACK)
			pte -= CLSIZE;
		else
			pte += CLSIZE;
	}
}

/*
 * Write one contiguous run of dirty clusters to swap space,
 * splitting the transfer along the physically contiguous
 * pieces of the segment's swap mapping.
 */
vschunk(p, base, size, type, dmp)
	register struct proc *p;
	register int base, size;
	int type;
	struct dmap *dmp;
{
	register struct pte *pte;
	struct dblock db;
	unsigned v;

	base = ctod(base);
	size = ctod(size);
	if (type == CTEXT) {
		/* Text swap space is mapped in dmtext-sized chunks. */
		while (size > 0) {
			db.db_size = dmtext - base % dmtext;
			if (db.db_size > size)
				db.db_size = size;
			(void)swap(p,
			    p->p_textp->x_daddr[base/dmtext] + base%dmtext,
			    ptob(tptov(p, dtoc(base))), (int)dtob(db.db_size),
			    B_WRITE, 0, swapdev, 0);
			pte = tptopte(p, dtoc(base));
			p->p_textp->x_rssize -=
			    vmemfree(pte, (int)dtoc(db.db_size));
			base += db.db_size;
			size -= db.db_size;
		}
		return;
	}
	do {
		vstodb(base, size, dmp, &db, type == CSTACK);
		v = type==CSTACK ?
		    sptov(p, dtoc(base+db.db_size)-1) :
		    dptov(p, dtoc(base));
		(void)swap(p, db.db_base, ptob(v), (int)dtob(db.db_size),
		    B_WRITE, 0, swapdev, 0);
		pte = type==CSTACK ?
		    sptopte(p, dtoc(base+db.db_size)-1) :
		    dptopte(p, dtoc(base));
		p->p_rssize -= vmemfree(pte, (int)dtoc(db.db_size));
		base += db.db_size;
		size -= db.db_size;
	} while (size != 0);
}
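/*
 * Illustrative sketch, not part of the original file: the disk address
 * of text-segment swap block "base" is the base of the dmtext-sized
 * chunk holding it plus the offset within that chunk, as computed
 * inline by vschunk above and by vtod below.  The helper name txdaddr
 * is hypothetical; the code is not compiled into the kernel.
 */
#ifdef notdef
static swblk_t
txdaddr(xp, base)
	register struct text *xp;
	register int base;
{

	return (xp->x_daddr[base / dmtext] + base % dmtext);
}
#endif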
/*
 * Given a base/size pair in virtual swap area,
 * return a physical base/size pair which is the
 * (largest) initial, physically contiguous block.
 */
vstodb(vsbase, vssize, dmp, dbp, rev)
	register int vsbase, vssize;
	struct dmap *dmp;
	register struct dblock *dbp;
	int rev;
{
	register int blk = dmmin;
	register swblk_t *ip = dmp->dm_map;

	if (vsbase < 0 || vssize < 0 || vsbase + vssize > dmp->dm_size)
		panic("vstodb");
	while (vsbase >= blk) {
		vsbase -= blk;
		if (blk < dmmax)
			blk *= 2;
		ip++;
	}
	if (*ip <= 0 || *ip + blk > nswap)
		panic("vstodb *ip");
	dbp->db_size = imin(vssize, blk - vsbase);
	dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
}

/*
 * Convert a virtual page number
 * to its corresponding disk block number.
 * Used in pagein/pageout to initiate single page transfers.
 */
swblk_t
vtod(p, v, dmap, smap)
	register struct proc *p;
	unsigned v;
	struct dmap *dmap, *smap;
{
	struct dblock db;
	int tp;

	if (isatsv(p, v)) {
		tp = ctod(vtotp(p, v));
		return (p->p_textp->x_daddr[tp/dmtext] + tp%dmtext);
	}
	if (isassv(p, v))
		vstodb(ctod(vtosp(p, v)), ctod(1), smap, &db, 1);
	else
		vstodb(ctod(vtodp(p, v)), ctod(1), dmap, &db, 0);
	return (db.db_base);
}
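/*
 * Illustrative sketch, not part of the original file: locate the
 * dm_map index and in-chunk offset for a virtual swap block number,
 * duplicating the walk at the top of vstodb.  For example, with
 * dmmin = 16 and dmmax = 512, entry 0 covers blocks [0,16) and entry 1
 * covers [16,48), so block 40 falls in entry 1 at offset 24.  The
 * helper name vstoindex is hypothetical; not compiled into the kernel.
 */
#ifdef notdef
static int
vstoindex(vsbase, offp)
	register int vsbase;
	int *offp;
{
	register int blk = dmmin;
	register int i = 0;

	while (vsbase >= blk) {
		vsbase -= blk;
		if (blk < dmmax)
			blk *= 2;
		i++;
	}
	*offp = vsbase;
	return (i);
}
#endif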