/*
 * Copyright (c) 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vm_text.c	1.2 (2.11BSD GTE) 11/26/94
 */

#include "param.h"
#include "../machine/seg.h"

#include "map.h"
#include "user.h"
#include "proc.h"
#include "text.h"
#include "inode.h"
#include "buf.h"
#include "uio.h"
#include "systm.h"

#define X_LOCK(xp) { \
    while ((xp)->x_flag & XLOCK) { \
        (xp)->x_flag |= XWANT; \
        sleep((caddr_t)(xp), PSWP); \
    } \
    (xp)->x_flag |= XLOCK; \
}
#define XUNLOCK(xp) { \
    if ((xp)->x_flag & XWANT) \
        wakeup((caddr_t)(xp)); \
    (xp)->x_flag &= ~(XLOCK|XWANT); \
}
#define FREE_AT_HEAD(xp) { \
    (xp)->x_forw = xhead; \
    xhead = (xp); \
    (xp)->x_back = &xhead; \
    if (xtail == &xhead) \
        xtail = &(xp)->x_forw; \
    else \
        (xp)->x_forw->x_back = &(xp)->x_forw; \
}
#define FREE_AT_TAIL(xp) { \
    (xp)->x_back = xtail; \
    *xtail = (xp); \
    xtail = &(xp)->x_forw; \
    /* x_forw is NULL */ \
}
#define ALLOC(xp) { \
    *((xp)->x_back) = (xp)->x_forw; \
    if ((xp)->x_forw) \
        (xp)->x_forw->x_back = (xp)->x_back; \
    else \
        xtail = (xp)->x_back; \
    (xp)->x_forw = NULL; \
    (xp)->x_back = NULL; \
}
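
/*
 * Free-list bookkeeping (the invariants the macros above maintain, as
 * read from the code): x_forw points at the next free entry; x_back
 * points at whichever word points at this entry (xhead, or the previous
 * entry's x_forw); xtail points at the forward link of the last entry,
 * or at xhead when the list is empty.  A non-NULL x_back therefore
 * means the entry is on the free list, and ALLOC unlinks it and clears
 * both links.
 */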

/*
 * We place free text table entries on a free list.
 * All text images are treated as "sticky": when no longer in use they
 * are placed on the free list, which serves as an LRU cache, and they
 * may be reclaimed from it until the entry is reused.
 * Files explicitly marked sticky are locked into the table and are
 * never freed.
 */
struct  text *xhead, **xtail;       /* text table free list */
#ifdef UCB_METER
struct  xstats xstats;          /* cache statistics */
#endif

/*
 * initialize text table
 */
xinit()
{
    register struct text *xp;

    xtail = &xhead;
    for (xp = text; xp < textNTEXT; xp++)
        FREE_AT_TAIL(xp);
}

/*
 * Decrement the loaded reference count of a text object.  If it is not
 * sticky and the count reaches zero, attach it to the LRU cache: at the
 * head if the process is being traced or the inode's hard link count is
 * zero, otherwise at the tail.
 */
xfree()
{
    register struct text *xp;

    if ((xp = u.u_procp->p_textp) == NULL)
        return;
#ifdef UCB_METER
    xstats.free++;
#endif
    X_LOCK(xp);
    /*
     * Don't add the following test to the "if" below:
     *
     *	(xp->x_iptr->i_mode & ISVTX) == 0
     *
     * all text under 2.10 is sticky in an LRU cache.  Putting the
     * above test in makes sticky text objects ``gluey'' and nearly
     * impossible to flush from memory.
     */
    if (--xp->x_count == 0) {
        if (xp->x_flag & XTRC || xp->x_iptr->i_nlink == 0) {
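            /*
             * Clear XLOCK by hand before calling xuntext(): xuntext()
             * takes the lock itself with X_LOCK and would sleep
             * forever if this process still held it.
             */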
            xp->x_flag &= ~XLOCK;
            xuntext(xp);
            FREE_AT_HEAD(xp);
        } else {
#ifdef UCB_METER
            if (xp->x_flag & XWRIT) {
                xstats.free_cacheswap++;
                xp->x_flag |= XUNUSED;
            }
            xstats.free_cache++;
#endif
            --xp->x_ccount;
            FREE_AT_TAIL(xp);
        }
    } else {
        --xp->x_ccount;
#ifdef UCB_METER
        xstats.free_inuse++;
#endif
    }
    XUNLOCK(xp);
    u.u_procp->p_textp = NULL;
}

/*
 * Attach to a shared text segment.  If there is no shared text, just
 * return.  If there is, hook up to it.  If it is not available from
 * core or swap, it has to be read in from the inode (ip); the written
 * bit is set to force it to be written out as appropriate.  If it is
 * not available from core, a swap has to be done to get it back.
 */
xalloc(ip, ep)
    struct exec *ep;
    register struct inode *ip;
{
    register struct text *xp;
    register u_int  count;
    off_t   offset;
    size_t ts;

    if (ep->a_text == 0)
        return;
#ifdef UCB_METER
    xstats.alloc++;
#endif
    while ((xp = ip->i_text) != NULL) {
        if (xp->x_flag&XLOCK) {
            /*
             * Wait for text to be unlocked,
             * then start over (may have changed state).
             */
            xwait(xp);
            continue;
        }
        X_LOCK(xp);
        if (xp->x_back) {
            ALLOC(xp);
#ifdef UCB_METER
            xstats.alloc_cachehit++;
            xp->x_flag &= ~XUNUSED;
#endif
        }
#ifdef UCB_METER
        else
            xstats.alloc_inuse++;
#endif
        xp->x_count++;
        u.u_procp->p_textp = xp;
        if (!xp->x_caddr && !xp->x_ccount)
            xexpand(xp);
        else
            ++xp->x_ccount;
        XUNLOCK(xp);
        return;
    }
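    /*
     * No text entry for this inode: recycle the entry at the head of
     * the free list.  Entries placed at the head are those marked for
     * reuse first (traced or unlinked images); otherwise the head is
     * the entry that has gone unused the longest.
     */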
    xp = xhead;
    if (xp == NULL) {
        tablefull("text");
        psignal(u.u_procp, SIGKILL);
        return;
    }
    ALLOC(xp);
    if (xp->x_iptr) {
#ifdef UCB_METER
        xstats.alloc_cacheflush++;
        if (xp->x_flag & XUNUSED)
            xstats.alloc_unused++;
#endif
        xuntext(xp);
    }
    xp->x_flag = XLOAD|XLOCK;
    ts = btoc(ep->a_text);
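    /*
     * For an overlaid executable the image evidently has to cover the
     * base text plus all of the overlays, so uo_ov_offst[NOVL] (the
     * cumulative overlay offset, in clicks) is used as the size rather
     * than just btoc(a_text).
     */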
    if (u.u_ovdata.uo_ovbase)
        xp->x_size = u.u_ovdata.uo_ov_offst[NOVL];
    else
        xp->x_size = ts;
    if ((xp->x_daddr = malloc(swapmap, (size_t)ctod(xp->x_size))) == NULL) {
        swkill(u.u_procp, "xalloc");
        return;
    }
    xp->x_count = 1;
    xp->x_ccount = 0;
    xp->x_iptr = ip;
    ip->i_flag |= ITEXT;
    ip->i_text = xp;
    ip->i_count++;
    u.u_procp->p_textp = xp;
    xexpand(xp);
    estabur(ts, (u_int)0, (u_int)0, 0, RW);
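    /*
     * Read the text in from the file: skip the a.out header (and the
     * overlay table when present), round the byte count down to an
     * even number, and copy into the process's separate instruction
     * space.  The estabur() call above mapped the text read/write so
     * the copy-in can store into it.
     */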
    offset = sizeof(struct exec);
    if (u.u_ovdata.uo_ovbase)
        offset += (NOVL + 1) * sizeof(u_int);
    u.u_procp->p_flag |= SLOCK;
    u.u_error = rdwri(UIO_READ, ip, (caddr_t)0, ep->a_text & ~1,
            offset, UIO_USERISPACE, IO_UNIT, (int *)0);

    if (u.u_ovdata.uo_ovbase) { /* read in overlays if necessary */
        register int i;

        offset += (off_t)(ep->a_text & ~1);
        for (i = 1; i <= NOVL; i++) {
            u.u_ovdata.uo_curov = i;
            count = ctob(u.u_ovdata.uo_ov_offst[i] - u.u_ovdata.uo_ov_offst[i-1]);
            if (count) {
                choverlay(RW);
                u.u_error = rdwri(UIO_READ, ip,
                    (caddr_t)(ctob(stoc(u.u_ovdata.uo_ovbase))),
                    count, offset, UIO_USERISPACE,
                    IO_UNIT, (int *)0);
                offset += (off_t) count;
            }
        }
    }
    u.u_ovdata.uo_curov = 0;
    u.u_procp->p_flag &= ~SLOCK;
    xp->x_flag |= XWRIT;
    xp->x_flag &= ~XLOAD;
}

/*
 * Assure core for the text segment.  If there isn't enough room to get
 * the process in core, swap self out.  x_ccount must be 0.  The text
 * must be locked to keep someone else from freeing it in the meantime.
 * Don't change the locking, it's correct.
 */
xexpand(xp)
    register struct text *xp;
{
    if ((xp->x_caddr = malloc(coremap, xp->x_size)) != NULL) {
        if ((xp->x_flag & XLOAD) == 0)
            swap(xp->x_daddr, xp->x_caddr, xp->x_size, B_READ);
        xp->x_ccount++;
        XUNLOCK(xp);
        return;
    }
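    /*
     * No room in core: save our context with setjmp and swap ourselves
     * out.  The SSWAP flag makes the swap-in code resume from u_ssave,
     * i.e. at the setjmp below with a non-zero return, and sureg()
     * then reloads the segmentation registers now that the image is
     * back in core.
     */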
    if (setjmp(&u.u_ssave)) {
        sureg();
        return;
    }
    swapout(u.u_procp, X_FREECORE, X_OLDSIZE, X_OLDSIZE);
    XUNLOCK(xp);
    u.u_procp->p_flag |= SSWAP;
    swtch();
    /* NOTREACHED */
}

/*
 * Lock and unlock a text segment from swapping
 */
xlock(xp)
    register struct text *xp;
{

    X_LOCK(xp);
}

/*
 * Wait for xp to be unlocked if it is currently locked.
 */
xwait(xp)
    register struct text *xp;
{

    X_LOCK(xp);
    XUNLOCK(xp);
}

xunlock(xp)
    register struct text *xp;
{

    XUNLOCK(xp);
}

/*
 * Decrement the in-core usage count of a shared text segment.
 * When it drops to zero, free the core space.  Write the swap
 * copy of the text if as yet unwritten.
 */
xccdec(xp)
    register struct text *xp;
{
    if (!xp->x_ccount)
        return;
    X_LOCK(xp);
    if (--xp->x_ccount == 0) {
        if (xp->x_flag & XWRIT) {
            swap(xp->x_daddr, xp->x_caddr, xp->x_size, B_WRITE);
            xp->x_flag &= ~XWRIT;
        }
        mfree(coremap, xp->x_size, xp->x_caddr);
        xp->x_caddr = NULL;
    }
    XUNLOCK(xp);
}

/*
 * Free the swap image of all unused saved-text text segments which are from
 * device dev (used by umount system call).  If dev is NODEV, do all devices
 * (used when rebooting or malloc of swapmap failed).
 */
xumount(dev)
    register dev_t dev;
{
    register struct text *xp;

    for (xp = text; xp < textNTEXT; xp++)
        if (xp->x_iptr != NULL &&
            (dev == xp->x_iptr->i_dev || dev == NODEV))
            xuntext(xp);
}

/*
 * Remove a text image from the text table.
 * The use count must be zero.
 */
xuntext(xp)
    register struct text *xp;
{
    register struct inode *ip;

    X_LOCK(xp);
    if (xp->x_count == 0) {
        ip = xp->x_iptr;
        xp->x_iptr = NULL;
        mfree(swapmap, ctod(xp->x_size), xp->x_daddr);
        if (xp->x_caddr)
            mfree(coremap, xp->x_size, xp->x_caddr);
        ip->i_flag &= ~ITEXT;
        ip->i_text = NULL;
        irele(ip);
    }
    XUNLOCK(xp);
}

/*
 * Free up "size" clicks of core; if the swap copy of a text segment
 * has not yet been written, write it out first.
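 * Since released text images are appended at the tail of the free
 * list, walking from xhead frees the least recently used images first.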
 */
xuncore(size)
    register size_t size;
{
    register struct text *xp;

    for (xp = xhead; xp; xp = xp->x_forw) {
        if (!xp->x_iptr)
            continue;
        X_LOCK(xp);
        if (!xp->x_ccount && xp->x_caddr) {
            if (xp->x_flag & XWRIT) {
                swap(xp->x_daddr, xp->x_caddr, xp->x_size, B_WRITE);
                xp->x_flag &= ~XWRIT;
            }
            mfree(coremap, xp->x_size, xp->x_caddr);
            xp->x_caddr = NULL;
            if (xp->x_size >= size) {
                XUNLOCK(xp);
                return;
            }
        }
        XUNLOCK(xp);
    }
}
