#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/buf.h"
#include "../h/conf.h"
#include "../h/proc.h"
#include "../h/seg.h"

#define DISKMON 1

#ifdef  DISKMON
struct {
    int nbuf;
    long    nread;
    long    nreada;
    long    ncache;
    long    nwrite;
    long    bufcount[NBUF];
} io_info;
#endif

/*
 * swap IO headers.
 * they are filled in to point
 * at the desired IO operation.
 */
struct  buf swbuf1;
struct  buf swbuf2;

/*
 * The following several routines allocate and free
 * buffers with various side effects.  In general the
 * arguments to an allocate routine are a device and
 * a block number, and the value is a pointer to
 * the buffer header; the buffer is marked "busy"
 * so that no one else can touch it.  If the block was
 * already in core, no I/O need be done; if it is
 * already busy, the process waits until it becomes free.
 * The following routines allocate a buffer:
 *	getblk
 *	bread
 *	breada
 * Eventually the buffer must be released, possibly with the
 * side effect of writing it out, by using one of
 *	bwrite
 *	bdwrite
 *	bawrite
 *	brelse
 */

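/*
 * Editor's note: a minimal sketch of the protocol above (not code
 * from this file).  A caller wanting block bn of dev reads it,
 * uses the data through the buffer header, and releases it:
 *
 *	bp = bread(dev, bn);
 *	if ((bp->b_flags&B_ERROR) == 0) {
 *		... use the BSIZE bytes at bp->b_un.b_addr ...
 *	}
 *	brelse(bp);
 *
 * Every buffer handed out busy must eventually reach brelse,
 * directly or through one of the write routines.
 */
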
/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
struct buf *
bread(dev, blkno)
dev_t dev;
daddr_t blkno;
{
    register struct buf *bp;

    bp = getblk(dev, blkno);
    if (bp->b_flags&B_DONE) {
#ifdef  DISKMON
        io_info.ncache++;
#endif
        return(bp);
    }
    bp->b_flags |= B_READ;
    bp->b_bcount = BSIZE;
    (*bdevsw[major(dev)].d_strategy)(bp);
#ifdef  DISKMON
    io_info.nread++;
#endif
    iowait(bp);
    return(bp);
}
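
/*
 * Editor's note: on a cache hit getblk returns the buffer with
 * B_DONE already set, so the strategy call and the iowait are
 * skipped and only io_info.ncache is charged.  I/O errors surface
 * through geterror (called from iowait), which posts them in
 * u.u_error; the buffer is returned either way, and the caller is
 * still responsible for releasing it.
 */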

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
struct buf *
breada(dev, blkno, rablkno)
dev_t dev;
daddr_t blkno, rablkno;
{
    register struct buf *bp, *rabp;

    bp = NULL;
    if (!incore(dev, blkno)) {
        bp = getblk(dev, blkno);
        if ((bp->b_flags&B_DONE) == 0) {
            bp->b_flags |= B_READ;
            bp->b_bcount = BSIZE;
            (*bdevsw[major(dev)].d_strategy)(bp);
#ifdef  DISKMON
            io_info.nread++;
#endif
        }
    }
    if (rablkno && !incore(dev, rablkno)) {
        rabp = getblk(dev, rablkno);
        if (rabp->b_flags & B_DONE)
            brelse(rabp);
        else {
            rabp->b_flags |= B_READ|B_ASYNC;
            rabp->b_bcount = BSIZE;
            (*bdevsw[major(dev)].d_strategy)(rabp);
#ifdef  DISKMON
            io_info.nreada++;
#endif
        }
    }
    if(bp == NULL)
        return(bread(dev, blkno));
    iowait(bp);
    return(bp);
}
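
/*
 * Editor's note: the intended caller is the file system's
 * sequential read path, roughly bp = breada(dev, bn, nextbn),
 * where nextbn is a hypothetical name for the following block.
 * Only bn comes back busy; nextbn is started B_ASYNC, and when
 * its transfer completes iodone's brelse returns it to the free
 * list still associated with (dev, nextbn), so a later bread of
 * that block is a cache hit.
 */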

/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
register struct buf *bp;
{
    register flag;

    flag = bp->b_flags;
    bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI | B_AGE);
    bp->b_bcount = BSIZE;
#ifdef  DISKMON
    io_info.nwrite++;
#endif
    (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
    if ((flag&B_ASYNC) == 0) {
        iowait(bp);
        brelse(bp);
    } else if (flag & B_DELWRI)
        bp->b_flags |= B_AGE;
    else
        geterror(bp);
}
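
/*
 * Editor's note: flag snapshots b_flags because the caller's
 * B_ASYNC and B_DELWRI bits are cleared before the transfer is
 * started yet still decide the completion path afterward.  An
 * asynchronous push of a delayed-write buffer is tagged B_AGE so
 * that brelse will queue it at the head of the free list for
 * early reuse.
 */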

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
register struct buf *bp;
{
    register struct buf *dp;

    dp = bdevsw[major(bp->b_dev)].d_tab;
    if(dp->b_flags & B_TAPE)
        bawrite(bp);
    else {
        bp->b_flags |= B_DELWRI | B_DONE;
        brelse(bp);
    }
}
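
/*
 * Editor's note: the canonical use is the file system's
 * read-modify-write of a partial block (a sketch; off and n are
 * hypothetical):
 *
 *	bp = bread(dev, bn);
 *	... copy n bytes into bp->b_un.b_addr + off ...
 *	bdwrite(bp);
 *
 * The physical write is deferred until the buffer is reclaimed by
 * getblk or swept out by bflush, so several small writes to the
 * same block can collapse into one transfer.
 */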

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
register struct buf *bp;
{

    bp->b_flags |= B_ASYNC;
    bwrite(bp);
}

/*
 * release the buffer, with no I/O implied.
 */
brelse(bp)
register struct buf *bp;
{
    register struct buf **backp;
    register s;

    if (bp->b_flags&B_WANTED)
        wakeup((caddr_t)bp);
    if (bfreelist.b_flags&B_WANTED) {
        bfreelist.b_flags &= ~B_WANTED;
        wakeup((caddr_t)&bfreelist);
    }
    if (bp->b_flags&B_ERROR)
        bp->b_dev = NODEV;  /* no assoc. on error */
    s = spl6();
    if(bp->b_flags & B_AGE) {
        backp = &bfreelist.av_forw;
        (*backp)->av_back = bp;
        bp->av_forw = *backp;
        *backp = bp;
        bp->av_back = &bfreelist;
    } else {
        backp = &bfreelist.av_back;
        (*backp)->av_forw = bp;
        bp->av_back = *backp;
        *backp = bp;
        bp->av_forw = &bfreelist;
    }
    bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
    splx(s);
}
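
/*
 * Editor's note: the free list is kept in LRU order.  Ordinary
 * buffers are inserted at the tail (the av_back side of bfreelist)
 * while getblk reclaims victims from the head (av_forw), so a
 * block's data survives in core as long as possible; B_AGE buffers
 * go on the head instead, volunteering for immediate reuse.
 */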

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada)
 */
incore(dev, blkno)
dev_t dev;
daddr_t blkno;
{
    register struct buf *bp;
    register struct buf *dp;

    dp = bdevsw[major(dev)].d_tab;
    for (bp=dp->b_forw; bp != dp; bp = bp->b_forw)
        if (bp->b_blkno==blkno && bp->b_dev==dev)
            return(1);
    return(0);
}
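
/*
 * Editor's note: incore holds no lock, so its answer may be stale
 * by the time the caller acts on it.  That is acceptable for its
 * one use: breada treats it purely as a hint, to avoid sleeping in
 * getblk for a read-ahead block that is already present or busy.
 */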

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 */
struct buf *
getblk(dev, blkno)
dev_t dev;
daddr_t blkno;
{
    register struct buf *bp;
    register struct buf *dp;
#ifdef  DISKMON
    register i;
#endif

    if(major(dev) >= nblkdev)
        panic("blkdev");

    loop:
    spl0();
    dp = bdevsw[major(dev)].d_tab;
    if(dp == NULL)
        panic("devtab");
    for (bp=dp->b_forw; bp != dp; bp = bp->b_forw) {
        if (bp->b_blkno!=blkno || bp->b_dev!=dev)
            continue;
        spl6();
        if (bp->b_flags&B_BUSY) {
            bp->b_flags |= B_WANTED;
            sleep((caddr_t)bp, PRIBIO+1);
            goto loop;
        }
        spl0();
#ifdef  DISKMON
        i = 0;
        dp = bp->av_forw;
        while (dp != &bfreelist) {
            i++;
            dp = dp->av_forw;
        }
        if (i<NBUF)
            io_info.bufcount[i]++;
#endif
        notavail(bp);
        return(bp);
    }
    spl6();
    if (bfreelist.av_forw == &bfreelist) {
        bfreelist.b_flags |= B_WANTED;
        sleep((caddr_t)&bfreelist, PRIBIO+1);
        goto loop;
    }
    spl0();
    notavail(bp = bfreelist.av_forw);
    if (bp->b_flags & B_DELWRI) {
        bp->b_flags |= B_ASYNC;
        bwrite(bp);
        goto loop;
    }
    bp->b_flags = B_BUSY;
    bp->b_back->b_forw = bp->b_forw;
    bp->b_forw->b_back = bp->b_back;
    bp->b_forw = dp->b_forw;
    bp->b_back = dp;
    dp->b_forw->b_back = bp;
    dp->b_forw = bp;
    bp->b_dev = dev;
    bp->b_blkno = blkno;
    return(bp);
}
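
/*
 * Editor's note on the retry structure above: scan dev's buffer
 * list for (dev, blkno); if the block is found busy, sleep on it
 * and start over.  On a miss, if the free list is empty, sleep on
 * bfreelist and start over; if the victim at its head holds a
 * delayed write, push it out asynchronously and start over (the
 * completed write returns it marked B_AGE).  Only then is the
 * victim unlinked, moved onto dev's list, and relabeled, at which
 * point its old contents are forgotten and B_DONE is clear.
 */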

/*
 * get an empty block,
 * not assigned to any particular device
 */
struct buf *
geteblk()
{
    register struct buf *bp;
    register struct buf *dp;

loop:
    spl6();
    while (bfreelist.av_forw == &bfreelist) {
        bfreelist.b_flags |= B_WANTED;
        sleep((caddr_t)&bfreelist, PRIBIO+1);
    }
    spl0();
    dp = &bfreelist;
    notavail(bp = bfreelist.av_forw);
    if (bp->b_flags & B_DELWRI) {
        bp->b_flags |= B_ASYNC;
        bwrite(bp);
        goto loop;
    }
    bp->b_flags = B_BUSY;
    bp->b_back->b_forw = bp->b_forw;
    bp->b_forw->b_back = bp->b_back;
    bp->b_forw = dp->b_forw;
    bp->b_back = dp;
    dp->b_forw->b_back = bp;
    dp->b_forw = bp;
    bp->b_dev = (dev_t)NODEV;
    return(bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
iowait(bp)
register struct buf *bp;
{

    spl6();
    while ((bp->b_flags&B_DONE)==0)
        sleep((caddr_t)bp, PRIBIO);
    spl0();
    geterror(bp);
}

/*
 * Unlink a buffer from the available list and mark it busy.
 * (internal interface)
 */
notavail(bp)
register struct buf *bp;
{
    register s;

    s = spl6();
    bp->av_back->av_forw = bp->av_forw;
    bp->av_forw->av_back = bp->av_back;
    bp->b_flags |= B_BUSY;
    splx(s);
}

/*
 * Mark I/O complete on a buffer, release it if I/O is asynchronous,
 * and wake up anyone waiting for it.
 */
iodone(bp)
register struct buf *bp;
{

    if(bp->b_flags&B_MAP)
        mapfree(bp);
    bp->b_flags |= B_DONE;
    if (bp->b_flags&B_ASYNC)
        brelse(bp);
    else {
        bp->b_flags &= ~B_WANTED;
        wakeup((caddr_t)bp);
    }
}
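
/*
 * Editor's note: iodone is the driver half of the interface,
 * normally called from a device's interrupt routine when a
 * transfer finishes.  That is why brelse, reachable from here for
 * B_ASYNC buffers, guards its list manipulation with spl6: the
 * free list is touched both at interrupt level and from process
 * context.
 */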

/*
 * Zero the core associated with a buffer.
 */
clrbuf(bp)
struct buf *bp;
{
    register *p;
    register c;

    p = bp->b_un.b_words;
    c = BSIZE/sizeof(int);
    do
        *p++ = 0;
    while (--c);
    bp->b_resid = 0;
}

/*
 * swap I/O
 */
swap(blkno, coreaddr, count, rdflg)
register count;
{
    register struct buf *bp;
    register tcount;

    bp = &swbuf1;
    if(bp->b_flags & B_BUSY)
        if((swbuf2.b_flags&B_WANTED) == 0)
            bp = &swbuf2;
    spl6();
    while (bp->b_flags&B_BUSY) {
        bp->b_flags |= B_WANTED;
        sleep((caddr_t)bp, PSWP+1);
    }
    while (count) {
        bp->b_flags = B_BUSY | B_PHYS | rdflg;
        bp->b_dev = swapdev;
        tcount = count;
        if (tcount >= 01700)    /* prevent byte-count wrap */
            tcount = 01700;
        bp->b_bcount = ctob(tcount);
        bp->b_blkno = swplo+blkno;
        bp->b_un.b_addr = (caddr_t)(coreaddr<<6);
        bp->b_xmem = (coreaddr>>10) & 077;
        (*bdevsw[major(swapdev)].d_strategy)(bp);
        spl6();
        while((bp->b_flags&B_DONE)==0)
            sleep((caddr_t)bp, PSWP);
        count -= tcount;
        coreaddr += tcount;
        blkno += ctod(tcount);
    }
    if (bp->b_flags&B_WANTED)
        wakeup((caddr_t)bp);
    spl0();
    bp->b_flags &= ~(B_BUSY|B_WANTED);
    if (bp->b_flags & B_ERROR)
        panic("IO err in swap");
}
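
/*
 * Editor's note: coreaddr and count are in 64-byte core clicks.
 * The physical byte address is rebuilt from the click number: the
 * low 16 bits are coreaddr<<6 and the extended-memory bits
 * (coreaddr>>10, i.e. address bits 16 and up) go in b_xmem.
 * Capping a transfer at 01700 clicks keeps b_bcount =
 * ctob(01700) = 0170000 bytes within range; a full 64K byte count
 * would wrap the 16-bit field to zero.
 */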

/*
 * make sure all write-behind blocks
 * on dev (or NODEV for all)
 * are flushed out.
 * (from umount and update)
 */
bflush(dev)
dev_t dev;
{
    register struct buf *bp;

loop:
    spl6();
    for (bp = bfreelist.av_forw; bp != &bfreelist; bp = bp->av_forw) {
        if (bp->b_flags&B_DELWRI && (dev == NODEV||dev==bp->b_dev)) {
            bp->b_flags |= B_ASYNC;
            notavail(bp);
            bwrite(bp);
            goto loop;
        }
    }
    spl0();
}

/*
 * Raw I/O. The arguments are
 *	The strategy routine for the device
 *	A buffer, which will always be a special buffer
 *	  header owned exclusively by the device for this purpose
 *	The device number
 *	Read/write flag
 * Essentially all the work is computing physical addresses and
 * validating them.
 */
physio(strat, bp, dev, rw)
register struct buf *bp;
int (*strat)();
{
    register unsigned base;
    register int nb;
    int ts;

    base = (unsigned)u.u_base;
    /*
     * Check odd base, odd count, and address wraparound
     */
    if (base&01 || u.u_count&01 || base>=base+u.u_count)
        goto bad;
    ts = (u.u_tsize+127) & ~0177;
    if (u.u_sep)
        ts = 0;
    nb = (base>>6) & 01777;
    /*
     * Check overlap with text. (ts and nb now
     * in 64-byte clicks)
     */
    if (nb < ts)
        goto bad;
    /*
     * Check that transfer is either entirely in the
     * data or in the stack: that is, either
     * the end is in the data or the start is in the stack
     * (remember wraparound was already checked).
     */
    if ((((base+u.u_count)>>6)&01777) >= ts+u.u_dsize
        && nb < 1024-u.u_ssize)
        goto bad;
    spl6();
    while (bp->b_flags&B_BUSY) {
        bp->b_flags |= B_WANTED;
        sleep((caddr_t)bp, PRIBIO+1);
    }
    bp->b_flags = B_BUSY | B_PHYS | rw;
    bp->b_dev = dev;
    /*
     * Compute physical address by simulating
     * the segmentation hardware.
     */
    ts = (u.u_sep? UDSA: UISA)->r[nb>>7] + (nb&0177);
    bp->b_un.b_addr = (caddr_t)((ts<<6) + (base&077));
    bp->b_xmem = (ts>>10) & 077;
    bp->b_blkno = u.u_offset >> BSHIFT;
    bp->b_bcount = u.u_count;
    bp->b_error = 0;
    u.u_procp->p_flag |= SLOCK;
    (*strat)(bp);
    spl6();
    while ((bp->b_flags&B_DONE) == 0)
        sleep((caddr_t)bp, PRIBIO);
    u.u_procp->p_flag &= ~SLOCK;
    if (bp->b_flags&B_WANTED)
        wakeup((caddr_t)bp);
    spl0();
    bp->b_flags &= ~(B_BUSY|B_WANTED);
    u.u_count = bp->b_resid;
    geterror(bp);
    return;
    bad:
    u.u_error = EFAULT;
}
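
/*
 * Editor's note on physio's address computation: nb is the user
 * virtual address in 64-byte clicks; nb>>7 picks one of the eight
 * user segmentation registers (each maps 8K bytes, i.e. 128
 * clicks) and nb&0177 is the click offset within that segment.
 * Adding the register's base yields the physical click ts, and
 * the full physical address is reassembled as (ts<<6) + (base&077),
 * with the high-order bits (16 and up) carried in b_xmem just as
 * in swap().
 */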

/*
 * Pick up the device's error number and pass it to the user;
 * if there is an error but the number is 0 set a generalized
 * code.  Actually the latter is always true because devices
 * don't yet return specific errors.
 */
geterror(bp)
register struct buf *bp;
{

    if (bp->b_flags&B_ERROR)
        if ((u.u_error = bp->b_error)==0)
            u.u_error = EIO;
}
