/*
 * Copyright (c) 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)ufs_bio.c	2.2 (2.11BSD) 1996/9/13
 */

#include "param.h"
#include "buf.h"
#include "user.h"
#include "conf.h"
#include "fs.h"
#include "dk.h"
#include "systm.h"
#include "map.h"
#include "uba.h"
#include "trace.h"
#include "ram.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
struct buf *
bread(dev, blkno)
    dev_t dev;
    daddr_t blkno;
{
    register struct buf *bp;

    bp = getblk(dev, blkno);
    if (bp->b_flags&(B_DONE|B_DELWRI)) {
        trace(TR_BREADHIT);
        return (bp);
    }
    bp->b_flags |= B_READ;
    bp->b_bcount = DEV_BSIZE;   /* XXX? KB */
    (*bdevsw[major(dev)].d_strategy)(bp);
    trace(TR_BREADMISS);
    u.u_ru.ru_inblock++;        /* pay for read */
    biowait(bp);
    return (bp);
}
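
/*
 * Typical use of bread() (an illustrative sketch, not part of the
 * original source; "bn" and the copy destination "dst" are
 * hypothetical):
 *
 *	struct buf *bp;
 *
 *	bp = bread(dev, bn);
 *	if (u.u_error == 0)
 *		bcopy(bp->b_un.b_addr, dst, DEV_BSIZE);
 *	brelse(bp);
 *
 * bread() returns the buffer B_BUSY; the caller must release it with
 * brelse() (or one of the write routines) when finished.
 */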

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
struct buf *
breada(dev, blkno, rablkno)
    register dev_t dev;
    daddr_t blkno;
    daddr_t rablkno;
{
    register struct buf *bp, *rabp;

    bp = NULL;
    /*
     * If the block isn't in core, then allocate
     * a buffer and initiate i/o (getblk checks
     * for a cache hit).
     */
    if (!incore(dev, blkno)) {
        bp = getblk(dev, blkno);
        if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
            bp->b_flags |= B_READ;
            bp->b_bcount = DEV_BSIZE;   /* XXX? KB */
            (*bdevsw[major(dev)].d_strategy)(bp);
            trace(TR_BREADMISS);
            u.u_ru.ru_inblock++;        /* pay for read */
        } else
            trace(TR_BREADHIT);
    }

    /*
     * If there's a read-ahead block, start i/o
     * on it also (as above).
     */
    if (rablkno) {
        if (!incore(dev, rablkno)) {
            rabp = getblk(dev, rablkno);
            if (rabp->b_flags & (B_DONE|B_DELWRI)) {
                brelse(rabp);
                trace(TR_BREADHITRA);
            } else {
                rabp->b_flags |= B_READ|B_ASYNC;
                rabp->b_bcount = DEV_BSIZE; /* XXX? KB */
                (*bdevsw[major(dev)].d_strategy)(rabp);
                trace(TR_BREADMISSRA);
                u.u_ru.ru_inblock++;    /* pay in advance */
            }
        } else
            trace(TR_BREADHITRA);
    }

    /*
     * If the block was in core, let bread get it.
     * If it wasn't, the read was started above; just wait for it.
     */
    if (bp == NULL)
        return (bread(dev, blkno));
    biowait(bp);
    return (bp);
}
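
/*
 * A typical caller asks for the next sequential block as read-ahead
 * (an illustrative sketch, not from this file; "bn" is hypothetical):
 *
 *	bp = breada(dev, bn, bn + 1);
 *
 * Only the buffer for blkno is returned to (and owned by) the caller.
 * The read-ahead buffer is started B_ASYNC and is released by
 * biodone() when its i/o completes (or by brelse() above on a cache
 * hit), so the caller must not touch it.
 */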

/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
    register struct buf *bp;
{
    register flag;

    flag = bp->b_flags;
    bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
    if ((flag&B_DELWRI) == 0)
        u.u_ru.ru_oublock++;        /* no one paid yet */
    trace(TR_BWRITE);
    bp->b_bcount = DEV_BSIZE;       /* XXX? KB */
    (*bdevsw[major(bp->b_dev)].d_strategy)(bp);

    /*
     * If the write was synchronous, then await i/o completion.
     * If the write was "delayed", then we put the buffer on
     * the q of blocks awaiting i/o completion status.
     */
    if ((flag&B_ASYNC) == 0) {
        biowait(bp);
        brelse(bp);
    } else if (flag & B_DELWRI)
        bp->b_flags |= B_AGE;
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
    register struct buf *bp;
{

    if ((bp->b_flags&B_DELWRI) == 0)
        u.u_ru.ru_oublock++;        /* no one paid yet */
    if (bdevsw[major(bp->b_dev)].d_flags & B_TAPE)
        bawrite(bp);
    else {
        bp->b_flags |= B_DELWRI | B_DONE;
        brelse(bp);
    }
}
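
/*
 * The three write flavors, side by side (a summary sketch, not part
 * of the original source):
 *
 *	bwrite(bp);	synchronous: start i/o, biowait(), brelse()
 *	bawrite(bp);	asynchronous: start i/o; biodone() releases
 *			the buffer when the transfer completes
 *	bdwrite(bp);	delayed: mark B_DELWRI|B_DONE and brelse();
 *			the physical write happens later, when the
 *			buffer is reclaimed (getnewbuf) or flushed
 *			(blkflush/bflush)
 *
 * bawrite() has no definition in this file; presumably it sets
 * B_ASYNC and calls bwrite(), per the B_ASYNC test in bwrite() above.
 */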

/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
    register struct buf *bp;
{
    register struct buf *flist;
    register s;

    trace(TR_BRELSE);
    /*
     * If someone is waiting for this buffer, or if anyone is
     * waiting for any buffer at all, wake 'em up.
     */
    if (bp->b_flags&B_WANTED)
        wakeup((caddr_t)bp);
    if (bfreelist[0].b_flags&B_WANTED) {
        bfreelist[0].b_flags &= ~B_WANTED;
        wakeup((caddr_t)bfreelist);
    }
    if (bp->b_flags&B_ERROR) {
        if (bp->b_flags & B_LOCKED)
            bp->b_flags &= ~B_ERROR;    /* try again later */
        else
            bp->b_dev = NODEV;          /* no assoc */
    }

    /*
     * Stick the buffer back on a free list.
     */
    s = splbio();
    if (bp->b_flags & (B_ERROR|B_INVAL)) {
        /* block has no info ... put at front of the AGE list */
        flist = &bfreelist[BQ_AGE];
        binsheadfree(bp, flist);
    } else {
        if (bp->b_flags & B_LOCKED)
            flist = &bfreelist[BQ_LOCKED];
        else if (bp->b_flags & B_AGE)
            flist = &bfreelist[BQ_AGE];
        else
            flist = &bfreelist[BQ_LRU];
        binstailfree(bp, flist);
    }
    bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
    splx(s);
}
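
/*
 * Queue discipline, in brief (a summary, not in the original):
 * buffers with no valid contents (B_ERROR|B_INVAL) go to the head of
 * the AGE queue and are reused first; B_AGE buffers go to its tail;
 * B_LOCKED buffers are parked on BQ_LOCKED, which getnewbuf() never
 * scans; everything else goes to the tail of the LRU queue, so the
 * least recently used buffer is reclaimed first.
 */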

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(dev, blkno)
    register dev_t dev;
    daddr_t blkno;
{
    register struct buf *bp;
    register struct buf *dp;

    dp = BUFHASH(dev, blkno);
    blkno = fsbtodb(blkno);
    for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
        if (bp->b_blkno == blkno && bp->b_dev == dev &&
            (bp->b_flags & B_INVAL) == 0)
            return (1);
    return (0);
}

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(dev, blkno)
    register dev_t dev;
    daddr_t blkno;
{
    register struct buf *bp, *dp;
    daddr_t dblkno;
    int s;

#ifdef DIAGNOSTIC
    if (major(dev) >= nblkdev)
        panic("blkdev");
#endif
    /*
     * Search the cache for the block.  If we hit, but
     * the buffer is in use for i/o, then we wait until
     * the i/o has completed.
     */
    dp = BUFHASH(dev, blkno);
    dblkno = fsbtodb(blkno);
loop:
    for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
        if (bp->b_blkno != dblkno || bp->b_dev != dev ||
            bp->b_flags&B_INVAL)
            continue;
        s = splbio();
        if (bp->b_flags&B_BUSY) {
            bp->b_flags |= B_WANTED;
            sleep((caddr_t)bp, PRIBIO+1);
            splx(s);
            goto loop;
        }
        splx(s);
        notavail(bp);
        return (bp);
    }
    bp = getnewbuf();
    bfree(bp);
    bremhash(bp);
    binshash(bp, dp);
    bp->b_dev = dev;
    bp->b_blkno = dblkno;
    bp->b_error = 0;
    return (bp);
}
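
/*
 * Typical pattern for a block being created rather than read (a
 * sketch, not from this file; clrbuf() is assumed to exist elsewhere
 * in the kernel):
 *
 *	bp = getblk(dev, bn);
 *	clrbuf(bp);
 *	... fill in bp->b_un.b_addr ...
 *	bdwrite(bp);
 *
 * Unlike bread(), getblk() starts no i/o: it only associates a buffer
 * with (dev, blkno), so a caller that needs the block's old contents
 * must use bread() instead.
 */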

/*
 * Get an empty block, not assigned to any particular device.
 */
struct buf *
geteblk()
{
    register struct buf *bp, *flist;

    bp = getnewbuf();
    bp->b_flags |= B_INVAL;
    bfree(bp);
    bremhash(bp);
    flist = &bfreelist[BQ_AGE];
    binshash(bp, flist);
    bp->b_dev = (dev_t)NODEV;
    bp->b_error = 0;
    return (bp);
}
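
/*
 * geteblk() supplies scratch buffers with no device contents (an
 * illustrative sketch, not from this file):
 *
 *	bp = geteblk();
 *	... use bp->b_un.b_addr as DEV_BSIZE bytes of scratch ...
 *	brelse(bp);
 *
 * The buffer is marked B_INVAL with b_dev = NODEV, so it can never
 * satisfy a cache lookup for a real disk block.
 */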

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
    register struct buf *bp, *dp;
    int s;

loop:
    s = splbio();
    for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
        if (dp->av_forw != dp)
            break;
    if (dp == bfreelist) {      /* no free blocks */
        dp->b_flags |= B_WANTED;
        sleep((caddr_t)dp, PRIBIO+1);
        splx(s);
        goto loop;
    }
    splx(s);
    bp = dp->av_forw;
    notavail(bp);
    if (bp->b_flags & B_DELWRI) {
        bawrite(bp);
        goto loop;
    }
    if (bp->b_flags & (B_RAMREMAP|B_PHYS)) {
        register memaddr paddr; /* click address of real buffer */
        extern memaddr bpaddr;

#ifdef DIAGNOSTIC
        if ((bp < &buf[0]) || (bp >= &buf[nbuf]))
            panic("getnewbuf: RAMREMAP bp addr");
#endif
        paddr = bpaddr + btoc(DEV_BSIZE) * (bp - buf);
        bp->b_un.b_addr = (caddr_t)(paddr << 6);
        bp->b_xmem = (paddr >> 10) & 077;
    }
    trace(TR_BRELSE);
    bp->b_flags = B_BUSY;
    return (bp);
}
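
/*
 * Worked example of the click arithmetic above (PDP-11 specifics; the
 * value chosen for paddr is illustrative). A click is 64 bytes, so a
 * click address is a physical byte address shifted right 6, and a
 * DEV_BSIZE (1024-byte) buffer occupies btoc(DEV_BSIZE) = 16 clicks.
 * For, say, paddr = 020000 clicks (byte address 02000000):
 *
 *	b_addr = paddr << 6          = 0    (low 16 bits of byte address)
 *	b_xmem = (paddr >> 10) & 077 = 010  (bits 16-21 of byte address)
 *
 * b_xmem:b_addr together name the 22-bit physical address of the
 * buffer's data area.
 */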
/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
    register struct buf *bp;
{
    register int s;

    s = splbio();
    while ((bp->b_flags&B_DONE) == 0)
        sleep((caddr_t)bp, PRIBIO);
    splx(s);
    if (!u.u_error)             /* XXX */
        u.u_error = geterror(bp);
}

/*
 * Mark I/O complete on a buffer.
 * Wake up anyone waiting for it.
 */
biodone(bp)
    register struct buf *bp;
{

    if (bp->b_flags & B_DONE)
        panic("dup biodone");
    if (bp->b_flags & (B_MAP|B_UBAREMAP))
        mapfree(bp);
    bp->b_flags |= B_DONE;
    if (bp->b_flags&B_ASYNC)
        brelse(bp);
    else {
        bp->b_flags &= ~B_WANTED;
        wakeup((caddr_t)bp);
    }
}
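
/*
 * A block driver's interrupt routine hands a completed transfer back
 * through biodone() (a schematic sketch; "xxintr", "xxtab" and the
 * error test are hypothetical, not from this file):
 *
 *	xxintr()
 *	{
 *		register struct buf *bp = xxtab.b_actf;
 *
 *		if (the device reported an error) {
 *			bp->b_flags |= B_ERROR;
 *			bp->b_error = EIO;
 *		}
 *		biodone(bp);
 *	}
 *
 * For B_ASYNC i/o biodone() releases the buffer itself; otherwise it
 * marks B_DONE and wakes the sleeper in biowait().
 */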

/*
 * Ensure that no part of a specified block is in an incore buffer.
 */
blkflush(dev, blkno)
    register dev_t dev;
    daddr_t blkno;
{
    register struct buf *ep;
    struct buf *dp;
    register int s;

    dp = BUFHASH(dev, blkno);
    blkno = fsbtodb(blkno);
loop:
    for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
        if (ep->b_blkno != blkno || ep->b_dev != dev ||
            (ep->b_flags&B_INVAL))
            continue;
        s = splbio();
        if (ep->b_flags&B_BUSY) {
            ep->b_flags |= B_WANTED;
            sleep((caddr_t)ep, PRIBIO+1);
            splx(s);
            goto loop;
        }
        if (ep->b_flags & B_DELWRI) {
            splx(s);
            notavail(ep);
            bwrite(ep);
            goto loop;
        }
        splx(s);
    }
}

/*
 * Make sure all write-behind blocks on dev are flushed out
 * (from umount and sync).
 */
bflush(dev)
    register dev_t dev;
{
    register struct buf *bp;
    register struct buf *flist;
    int s;

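    /*
     * The scan restarts from the top after each write: bwrite()
     * sleeps, and the free lists may be reshuffled meanwhile.
     */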
loop:
    s = splbio();
    for (flist = bfreelist; flist < &bfreelist[BQ_EMPTY]; flist++)
        for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
            if ((bp->b_flags & B_DELWRI) == 0)
                continue;
            if (dev == bp->b_dev) {
                bp->b_flags |= B_ASYNC;
                notavail(bp);
                bwrite(bp);
                splx(s);
                goto loop;
            }
        }
    splx(s);
}

/*
 * Pick up the device's error number and pass it to the user;
 * if there is an error but the number is 0, set a generalized code.
 */
geterror(bp)
    register struct buf *bp;
{
    register int error = 0;

    if (bp->b_flags&B_ERROR)
        if ((error = bp->b_error) == 0)
            return (EIO);
    return (error);
}

/*
 * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
 *
 * This is not nicely done at all - the buffer ought to be removed from the
 * hash chains & have its dev/blkno fields clobbered, but unfortunately we
 * can't do that here, as it is quite possible that the block is still
 * being used for i/o. Eventually, all disc drivers should be forced to
 * have a close routine, which ought to ensure that the queue is empty, then
 * properly flush the queues. Until that happy day, this suffices for
 * correctness.						... kre
 */
binval(dev)
    register dev_t dev;
{
    register struct buf *bp;
    register struct bufhd *hp;
#define dp ((struct buf *)hp)

    for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
        for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
            if (bp->b_dev == dev)
                bp->b_flags |= B_INVAL;
}
