/*
 *	SCCS id	@(#)hk.c	2.1 (Berkeley)	12/21/83
 */

/*
 * RK611/RK0[67] disk driver
 *
 * This driver mimics the 4.1bsd rk driver.
 * It does overlapped seeks, ECC, and bad block handling.
 *
 *	salkind@nyu
 */

#include "hk.h"
#if	NHK > 0
#include "param.h"
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dir.h>
#include <sys/user.h>
#include <sys/uba.h>
#ifndef	INTRLVE
#include <sys/inline.h>
#endif
#include <sys/hkreg.h>
#include <sys/dkbad.h>

#define	NHK7CYL		815
#define	NHK6CYL		411
#define	HK_NSECT	22
#define	HK_NTRAC	3
#define	HK_NSPC		(HK_NTRAC*HK_NSECT)

extern	struct	size hk_sizes[];
extern	struct	hkdevice *HKADDR;

int	hkpip;		/* DEBUG */
int	hknosval;	/* DEBUG */
#ifdef	HKDEBUG
int	hkdebug;
#endif

int	hk_offset[] =
{
	HKAS_P400, HKAS_M400, HKAS_P400, HKAS_M400,
	HKAS_P800, HKAS_M800, HKAS_P800, HKAS_M800,
	HKAS_P1200, HKAS_M1200, HKAS_P1200, HKAS_M1200,
	0, 0, 0, 0,
};

int	hk_type[NHK];
int	hk_cyl[NHK];
char	hk_mntflg[NHK];
char	hk_pack[NHK];

struct	hk_softc {
	int	sc_softas;	/* units needing attention while busy */
	int	sc_recal;	/* recalibration state */
} hk;

struct	buf	hktab;		/* controller queue */
struct	buf	hkutab[NHK];	/* per-drive queues */
#ifdef	UCB_DBUFS
struct	buf	rhkbuf[NHK];
#else
struct	buf	rhkbuf;
#endif
#ifdef	BADSECT
struct	dkbad	hkbad[NHK];	/* per-drive bad-sector tables */
struct	buf	bhkbuf[NHK];	/* buffers for reading bad-sector tables */
#endif

#define	hkwait(hkaddr)	while ((hkaddr->hkcs1 & HK_CRDY) == 0)
#define	hkncyl(unit)	(hk_type[unit] ? NHK7CYL : NHK6CYL)

#ifdef	INTRLVE
extern	daddr_t	dkblock();
#endif

void
hkroot()
{
	hkattach(HKADDR, 0);
}

hkattach(addr, unit)
struct hkdevice *addr;
{
	if (unit == 0) {
		HKADDR = addr;
		return(1);
	}
	return(0);
}

/*
 * Select a drive and determine whether it is an RK06 or an RK07.
 */
hkdsel(unit)
register unit;
{
	register struct hkdevice *hkaddr = HKADDR;

	hk_type[unit] = 0;
	hkaddr->hkcs1 = HK_CCLR;
	hkaddr->hkcs2 = unit;
	hkaddr->hkcs1 = HK_DCLR | HK_GO;
	hkwait(hkaddr);
	if ((hkaddr->hkcs2&HKCS2_NED) || (hkaddr->hkds&HKDS_SVAL) == 0) {
		hkaddr->hkcs1 = HK_CCLR;
		hkwait(hkaddr);
		return(-1);
	}
	if ((hkaddr->hkcs1&HK_CERR) && (hkaddr->hker&HKER_DTYE)) {
		hk_type[unit] = HK_CDT;
		hkaddr->hkcs1 = HK_CCLR;
		hkwait(hkaddr);
	}

	hk_mntflg[unit] = 1;
	hk_cyl[unit] = -1;
	return(0);
}

hkstrategy(bp)
register struct buf *bp;
{
	register struct buf *dp;
	register unit;
	int s;
	long bn;
	long sz;

	unit = minor(bp->b_dev) & 077;
	sz = (bp->b_bcount + 511) >> 9;
	if ((unit >= (NHK << 3)) || (HKADDR == (struct hkdevice *) NULL)) {
		bp->b_error = ENXIO;
		goto bad;
	}
	if (bp->b_blkno < 0 ||
	    (bn = dkblock(bp))+sz > hk_sizes[unit & 07].nblocks) {
		bp->b_error = EINVAL;
		goto bad;
	}
	bp->b_cylin = bn / HK_NSPC + hk_sizes[unit & 07].cyloff;
	unit = dkunit(bp);
	if (hk_mntflg[unit] == 0) {
		/* SHOULD BE DONE AT BOOT TIME */
		if (hkdsel(unit) < 0)
			goto bad;
	}
#ifdef	UNIBUS_MAP
	mapalloc(bp);
#endif
	dp = &hkutab[unit];
	s = spl5();
	disksort(dp, bp);
	if (dp->b_active == 0) {
		hkustart(unit);
		if (hktab.b_active == 0)
			hkstart();
	}
	splx(s);
	return;
bad:
	bp->b_flags |= B_ERROR;
	iodone(bp);
}

/*
 * Start up a drive: do an overlapped seek if needed and
 * queue the drive for a transfer.
 */
hkustart(unit)
int unit;
{
	register struct hkdevice *hkaddr = HKADDR;
	register struct buf *bp, *dp;
	int didie = 0;

	if (unit >= NHK || hk_mntflg[unit] == 0)
		return(0);
#ifdef	HK_DKN
	dk_busy &= ~(1<<(unit + HK_DKN));
#endif
	if (hktab.b_active) {
		hk.sc_softas |= (1 << unit);
		return(0);
	}

	hkaddr->hkcs1 = HK_CCLR;
	hkaddr->hkcs2 = unit;
	hkaddr->hkcs1 = hk_type[unit] | HK_DCLR | HK_GO;
	hkwait(hkaddr);

	dp = &hkutab[unit];
	if ((bp = dp->b_actf) == NULL)
		return(0);
	if (dp->b_active)
		goto done;
	dp->b_active = 1;
	if ((hkaddr->hkds & HKDS_VV) == 0 || hk_pack[unit] == 0) {
		/* SHOULD WARN SYSTEM THAT THIS HAPPENED */
#ifdef	BADSECT
		struct buf *bbp = &bhkbuf[unit];
#endif

		hkaddr->hkcs1 = hk_type[unit]|HK_PACK|HK_GO;
		hk_pack[unit]++;
#ifdef	BADSECT
		/* queue a read of the bad-sector table from the last track */
		bbp->b_flags = B_READ|B_BUSY|B_PHYS;
		bbp->b_dev = bp->b_dev;
		bbp->b_bcount = sizeof(struct dkbad);
		bbp->b_un.b_addr = (caddr_t)&hkbad[unit];
		bbp->b_blkno = (long)hkncyl(unit)*HK_NSPC - HK_NSECT;
		bbp->b_cylin = hkncyl(unit) - 1;
#ifdef	UNIBUS_MAP
		mapalloc(bbp);
#endif
		dp->b_actf = bbp;
		bbp->av_forw = bp;
		bp = bbp;
#endif
		hkwait(hkaddr);
	}
	if ((hkaddr->hkds & HKDS_DREADY) != HKDS_DREADY)
		goto done;
#if	NHK > 1
	/* start an overlapped seek if the drive is not already on-cylinder */
	if (bp->b_cylin == hk_cyl[unit])
		goto done;
	hkaddr->hkcyl = bp->b_cylin;
	hk_cyl[unit] = bp->b_cylin;
	hkaddr->hkcs1 = hk_type[unit] | HK_IE | HK_SEEK | HK_GO;
	didie = 1;
#ifdef	HK_DKN
	unit += HK_DKN;
	dk_busy |= 1 << unit;
	dk_numb[unit] += 1;
#endif	/* HK_DKN */
	return (didie);
#endif	/* NHK > 1 */

done:
	if (dp->b_active != 2) {
		dp->b_forw = NULL;
		if (hktab.b_actf == NULL)
			hktab.b_actf = dp;
		else
			hktab.b_actl->b_forw = dp;
		hktab.b_actl = dp;
		dp->b_active = 2;
	}
	return (didie);
}

/*
 * Start the next transfer on the controller.
 */
hkstart()
{
	register struct buf *bp, *dp;
	register struct hkdevice *hkaddr = HKADDR;
	daddr_t bn;
	int sn, tn, cmd, unit;

loop:
	if ((dp = hktab.b_actf) == NULL)
		return(0);
	if ((bp = dp->b_actf) == NULL) {
		hktab.b_actf = dp->b_forw;
		goto loop;
	}
	hktab.b_active++;
	unit = dkunit(bp);
	bn = dkblock(bp);

	sn = bn % HK_NSPC;
	tn = sn / HK_NSECT;
	sn %= HK_NSECT;
retry:
	hkaddr->hkcs1 = HK_CCLR;
	hkaddr->hkcs2 = unit;
	hkaddr->hkcs1 = hk_type[unit] | HK_DCLR | HK_GO;
	hkwait(hkaddr);

	if ((hkaddr->hkds & HKDS_SVAL) == 0) {
		hknosval++;
		goto nosval;
	}
	if (hkaddr->hkds & HKDS_PIP) {
		hkpip++;
		goto retry;
	}
	if ((hkaddr->hkds&HKDS_DREADY) != HKDS_DREADY) {
		printf("hk%d: not ready", unit);
		if ((hkaddr->hkds&HKDS_DREADY) != HKDS_DREADY) {
			printf("\n");
			hkaddr->hkcs1 = hk_type[unit] | HK_DCLR | HK_GO;
			hkwait(hkaddr);
			hkaddr->hkcs1 = HK_CCLR;
			hkwait(hkaddr);
			hktab.b_active = 0;
			hktab.b_errcnt = 0;
			dp->b_actf = bp->av_forw;
			dp->b_active = 0;
			bp->b_flags |= B_ERROR;
			iodone(bp);
			goto loop;
		}
		else
			printf(" (came back!)\n");
	}
nosval:
	hkaddr->hkcyl = bp->b_cylin;
	hk_cyl[unit] = bp->b_cylin;
	hkaddr->hkda = (tn << 8) + sn;
	hkaddr->hkwc = -(bp->b_bcount >> 1);
	hkaddr->hkba = bp->b_un.b_addr;
	cmd = hk_type[unit] | ((bp->b_xmem & 3) << 8) | HK_IE | HK_GO;
	if (bp->b_flags & B_READ)
		cmd |= HK_READ;
	else
		cmd |= HK_WRITE;
	hkaddr->hkcs1 = cmd;
#ifdef	HK_DKN
	dk_busy |= 1 << (HK_DKN + NHK);
	dk_numb[HK_DKN + NHK] += 1;
	dk_wds[HK_DKN + NHK] += bp->b_bcount >> 6;
#endif
	return(1);
}

/*
 * Handle a controller interrupt: finish or retry the current
 * transfer, then restart any drives wanting attention.
 */
hkintr()
{
	register struct hkdevice *hkaddr = HKADDR;
	register struct buf *bp, *dp;
	int unit;
	int as = (hkaddr->hkatt >> 8) | hk.sc_softas;
	int needie = 1;

	hk.sc_softas = 0;
	if (hktab.b_active) {
		dp = hktab.b_actf;
		bp = dp->b_actf;
		unit = dkunit(bp);
#ifdef	HK_DKN
		dk_busy &= ~(1 << (HK_DKN + NHK));
#endif
#ifdef	BADSECT
		if (bp->b_flags&B_BAD)
			if (hkecc(bp, CONT))
				return;
#endif
		if (hkaddr->hkcs1 & HK_CERR) {
			int recal;
			u_short ds = hkaddr->hkds;
			u_short cs2 = hkaddr->hkcs2;
			u_short er = hkaddr->hker;
#ifdef	HKDEBUG
			if (hkdebug) {
				printf("cs2=%b ds=%b er=%b\n",
				    cs2, HKCS2_BITS, ds,
				    HKDS_BITS, er, HKER_BITS);
			}
#endif
			if (er & HKER_WLE) {
				printf("hk%d: write locked\n", unit);
				bp->b_flags |= B_ERROR;
			} else if (++hktab.b_errcnt > 28 ||
			    ds&HKDS_HARD || er&HKER_HARD || cs2&HKCS2_HARD) {
hard:
#ifdef	UCB_DEVERR
				harderr(bp, "hk");
				printf("cs2=%b ds=%b er=%b\n",
				    cs2, HKCS2_BITS, ds,
				    HKDS_BITS, er, HKER_BITS);
#else
				deverror(bp, cs2, er);
#endif
				bp->b_flags |= B_ERROR;
				hk.sc_recal = 0;
			} else if (er & HKER_BSE) {
#ifdef	BADSECT
				if (hkecc(bp, BSE))
					return;
				else
#endif
					goto hard;
			} else
				hktab.b_active = 0;
			if (cs2&HKCS2_MDS) {
				hkaddr->hkcs2 = HKCS2_SCLR;
				goto retry;
			}
			recal = 0;
			if (ds&HKDS_DROT || er&(HKER_OPI|HKER_SKI|HKER_UNS) ||
			    (hktab.b_errcnt&07) == 4)
				recal = 1;
#ifdef	UCB_ECC
			if ((er & (HKER_DCK|HKER_ECH)) == HKER_DCK)
				if (hkecc(bp, ECC))
					return;
#endif
			hkaddr->hkcs1 = HK_CCLR;
			hkaddr->hkcs2 = unit;
			hkaddr->hkcs1 = hk_type[unit]|HK_DCLR|HK_GO;
			hkwait(hkaddr);
			if (recal && hktab.b_active == 0) {
				hkaddr->hkcs1 = hk_type[unit]|HK_IE|HK_RECAL|HK_GO;
				hk_cyl[unit] = -1;
				hk.sc_recal = 0;
				goto nextrecal;
			}
		}
retry:
		switch (hk.sc_recal) {

		case 1:
			hkaddr->hkcyl = bp->b_cylin;
			hk_cyl[unit] = bp->b_cylin;
			hkaddr->hkcs1 = hk_type[unit]|HK_IE|HK_SEEK|HK_GO;
			goto nextrecal;
		case 2:
			if (hktab.b_errcnt < 16 ||
			    (bp->b_flags&B_READ) == 0)
				goto donerecal;
			hkaddr->hkatt = hk_offset[hktab.b_errcnt & 017];
			hkaddr->hkcs1 = hk_type[unit]|HK_IE|HK_OFFSET|HK_GO;
			/* fall into ... */
nextrecal:
			hk.sc_recal++;
			hkwait(hkaddr);
			hktab.b_active = 1;
			return;
donerecal:
		case 3:
			hk.sc_recal = 0;
			hktab.b_active = 0;
			break;
		}
		if (hktab.b_active) {
			hktab.b_active = 0;
			hktab.b_errcnt = 0;
			hktab.b_actf = dp->b_forw;
			dp->b_active = 0;
			dp->b_errcnt = 0;
			dp->b_actf = bp->av_forw;
			bp->b_resid = -(hkaddr->hkwc << 1);
			iodone(bp);
			if (dp->b_actf)
				if (hkustart(unit))
					needie = 0;
		}
		as &= ~(1<<unit);
	}
	for (unit = 0; as; as >>= 1, unit++)
		if (as & 1) {
			if (unit < NHK && hk_mntflg[unit]) {
				if (hkustart(unit))
					needie = 0;
			} else {
				hkaddr->hkcs1 = HK_CCLR;
				hkaddr->hkcs2 = unit;
				hkaddr->hkcs1 = HK_DCLR | HK_GO;
				hkwait(hkaddr);
				hkaddr->hkcs1 = HK_CCLR;
			}
		}
	if (hktab.b_actf && hktab.b_active == 0)
		if (hkstart())
			needie = 0;
	if (needie)
		hkaddr->hkcs1 = HK_IE;
}

hkread(dev)
dev_t dev;
{
#ifdef	UCB_DBUFS
	register int unit = (minor(dev) >> 3) & 07;

	if (unit >= NHK)
		u.u_error = ENXIO;
	else
		physio(hkstrategy, &rhkbuf[unit], dev, B_READ);
#else
	physio(hkstrategy, &rhkbuf, dev, B_READ);
#endif
}

hkwrite(dev)
dev_t dev;
{
#ifdef	UCB_DBUFS
	register int unit = (minor(dev) >> 3) & 07;

	if (unit >= NHK)
		u.u_error = ENXIO;
	else
		physio(hkstrategy, &rhkbuf[unit], dev, B_WRITE);
#else
	physio(hkstrategy, &rhkbuf, dev, B_WRITE);
#endif
}

#ifdef	HK_DUMP
/*
 * Dump routine for RK06/07.
 * Dumps from dumplo to end of memory/end of disk section for minor(dev).
 * It uses the UNIBUS map to dump all of memory if there is a UNIBUS map.
 */
#ifdef	UNIBUS_MAP
#define	DBSIZE	(UBPAGE/PGSIZE)		/* unit of transfer, one UBPAGE */
#else
#define	DBSIZE	16			/* unit of transfer, same number */
#endif

hkdump(dev)
dev_t dev;
{
	register struct hkdevice *hkaddr = HKADDR;
	daddr_t bn, dumpsize;
	long paddr;
	register count;
#ifdef	UNIBUS_MAP
	extern	bool_t ubmap;
	register struct ubmap *ubp;
#endif
	int com, cn, tn, sn, unit;

	unit = minor(dev) >> 3;
	if ((bdevsw[major(dev)].d_strategy != hkstrategy)	/* paranoia */
	    || unit >= NHK)
		return(EINVAL);
	dumpsize = hk_sizes[minor(dev)&07].nblocks;
	if ((dumplo < 0) || (dumplo >= dumpsize))
		return(EINVAL);
	dumpsize -= dumplo;

	hkaddr->hkcs1 = HK_CCLR;
	hkwait(hkaddr);
	hkaddr->hkcs2 = unit;
	hkaddr->hkcs1 = hk_type[unit] | HK_DCLR | HK_GO;
	hkwait(hkaddr);
	if ((hkaddr->hkds & HKDS_VV) == 0) {
		hkaddr->hkcs1 = hk_type[unit]|HK_IE|HK_PACK|HK_GO;
		hkwait(hkaddr);
	}
#ifdef	UNIBUS_MAP
	ubp = &UBMAP[0];
#endif
	for (paddr = 0L; dumpsize > 0; dumpsize -= count) {
		count = dumpsize>DBSIZE? DBSIZE: dumpsize;
		bn = dumplo + (paddr >> PGSHIFT);
		cn = (bn/HK_NSPC) + hk_sizes[minor(dev)&07].cyloff;
		sn = bn%HK_NSPC;
		tn = sn/HK_NSECT;
		sn = sn%HK_NSECT;
		hkaddr->hkcyl = cn;
		hkaddr->hkda = (tn << 8) | sn;
		hkaddr->hkwc = -(count << (PGSHIFT-1));
		com = hk_type[unit]|HK_GO|HK_WRITE;
#ifdef	UNIBUS_MAP
		/*
		 * If UNIBUS_MAP exists, use the map.
		 */
556: */ 557: if (ubmap) { 558: ubp->ub_lo = loint(paddr); 559: ubp->ub_hi = hiint(paddr); 560: hkaddr->hkba = 0; 561: } else { 562: #endif 563: /* non UNIBUS map */ 564: hkaddr->hkba = loint(paddr); 565: com |= ((paddr >> 8) & (03 << 8)); 566: #ifdef UNIBUS_MAP 567: } 568: #endif 569: hkaddr->hkcs2 = unit; 570: hkaddr->hkcs1 = com; 571: hkwait(hkaddr); 572: if (hkaddr->hkcs1 & HK_CERR) { 573: if (hkaddr->hkcs2 & HKCS2_NEM) 574: return(0); /* made it to end of memory */ 575: return(EIO); 576: } 577: paddr += (DBSIZE << PGSHIFT); 578: } 579: return(0); /* filled disk minor dev */ 580: } 581: #endif HK_DUMP 582: 583: #ifdef UCB_ECC 584: #define exadr(x,y) (((long)(x) << 16) | (unsigned)(y)) 585: 586: /* 587: * Correct an ECC error and restart the i/o to complete 588: * the transfer if necessary. This is quite complicated because 589: * the transfer may be going to an odd memory address base 590: * and/or across a page boundary. 591: */ 592: hkecc(bp, flag) 593: register struct buf *bp; 594: { 595: register struct hkdevice *hkaddr = HKADDR; 596: ubadr_t addr; 597: int npx, wc; 598: int cn, tn, sn; 599: daddr_t bn; 600: unsigned ndone; 601: int cmd; 602: int unit; 603: 604: #ifdef BADSECT 605: if (flag == CONT) { 606: npx = bp->b_error; 607: ndone = npx * PGSIZE; 608: wc = ((int)(ndone - bp->b_bcount)) / NBPW; 609: } else 610: #endif 611: { 612: wc = hkaddr->hkwc; 613: ndone = (wc * NBPW) + bp->b_bcount; 614: npx = ndone / PGSIZE; 615: } 616: unit = dkunit(bp); 617: bn = dkblock(bp); 618: cn = bp->b_cylin - bn / HK_NSPC; 619: bn += npx; 620: cn += bn / HK_NSPC; 621: sn = bn % HK_NSPC; 622: tn = sn / HK_NSECT; 623: sn %= HK_NSECT; 624: hktab.b_active++; 625: 626: switch (flag) { 627: case ECC: 628: { 629: register byte; 630: int bit; 631: long mask; 632: ubadr_t bb; 633: unsigned o; 634: #ifdef UNIBUS_MAP 635: struct ubmap *ubp; 636: #endif 637: printf("hk%d%c: soft ecc sn %D\n", 638: unit, 'a' + (minor(bp->b_dev) & 07), 639: bp->b_blkno + npx - 1); 640: 641: mask = hkaddr->hkecpt; 642: byte = hkaddr->hkecps - 1; 643: bit = byte & 07; 644: byte >>= 3; 645: mask <<= bit; 646: o = (ndone - PGSIZE) + byte; 647: bb = exadr(bp->b_xmem, bp->b_un.b_addr); 648: bb += o; 649: #ifdef UNIBUS_MAP 650: if (bp->b_flags & (B_MAP|B_UBAREMAP)) { 651: ubp = UBMAP + ((bb >> 13) & 037); 652: bb = exadr(ubp->ub_hi, ubp->ub_lo) + (bb & 017777); 653: } 654: #endif 655: /* 656: * Correct until mask is zero or until end of 657: * sector or transfer, whichever comes first. 
658: */ 659: while (byte < PGSIZE && o < bp->b_bcount && mask != 0) { 660: putmemc(bb, getmemc(bb) ^ (int)mask); 661: byte++; 662: o++; 663: bb++; 664: mask >>= 8; 665: } 666: if (wc == 0) 667: return(0); 668: break; 669: } 670: 671: #ifdef BADSECT 672: case BSE: 673: #ifdef HKDEBUG 674: if (hkdebug) 675: printf("hkecc, BSE: bn %D cn %d tn %d sn %d\n", 676: bn, cn, tn, sn); 677: #endif 678: if ((bn = isbad(&hkbad[unit], cn, tn, sn)) < 0) 679: return(0); 680: bp->b_flags |= B_BAD; 681: bp->b_error = npx + 1; 682: bn = (long)hkncyl(unit)*HK_NSPC - HK_NSECT - 1 - bn; 683: cn = bn/HK_NSPC; 684: sn = bn%HK_NSPC; 685: tn = sn/HK_NSECT; 686: sn %= HK_NSECT; 687: #ifdef HKDEBUG 688: if (hkdebug) 689: printf("revector to cn %d tn %d sn %d\n", cn, tn, sn); 690: #endif 691: wc = -(512 / NBPW); 692: break; 693: 694: case CONT: 695: bp->b_flags &= ~B_BAD; 696: if (wc == 0) 697: return(0); 698: #ifdef HKDEBUG 699: if (hkdebug) 700: printf("hkecc, CONT: bn %D cn %d tn %d sn %d\n", 701: bn, cn, tn, sn); 702: #endif 703: break; 704: #endif BADSECT 705: } 706: /* 707: * Have to continue the transfer. Clear the drive 708: * and compute the position where the transfer is to continue. 709: * We have completed npx sectors of the transfer already. 710: */ 711: hkaddr->hkcs1 = HK_CCLR; 712: hkwait(hkaddr); 713: hkaddr->hkcs2 = unit; 714: hkaddr->hkcs1 = hk_type[unit] | HK_DCLR | HK_GO; 715: hkwait(hkaddr); 716: 717: addr = exadr(bp->b_xmem, bp->b_un.b_addr); 718: addr += ndone; 719: hkaddr->hkcyl = cn; 720: hkaddr->hkda = (tn << 8) + sn; 721: hkaddr->hkwc = wc; 722: hkaddr->hkba = (int)addr; 723: cmd = hk_type[unit] | ((hiint(addr) & 3) << 8) | HK_IE | HK_GO; 724: if (bp->b_flags & B_READ) 725: cmd |= HK_READ; 726: else 727: cmd |= HK_WRITE; 728: hkaddr->hkcs1 = cmd; 729: hktab.b_errcnt = 0; /* error has been corrected */ 730: return (1); 731: } 732: #endif UCB_ECC 733: #endif NHK > 0