/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved. The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)machdep.c	7.1 (Berkeley) 6/5/86
 */

#include "reg.h"
#include "pte.h"
#include "psl.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "kernel.h"
#include "map.h"
#include "vm.h"
#include "proc.h"
#include "buf.h"
#include "reboot.h"
#include "conf.h"
#include "inode.h"
#include "file.h"
#include "text.h"
#include "clist.h"
#include "callout.h"
#include "cmap.h"
#include "mbuf.h"
#include "msgbuf.h"
#include "quota.h"

#include "frame.h"
#include "clock.h"
#include "cons.h"
#include "cpu.h"
#include "mem.h"
#include "mtpr.h"
#include "rpb.h"
#include "ka630.h"
#include "../vaxuba/ubavar.h"
#include "../vaxuba/ubareg.h"

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif

/*
 * Machine-dependent startup code
 */
startup(firstaddr)
	int firstaddr;
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j;
	register caddr_t v;
	int maxbufs, base, residual;

#if VAX630
	/*
	 * Leave last 5k of phys. memory as console work area.
	 */
	if (cpu == VAX_630)
		maxmem -= 10;
#endif
	/*
	 * Initialize error message buffer (at end of core).
	 */
	maxmem -= btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
		*(int *)pte++ = PG_V | PG_KW | (maxmem + i);
	mtpr(TBIA, 0);

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(0x80000000 | (firstaddr * NBPG));
#define	valloc(name, type, num) \
	(name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
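
	/*
	 * Illustrative expansion of the macros above: a call such as
	 *
	 *	valloc(cfree, struct cblock, nclist);
	 *
	 * becomes
	 *
	 *	(cfree) = (struct cblock *)v; v = (caddr_t)((cfree)+(nclist));
	 *
	 * i.e. each valloc/valloclim carves the next array out of kernel
	 * virtual space at "v" and advances "v" past it; valloclim also
	 * records the end address in its "lim" argument.
	 */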
	valloclim(inode, struct inode, ninode, inodeNINODE);
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(namecache, struct namecache, nchsize);
#ifdef QUOTA
	valloclim(quota, struct quota, nquota, quotaNQUOTA);
	valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Ensure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * CLSIZE))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * CLSIZE + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
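
	/*
	 * Worked example of the sizing above, assuming the usual VAX
	 * constants (NBPG = 512, CLSIZE = 2, CLBYTES = 1024) and 8 Meg
	 * of memory, i.e. physmem = 16384 pages: bufpages becomes
	 * ((4096 + 16384) / 20) / 2 = 512 clusters (512 Kbytes of buffer
	 * cache), nbuf = 512 / 2 = 256 buffer headers, and
	 * nswbuf = (256 / 2) &~ 1 = 128 swap headers.  That matches the
	 * stated policy: roughly 10% of the first 2 Meg plus 5% of the
	 * remaining 6 Meg.
	 */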

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ 0x80000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - 0x80000000)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ 0x80000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ 0x80000000);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper. They are different from the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	mapaddr = firstaddr;
	for (i = 0; i < residual; i++) {
		for (j = 0; j < (base + 1) * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}
	for (i = residual; i < nbuf; i++) {
		for (j = 0; j < base * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}

	unixsize = btoc((int)v &~ 0x80000000);
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 0);			/* After we just cleared it all! */

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Configure the system.
	 */
	configure();

	/*
	 * Clear restart inhibit flags.
	 */
	tocons(TXDB_CWSI);
	tocons(TXDB_CCSI);
}

#ifdef PGINPROF
/*
 * Return the difference (in microseconds)
 * between the current time and a previous
 * time as represented by the arguments.
 * If there is a pending clock interrupt
 * which has not been serviced due to high
 * ipl, return an error code.
 */
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{

	if (mfpr(ICCS)&ICCS_INT)
		return(-1);
	else
		return(((time.tv_sec-otime)*60 + lbolt-olbolt)*16667 + mfpr(ICR)-oicr);
}
#endif

/*
 * Clear registers on exec
 */
setregs(entry)
	u_long entry;
{
#ifdef notdef
	register int *rp;

	/* should pass args to init on the stack */
	/* should also fix this code before using it, it's wrong */
	/* wanna clear the scb? */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	u.u_ar0[PC] = entry + 2;
}
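
/*
 * Starting the new image at entry + 2 skips the two-byte register save
 * mask that begins a VAX procedure: the entry point is laid out as a
 * procedure, but since it is entered directly rather than through a
 * calls instruction the mask word would otherwise be executed as code.
 */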

/*
 * Send an interrupt to a process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call the routine, followed by a chmk
 * to the sigreturn routine below. After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user-specified pc, psl.
 */
sendsig(p, sig, mask)
	int (*p)(), sig, mask;
{
	register struct sigcontext *scp;
	register int *regs;
	register struct sigframe {
		int	sf_signum;
		int	sf_code;
		struct	sigcontext *sf_scp;
		int	(*sf_handler)();
		int	sf_argcount;
		struct	sigcontext *sf_scpcopy;
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void)grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		u.u_signal[SIGILL] = SIG_DFL;
		sig = sigmask(SIGILL);
		u.u_procp->p_sigignore &= ~sig;
		u.u_procp->p_sigcatch &= ~sig;
		u.u_procp->p_sigmask &= ~sig;
		psignal(u.u_procp, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	if (sig == SIGILL || sig == SIGFPE) {
		fp->sf_code = u.u_code;
		u.u_code = 0;
	} else
		fp->sf_code = 0;
	fp->sf_scp = scp;
	fp->sf_handler = p;
	/*
	 * Build the calls argument frame to be used to call sigreturn.
	 */
	fp->sf_argcount = 1;
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_ap = regs[AP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	regs[SP] = (int)fp;
	regs[PS] &= ~(PSL_CM|PSL_FPD);
	regs[PC] = (int)u.u_pcb.pcb_sigc;
	return;
}
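
/*
 * Sketch of the user stack as sendsig leaves it (lowest address first;
 * the new sp points at sf_signum):
 *
 *	sf_signum, sf_code, sf_scp	arguments passed to the handler
 *	sf_handler			address the trampoline calls
 *	sf_argcount, sf_scpcopy		``calls''-style argument list used
 *					by the chmk into sigreturn(scp)
 *	sigcontext			saved onstack, mask, sp, fp, ap, pc, psl
 *	(old sp)			previous user stack
 */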

/*
 * System call to clean up state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
sigreturn()
{
	struct a {
		struct sigcontext *sigcntxp;
	};
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = ((struct a *)(u.u_ap))->sigcntxp;
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		return;
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
	    ((scp->sc_ps & PSL_CM) &&
	     (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0)) {
		u.u_error = EINVAL;
		return;
	}
	u.u_eosys = JUSTRETURN;
	u.u_onstack = scp->sc_onstack & 01;
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[FP] = scp->sc_fp;
	regs[AP] = scp->sc_ap;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
}

/* XXX - BEGIN 4.2 COMPATIBILITY */
/*
 * Compatibility with 4.2 chmk $139 used by longjmp()
 */
osigcleanup()
{
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = (struct sigcontext *)fuword((caddr_t)regs[SP]);
	if ((int)scp == -1)
		return;
	if (useracc((caddr_t)scp, 3 * sizeof (int), B_WRITE) == 0)
		return;
	u.u_onstack = scp->sc_onstack & 01;
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[SP] = scp->sc_sp;
}
/* XXX - END 4.2 COMPATIBILITY */

#ifdef notdef
dorti()
{
	struct frame frame;
	register int sp;
	register int reg, mask;
	extern int ipcreg[];

	(void) copyin((caddr_t)u.u_ar0[FP], (caddr_t)&frame, sizeof (frame));
	sp = u.u_ar0[FP] + sizeof (frame);
	u.u_ar0[PC] = frame.fr_savpc;
	u.u_ar0[FP] = frame.fr_savfp;
	u.u_ar0[AP] = frame.fr_savap;
	mask = frame.fr_mask;
	for (reg = 0; reg <= 11; reg++) {
		if (mask&1) {
			u.u_ar0[ipcreg[reg]] = fuword((caddr_t)sp);
			sp += 4;
		}
		mask >>= 1;
	}
	sp += frame.fr_spa;
	u.u_ar0[PS] = (u.u_ar0[PS] & 0xffff0000) | frame.fr_psw;
	if (frame.fr_s)
		sp += 4 + 4 * (fuword((caddr_t)sp) & 0xff);
	/* phew, now the rei */
	u.u_ar0[PC] = fuword((caddr_t)sp);
	sp += 4;
	u.u_ar0[PS] = fuword((caddr_t)sp);
	sp += 4;
	u.u_ar0[PS] |= PSL_USERSET;
	u.u_ar0[PS] &= ~PSL_USERCLR;
	u.u_ar0[SP] = (int)sp;
}
#endif

/*
 * Memenable enables the memory controllers' corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs. Thus we report at most once per memintvl.
 */
int memintvl = MEMINTVL;

memenable()
{
	register struct mcr *mcr;
	register int m;

#if VAX630
	if (cpu == VAX_630)
		return;
#endif
#ifdef VAX8600
	if (cpu == VAX_8600) {
		M8600_ENA;
	} else
#endif
	for (m = 0; m < nmcr; m++) {
		mcr = mcraddr[m];
		switch (mcrtype[m]) {
#if VAX780
		case M780C:
			M780C_ENA(mcr);
			break;
		case M780EL:
			M780EL_ENA(mcr);
			break;
		case M780EU:
			M780EU_ENA(mcr);
			break;
#endif
#if VAX750
		case M750:
			M750_ENA(mcr);
			break;
#endif
#if VAX730
		case M730:
			M730_ENA(mcr);
			break;
#endif
		}
	}
	if (memintvl > 0)
		timeout(memenable, (caddr_t)0, memintvl*hz);
}
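
/*
 * timeout() is given its delay in clock ticks, so the rearm above fires
 * memintvl seconds later (memintvl*hz ticks); e.g. if memintvl were
 * patched to 60 (6000 ticks with hz = 100), corrected data reporting
 * would be re-enabled once a minute.
 */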

/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts. It looks to see which memory controllers have
 * unreported errors, reports them, and disables further
 * reporting for a time on those controllers.
 */
memerr()
{
#ifdef VAX8600
	register int reg11;	/* known to be r11 below */
#endif
	register struct mcr *mcr;
	register int m;

#if VAX630
	if (cpu == VAX_630)
		return;
#endif
#ifdef VAX8600
	if (cpu == VAX_8600) {
		int mdecc, mear, mstat1, mstat2, array;

		/*
		 * Scratchpad registers in the Ebox must be read by
		 * storing their ID number in ESPA and then immediately
		 * reading ESPD's contents with no other intervening
		 * machine instructions!
		 *
		 * The asm's below have a number of constants which
		 * are defined correctly in mem.h and mtpr.h.
		 */
#ifdef lint
		reg11 = 0;
#else
		asm("mtpr $0x27,$0x4e; mfpr $0x4f,r11");
#endif
		mdecc = reg11;	/* must acknowledge interrupt? */
		if (M8600_MEMERR(mdecc)) {
			asm("mtpr $0x2a,$0x4e; mfpr $0x4f,r11");
			mear = reg11;
			asm("mtpr $0x25,$0x4e; mfpr $0x4f,r11");
			mstat1 = reg11;
			asm("mtpr $0x26,$0x4e; mfpr $0x4f,r11");
			mstat2 = reg11;
			array = M8600_ARRAY(mear);

			printf("mcr0: ecc error, addr %x (array %d) syn %x\n",
			    M8600_ADDR(mear), array, M8600_SYN(mdecc));
			printf("\tMSTAT1 = %b\n\tMSTAT2 = %b\n",
			    mstat1, M8600_MSTAT1_BITS,
			    mstat2, M8600_MSTAT2_BITS);
			M8600_INH;
		}
	} else
#endif
	for (m = 0; m < nmcr; m++) {
		mcr = mcraddr[m];
		switch (mcrtype[m]) {
#if VAX780
		case M780C:
			if (M780C_ERR(mcr)) {
				printf("mcr%d: soft ecc addr %x syn %x\n",
				    m, M780C_ADDR(mcr), M780C_SYN(mcr));
#ifdef TRENDATA
				memlog(m, mcr);
#endif
				M780C_INH(mcr);
			}
			break;

		case M780EL:
			if (M780EL_ERR(mcr)) {
				printf("mcr%d: soft ecc addr %x syn %x\n",
				    m, M780EL_ADDR(mcr), M780EL_SYN(mcr));
				M780EL_INH(mcr);
			}
			break;

		case M780EU:
			if (M780EU_ERR(mcr)) {
				printf("mcr%d: soft ecc addr %x syn %x\n",
				    m, M780EU_ADDR(mcr), M780EU_SYN(mcr));
				M780EU_INH(mcr);
			}
			break;
#endif
#if VAX750
		case M750:
			if (M750_ERR(mcr)) {
				struct mcr amcr;

				amcr.mc_reg[0] = mcr->mc_reg[0];
				printf("mcr%d: %s",
				    m, (amcr.mc_reg[0] & M750_UNCORR) ?
				    "hard error" : "soft ecc");
				printf(" addr %x syn %x\n",
				    M750_ADDR(&amcr), M750_SYN(&amcr));
				M750_INH(mcr);
			}
			break;
#endif
#if VAX730
		case M730: {
			struct mcr amcr;

			/*
			 * Must be careful on the 730 not to use invalid
			 * instructions in I/O space, so make a copy.
			 */
			amcr.mc_reg[0] = mcr->mc_reg[0];
			amcr.mc_reg[1] = mcr->mc_reg[1];
			if (M730_ERR(&amcr)) {
				printf("mcr%d: %s",
				    m, (amcr.mc_reg[1] & M730_UNCORR) ?
				    "hard error" : "soft ecc");
				printf(" addr %x syn %x\n",
				    M730_ADDR(&amcr), M730_SYN(&amcr));
				M730_INH(mcr);
			}
			break;
		}
#endif
		}
	}
}

#ifdef TRENDATA
/*
 * Figure out what chip to replace on Trendata boards.
 * Assumes all your memory is Trendata or the non-Trendata
 * memory never fails.
 */
struct {
	u_char	m_syndrome;
	char	m_chip[4];
} memlogtab[] = {
	0x01,	"C00",	0x02,	"C01",	0x04,	"C02",	0x08,	"C03",
	0x10,	"C04",	0x19,	"L01",	0x1A,	"L02",	0x1C,	"L04",
	0x1F,	"L07",	0x20,	"C05",	0x38,	"L00",	0x3B,	"L03",
	0x3D,	"L05",	0x3E,	"L06",	0x40,	"C06",	0x49,	"L09",
	0x4A,	"L10",	0x4c,	"L12",	0x4F,	"L15",	0x51,	"L17",
	0x52,	"L18",	0x54,	"L20",	0x57,	"L23",	0x58,	"L24",
	0x5B,	"L27",	0x5D,	"L29",	0x5E,	"L30",	0x68,	"L08",
	0x6B,	"L11",	0x6D,	"L13",	0x6E,	"L14",	0x70,	"L16",
	0x73,	"L19",	0x75,	"L21",	0x76,	"L22",	0x79,	"L25",
	0x7A,	"L26",	0x7C,	"L28",	0x7F,	"L31",	0x80,	"C07",
	0x89,	"U01",	0x8A,	"U02",	0x8C,	"U04",	0x8F,	"U07",
	0x91,	"U09",	0x92,	"U10",	0x94,	"U12",	0x97,	"U15",
	0x98,	"U16",	0x9B,	"U19",	0x9D,	"U21",	0x9E,	"U22",
	0xA8,	"U00",	0xAB,	"U03",	0xAD,	"U05",	0xAE,	"U06",
	0xB0,	"U08",	0xB3,	"U11",	0xB5,	"U13",	0xB6,	"U14",
	0xB9,	"U17",	0xBA,	"U18",	0xBC,	"U20",	0xBF,	"U23",
	0xC1,	"U25",	0xC2,	"U26",	0xC4,	"U28",	0xC7,	"U31",
	0xE0,	"U24",	0xE3,	"U27",	0xE5,	"U29",	0xE6,	"U30"
};

memlog(m, mcr)
	int m;
	struct mcr *mcr;
{
	register i;

	switch (mcrtype[m]) {

#if VAX780
	case M780C:
		for (i = 0; i < (sizeof (memlogtab) / sizeof (memlogtab[0])); i++)
			if ((u_char)(M780C_SYN(mcr)) == memlogtab[i].m_syndrome) {
				printf(
	"mcr%d: replace %s chip in %s bank of memory board %d (0-15)\n",
				    m,
				    memlogtab[i].m_chip,
				    (M780C_ADDR(mcr) & 0x8000) ? "upper" : "lower",
				    (M780C_ADDR(mcr) >> 16));
				return;
			}
		printf("mcr%d: multiple errors, not traceable\n", m);
		break;
#endif
	}
}
#endif

/*
 * Invalidate all pte's in a cluster (one TBIS per page).
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;		/* must be first reg var */
	register int i;

	asm(".set TBIS,58");
	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
#ifdef lint
		mtpr(TBIS, addr);
#else
		asm("mtpr r11,$TBIS");
#endif
		addr += NBPG;
	}
}

int	waittime = -1;

boot(paniced, arghowto)
	int paniced, arghowto;
{
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */

#ifdef lint
	howto = 0; devtype = 0;
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
	howto = arghowto;
	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		xumount(NODEV);
		update();
		{ register struct buf *bp;
		  int iter, nbusy;

		  for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		  }
		}
		printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splx(0x1f);			/* extreme priority */
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		mtpr(IPL, 0x1f);
		for (;;)
			;
	} else {
		if (paniced == RB_PANIC) {
			doadump();		/* TXDB_BOOT's itself */
			/*NOTREACHED*/
		}
		tocons(TXDB_BOOT);
	}
#if defined(VAX750) || defined(VAX730) || defined(VAX630)
	if (cpu == VAX_750 || cpu == VAX_730 || cpu == VAX_630)
		{ asm("movl r11,r5"); }		/* boot flags go in r5 */
#endif
	for (;;)
		asm("halt");
	/*NOTREACHED*/
}
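
/*
 * Illustration of the flags used above: RB_NOSYNC in arghowto skips the
 * "syncing disks" pass, RB_HALT spins at IPL 0x1f waiting for a console
 * halt rather than rebooting, and a paniced argument of RB_PANIC takes a
 * crash dump via doadump() before the console reboot request (TXDB_BOOT)
 * is issued.
 */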

tocons(c)
{
	register oldmask;

	while (((oldmask = mfpr(TXCS)) & TXCS_RDY) == 0)
		continue;

	switch (cpu) {

#if VAX780 || VAX750 || VAX730 || VAX630
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_630:
		c |= TXDB_CONS;
		break;
#endif

#if VAX8600
	case VAX_8600:
		mtpr(TXCS, TXCS_LCONS | TXCS_WMASK);
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		break;
#endif
	}

	mtpr(TXDB, c);

#if VAX8600
	switch (cpu) {

	case VAX_8600:
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		mtpr(TXCS, oldmask | TXCS_WMASK);
		break;
	}
#endif
}

int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
dumpsys()
{

	rpb.rp_flag = 1;
#ifdef notdef
	if ((minor(dumpdev)&07) != 1)
		return;
#endif
	dumpsize = physmem;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error");
		break;

	default:
		printf("succeeded");
		break;
	}
}

/*
 * Machine check error recovery code.
 * Print out the machine check frame and then give up.
 */
#if VAX8600
#define	NMC8600	7
char *mc8600[] = {
	"unkn type",	"fbox error",	"ebox error",	"ibox error",
	"mbox error",	"tbuf error",	"mbox 1D error"
};
/* codes for above */
#define	MC_FBOX		1
#define	MC_EBOX		2
#define	MC_IBOX		3
#define	MC_MBOX		4
#define	MC_TBUF		5
#define	MC_MBOX1D	6

/* error bits */
#define	MBOX_FE		0x8000		/* Mbox fatal error */
#define	FBOX_SERV	0x10000000	/* Fbox service error */
#define	IBOX_ERR	0x2000		/* Ibox error */
#define	EBOX_ERR	0x1e00		/* Ebox error */
#define	MBOX_1D		0x81d0000	/* Mbox 1D error */
#define	EDP_PE		0x200
#endif

#if defined(VAX780) || defined(VAX750)
char *mc780[] = {
	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
	"cp rdtimo",	"cp rds",	"ucode lost",	0,
	0,		0,		"ib tbuf par",	0,
	"ib rds",	"ib rd timo",	0,		"ib cache par"
};
#define	MC750_TBERR	2	/* type code of cp tbuf par */
#define	MC750_TBPAR	4	/* tbuf par bit in mcesr */
#endif

#if VAX730
#define	NMC730	12
char *mc730[] = {
	"tb par",	"bad retry",	"bad intr id",	"cant write ptem",
	"unkn mcr err",	"iib rd err",	"nxm ref",	"cp rds",
	"unalgn ioref",	"nonlw ioref",	"bad ioaddr",	"unalgn ubaddr",
};
#endif
#if VAX630
#define	NMC630	10
extern struct ka630cpu ka630cpu;
char *mc630[] = {
	0,		"immcr (fsd)",	"immcr (ssd)",	"fpu err 0",
	"fpu err 7",	"mmu st(tb)",	"mmu st(m=0)",	"pte in p0",
	"pte in p1",	"un intr id",
};
#endif

/*
 * Frame for each cpu
 */
struct mc780frame {
	int	mc8_bcnt;		/* byte count == 0x28 */
	int	mc8_summary;		/* summary parameter (as above) */
	int	mc8_cpues;		/* cpu error status */
	int	mc8_upc;		/* micro pc */
	int	mc8_vaviba;		/* va/viba register */
	int	mc8_dreg;		/* d register */
	int	mc8_tber0;		/* tbuf error reg 0 */
	int	mc8_tber1;		/* tbuf error reg 1 */
	int	mc8_timo;		/* timeout address divided by 4 */
	int	mc8_parity;		/* parity */
	int	mc8_sbier;		/* sbi error register */
	int	mc8_pc;			/* trapped pc */
	int	mc8_psl;		/* trapped psl */
};
struct mc750frame {
	int	mc5_bcnt;		/* byte count == 0x28 */
	int	mc5_summary;		/* summary parameter (as above) */
	int	mc5_va;			/* virtual address register */
	int	mc5_errpc;		/* error pc */
	int	mc5_mdr;
	int	mc5_svmode;		/* saved mode register */
	int	mc5_rdtimo;		/* read lock timeout */
	int	mc5_tbgpar;		/* tb group parity error register */
	int	mc5_cacherr;		/* cache error register */
	int	mc5_buserr;		/* bus error register */
	int	mc5_mcesr;		/* machine check status register */
	int	mc5_pc;			/* trapped pc */
	int	mc5_psl;		/* trapped psl */
};
struct mc730frame {
	int	mc3_bcnt;		/* byte count == 0xc */
	int	mc3_summary;		/* summary parameter */
	int	mc3_parm[2];		/* parameter 1 and 2 */
	int	mc3_pc;			/* trapped pc */
	int	mc3_psl;		/* trapped psl */
};
struct mc630frame {
	int	mc63_bcnt;		/* byte count == 0xc */
	int	mc63_summary;		/* summary parameter */
	int	mc63_mrvaddr;		/* most recent vad */
	int	mc63_istate;		/* internal state */
	int	mc63_pc;		/* trapped pc */
	int	mc63_psl;		/* trapped psl */
};
struct mc8600frame {
	int	mc6_bcnt;		/* byte count == 0x58 */
	int	mc6_ehmsts;
	int	mc6_evmqsav;
	int	mc6_ebcs;
	int	mc6_edpsr;
	int	mc6_cslint;
	int	mc6_ibesr;
	int	mc6_ebxwd1;
	int	mc6_ebxwd2;
	int	mc6_ivasav;
	int	mc6_vibasav;
	int	mc6_esasav;
	int	mc6_isasav;
	int	mc6_cpc;
	int	mc6_mstat1;
	int	mc6_mstat2;
	int	mc6_mdecc;
	int	mc6_merg;
	int	mc6_cshctl;
	int	mc6_mear;
	int	mc6_medr;
	int	mc6_accs;
	int	mc6_cses;
	int	mc6_pc;			/* trapped pc */
	int	mc6_psl;		/* trapped psl */
};

machinecheck(cmcf)
	caddr_t cmcf;
{
	register u_int type = ((struct mc780frame *)cmcf)->mc8_summary;

	printf("machine check %x: ", type);
	switch (cpu) {
#if VAX8600
	case VAX_8600: {
		register struct mc8600frame *mcf = (struct mc8600frame *)cmcf;

		if (mcf->mc6_ebcs & MBOX_FE)
			mcf->mc6_ehmsts |= MC_MBOX;
		else if (mcf->mc6_ehmsts & FBOX_SERV)
			mcf->mc6_ehmsts |= MC_FBOX;
		else if (mcf->mc6_ebcs & EBOX_ERR) {
			if (mcf->mc6_ebcs & EDP_PE)
				mcf->mc6_ehmsts |= MC_MBOX;
			else
				mcf->mc6_ehmsts |= MC_EBOX;
		} else if (mcf->mc6_ehmsts & IBOX_ERR)
			mcf->mc6_ehmsts |= MC_IBOX;
		else if (mcf->mc6_mstat1 & M8600_TB_ERR)
			mcf->mc6_ehmsts |= MC_TBUF;
		else if ((mcf->mc6_cslint & MBOX_1D) == MBOX_1D)
			mcf->mc6_ehmsts |= MC_MBOX1D;

		type = mcf->mc6_ehmsts & 0x7;
		if (type < NMC8600)
			printf("machine check %x: %s", type, mc8600[type]);
		printf("\n");
		printf("\tehm.sts %x evmqsav %x ebcs %x edpsr %x cslint %x\n",
		    mcf->mc6_ehmsts, mcf->mc6_evmqsav, mcf->mc6_ebcs,
		    mcf->mc6_edpsr, mcf->mc6_cslint);
		printf("\tibesr %x ebxwd %x %x ivasav %x vibasav %x\n",
		    mcf->mc6_ibesr, mcf->mc6_ebxwd1, mcf->mc6_ebxwd2,
		    mcf->mc6_ivasav, mcf->mc6_vibasav);
		printf("\tesasav %x isasav %x cpc %x mstat %x %x mdecc %x\n",
		    mcf->mc6_esasav, mcf->mc6_isasav, mcf->mc6_cpc,
		    mcf->mc6_mstat1, mcf->mc6_mstat2, mcf->mc6_mdecc);
		printf("\tmerg %x cshctl %x mear %x medr %x accs %x cses %x\n",
		    mcf->mc6_merg, mcf->mc6_cshctl, mcf->mc6_mear,
		    mcf->mc6_medr, mcf->mc6_accs, mcf->mc6_cses);
		printf("\tpc %x psl %x\n", mcf->mc6_pc, mcf->mc6_psl);
		mtpr(EHSR, 0);
		break;
	};
#endif
#if VAX780
	case VAX_780: {
		register struct mc780frame *mcf = (struct mc780frame *)cmcf;
		register int sbifs;

		printf("%s%s\n", mc780[type&0xf],
		    (type&0xf0) ? " abort" : " fault");
		printf("\tcpues %x upc %x va/viba %x dreg %x tber %x %x\n",
		    mcf->mc8_cpues, mcf->mc8_upc, mcf->mc8_vaviba,
		    mcf->mc8_dreg, mcf->mc8_tber0, mcf->mc8_tber1);
		sbifs = mfpr(SBIFS);
		printf("\ttimo %x parity %x sbier %x pc %x psl %x sbifs %x\n",
		    mcf->mc8_timo*4, mcf->mc8_parity, mcf->mc8_sbier,
		    mcf->mc8_pc, mcf->mc8_psl, sbifs);
		/* THE FUNNY BITS IN THE FOLLOWING ARE FROM THE ``BLACK */
		/* BOOK'' AND SHOULD BE PUT IN AN ``sbi.h'' */
		mtpr(SBIFS, sbifs &~ 0x2000000);
		mtpr(SBIER, mfpr(SBIER) | 0x70c0);
		break;
	}
#endif
#if VAX750
	case VAX_750: {
		register struct mc750frame *mcf = (struct mc750frame *)cmcf;
		int mcsr = mfpr(MCSR);

		printf("%s%s\n", mc780[type&0xf],
		    (type&0xf0) ? " abort" : " fault");
		mtpr(TBIA, 0);
		mtpr(MCESR, 0xf);
		printf("\tva %x errpc %x mdr %x smr %x rdtimo %x tbgpar %x cacherr %x\n",
		    mcf->mc5_va, mcf->mc5_errpc, mcf->mc5_mdr, mcf->mc5_svmode,
		    mcf->mc5_rdtimo, mcf->mc5_tbgpar, mcf->mc5_cacherr);
		printf("\tbuserr %x mcesr %x pc %x psl %x mcsr %x\n",
		    mcf->mc5_buserr, mcf->mc5_mcesr, mcf->mc5_pc, mcf->mc5_psl,
		    mcsr);
		if (type == MC750_TBERR && (mcf->mc5_mcesr&0xe) == MC750_TBPAR) {
			printf("tbuf par: flushing and returning\n");
			return;
		}
		break;
	}
#endif
#if VAX730
	case VAX_730: {
		register struct mc730frame *mcf = (struct mc730frame *)cmcf;

		if (type < NMC730)
			printf("%s", mc730[type]);
		printf("\n");
		printf("params %x,%x pc %x psl %x mcesr %x\n",
		    mcf->mc3_parm[0], mcf->mc3_parm[1],
		    mcf->mc3_pc, mcf->mc3_psl, mfpr(MCESR));
		mtpr(MCESR, 0xf);
		break;
	}
#endif
#if VAX630
	case VAX_630: {
		register struct ka630cpu *ka630addr = &ka630cpu;
		register struct mc630frame *mcf = (struct mc630frame *)cmcf;

		printf("vap %x istate %x pc %x psl %x\n",
		    mcf->mc63_mrvaddr, mcf->mc63_istate,
		    mcf->mc63_pc, mcf->mc63_psl);
		if (ka630addr->ka630_mser & KA630MSER_MERR) {
			printf("mser=0x%x ", ka630addr->ka630_mser);
			if (ka630addr->ka630_mser & KA630MSER_CPUER)
				printf("page=%d", ka630addr->ka630_cear);
			if (ka630addr->ka630_mser & KA630MSER_DQPE)
				printf("page=%d", ka630addr->ka630_dear);
			printf("\n");
		}
		break;
	}
#endif
	}
	memerr();
	panic("mchk");
}

/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points. We do this by reading the interval count
 * register to determine the time remaining to the next clock tick.
 * We must compensate for wraparound which is not yet reflected in the time
 * (which happens when the ICR hits 0 and wraps after the splhigh(),
 * but before the mfpr(ICR)). Also check that this time is no less than
 * any previously-reported time, which could happen around the time
 * of a clock adjustment. Just for fun, we guarantee that the time
 * will be greater than the value obtained by a previous call.
 */
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splhigh();
	static struct timeval lasttime;
	register long t;

	*tvp = time;
	t = mfpr(ICR);
	if (t < -tick / 2 && (mfpr(ICCS) & ICCS_INT))
		t += tick;
	tvp->tv_usec += tick + t;
	if (tvp->tv_usec > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	if (tvp->tv_sec == lasttime.tv_sec &&
	    tvp->tv_usec <= lasttime.tv_usec &&
	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	lasttime = *tvp;
	splx(s);
}
1206: */ 1207: switch (cpu) { 1208: 1209: #if VAX780 1210: case VAX_780: 1211: mtpr(SBIMT, 0x200000); 1212: break; 1213: #endif 1214: #if VAX750 1215: case VAX_750: 1216: mtpr(CADR, 0); 1217: break; 1218: #endif 1219: #if VAX8600 1220: case VAX_8600: 1221: mtpr(CSWP, 3); 1222: break; 1223: #endif 1224: default: 1225: break; 1226: } 1227: 1228: /* 1229: * Enable floating point accelerator if it exists 1230: * and has control register. 1231: */ 1232: switch(cpu) { 1233: 1234: #if VAX8600 || VAX780 1235: case VAX_780: 1236: case VAX_8600: 1237: if ((mfpr(ACCS) & 0xff) != 0) { 1238: printf("Enabling FPA\n"); 1239: mtpr(ACCS, 0x8000); 1240: } 1241: #endif 1242: default: 1243: break; 1244: } 1245: }