#
/*
 */

#include "../param.h"
#include "../user.h"
#include "../proc.h"
#include "../text.h"
#include "../systm.h"
#include "../file.h"
#include "../inode.h"
#include "../buf.h"

/*
 * Give up the processor till a wakeup occurs
 * on chan, at which time the process
 * enters the scheduling queue at priority pri.
 * The most important effect of pri is that when
 * pri<0 a signal cannot disturb the sleep;
 * if pri>=0 signals will be processed.
 * Callers of this routine must be prepared for
 * premature return, and check that the reason for
 * sleeping has gone away.
 */
sleep(chan, pri)
{
        register *rp, s;

        s = PS->integ;
        rp = u.u_procp;
        if(pri >= 0) {
                if(issig())
                        goto psig;
                spl6();
                rp->p_wchan = chan;
                rp->p_stat = SWAIT;
                rp->p_pri = pri;
                spl0();
                if(runin != 0) {
                        runin = 0;
                        wakeup(&runin);
                }
                swtch();
                if(issig())
                        goto psig;
        } else {
                spl6();
                rp->p_wchan = chan;
                rp->p_stat = SSLEEP;
                rp->p_pri = pri;
                spl0();
                swtch();
        }
        PS->integ = s;
        return;

        /*
         * If priority was low (>=0) and
         * there has been a signal,
         * execute non-local goto to
         * the qsav location.
         * (see trap1/trap.c)
         */
psig:
        aretu(u.u_qsav);
}

/*
 * Wake up all processes sleeping on chan.
 */
wakeup(chan)
{
        register struct proc *p;
        register c, i;

        c = chan;
        p = &proc[0];
        i = NPROC;
        do {
                if(p->p_wchan == c) {
                        setrun(p);
                }
                p++;
        } while(--i);
}

/*
 * Set the process running;
 * arrange for it to be swapped in if necessary.
 */
setrun(p)
{
        register struct proc *rp;

        rp = p;
        rp->p_wchan = 0;
        rp->p_stat = SRUN;
        if(rp->p_pri < curpri)
                runrun++;
        if(runout != 0 && (rp->p_flag&SLOAD) == 0) {
                runout = 0;
                wakeup(&runout);
        }
}

/*
 * Set user priority.
 * The rescheduling flag (runrun)
 * is set if the new priority value is numerically higher
 * (i.e. weaker) than that of the currently running process.
 */
setpri(up)
{
        register *pp, p;

        pp = up;
        p = (pp->p_cpu & 0377)/16;
        p =+ PUSER + pp->p_nice;
        if(p > 127)
                p = 127;
        if(p > curpri)
                runrun++;
        pp->p_pri = p;
}

/*
 * The main loop of the scheduling (swapping)
 * process.
 * The basic idea is:
 *  see if anyone wants to be swapped in;
 *  swap out processes until there is room;
 *  swap him in;
 *  repeat.
 * Although it is not remarkably evident, the basic
 * synchronization here is on the runin flag, which is
 * slept on and is set once per second by the clock routine.
 * Core shuffling therefore takes place once per second.
 *
 * panic: swap error -- IO error while swapping.
 *      this is the one panic that should be
 *      handled in a less drastic way. It's
 *      very hard.
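 *
 * Spelled out, the handshake visible in this file is: when no
 * swapped-out process is ready to run, sched sleeps on runout, and
 * setrun wakes it as soon as one becomes runnable; when there is no
 * core to bring it into, sched sleeps on runin, and is wakened by the
 * clock routine once per second, or sooner by sleep above when a
 * resident process blocks interruptibly.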
 */
sched()
{
        struct proc *p1;
        register struct proc *rp;
        register a, n;

        /*
         * find user to swap in
         * of users ready, select one out longest
         */

        goto loop;

sloop:
        runin++;
        sleep(&runin, PSWP);

loop:
        spl6();
        n = -1;
        for(rp = &proc[0]; rp < &proc[NPROC]; rp++)
                if(rp->p_stat==SRUN && (rp->p_flag&SLOAD)==0 &&
                    rp->p_time > n) {
                        p1 = rp;
                        n = rp->p_time;
                }
        if(n == -1) {
                runout++;
                sleep(&runout, PSWP);
                goto loop;
        }

        /*
         * see if there is core for that process
         */

        spl0();
        rp = p1;
        a = rp->p_size;
        if((rp=rp->p_textp) != NULL)
                if(rp->x_ccount == 0)
                        a =+ rp->x_size;
        if((a=malloc(coremap, a)) != NULL)
                goto found2;

        /*
         * none found,
         * look around for easy core
         */

        spl6();
        for(rp = &proc[0]; rp < &proc[NPROC]; rp++)
                if((rp->p_flag&(SSYS|SLOCK|SLOAD))==SLOAD &&
                    (rp->p_stat == SWAIT || rp->p_stat==SSTOP))
                        goto found1;

        /*
         * no easy core,
         * if this process is deserving,
         * look around for
         * oldest process in core
         */

        if(n < 3)
                goto sloop;
        n = -1;
        for(rp = &proc[0]; rp < &proc[NPROC]; rp++)
                if((rp->p_flag&(SSYS|SLOCK|SLOAD))==SLOAD &&
                    (rp->p_stat==SRUN || rp->p_stat==SSLEEP) &&
                    rp->p_time > n) {
                        p1 = rp;
                        n = rp->p_time;
                }
        if(n < 2)
                goto sloop;
        rp = p1;

        /*
         * swap user out
         */

found1:
        spl0();
        rp->p_flag =& ~SLOAD;
        xswap(rp, 1, 0);
        goto loop;

        /*
         * swap user in
         */

found2:
        if((rp=p1->p_textp) != NULL) {
                if(rp->x_ccount == 0) {
                        if(swap(rp->x_daddr, a, rp->x_size, B_READ))
                                goto swaper;
                        rp->x_caddr = a;
                        a =+ rp->x_size;
                }
                rp->x_ccount++;
        }
        rp = p1;
        if(swap(rp->p_addr, a, rp->p_size, B_READ))
                goto swaper;
        mfree(swapmap, (rp->p_size+7)/8, rp->p_addr);
        rp->p_addr = a;
        rp->p_flag =| SLOAD;
        rp->p_time = 0;
        goto loop;

swaper:
        panic("swap error");
}

/*
 * This routine is called to reschedule the CPU.
 * If the calling process is not in RUN state,
 * arrangements for it to restart must have
 * been made elsewhere, usually by calling via sleep.
 */
swtch()
{
        static struct proc *p;
        register i, n;
        register struct proc *rp;

        if(p == NULL)
                p = &proc[0];
        /*
         * Remember stack of caller
         */
        savu(u.u_rsav);
        /*
         * Switch to scheduler's stack
         */
        retu(proc[0].p_addr);

loop:
        runrun = 0;
        rp = p;
        p = NULL;
        n = 128;
        /*
         * Search for highest-priority runnable process
         */
        i = NPROC;
        do {
                rp++;
                if(rp >= &proc[NPROC])
                        rp = &proc[0];
                if(rp->p_stat==SRUN && (rp->p_flag&SLOAD)!=0) {
                        if(rp->p_pri < n) {
                                p = rp;
                                n = rp->p_pri;
                        }
                }
        } while(--i);
        /*
         * If no process is runnable, idle.
         */
        if(p == NULL) {
                p = rp;
                idle();
                goto loop;
        }
        rp = p;
        curpri = n;
        /*
         * Switch to stack of the new process and set up
         * his segmentation registers.
         */
        retu(rp->p_addr);
        sureg();
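        /*
         * retu (in the machine-language assist) points kernel
         * segmentation register six, and with it the u. area and
         * kernel stack, at the new process; sureg (in main.c) then
         * reloads the user-mode segmentation registers from the
         * prototypes kept in the u. area.
         */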
        /*
         * If the new process paused because it was
         * swapped out, set the stack level to the last call
         * to savu(u_ssav).  This means that the return
         * which is executed immediately after the call to aretu
         * actually returns from the last routine which did
         * the savu.
         *
         * You are not expected to understand this.
         */
        if(rp->p_flag&SSWAP) {
                rp->p_flag =& ~SSWAP;
                aretu(u.u_ssav);
        }
        /*
         * The value returned here has many subtle implications.
         * See the newproc comments.
         */
        return(1);
}

/*
 * Create a new process-- the internal version of
 * sys fork.
 * It returns 1 in the new process.
 * How this happens is rather hard to understand.
 * The essential fact is that the new process is created
 * in such a way that it appears to have started executing
 * in the same call to newproc as the parent;
 * but in fact the code that runs is that of swtch.
 * The subtle implication of the returned value of swtch
 * (see above) is that this is the value that newproc's
 * caller in the new process sees.
 */
newproc()
{
        int a1, a2;
        struct proc *p, *up;
        register struct proc *rpp;
        register *rip, n;

        p = NULL;
        /*
         * First, just locate a slot for a process
         * and copy the useful info from this process into it.
         * The panic "cannot happen" because fork has already
         * checked for the existence of a slot.
         */
retry:
        mpid++;
        if(mpid < 0) {
                mpid = 0;
                goto retry;
        }
        for(rpp = &proc[0]; rpp < &proc[NPROC]; rpp++) {
                if(rpp->p_stat == NULL && p==NULL)
                        p = rpp;
                if (rpp->p_pid==mpid)
                        goto retry;
        }
        if ((rpp = p)==NULL)
                panic("no procs");

        /*
         * make proc entry for new proc
         */

        rip = u.u_procp;
        up = rip;
        rpp->p_stat = SRUN;
        rpp->p_flag = SLOAD;
        rpp->p_uid = rip->p_uid;
        rpp->p_ttyp = rip->p_ttyp;
        rpp->p_nice = rip->p_nice;
        rpp->p_textp = rip->p_textp;
        rpp->p_pid = mpid;
        rpp->p_ppid = rip->p_pid;
        rpp->p_time = 0;

        /*
         * make duplicate entries
         * where needed
         */

        for(rip = &u.u_ofile[0]; rip < &u.u_ofile[NOFILE];)
                if((rpp = *rip++) != NULL)
                        rpp->f_count++;
        if((rpp=up->p_textp) != NULL) {
                rpp->x_count++;
                rpp->x_ccount++;
        }
        u.u_cdir->i_count++;
        /*
         * Partially simulate the environment
         * of the new process so that when it is actually
         * created (by copying) it will look right.
         */
        savu(u.u_rsav);
        rpp = p;
        u.u_procp = rpp;
        rip = up;
        n = rip->p_size;
        a1 = rip->p_addr;
        rpp->p_size = n;
        a2 = malloc(coremap, n);
        /*
         * If there is not enough core for the
         * new process, swap out the current process to generate the
         * copy.
         */
        if(a2 == NULL) {
                rip->p_stat = SIDL;
                rpp->p_addr = a1;
                savu(u.u_ssav);
                xswap(rpp, 0, 0);
                rpp->p_flag =| SSWAP;
                rip->p_stat = SRUN;
        } else {
                /*
                 * There is core, so just copy.
                 */
                rpp->p_addr = a2;
                while(n--)
                        copyseg(a1++, a2++);
        }
        u.u_procp = rip;
        return(0);
}
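
/*
 * To make the swtch/newproc interplay concrete, a caller of newproc
 * (fork, in sys1.c) behaves in effect like:
 *
 *      if(newproc()) {
 *              child: sees the 1 manufactured by swtch's return(1)
 *      }
 *      parent: continues here with newproc's own return(0) above
 *
 * This is a sketch of the convention, not a quotation of fork itself.
 */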

/*
 * Change the size of the data+stack regions of the process.
 * If the size is shrinking, it's easy-- just release the extra core.
 * If it's growing, and there is core, just allocate it
 * and copy the image, taking care to reset registers to account
 * for the fact that the system's stack has moved.
 * If there is no core, arrange for the process to be swapped
 * out after adjusting the size requirement-- when it comes
 * in, enough core will be allocated.
 * Because of the ssave and SSWAP flags, control will
 * resume after the swap in swtch, which executes the return
 * from this stack level.
 *
 * After the expansion, the caller will take care of copying
 * the user's stack towards or away from the data area.
 */
expand(newsize)
{
        int i, n;
        register *p, a1, a2;

        p = u.u_procp;
        n = p->p_size;
        p->p_size = newsize;
        a1 = p->p_addr;
        if(n >= newsize) {
                mfree(coremap, n-newsize, a1+newsize);
                return;
        }
        savu(u.u_rsav);
        a2 = malloc(coremap, newsize);
        if(a2 == NULL) {
                savu(u.u_ssav);
                xswap(p, 1, n);
                p->p_flag =| SSWAP;
                swtch();
                /* no return */
        }
        p->p_addr = a2;
        for(i=0; i<n; i++)
                copyseg(a1+i, a2++);
        mfree(coremap, n, a1);
        retu(p->p_addr);
        sureg();
}
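
/*
 * A minimal sketch of the calling convention that the sleep comment
 * at the top of this file asks for; the names "resource", "busy",
 * "wanted" and the priority "PRI" are illustrative only, not taken
 * from this file:
 *
 *      while(resource->busy) {
 *              resource->wanted = 1;
 *              sleep(resource, PRI);
 *      }
 *      resource->busy = 1;
 *
 * The loop is the point: a return from sleep means only that a wakeup
 * (or, at nonnegative priority, a signal) occurred, so the caller must
 * re-test the condition it slept on before going ahead.
 */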