/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)if_uba.c	7.1 (Berkeley) 6/5/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "mbuf.h"
#include "map.h"
#include "buf.h"
#include "cmap.h"
#include "vmmac.h"
#include "socket.h"
#include "syslog.h"

#include "../net/if.h"

#include "../vax/mtpr.h"
#include "if_uba.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

/*
 * Routines supporting UNIBUS network interfaces.
 *
 * TODO:
 *	Support interfaces using only one BDP statically.
 */

/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this once for each read and once for each write buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 */
if_ubaminit(ifu, uban, hlen, nmr, ifr, nr, ifw, nw)
	register struct ifubinfo *ifu;
	int uban, hlen, nmr, nr, nw;
	register struct ifrw *ifr;
	register struct ifxmt *ifw;
{
	register caddr_t p;
	caddr_t cp;
	int i, ncl, off;

	if (hlen)
		off = CLBYTES - hlen;
	else
		off = 0;
	ncl = clrnd(nmr) / CLSIZE;
	if (hlen)
		ncl++;
	if (ifr[0].ifrw_addr)
		cp = ifr[0].ifrw_addr - off;
	else {
		cp = m_clalloc((nr + nw) * ncl, MPG_SPACE, M_DONTWAIT);
		if (cp == 0)
			return (0);
		p = cp;
		for (i = 0; i < nr; i++) {
			ifr[i].ifrw_addr = p + off;
			p += ncl * CLBYTES;
		}
		for (i = 0; i < nw; i++) {
			ifw[i].ifw_base = p;
			ifw[i].ifw_addr = p + off;
			p += ncl * CLBYTES;
		}
		ifu->iff_hlen = hlen;
		ifu->iff_uban = uban;
		ifu->iff_uba = uba_hd[uban].uh_uba;
	}
	for (i = 0; i < nr; i++)
		if (if_ubaalloc(ifu, &ifr[i], nmr) == 0) {
			nr = i;
			nw = 0;
			goto bad;
		}
	for (i = 0; i < nw; i++)
		if (if_ubaalloc(ifu, &ifw[i].ifrw, nmr) == 0) {
			nw = i;
			goto bad;
		}
	while (--nw >= 0) {
		for (i = 0; i < nmr; i++)
			ifw[nw].ifw_wmap[i] = ifw[nw].ifw_mr[i];
		ifw[nw].ifw_xswapd = 0;
		ifw[nw].ifw_flags = IFRW_W;
		ifw[nw].ifw_nmr = nmr;
	}
	return (1);
bad:
	while (--nw >= 0)
		ubarelse(ifu->iff_uban, &ifw[nw].ifw_info);
	while (--nr >= 0)
		ubarelse(ifu->iff_uban, &ifr[nr].ifrw_info);
	m_pgfree(cp, (nr + nw) * ncl);
	ifr[0].ifrw_addr = 0;
	return (0);
}

/*
 * Set up an ifrw structure by allocating UNIBUS map registers,
 * possibly a buffered data path, and initializing the fields of
 * the ifrw structure to minimize run-time overhead.
 */
static
if_ubaalloc(ifu, ifrw, nmr)
	struct ifubinfo *ifu;
	register struct ifrw *ifrw;
	int nmr;
{
	register int info;

	info =
	    uballoc(ifu->iff_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->iff_hlen,
		ifu->iff_flags);
	if (info == 0)
		return (0);
	ifrw->ifrw_info = info;
	ifrw->ifrw_bdp = UBAI_BDP(info);
	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
	ifrw->ifrw_mr = &ifu->iff_uba->uba_map[UBAI_MR(info) + (ifu->iff_hlen?
	    1 : 0)];
	return (1);
}

/*
 * Pull read data off an interface.
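 * The data is returned as a chain of mbufs; a null pointer is
 * returned if mbuf allocation fails part way through.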
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 * Prepend a pointer to the interface structure,
 * so that protocols can determine where incoming packets arrived.
 * Note: we may be called to receive from a transmit buffer by some
 * devices.  In that case, we must force normal mapping of the buffer,
 * so that the correct data will appear (only unibus maps are
 * changed when remapping the transmit buffers).
 */
struct mbuf *
if_ubaget(ifu, ifr, totlen, off0, ifp)
	struct ifubinfo *ifu;
	register struct ifrw *ifr;
	int totlen, off0;
	struct ifnet *ifp;
{
	struct mbuf *top, **mp;
	register struct mbuf *m;
	int off = off0, len;
	register caddr_t cp = ifr->ifrw_addr + ifu->iff_hlen, pp;

	top = 0;
	mp = &top;
	if (ifr->ifrw_flags & IFRW_W)
		rcv_xmtbuf((struct ifxmt *)ifr);
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == 0) {
			m_freem(top);
			top = 0;
			goto out;
		}
		if (off) {
			len = totlen - off;
			cp = ifr->ifrw_addr + ifu->iff_hlen + off;
		} else
			len = totlen;
		if (len >= CLBYTES/2) {
			struct pte *cpte, *ppte;
			int x, *ip, i;

			/*
			 * If doing the first mbuf and
			 * the interface pointer hasn't been put in,
			 * put it in a separate mbuf to preserve alignment.
			 */
			if (ifp) {
				len = 0;
				goto nopage;
			}
			MCLGET(m);
			if (m->m_len != CLBYTES)
				goto nopage;
			m->m_len = MIN(len, CLBYTES);
			if (!claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to UNIBUS with new page pp,
			 * as quick form of copy.  Remap UNIBUS and invalidate.
			 */
			pp = mtod(m, char *);
			cpte = &Mbmap[mtocl(cp)*CLSIZE];
			ppte = &Mbmap[mtocl(pp)*CLSIZE];
			x = btop(cp - ifr->ifrw_addr);
			ip = (int *)&ifr->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ =
				    cpte++->pg_pfnum|ifr->ifrw_proto;
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)pp);
				pp += NBPG;
			}
			goto nocopy;
		}
nopage:
		m->m_off = MMINOFF;
		if (ifp) {
			/*
			 * Leave room for ifp.
			 */
			m->m_len = MIN(MLEN - sizeof(ifp), len);
			m->m_off += sizeof(ifp);
		} else
			m->m_len = MIN(MLEN, len);
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			off += m->m_len;
			if (off == totlen) {
				cp = ifr->ifrw_addr + ifu->iff_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
		if (ifp) {
			/*
			 * Prepend interface pointer to first mbuf.
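			 * The sizeof(ifp) bytes of headroom reserved at
			 * "nopage" above are reclaimed here by backing up
			 * m_off, so the pointer lands ahead of the data.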
			 */
			m->m_len += sizeof(ifp);
			m->m_off -= sizeof(ifp);
			*(mtod(m, struct ifnet **)) = ifp;
			ifp = (struct ifnet *)0;
		}
	}
out:
	if (ifr->ifrw_flags & IFRW_W)
		restor_xmtbuf((struct ifxmt *)ifr);
	return (top);
}

/*
 * Change the mapping on a transmit buffer so that if_ubaget may
 * receive from that buffer.  Copy data from any pages mapped to Unibus
 * into the pages mapped to normal kernel virtual memory, so that
 * they can be accessed and swapped as usual.  We take advantage
 * of the fact that clusters are placed on the xtofree list
 * in inverse order, finding the last one.
 */
static
rcv_xmtbuf(ifw)
	register struct ifxmt *ifw;
{
	register struct mbuf *m;
	struct mbuf **mprev;
	register i;
	int t;
	char *cp;

	while (i = ffs((long)ifw->ifw_xswapd)) {
		cp = ifw->ifw_base + i * CLBYTES;
		i--;
		ifw->ifw_xswapd &= ~(1<<i);
		i *= CLSIZE;
		mprev = &ifw->ifw_xtofree;
		for (m = ifw->ifw_xtofree; m && m->m_next; m = m->m_next)
			mprev = &m->m_next;
		if (m == NULL)
			panic("rcv_xmtbuf");
		bcopy(mtod(m, caddr_t), cp, CLBYTES);
		(void) m_free(m);
		*mprev = NULL;
		for (t = 0; t < CLSIZE; t++) {
			ifw->ifw_mr[i] = ifw->ifw_wmap[i];
			i++;
		}
	}
}

/*
 * Put a transmit buffer back together after doing an if_ubaget on it,
 * which may have swapped pages.
 */
static
restor_xmtbuf(ifw)
	register struct ifxmt *ifw;
{
	register i;

	for (i = 0; i < ifw->ifw_nmr; i++)
		ifw->ifw_wmap[i] = ifw->ifw_mr[i];
}

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 */
if_ubaput(ifu, ifw, m)
	struct ifubinfo *ifu;
	register struct ifxmt *ifw;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp, dp;
	register int i;
	int xswapd = 0;
	int x, cc, t;

	cp = ifw->ifw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp) &&
		    (m->m_len == CLBYTES || m->m_next == (struct mbuf *)0)) {
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)*CLSIZE];
			x = btop(cp - ifw->ifw_addr);
			ip = (int *)&ifw->ifw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ =
				    ifw->ifw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			mp = m->m_next;
			m->m_next = ifw->ifw_xtofree;
			ifw->ifw_xtofree = m;
			cp += m->m_len;
		} else {
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifw->ifw_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
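	 * Each bit in these masks stands for one cluster of the transmit
	 * buffer; ffs() yields the lowest set bit, so the loop below walks
	 * the previously swapped clusters in ascending order.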
	 */
	cc = cp - ifw->ifw_addr;
	x = ((cc - ifu->iff_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifw->ifw_xswapd &= ~xswapd;
	while (i = ffs((long)ifw->ifw_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifw->ifw_xswapd &= ~(1<<i);
		i *= CLSIZE;
		for (t = 0; t < CLSIZE; t++) {
			ifw->ifw_mr[i] = ifw->ifw_wmap[i];
			i++;
		}
	}
	ifw->ifw_xswapd |= xswapd;
	return (cc);
}
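
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * how a UNIBUS network driver typically calls these routines, patterned
 * after the VAX UNIBUS Ethernet drivers.  The driver name "xx", its softc
 * fields, and the constants XXMTU, NXXRCV and NXXXMT are hypothetical.
 *
 *	At attach time, set up the shared buffers and UNIBUS resources:
 *
 *		if (if_ubaminit(&sc->xx_ifuba, ui->ui_ubanum,
 *		    sizeof (struct ether_header), (int)btoc(XXMTU),
 *		    sc->xx_ifr, NXXRCV, sc->xx_ifw, NXXXMT) == 0)
 *			return;
 *
 *	In the receive interrupt routine, pull a packet of len bytes
 *	(trailer data at offset off) off receive buffer n into an mbuf
 *	chain, recording the receiving interface:
 *
 *		m = if_ubaget(&sc->xx_ifuba, &sc->xx_ifr[n], len, off,
 *		    &sc->xx_if);
 *
 *	In the start routine, map an outgoing mbuf chain onto transmit
 *	buffer n and hand the returned byte count to the device:
 *
 *		cc = if_ubaput(&sc->xx_ifuba, &sc->xx_ifw[n], m);
 */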