/*
 * This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 * This file is licensed to You under the Eclipse Public License (EPL);
 * You may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 *     http://www.opensource.org/licenses/eclipse-1.0.php
 *
 * See the COPYRIGHT.txt file distributed with this work for information
 * regarding copyright ownership.
 */
package org.mmtk.utility.heap;

import static org.mmtk.utility.Constants.*;

import org.mmtk.plan.Plan;
import org.mmtk.policy.Space;

import static org.mmtk.policy.Space.PAGES_IN_CHUNK;
import org.mmtk.utility.alloc.EmbeddedMetaData;
import org.mmtk.utility.Conversions;
import org.mmtk.utility.GenericFreeList;
import org.mmtk.vm.VM;

import org.vmmagic.unboxed.*;
import org.vmmagic.pragma.*;

/**
 * This class manages the allocation of pages for a space. When a
 * page is requested by the space both a page budget and the use of
 * virtual address space are checked. If the request for space can't
 * be satisfied (for either reason) a GC may be triggered.
 */
@Uninterruptible
public final class FreeListPageResource extends PageResource {

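  /*
   * freeList records which pages of this resource are allocated and which
   * are free; highWaterMark is the highest page offset handed out so far;
   * metaDataPagesPerRegion is the number of per-region meta-data pages
   * carved out of each region (zero if there is no embedded meta-data);
   * and pagesCurrentlyOnFreeList counts the pages currently available on
   * the free list.
   */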
  private final GenericFreeList freeList;
  private int highWaterMark = 0;
  private final int metaDataPagesPerRegion;
  private int pagesCurrentlyOnFreeList = 0;

  /**
   * Constructor
   *
   * Contiguous free list resource. The address range is pre-defined at
   * initialization time and is immutable.
   *
   * @param space The space to which this resource is attached
   * @param start The start of the address range allocated to this resource
   * @param bytes The size of the address range allocated to this resource
   */
  public FreeListPageResource(Space space, Address start, Extent bytes) {
    super(space, start);
    int pages = Conversions.bytesToPages(bytes);
    freeList = new GenericFreeList(pages);
    pagesCurrentlyOnFreeList = pages;
    this.metaDataPagesPerRegion = 0;
  }

  /**
   * Constructor
   *
   * Contiguous free list resource. The address range is pre-defined at
   * initialization time and is immutable.
   *
   * @param space The space to which this resource is attached
   * @param start The start of the address range allocated to this resource
   * @param bytes The size of the address range allocated to this resource
   * @param metaDataPagesPerRegion The number of pages of meta data
   * that are embedded in each region.
   */
  public FreeListPageResource(Space space, Address start, Extent bytes, int metaDataPagesPerRegion) {
    super(space, start);
    this.metaDataPagesPerRegion = metaDataPagesPerRegion;
    int pages = Conversions.bytesToPages(bytes);
    freeList = new GenericFreeList(pages, EmbeddedMetaData.PAGES_IN_REGION);
    pagesCurrentlyOnFreeList = pages;
    reserveMetaData(space.getExtent());
  }

  /**
   * Constructor
   *
   * Discontiguous monotone resource. The address range is <i>not</i>
   * pre-defined at initialization time and is dynamically defined to
   * be some set of pages, according to demand and availability.
   *
   * @param space The space to which this resource is attached
   * @param metaDataPagesPerRegion The number of meta data pages per region
   */
  public FreeListPageResource(Space space, int metaDataPagesPerRegion) {
    super(space);
    this.metaDataPagesPerRegion = metaDataPagesPerRegion;
    this.start = Space.AVAILABLE_START;
    freeList = new GenericFreeList(Map.globalPageMap, Map.getDiscontigFreeListPROrdinal(this));
    pagesCurrentlyOnFreeList = 0;
  }

  /**
   * Return the number of available physical pages for this resource.
   * This includes all pages currently free on the resource's free list.
   * If the resource is using discontiguous space it also includes
   * currently unassigned discontiguous space.<p>
   *
   * Note: This just considers physical pages (i.e. virtual memory pages
   * allocated for use by this resource). This calculation is orthogonal
   * to and does not consider any restrictions on the number of pages
   * this resource may actually use at any time (i.e. the number of
   * committed and reserved pages).<p>
   *
   * Note: The calculation is made on the assumption that all space that
   * could be assigned to this resource would be assigned to this resource
   * (i.e. the unused discontiguous space could just as likely be assigned
   * to another competing resource).
   *
   * @return The number of available physical pages for this resource.
   */
  @Override
  public int getAvailablePhysicalPages() {
    int rtn = pagesCurrentlyOnFreeList;
    if (!contiguous) {
      int chunks = Map.getAvailableDiscontiguousChunks() - Map.getChunkConsumerCount();
      if (chunks < 0) chunks = 0;
      rtn += chunks * (Space.PAGES_IN_CHUNK - metaDataPagesPerRegion);
    }
    return rtn;
  }

  /**
   * Allocate <code>pages</code> pages from this resource.<p>
   *
   * If the request can be satisfied, then ensure the pages are
   * mmapped and zeroed before returning the address of the start of
   * the region. If the request cannot be satisfied, return zero.
   *
   * @param reservedPages The number of pages reserved due to the initial request.
   * @param requiredPages The number of pages required to be allocated.
   * @param zeroed If true, allocated pages are zeroed.
   * @return The start of the first page if successful, zero on failure.
   */
  @Override
  @Inline
  protected Address allocPages(int reservedPages, int requiredPages, boolean zeroed) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(metaDataPagesPerRegion == 0 || requiredPages <= PAGES_IN_CHUNK - metaDataPagesPerRegion);
    lock();
    boolean newChunk = false;
    int pageOffset = freeList.alloc(requiredPages);
    if (pageOffset == GenericFreeList.FAILURE && !contiguous) {
      pageOffset = allocateContiguousChunks(requiredPages);
      newChunk = true;
    }
    if (pageOffset == GenericFreeList.FAILURE) {
      unlock();
      return Address.zero();
    } else {
      pagesCurrentlyOnFreeList -= requiredPages;
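      /* Lazily account for the per-region meta-data of any regions newly
         crossed by the high water mark: those meta-data pages are charged
         to the reserved and committed counts here rather than through
         commitPages below, and the allocation is flagged as touching a
         new chunk so that growSpace is informed. */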
      if (pageOffset > highWaterMark) {
        if (highWaterMark == 0 || (pageOffset ^ highWaterMark) > EmbeddedMetaData.PAGES_IN_REGION) {
          int regions = 1 + ((pageOffset - highWaterMark) >> EmbeddedMetaData.LOG_PAGES_IN_REGION);
          int metapages = regions * metaDataPagesPerRegion;
          reserved += metapages;
          committed += metapages;
          newChunk = true;
        }
        highWaterMark = pageOffset;
      }
      Address rtn = start.plus(Conversions.pagesToBytes(pageOffset));
      Extent bytes = Conversions.pagesToBytes(requiredPages);
      // The meta-data portion of reserved Pages was committed above.
      commitPages(reservedPages, requiredPages);
      space.growSpace(rtn, bytes, newChunk);
      unlock();
      Mmapper.ensureMapped(rtn, requiredPages);
      if (zeroed)
        VM.memory.zero(zeroNT, rtn, bytes);
      VM.events.tracePageAcquired(space, rtn, requiredPages);
      return rtn;
    }
  }

  /**
   * Release a group of pages, associated with this page resource,
   * that were allocated together, optionally zeroing on release and
   * optionally memory protecting on release.
   *
   * @param first The first page in the group of pages that were
   * allocated together.
   */
  @Inline
  public void releasePages(Address first) {
    if (VM.VERIFY_ASSERTIONS)
      VM.assertions._assert(Conversions.isPageAligned(first));

    int pageOffset = Conversions.bytesToPages(first.diff(start));

    int pages = freeList.size(pageOffset);
    if (VM.config.ZERO_PAGES_ON_RELEASE)
      VM.memory.zero(false, first, Conversions.pagesToBytes(pages));
    /* Can't use protect here because of the chunk sizes involved!
    if (protectOnRelease.getValue())
      LazyMmapper.protect(first, pages);
    */
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(pages <= committed);

    lock();
    reserved -= pages;
    committed -= pages;
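    /* free() here returns the size of the resulting (coalesced) free block;
       releaseFreeChunks uses that value below to detect whether one or more
       whole chunks have become free. */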
    int freed = freeList.free(pageOffset, true);
    pagesCurrentlyOnFreeList += pages;

    if (!contiguous) // only discontiguous spaces use chunks
      releaseFreeChunks(first, freed);

    unlock();

    VM.events.tracePageReleased(space, first, pages);
  }

  /**
   * The release of a page may have freed up an entire chunk or
   * set of chunks. We need to check whether any chunks can be
   * freed, and if so, free them.
   *
   * @param freedPage The address of the page that was just freed.
   * @param pagesFreed The number of pages made available when the page was freed.
   */
  private void releaseFreeChunks(Address freedPage, int pagesFreed) {
    int pageOffset = Conversions.bytesToPages(freedPage.diff(start));

    if (metaDataPagesPerRegion > 0) { // can only be a single chunk
      if (pagesFreed == (PAGES_IN_CHUNK - metaDataPagesPerRegion)) {
        freeContiguousChunk(Space.chunkAlign(freedPage, true));
      }
    } else { // may be multiple chunks
      if (pagesFreed % PAGES_IN_CHUNK == 0) { // necessary, but not sufficient condition
        /* grow a region of chunks, starting with the chunk containing the freed page */
        int regionStart = pageOffset & ~(PAGES_IN_CHUNK - 1);
        int nextRegionStart = regionStart + PAGES_IN_CHUNK;
        /* now try to grow (end point pages are marked as non-coalescing) */
        while (regionStart >= 0 && freeList.isCoalescable(regionStart))
          regionStart -= PAGES_IN_CHUNK;
        while (nextRegionStart < GenericFreeList.MAX_UNITS && freeList.isCoalescable(nextRegionStart))
          nextRegionStart += PAGES_IN_CHUNK;
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(regionStart >= 0 && nextRegionStart < GenericFreeList.MAX_UNITS);
        if (pagesFreed == nextRegionStart - regionStart) {
          freeContiguousChunk(start.plus(Conversions.pagesToBytes(regionStart)));
        }
      }
    }
  }

  /**
   * Allocate sufficient contiguous chunks within a discontiguous region to
   * satisfy the pending request. Note that this is purely about address space
   * allocation within a discontiguous region. This method does not reserve
   * individual pages; it merely assigns a suitably large region of virtual
   * memory from within the discontiguous region for use by a particular space.
   *
   * @param pages The number of pages currently being requested
   * @return The page offset of the allocated pages, or GenericFreeList.FAILURE
   */
  private int allocateContiguousChunks(int pages) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(metaDataPagesPerRegion == 0 || pages <= PAGES_IN_CHUNK - metaDataPagesPerRegion);
    int rtn = GenericFreeList.FAILURE;
    int requiredChunks = Space.requiredChunks(pages);
    Address region = space.growDiscontiguousSpace(requiredChunks);
    if (!region.isZero()) {
      int regionStart = Conversions.bytesToPages(region.diff(start));
      int regionEnd = regionStart + (requiredChunks * Space.PAGES_IN_CHUNK) - 1;
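      /* Mark the end points of the newly assigned region as uncoalescable
         so that coalescing in the free list never extends beyond the
         region's boundaries. */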
      freeList.setUncoalescable(regionStart);
      freeList.setUncoalescable(regionEnd + 1);
      for (int p = regionStart; p < regionEnd; p += Space.PAGES_IN_CHUNK) {
        int liberated;
        if (p != regionStart)
          freeList.clearUncoalescable(p);
        liberated = freeList.free(p, true); // add chunk to our free list
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(liberated == Space.PAGES_IN_CHUNK + (p - regionStart));
        if (metaDataPagesPerRegion > 1)
          freeList.alloc(metaDataPagesPerRegion, p); // carve out space for metadata
        pagesCurrentlyOnFreeList += Space.PAGES_IN_CHUNK - metaDataPagesPerRegion;
      }
      rtn = freeList.alloc(pages); // re-do the request which triggered this call
    }
    return rtn;
  }

  /**
   * Release a contiguous group of chunks (a single chunk when this space
   * embeds per-region meta-data) from a discontiguous region. All this does
   * is release the chunks' virtual address space back to the discontiguous
   * space from which it was assigned.
   *
   * @param chunk The first chunk in the group to be freed
   */
  private void freeContiguousChunk(Address chunk) {
    int numChunks = Map.getContiguousRegionChunks(chunk);
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(numChunks == 1 || metaDataPagesPerRegion == 0);

    /* nail down all pages associated with the chunk, so it is no longer on our free list */
    int chunkStart = Conversions.bytesToPages(chunk.diff(start));
    int chunkEnd = chunkStart + (numChunks * Space.PAGES_IN_CHUNK);
    while (chunkStart < chunkEnd) {
      freeList.setUncoalescable(chunkStart);
      if (metaDataPagesPerRegion > 0)
        freeList.free(chunkStart); // first free any metadata pages
      int tmp = freeList.alloc(Space.PAGES_IN_CHUNK, chunkStart); // then alloc the entire chunk
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(tmp == chunkStart);
      chunkStart += Space.PAGES_IN_CHUNK;
      pagesCurrentlyOnFreeList -= (Space.PAGES_IN_CHUNK - metaDataPagesPerRegion);
    }
    /* now return the address space associated with the chunk for global reuse */
    space.releaseDiscontiguousChunks(chunk);
  }

  /**
   * Reserve virtual address space for meta-data.
   *
   * @param extent The size of this space
   */
  private void reserveMetaData(Extent extent) {
    highWaterMark = 0;
    if (metaDataPagesPerRegion > 0) {
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(start.toWord().rshl(EmbeddedMetaData.LOG_BYTES_IN_REGION).lsh(EmbeddedMetaData.LOG_BYTES_IN_REGION).toAddress().EQ(start));
      Extent size = extent.toWord().rshl(EmbeddedMetaData.LOG_BYTES_IN_REGION).lsh(EmbeddedMetaData.LOG_BYTES_IN_REGION).toExtent();
      Address cursor = start.plus(size);
      while (cursor.GT(start)) {
        cursor = cursor.minus(EmbeddedMetaData.BYTES_IN_REGION);
        int unit = cursor.diff(start).toWord().rshl(LOG_BYTES_IN_PAGE).toInt();
        int tmp = freeList.alloc(metaDataPagesPerRegion, unit);
        pagesCurrentlyOnFreeList -= metaDataPagesPerRegion;
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(tmp == unit);
      }
    }
  }

  /**
   * {@inheritDoc}<p>
   * In the case of a free-list allocator, meta-data is pre-allocated, so
   * simply return the un-adjusted request size.
   *
   * @param pages The size of the pending allocation in pages
   * @return The (unadjusted) request size, since metadata is pre-allocated
   */
  @Override
  public int adjustForMetaData(int pages) {
    return pages;
  }

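  /**
   * Return the address corresponding to this resource's high water mark,
   * i.e. the highest page offset at which an allocation has been made.
   *
   * @return The address of the high water mark
   */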
  public Address getHighWater() {
    return start.plus(Extent.fromIntSignExtend(highWaterMark << LOG_BYTES_IN_PAGE));
  }

  /**
   * Return the size of the super page.
   *
   * @param first the Address of the first word in the superpage
   * @return the size in bytes
   */
  @Inline
  public Extent getSize(Address first) {
    if (VM.VERIFY_ASSERTIONS)
      VM.assertions._assert(Conversions.isPageAligned(first));

    int pageOffset = Conversions.bytesToPages(first.diff(start));
    int pages = freeList.size(pageOffset);
    return Conversions.pagesToBytes(pages);
  }

  /**
   * Resize the free list associated with this resource and nail down
   * its start address. This method is called to reset the free list
   * once the global free list (which it shares) and the base address
   * are finalized. There's a circular dependency, so we need an
   * explicit call-back to reset the free list size and start.
   *
   * @param startAddress The final start address for the discontiguous space.
   */
  @Interruptible
  public void resizeFreeList(Address startAddress) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!contiguous && !Plan.isInitialized());
    start = startAddress;
    freeList.resizeFreeList();
  }
}