001/* 002 * This file is part of the Jikes RVM project (http://jikesrvm.org). 003 * 004 * This file is licensed to You under the Eclipse Public License (EPL); 005 * You may not use this file except in compliance with the License. You 006 * may obtain a copy of the License at 007 * 008 * http://www.opensource.org/licenses/eclipse-1.0.php 009 * 010 * See the COPYRIGHT.txt file distributed with this work for information 011 * regarding copyright ownership. 012 */ 013package org.mmtk.policy; 014 015import static org.mmtk.utility.Constants.*; 016 017import org.mmtk.plan.Plan; 018import org.mmtk.plan.TransitiveClosure; 019import org.mmtk.utility.heap.Map; 020import org.mmtk.utility.heap.Mmapper; 021import org.mmtk.utility.heap.PageResource; 022import org.mmtk.utility.heap.SpaceDescriptor; 023import org.mmtk.utility.heap.VMRequest; 024import org.mmtk.utility.options.Options; 025import org.mmtk.utility.Log; 026 027import org.mmtk.vm.VM; 028 029import org.vmmagic.pragma.*; 030import org.vmmagic.unboxed.*; 031 032/** 033 * This class defines and manages spaces. Each policy is an instance 034 * of a space. A space is a region of virtual memory (contiguous or 035 * discontigous) which is subject to the same memory management 036 * regime. Multiple spaces (instances of this class or its 037 * descendants) may have the same policy (eg there could be numerous 038 * instances of CopySpace, each with different roles). Spaces are 039 * defined in terms of a unique region of virtual memory, so no two 040 * space instances ever share any virtual memory.<p> 041 * 042 * In addition to tracking virtual memory use and the mapping to 043 * policy, spaces also manage memory consumption (<i>used</i> virtual 044 * memory). 045 * 046 */ 047@Uninterruptible 048public abstract class Space { 049 050 /**************************************************************************** 051 * 052 * Class variables 053 */ 054 055 /** 056 * 057 */ 058 private static boolean DEBUG = false; 059 060 // the following is somewhat arbitrary for the 64 bit system at this stage 061 public static final int LOG_ADDRESS_SPACE = (BYTES_IN_ADDRESS == 4) ? 32 : 40; 062 public static final int LOG_BYTES_IN_CHUNK = 22; 063 public static final int BYTES_IN_CHUNK = 1 << LOG_BYTES_IN_CHUNK; 064 public static final int PAGES_IN_CHUNK = 1 << (LOG_BYTES_IN_CHUNK - LOG_BYTES_IN_PAGE); 065 private static final int LOG_MAX_CHUNKS = LOG_ADDRESS_SPACE - LOG_BYTES_IN_CHUNK; 066 public static final int MAX_CHUNKS = 1 << LOG_MAX_CHUNKS; 067 public static final int MAX_SPACES = 20; // quite arbitrary 068 069 public static final Address HEAP_START = chunkAlign(VM.HEAP_START, true); 070 public static final Address AVAILABLE_START = chunkAlign(VM.AVAILABLE_START, false); 071 public static final Address AVAILABLE_END = chunkAlign(VM.AVAILABLE_END, true); 072 public static final Extent AVAILABLE_BYTES = AVAILABLE_END.toWord().minus(AVAILABLE_START.toWord()).toExtent(); 073 public static final int AVAILABLE_PAGES = AVAILABLE_BYTES.toWord().rshl(LOG_BYTES_IN_PAGE).toInt(); 074 public static final Address HEAP_END = chunkAlign(VM.HEAP_END, false); 075 076 private static final boolean FORCE_SLOW_MAP_LOOKUP = false; 077 078 private static final int PAGES = 0; 079 private static final int MB = 1; 080 private static final int PAGES_MB = 2; 081 private static final int MB_PAGES = 3; 082 083 private static int spaceCount = 0; 084 private static Space[] spaces = new Space[MAX_SPACES]; 085 private static Address heapCursor = HEAP_START; 086 private static Address heapLimit = HEAP_END; 087 088 /**************************************************************************** 089 * 090 * Instance variables 091 */ 092 093 /** 094 * 095 */ 096 private final String name; 097 private final int nameLength; 098 protected final int descriptor; 099 private final int index; 100 private final VMRequest vmRequest; 101 102 protected final boolean immortal; 103 protected final boolean movable; 104 protected final boolean contiguous; 105 protected final boolean zeroed; 106 107 protected PageResource pr; 108 protected final Address start; 109 protected final Extent extent; 110 protected Address headDiscontiguousRegion; 111 112 /**************************************************************************** 113 * 114 * Initialization 115 */ 116 117 { 118 if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(PAGES_IN_CHUNK > 1); 119 } 120 121 /** 122 * This is the base constructor for <i>all</i> spaces.<p> 123 * 124 * @param name The name of this space (used when printing error messages etc) 125 * @param movable Are objects in this space movable? 126 * @param immortal Are objects in this space immortal (uncollected)? 127 * @param zeroed if it is {@code true}, allocated memory is zeroed. 128 * @param vmRequest An object describing the virtual memory requested. 129 */ 130 protected Space(String name, boolean movable, boolean immortal, boolean zeroed, VMRequest vmRequest) { 131 this.name = name; 132 this.nameLength = name.length(); // necessary to avoid calling length() in uninterruptible code 133 this.movable = movable; 134 this.immortal = immortal; 135 this.zeroed = zeroed; 136 this.vmRequest = vmRequest; 137 this.index = spaceCount++; 138 spaces[index] = this; 139 140 if (vmRequest.type == VMRequest.REQUEST_DISCONTIGUOUS) { 141 this.contiguous = false; 142 this.descriptor = SpaceDescriptor.createDescriptor(); 143 this.start = Address.zero(); 144 this.extent = Extent.zero(); 145 this.headDiscontiguousRegion = Address.zero(); 146 VM.memory.setHeapRange(index, HEAP_START, HEAP_END); // this should really be refined! Once we have a code space, we can be a lot more specific about what is a valid code heap area 147 return; 148 } 149 150 Address start; 151 Extent extent; 152 153 if (vmRequest.type == VMRequest.REQUEST_FRACTION) { 154 extent = getFracAvailable(vmRequest.frac); 155 } else { 156 extent = vmRequest.extent; 157 } 158 159 if (extent.NE(chunkAlign(extent, false))) { 160 VM.assertions.fail(name + " requested non-aligned extent: " + extent.toLong() + " bytes"); 161 } 162 163 if (vmRequest.type == VMRequest.REQUEST_FIXED) { 164 start = vmRequest.start; 165 if (start.NE(chunkAlign(start, false))) { 166 VM.assertions.fail(name + " starting on non-aligned boundary: " + start.toLong() + " bytes"); 167 } 168 } else if (vmRequest.top) { 169 if (Map.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name); 170 heapLimit = heapLimit.minus(extent); 171 start = heapLimit; 172 } else { 173 start = heapCursor; 174 heapCursor = heapCursor.plus(extent); 175 } 176 177 if (heapCursor.GT(heapLimit)) { 178 Log.write("Out of virtual address space allocating \""); 179 Log.write(name); Log.write("\" at "); 180 Log.write(heapCursor.minus(extent)); Log.write(" ("); 181 Log.write(heapCursor); Log.write(" > "); 182 Log.write(heapLimit); Log.writeln(")"); 183 VM.assertions.fail("exiting"); 184 } 185 186 this.contiguous = true; 187 this.start = start; 188 this.extent = extent; 189 this.descriptor = SpaceDescriptor.createDescriptor(start, start.plus(extent)); 190 191 VM.memory.setHeapRange(index, start, start.plus(extent)); 192 Map.insert(start, extent, descriptor, this); 193 194 if (DEBUG) { 195 Log.write(name); Log.write(" "); 196 Log.write(start); Log.write(" "); 197 Log.write(start.plus(extent)); Log.write(" "); 198 Log.writeln(extent.toWord()); 199 } 200 } 201 202 /**************************************************************************** 203 * 204 * Accessor methods 205 */ 206 207 /** @return The start of the discontiguous space */ 208 public static Address getDiscontigStart() { 209 return heapCursor; 210 } 211 212 /** @return The end of the discontiguous space */ 213 public static Address getDiscontigEnd() { 214 return heapLimit.minus(1); 215 } 216 217 /** @return The name of this space */ 218 public final String getName() { 219 return name; 220 } 221 222 /** @return The start address of this space */ 223 public final Address getStart() { 224 return start; 225 } 226 227 /** @return The size (extent) of this space */ 228 public final Extent getExtent() { 229 return extent; 230 } 231 232 /** @return The integer descriptor for this space */ 233 public final int getDescriptor() { 234 return descriptor; 235 } 236 237 /** @return The index (ordinal number) of this space */ 238 public final int getIndex() { 239 return index; 240 } 241 242 /** @return {@code true} if this space is never collected */ 243 public final boolean isImmortal() { 244 return immortal; 245 } 246 247 /** @return {@code true} if objects in this space may move */ 248 public boolean isMovable() { 249 return movable; 250 } 251 252 /** @return The number of reserved pages */ 253 public final int reservedPages() { 254 return pr.reservedPages(); 255 } 256 257 /** @return The number of committed pages */ 258 public final int committedPages() { 259 return pr.committedPages(); 260 } 261 262 /** @return The number of pages available for allocation */ 263 public final int availablePhysicalPages() { 264 return pr.getAvailablePhysicalPages(); 265 } 266 267 /** @return Cumulative committed pages. */ 268 public static long cumulativeCommittedPages() { 269 return PageResource.cumulativeCommittedPages(); 270 } 271 272 /**************************************************************************** 273 * 274 * Object and address tests / accessors 275 */ 276 277 /** 278 * Return {@code true} if the given object is in an immortal (uncollected) space. 279 * 280 * @param object The object in question 281 * @return {@code true} if the given object is in an immortal (uncollected) space. 282 */ 283 public static boolean isImmortal(ObjectReference object) { 284 Space space = getSpaceForObject(object); 285 if (space == null) 286 return true; 287 else 288 return space.isImmortal(); 289 } 290 291 /** 292 * Return {@code true} if the given object is in space that moves objects. 293 * 294 * @param object The object in question 295 * @return {@code true} if the given object is in space that moves objects. 296 */ 297 @Inline 298 public static boolean isMovable(ObjectReference object) { 299 Space space = getSpaceForObject(object); 300 if (space == null) 301 return true; 302 else 303 return space.isMovable(); 304 } 305 306 /** 307 * Return {@code true} if the given object is in a space managed by MMTk. 308 * 309 * @param object The object in question 310 * @return {@code true} if the given object is in a space managed by MMTk. 311 */ 312 @Inline 313 public static boolean isMappedObject(ObjectReference object) { 314 return !object.isNull() && (getSpaceForObject(object) != null) && Mmapper.objectIsMapped(object); 315 } 316 317 /** 318 * Return {@code true} if the given address is in a space managed by MMTk. 319 * 320 * @param address The address in question 321 * @return {@code true} if the given address is in a space managed by MMTk. 322 */ 323 @Inline 324 public static boolean isMappedAddress(Address address) { 325 return Map.getSpaceForAddress(address) != null && Mmapper.addressIsMapped(address); 326 } 327 328 /** 329 * Return {@code true} if the given object is the space associated with the 330 * given descriptor. 331 * 332 * @param descriptor The descriptor for a space 333 * @param object The object in question 334 * @return {@code true} if the given object is in the space associated with 335 * the descriptor. 336 */ 337 @Inline 338 public static boolean isInSpace(int descriptor, ObjectReference object) { 339 if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!object.isNull()); 340 return isInSpace(descriptor, VM.objectModel.refToAddress(object)); 341 } 342 343 /** 344 * Return {@code true} if the given address is the space associated with the 345 * given descriptor. 346 * 347 * @param descriptor The descriptor for a space 348 * @param address The address in question. 349 * @return {@code true} if the given address is in the space associated with 350 * the descriptor. 351 */ 352 @Inline 353 public static boolean isInSpace(int descriptor, Address address) { 354 if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!address.isZero()); 355 if (FORCE_SLOW_MAP_LOOKUP || !SpaceDescriptor.isContiguous(descriptor)) { 356 return Map.getDescriptorForAddress(address) == descriptor; 357 } else { 358 Address start = SpaceDescriptor.getStart(descriptor); 359 if (!VM.VERIFY_ASSERTIONS && 360 SpaceDescriptor.isContiguousHi(descriptor)) 361 return address.GE(start); 362 else { 363 Extent size = Word.fromIntSignExtend(SpaceDescriptor.getChunks(descriptor)).lsh(LOG_BYTES_IN_CHUNK).toExtent(); 364 Address end = start.plus(size); 365 return address.GE(start) && address.LT(end); 366 } 367 } 368 } 369 370 /** 371 * Return the space for a given object 372 * 373 * @param object The object in question 374 * @return The space containing the object 375 */ 376 @Inline 377 public static Space getSpaceForObject(ObjectReference object) { 378 if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!object.isNull()); 379 return Map.getSpaceForAddress(VM.objectModel.refToAddress(object)); 380 } 381 382 /** 383 * Return the space for a given address, not necessarily the 384 * start address of an object. 385 * 386 * @param addr The address in question 387 * @return The space containing the address 388 */ 389 public static Space getSpaceForAddress(Address addr) { 390 if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!addr.isZero()); 391 return Map.getSpaceForAddress(addr); 392 } 393 394 /**************************************************************************** 395 * 396 * Page management 397 */ 398 399 /** 400 * Updates the zeroing approach for this space. 401 * 402 * @param useNT whether to use non-temporal instructions for zeroing 403 * @param concurrent whether zeroing will be done concurrently 404 */ 405 @Interruptible 406 public void setZeroingApproach(boolean useNT, boolean concurrent) { 407 pr.updateZeroingApproach(useNT, concurrent); 408 } 409 410 /** 411 * Skip concurrent zeroing (fall back to bulk zeroing). 412 */ 413 public void skipConcurrentZeroing() { 414 pr.skipConcurrentZeroing(); 415 } 416 417 /** 418 * Trigger concurrent zeroing. 419 */ 420 public void triggerConcurrentZeroing() { 421 pr.triggerConcurrentZeroing(); 422 } 423 424 /** 425 * Acquire a number of pages from the page resource, returning 426 * either the address of the first page, or zero on failure.<p> 427 * 428 * This may trigger a GC if necessary.<p> 429 * 430 * First the page budget is checked to see whether polling the GC is 431 * necessary. If so, the GC is polled. If a GC is required then the 432 * request fails and zero is returned.<p> 433 * 434 * If the check of the page budget does not lead to GC being 435 * triggered, then a request is made for specific pages in virtual 436 * memory. If the page manager cannot satisify this request, then 437 * the request fails, a GC is forced, and zero is returned. 438 * Otherwise the address of the first page is returned.<p> 439 * 440 * @param pages The number of pages requested 441 * @return The start of the first page if successful, zero on 442 * failure. 443 */ 444 @LogicallyUninterruptible 445 public final Address acquire(int pages) { 446 boolean allowPoll = VM.activePlan.isMutator() && Plan.isInitialized(); 447 448 /* Check page budget */ 449 int pagesReserved = pr.reservePages(pages); 450 451 /* Poll, either fixing budget or requiring GC */ 452 if (allowPoll && VM.activePlan.global().poll(false, this)) { 453 pr.clearRequest(pagesReserved); 454 VM.collection.blockForGC(); 455 return Address.zero(); // GC required, return failure 456 } 457 458 /* Page budget is ok, try to acquire virtual memory */ 459 Address rtn = pr.getNewPages(pagesReserved, pages, zeroed); 460 if (rtn.isZero()) { 461 /* Failed, so force a GC */ 462 if (!allowPoll) VM.assertions.fail("Physical allocation failed when polling not allowed!"); 463 boolean gcPerformed = VM.activePlan.global().poll(true, this); 464 if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(gcPerformed, "GC not performed when forced."); 465 pr.clearRequest(pagesReserved); 466 VM.collection.blockForGC(); 467 return Address.zero(); 468 } 469 470 return rtn; 471 } 472 473 /** 474 * Extend the virtual memory associated with a particular discontiguous 475 * space. This simply involves requesting a suitable number of chunks 476 * from the pool of chunks available to discontiguous spaces. 477 * 478 * @param chunks The number of chunks by which the space needs to be extended 479 * @return The address of the new discontiguous space. 480 */ 481 public Address growDiscontiguousSpace(int chunks) { 482 Address newHead = Map.allocateContiguousChunks(descriptor, this, chunks, headDiscontiguousRegion); 483 if (newHead.isZero()) { 484 return Address.zero(); 485 } 486 return headDiscontiguousRegion = newHead; 487 } 488 489 /** 490 * Return the number of chunks required to satisfy a request for a certain number of pages 491 * 492 * @param pages The number of pages desired 493 * @return The number of chunks needed to satisfy the request 494 */ 495 public static int requiredChunks(int pages) { 496 Extent extent = chunkAlign(Extent.fromIntZeroExtend(pages << LOG_BYTES_IN_PAGE), false); 497 return extent.toWord().rshl(LOG_BYTES_IN_CHUNK).toInt(); 498 } 499 500 /** 501 * This hook is called by page resources each time a space grows. The space may 502 * tap into the hook to monitor heap growth. The call is made from within the 503 * page resources' critical region, immediately before yielding the lock. 504 * 505 * @param start The start of the newly allocated space 506 * @param bytes The size of the newly allocated space 507 * @param newChunk {@code true} if the new space encroached upon or started a new chunk or chunks. 508 */ 509 public void growSpace(Address start, Extent bytes, boolean newChunk) {} 510 511 /** 512 * Release one or more contiguous chunks associated with a discontiguous 513 * space. 514 * 515 * @param chunk The address of the start of the contiguous chunk or chunks 516 * @return The number of chunks freed 517 */ 518 public int releaseDiscontiguousChunks(Address chunk) { 519 if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(chunk.EQ(chunkAlign(chunk, true))); 520 if (chunk.EQ(headDiscontiguousRegion)) { 521 headDiscontiguousRegion = Map.getNextContiguousRegion(chunk); 522 } 523 return Map.freeContiguousChunks(chunk); 524 } 525 526 /** 527 * @return The address of the head of the discontiguous chunk map. 528 */ 529 public Address getHeadDiscontiguousRegion() { 530 return headDiscontiguousRegion; 531 } 532 533 public void releaseAllChunks() { 534 Map.freeAllChunks(headDiscontiguousRegion); 535 headDiscontiguousRegion = Address.zero(); 536 } 537 538 /** 539 * Release a unit of allocation (a page or pages) 540 * 541 * @param start The address of the start of the region to be released 542 */ 543 public abstract void release(Address start); 544 545 /** 546 * Get the total number of pages reserved by all of the spaces 547 * 548 * @return the total number of pages reserved by all of the spaces 549 */ 550 private static int getPagesReserved() { 551 int pages = 0; 552 for (int i = 0; i < spaceCount; i++) { 553 pages += spaces[i].reservedPages(); 554 } 555 return pages; 556 } 557 558 /**************************************************************************** 559 * 560 * Debugging / printing 561 */ 562 563 /** 564 * Print out the memory used by all spaces, in megabytes 565 */ 566 public static void printUsageMB() { 567 printUsage(MB); 568 } 569 570 /** 571 * Print out the memory used by all spaces, in megabytes 572 */ 573 public static void printUsagePages() { 574 printUsage(PAGES); 575 } 576 577 /** 578 * Print out a map of virtual memory useage by all spaces 579 */ 580 public static void printVMMap() { 581 Log.writeln("Key: (I)mmortal (N)onmoving (D)iscontiguous (E)xtent (F)raction"); 582 Log.write(" HEAP_START "); Log.writeln(HEAP_START); 583 Log.write("AVAILABLE_START "); Log.writeln(AVAILABLE_START); 584 for (int i = 0; i < spaceCount; i++) { 585 Space space = spaces[i]; 586 587 for (int s = 0; s < 11 - space.nameLength; s++) 588 Log.write(" "); 589 Log.write(space.name); Log.write(" "); 590 Log.write(space.immortal ? "I" : " "); 591 Log.write(space.movable ? " " : "N"); 592 593 if (space.contiguous) { 594 Log.write(" "); 595 Log.write(space.start); Log.write("->"); 596 Log.write(space.start.plus(space.extent.minus(1))); 597 if (space.vmRequest.type == VMRequest.REQUEST_EXTENT) { 598 Log.write(" E "); Log.write(space.vmRequest.extent); 599 } else if (space.vmRequest.type == VMRequest.REQUEST_FRACTION) { 600 Log.write(" F "); Log.write(space.vmRequest.frac); 601 } 602 Log.writeln(); 603 } else { 604 Log.write("D ["); 605 for (Address a = space.headDiscontiguousRegion; !a.isZero(); a = Map.getNextContiguousRegion(a)) { 606 Log.write(a); Log.write("->"); 607 Log.write(a.plus(Map.getContiguousRegionSize(a).minus(1))); 608 if (!Map.getNextContiguousRegion(a).isZero()) 609 Log.write(", "); 610 } 611 Log.writeln("]"); 612 } 613 } 614 Log.write(" AVAILABLE_END "); Log.writeln(AVAILABLE_END); 615 Log.write(" HEAP_END "); Log.writeln(HEAP_END); 616 } 617 618 /** 619 * Interface to use to implement the Visitor Pattern for Spaces. 620 */ 621 public interface SpaceVisitor { 622 void visit(Space s); 623 } 624 625 /** 626 * Implement the Visitor Pattern for Spaces. 627 * @param v The visitor to perform on each Space instance 628 */ 629 @Interruptible 630 public static void visitSpaces(SpaceVisitor v) { 631 for (int i = 0; i < spaceCount; i++) { 632 v.visit(spaces[i]); 633 } 634 } 635 636 637 /** 638 * Ensure that all MMTk spaces (all spaces aside from the VM space) 639 * are mapped. Demand zero map all of them if they are not already 640 * mapped. 641 */ 642 @Interruptible 643 public static void eagerlyMmapMMTkSpaces() { 644 eagerlyMmapMMTkContiguousSpaces(); 645 eagerlyMmapMMTkDiscontiguousSpaces(); 646 } 647 648 649 /** 650 * Ensure that all contiguous MMTk spaces are mapped. Demand zero map 651 * all of them if they are not already mapped. 652 */ 653 @Interruptible 654 public static void eagerlyMmapMMTkContiguousSpaces() { 655 for (int i = 0; i < spaceCount; i++) { 656 Space space = spaces[i]; 657 if (space != VM.memory.getVMSpace()) { 658 if (Options.verbose.getValue() > 2) { 659 Log.write("Mapping "); 660 Log.write(space.name); 661 Log.write(" "); 662 Log.write(space.start); 663 Log.write("->"); 664 Log.writeln(space.start.plus(space.extent.minus(1))); 665 } 666 Mmapper.ensureMapped(space.start, space.extent.toInt() >> LOG_BYTES_IN_PAGE); 667 } 668 } 669 } 670 671 /** 672 * Ensure that all discontiguous MMTk spaces are mapped. Demand zero map 673 * all of them if they are not already mapped. 674 */ 675 @Interruptible 676 public static void eagerlyMmapMMTkDiscontiguousSpaces() { 677 Address regionStart = Space.getDiscontigStart(); 678 Address regionEnd = Space.getDiscontigEnd(); 679 int pages = regionEnd.diff(regionStart).toInt() >> LOG_BYTES_IN_PAGE; 680 if (Options.verbose.getValue() > 2) { 681 Log.write("Mapping discontiguous spaces "); 682 Log.write(regionStart); 683 Log.write("->"); 684 Log.writeln(regionEnd.minus(1)); 685 } 686 Mmapper.ensureMapped(getDiscontigStart(), pages); 687 } 688 689 /** 690 * Print out the memory used by all spaces in either megabytes or 691 * pages. 692 * 693 * @param mode An enumeration type that specifies the format for the 694 * prining (PAGES, MB, PAGES_MB, or MB_PAGES). 695 */ 696 private static void printUsage(int mode) { 697 Log.write("used = "); 698 printPages(getPagesReserved(), mode); 699 boolean first = true; 700 for (int i = 0; i < spaceCount; i++) { 701 Space space = spaces[i]; 702 Log.write(first ? " = " : " + "); 703 first = false; 704 Log.write(space.name); Log.write(" "); 705 printPages(space.reservedPages(), mode); 706 } 707 Log.writeln(); 708 } 709 710 /** 711 * Print out the number of pages and or megabytes, depending on the mode. 712 * 713 * @param pages The number of pages 714 * @param mode An enumeration type that specifies the format for the 715 * printing (PAGES, MB, PAGES_MB, or MB_PAGES). 716 */ 717 private static void printPages(int pages, int mode) { 718 double mb = (double) (pages << LOG_BYTES_IN_PAGE) / (double) (1 << 20); 719 switch (mode) { 720 case PAGES: Log.write(pages); Log.write(" pgs"); break; 721 case MB: Log.write(mb); Log.write(" Mb"); break; 722 case PAGES_MB: Log.write(pages); Log.write(" pgs ("); Log.write(mb); Log.write(" Mb)"); break; 723 case MB_PAGES: Log.write(mb); Log.write(" Mb ("); Log.write(pages); Log.write(" pgs)"); break; 724 default: VM.assertions.fail("writePages passed illegal printing mode"); 725 } 726 } 727 728 /**************************************************************************** 729 * 730 * Miscellaneous 731 */ 732 733 /** 734 * Trace an object as part of a collection and return the object, 735 * which may have been forwarded (if a copying collector). 736 * 737 * @param trace The trace being conducted. 738 * @param object The object to trace 739 * @return The object, forwarded, if appropriate 740 */ 741 public abstract ObjectReference traceObject(TransitiveClosure trace, ObjectReference object); 742 743 744 /** 745 * Has the object in this space been reached during the current collection. 746 * This is used for GC Tracing. 747 * 748 * @param object The object reference. 749 * @return {@code true} if the object is reachable. 750 */ 751 public boolean isReachable(ObjectReference object) { 752 return isLive(object); 753 } 754 755 756 /** 757 * Is the object in this space alive? 758 * 759 * @param object The object reference. 760 * @return {@code true} if the object is live. 761 */ 762 public abstract boolean isLive(ObjectReference object); 763 764 /** 765 * Align an address to a space chunk 766 * 767 * @param addr The address to be aligned 768 * @param down If {@code true} the address will be rounded down, otherwise 769 * it will rounded up. 770 * @return The chunk-aligned address 771 */ 772 public static Address chunkAlign(Address addr, boolean down) { 773 if (!down) addr = addr.plus(BYTES_IN_CHUNK - 1); 774 return addr.toWord().rshl(LOG_BYTES_IN_CHUNK).lsh(LOG_BYTES_IN_CHUNK).toAddress(); 775 } 776 777 /** 778 * Align an extent to a space chunk 779 * 780 * @param bytes The extent to be aligned 781 * @param down If {@code true} the extent will be rounded down, otherwise 782 * it will rounded up. 783 * @return The chunk-aligned extent 784 */ 785 public static Extent chunkAlign(Extent bytes, boolean down) { 786 if (!down) bytes = bytes.plus(BYTES_IN_CHUNK - 1); 787 return bytes.toWord().rshl(LOG_BYTES_IN_CHUNK).lsh(LOG_BYTES_IN_CHUNK).toExtent(); 788 } 789 790 /** 791 * Convert a fraction into a number of bytes according to the 792 * fraction of available bytes. 793 * 794 * @param frac The fraction of available virtual memory desired 795 * @return The corresponding number of bytes, chunk-aligned. 796 */ 797 public static Extent getFracAvailable(float frac) { 798 long bytes = (long) (frac * AVAILABLE_BYTES.toLong()); 799 Word mb = Word.fromIntSignExtend((int) (bytes >> LOG_BYTES_IN_MBYTE)); 800 Extent rtn = mb.lsh(LOG_BYTES_IN_MBYTE).toExtent(); 801 return chunkAlign(rtn, false); 802 } 803 804 /** @return the actual number of spaces in the space array */ 805 public static int getSpaceCount() { 806 return spaceCount; 807 } 808 809 /** 810 * @return the spaces array. Note that the array is partially empty: 811 * use {@link #getSpaceCount()} to determine the maximum index that 812 * is still filled. 813 */ 814 public static Space[] getSpaces() { 815 return spaces; 816 } 817 818}