/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.store.kahadb;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.activemq.ActiveMQMessageAuditNoSync;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.BrokerServiceAware;
import org.apache.activemq.command.MessageAck;
import org.apache.activemq.command.TransactionId;
import org.apache.activemq.openwire.OpenWireFormat;
import org.apache.activemq.protobuf.Buffer;
import org.apache.activemq.store.kahadb.data.KahaAckMessageFileMapCommand;
import org.apache.activemq.store.kahadb.data.KahaAddMessageCommand;
import org.apache.activemq.store.kahadb.data.KahaCommitCommand;
import org.apache.activemq.store.kahadb.data.KahaDestination;
import org.apache.activemq.store.kahadb.data.KahaEntryType;
import org.apache.activemq.store.kahadb.data.KahaPrepareCommand;
import org.apache.activemq.store.kahadb.data.KahaProducerAuditCommand;
import org.apache.activemq.store.kahadb.data.KahaRemoveDestinationCommand;
import org.apache.activemq.store.kahadb.data.KahaRemoveMessageCommand;
import org.apache.activemq.store.kahadb.data.KahaRollbackCommand;
import org.apache.activemq.store.kahadb.data.KahaSubscriptionCommand;
import org.apache.activemq.store.kahadb.data.KahaTraceCommand;
import org.apache.activemq.store.kahadb.data.KahaTransactionInfo;
import org.apache.activemq.store.kahadb.data.KahaUpdateMessageCommand;
import org.apache.activemq.store.kahadb.disk.index.BTreeIndex;
import org.apache.activemq.store.kahadb.disk.index.BTreeVisitor;
import org.apache.activemq.store.kahadb.disk.index.ListIndex;
import org.apache.activemq.store.kahadb.disk.journal.DataFile;
import org.apache.activemq.store.kahadb.disk.journal.Journal;
import org.apache.activemq.store.kahadb.disk.journal.Location;
import org.apache.activemq.store.kahadb.disk.page.Page;
import org.apache.activemq.store.kahadb.disk.page.PageFile;
import org.apache.activemq.store.kahadb.disk.page.Transaction;
083import org.apache.activemq.store.kahadb.disk.util.LocationMarshaller; 084import org.apache.activemq.store.kahadb.disk.util.LongMarshaller; 085import org.apache.activemq.store.kahadb.disk.util.Marshaller; 086import org.apache.activemq.store.kahadb.disk.util.Sequence; 087import org.apache.activemq.store.kahadb.disk.util.SequenceSet; 088import org.apache.activemq.store.kahadb.disk.util.StringMarshaller; 089import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller; 090import org.apache.activemq.util.ByteSequence; 091import org.apache.activemq.util.DataByteArrayInputStream; 092import org.apache.activemq.util.DataByteArrayOutputStream; 093import org.apache.activemq.util.IOHelper; 094import org.apache.activemq.util.ServiceStopper; 095import org.apache.activemq.util.ServiceSupport; 096import org.slf4j.Logger; 097import org.slf4j.LoggerFactory; 098 099public abstract class MessageDatabase extends ServiceSupport implements BrokerServiceAware { 100 101 protected BrokerService brokerService; 102 103 public static final String PROPERTY_LOG_SLOW_ACCESS_TIME = "org.apache.activemq.store.kahadb.LOG_SLOW_ACCESS_TIME"; 104 public static final int LOG_SLOW_ACCESS_TIME = Integer.getInteger(PROPERTY_LOG_SLOW_ACCESS_TIME, 0); 105 public static final File DEFAULT_DIRECTORY = new File("KahaDB"); 106 protected static final Buffer UNMATCHED; 107 static { 108 UNMATCHED = new Buffer(new byte[]{}); 109 } 110 private static final Logger LOG = LoggerFactory.getLogger(MessageDatabase.class); 111 112 static final int CLOSED_STATE = 1; 113 static final int OPEN_STATE = 2; 114 static final long NOT_ACKED = -1; 115 116 static final int VERSION = 5; 117 118 protected class Metadata { 119 protected Page<Metadata> page; 120 protected int state; 121 protected BTreeIndex<String, StoredDestination> destinations; 122 protected Location lastUpdate; 123 protected Location firstInProgressTransactionLocation; 124 protected Location producerSequenceIdTrackerLocation = null; 125 protected Location 
ackMessageFileMapLocation = null; 126 protected transient ActiveMQMessageAuditNoSync producerSequenceIdTracker = new ActiveMQMessageAuditNoSync(); 127 protected transient Map<Integer, Set<Integer>> ackMessageFileMap = new HashMap<Integer, Set<Integer>>(); 128 protected int version = VERSION; 129 protected int openwireVersion = OpenWireFormat.DEFAULT_VERSION; 130 131 public void read(DataInput is) throws IOException { 132 state = is.readInt(); 133 destinations = new BTreeIndex<String, StoredDestination>(pageFile, is.readLong()); 134 if (is.readBoolean()) { 135 lastUpdate = LocationMarshaller.INSTANCE.readPayload(is); 136 } else { 137 lastUpdate = null; 138 } 139 if (is.readBoolean()) { 140 firstInProgressTransactionLocation = LocationMarshaller.INSTANCE.readPayload(is); 141 } else { 142 firstInProgressTransactionLocation = null; 143 } 144 try { 145 if (is.readBoolean()) { 146 producerSequenceIdTrackerLocation = LocationMarshaller.INSTANCE.readPayload(is); 147 } else { 148 producerSequenceIdTrackerLocation = null; 149 } 150 } catch (EOFException expectedOnUpgrade) { 151 } 152 try { 153 version = is.readInt(); 154 } catch (EOFException expectedOnUpgrade) { 155 version = 1; 156 } 157 if (version >= 5 && is.readBoolean()) { 158 ackMessageFileMapLocation = LocationMarshaller.INSTANCE.readPayload(is); 159 } else { 160 ackMessageFileMapLocation = null; 161 } 162 try { 163 openwireVersion = is.readInt(); 164 } catch (EOFException expectedOnUpgrade) { 165 openwireVersion = OpenWireFormat.DEFAULT_VERSION; 166 } 167 LOG.info("KahaDB is version " + version); 168 } 169 170 public void write(DataOutput os) throws IOException { 171 os.writeInt(state); 172 os.writeLong(destinations.getPageId()); 173 174 if (lastUpdate != null) { 175 os.writeBoolean(true); 176 LocationMarshaller.INSTANCE.writePayload(lastUpdate, os); 177 } else { 178 os.writeBoolean(false); 179 } 180 181 if (firstInProgressTransactionLocation != null) { 182 os.writeBoolean(true); 183 
LocationMarshaller.INSTANCE.writePayload(firstInProgressTransactionLocation, os); 184 } else { 185 os.writeBoolean(false); 186 } 187 188 if (producerSequenceIdTrackerLocation != null) { 189 os.writeBoolean(true); 190 LocationMarshaller.INSTANCE.writePayload(producerSequenceIdTrackerLocation, os); 191 } else { 192 os.writeBoolean(false); 193 } 194 os.writeInt(VERSION); 195 if (ackMessageFileMapLocation != null) { 196 os.writeBoolean(true); 197 LocationMarshaller.INSTANCE.writePayload(ackMessageFileMapLocation, os); 198 } else { 199 os.writeBoolean(false); 200 } 201 os.writeInt(this.openwireVersion); 202 } 203 } 204 205 class MetadataMarshaller extends VariableMarshaller<Metadata> { 206 @Override 207 public Metadata readPayload(DataInput dataIn) throws IOException { 208 Metadata rc = createMetadata(); 209 rc.read(dataIn); 210 return rc; 211 } 212 213 @Override 214 public void writePayload(Metadata object, DataOutput dataOut) throws IOException { 215 object.write(dataOut); 216 } 217 } 218 219 protected PageFile pageFile; 220 protected Journal journal; 221 protected Metadata metadata = new Metadata(); 222 223 protected MetadataMarshaller metadataMarshaller = new MetadataMarshaller(); 224 225 protected boolean failIfDatabaseIsLocked; 226 227 protected boolean deleteAllMessages; 228 protected File directory = DEFAULT_DIRECTORY; 229 protected File indexDirectory = null; 230 protected Thread checkpointThread; 231 protected boolean enableJournalDiskSyncs=true; 232 protected boolean archiveDataLogs; 233 protected File directoryArchive; 234 protected AtomicLong journalSize = new AtomicLong(0); 235 long checkpointInterval = 5*1000; 236 long cleanupInterval = 30*1000; 237 int journalMaxFileLength = Journal.DEFAULT_MAX_FILE_LENGTH; 238 int journalMaxWriteBatchSize = Journal.DEFAULT_MAX_WRITE_BATCH_SIZE; 239 boolean enableIndexWriteAsync = false; 240 int setIndexWriteBatchSize = PageFile.DEFAULT_WRITE_BATCH_SIZE; 241 private String preallocationScope = 
Journal.PreallocationScope.ENTIRE_JOURNAL.name(); 242 private String preallocationStrategy = Journal.PreallocationStrategy.SPARSE_FILE.name(); 243 244 protected AtomicBoolean opened = new AtomicBoolean(); 245 private boolean ignoreMissingJournalfiles = false; 246 private int indexCacheSize = 10000; 247 private boolean checkForCorruptJournalFiles = false; 248 private boolean checksumJournalFiles = true; 249 protected boolean forceRecoverIndex = false; 250 private final Object checkpointThreadLock = new Object(); 251 private boolean rewriteOnRedelivery = false; 252 private boolean archiveCorruptedIndex = false; 253 private boolean useIndexLFRUEviction = false; 254 private float indexLFUEvictionFactor = 0.2f; 255 private boolean enableIndexDiskSyncs = true; 256 private boolean enableIndexRecoveryFile = true; 257 private boolean enableIndexPageCaching = true; 258 ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock(); 259 260 @Override 261 public void doStart() throws Exception { 262 load(); 263 } 264 265 @Override 266 public void doStop(ServiceStopper stopper) throws Exception { 267 unload(); 268 } 269 270 private void loadPageFile() throws IOException { 271 this.indexLock.writeLock().lock(); 272 try { 273 final PageFile pageFile = getPageFile(); 274 pageFile.load(); 275 pageFile.tx().execute(new Transaction.Closure<IOException>() { 276 @Override 277 public void execute(Transaction tx) throws IOException { 278 if (pageFile.getPageCount() == 0) { 279 // First time this is created.. 
Initialize the metadata 280 Page<Metadata> page = tx.allocate(); 281 assert page.getPageId() == 0; 282 page.set(metadata); 283 metadata.page = page; 284 metadata.state = CLOSED_STATE; 285 metadata.destinations = new BTreeIndex<String, StoredDestination>(pageFile, tx.allocate().getPageId()); 286 287 tx.store(metadata.page, metadataMarshaller, true); 288 } else { 289 Page<Metadata> page = tx.load(0, metadataMarshaller); 290 metadata = page.get(); 291 metadata.page = page; 292 } 293 metadata.destinations.setKeyMarshaller(StringMarshaller.INSTANCE); 294 metadata.destinations.setValueMarshaller(new StoredDestinationMarshaller()); 295 metadata.destinations.load(tx); 296 } 297 }); 298 // Load up all the destinations since we need to scan all the indexes to figure out which journal files can be deleted. 299 // Perhaps we should just keep an index of file 300 storedDestinations.clear(); 301 pageFile.tx().execute(new Transaction.Closure<IOException>() { 302 @Override 303 public void execute(Transaction tx) throws IOException { 304 for (Iterator<Entry<String, StoredDestination>> iterator = metadata.destinations.iterator(tx); iterator.hasNext();) { 305 Entry<String, StoredDestination> entry = iterator.next(); 306 StoredDestination sd = loadStoredDestination(tx, entry.getKey(), entry.getValue().subscriptions!=null); 307 storedDestinations.put(entry.getKey(), sd); 308 309 if (checkForCorruptJournalFiles) { 310 // sanity check the index also 311 if (!entry.getValue().locationIndex.isEmpty(tx)) { 312 if (entry.getValue().orderIndex.nextMessageId <= 0) { 313 throw new IOException("Detected uninitialized orderIndex nextMessageId with pending messages for " + entry.getKey()); 314 } 315 } 316 } 317 } 318 } 319 }); 320 pageFile.flush(); 321 } finally { 322 this.indexLock.writeLock().unlock(); 323 } 324 } 325 326 private void startCheckpoint() { 327 if (checkpointInterval == 0 && cleanupInterval == 0) { 328 LOG.info("periodic checkpoint/cleanup disabled, will ocurr on clean 
shutdown/restart"); 329 return; 330 } 331 synchronized (checkpointThreadLock) { 332 boolean start = false; 333 if (checkpointThread == null) { 334 start = true; 335 } else if (!checkpointThread.isAlive()) { 336 start = true; 337 LOG.info("KahaDB: Recovering checkpoint thread after death"); 338 } 339 if (start) { 340 checkpointThread = new Thread("ActiveMQ Journal Checkpoint Worker") { 341 @Override 342 public void run() { 343 try { 344 long lastCleanup = System.currentTimeMillis(); 345 long lastCheckpoint = System.currentTimeMillis(); 346 // Sleep for a short time so we can periodically check 347 // to see if we need to exit this thread. 348 long sleepTime = Math.min(checkpointInterval > 0 ? checkpointInterval : cleanupInterval, 500); 349 while (opened.get()) { 350 Thread.sleep(sleepTime); 351 long now = System.currentTimeMillis(); 352 if( cleanupInterval > 0 && (now - lastCleanup >= cleanupInterval) ) { 353 checkpointCleanup(true); 354 lastCleanup = now; 355 lastCheckpoint = now; 356 } else if( checkpointInterval > 0 && (now - lastCheckpoint >= checkpointInterval )) { 357 checkpointCleanup(false); 358 lastCheckpoint = now; 359 } 360 } 361 } catch (InterruptedException e) { 362 // Looks like someone really wants us to exit this thread... 363 } catch (IOException ioe) { 364 LOG.error("Checkpoint failed", ioe); 365 brokerService.handleIOException(ioe); 366 } 367 } 368 }; 369 370 checkpointThread.setDaemon(true); 371 checkpointThread.start(); 372 } 373 } 374 } 375 376 public void open() throws IOException { 377 if( opened.compareAndSet(false, true) ) { 378 getJournal().start(); 379 try { 380 loadPageFile(); 381 } catch (Throwable t) { 382 LOG.warn("Index corrupted. Recovering the index through journal replay. 
Cause:" + t); 383 if (LOG.isDebugEnabled()) { 384 LOG.debug("Index load failure", t); 385 } 386 // try to recover index 387 try { 388 pageFile.unload(); 389 } catch (Exception ignore) {} 390 if (archiveCorruptedIndex) { 391 pageFile.archive(); 392 } else { 393 pageFile.delete(); 394 } 395 metadata = createMetadata(); 396 pageFile = null; 397 loadPageFile(); 398 } 399 startCheckpoint(); 400 recover(); 401 } 402 } 403 404 public void load() throws IOException { 405 this.indexLock.writeLock().lock(); 406 IOHelper.mkdirs(directory); 407 try { 408 if (deleteAllMessages) { 409 getJournal().start(); 410 getJournal().delete(); 411 getJournal().close(); 412 journal = null; 413 getPageFile().delete(); 414 LOG.info("Persistence store purged."); 415 deleteAllMessages = false; 416 } 417 418 open(); 419 store(new KahaTraceCommand().setMessage("LOADED " + new Date())); 420 } finally { 421 this.indexLock.writeLock().unlock(); 422 } 423 } 424 425 public void close() throws IOException, InterruptedException { 426 if( opened.compareAndSet(true, false)) { 427 checkpointLock.writeLock().lock(); 428 try { 429 if (metadata.page != null) { 430 checkpointUpdate(true); 431 } 432 pageFile.unload(); 433 metadata = createMetadata(); 434 } finally { 435 checkpointLock.writeLock().unlock(); 436 } 437 journal.close(); 438 synchronized (checkpointThreadLock) { 439 if (checkpointThread != null) { 440 checkpointThread.join(); 441 } 442 } 443 } 444 } 445 446 public void unload() throws IOException, InterruptedException { 447 this.indexLock.writeLock().lock(); 448 try { 449 if( pageFile != null && pageFile.isLoaded() ) { 450 metadata.state = CLOSED_STATE; 451 metadata.firstInProgressTransactionLocation = getInProgressTxLocationRange()[0]; 452 453 if (metadata.page != null) { 454 pageFile.tx().execute(new Transaction.Closure<IOException>() { 455 @Override 456 public void execute(Transaction tx) throws IOException { 457 tx.store(metadata.page, metadataMarshaller, true); 458 } 459 }); 460 } 461 } 462 } 
finally { 463 this.indexLock.writeLock().unlock(); 464 } 465 close(); 466 } 467 468 // public for testing 469 @SuppressWarnings("rawtypes") 470 public Location[] getInProgressTxLocationRange() { 471 Location[] range = new Location[]{null, null}; 472 synchronized (inflightTransactions) { 473 if (!inflightTransactions.isEmpty()) { 474 for (List<Operation> ops : inflightTransactions.values()) { 475 if (!ops.isEmpty()) { 476 trackMaxAndMin(range, ops); 477 } 478 } 479 } 480 if (!preparedTransactions.isEmpty()) { 481 for (List<Operation> ops : preparedTransactions.values()) { 482 if (!ops.isEmpty()) { 483 trackMaxAndMin(range, ops); 484 } 485 } 486 } 487 } 488 return range; 489 } 490 491 @SuppressWarnings("rawtypes") 492 private void trackMaxAndMin(Location[] range, List<Operation> ops) { 493 Location t = ops.get(0).getLocation(); 494 if (range[0]==null || t.compareTo(range[0]) <= 0) { 495 range[0] = t; 496 } 497 t = ops.get(ops.size() -1).getLocation(); 498 if (range[1]==null || t.compareTo(range[1]) >= 0) { 499 range[1] = t; 500 } 501 } 502 503 class TranInfo { 504 TransactionId id; 505 Location location; 506 507 class opCount { 508 int add; 509 int remove; 510 } 511 HashMap<KahaDestination, opCount> destinationOpCount = new HashMap<KahaDestination, opCount>(); 512 513 @SuppressWarnings("rawtypes") 514 public void track(Operation operation) { 515 if (location == null ) { 516 location = operation.getLocation(); 517 } 518 KahaDestination destination; 519 boolean isAdd = false; 520 if (operation instanceof AddOperation) { 521 AddOperation add = (AddOperation) operation; 522 destination = add.getCommand().getDestination(); 523 isAdd = true; 524 } else { 525 RemoveOperation removeOpperation = (RemoveOperation) operation; 526 destination = removeOpperation.getCommand().getDestination(); 527 } 528 opCount opCount = destinationOpCount.get(destination); 529 if (opCount == null) { 530 opCount = new opCount(); 531 destinationOpCount.put(destination, opCount); 532 } 533 if 
(isAdd) { 534 opCount.add++; 535 } else { 536 opCount.remove++; 537 } 538 } 539 540 @Override 541 public String toString() { 542 StringBuffer buffer = new StringBuffer(); 543 buffer.append(location).append(";").append(id).append(";\n"); 544 for (Entry<KahaDestination, opCount> op : destinationOpCount.entrySet()) { 545 buffer.append(op.getKey()).append('+').append(op.getValue().add).append(',').append('-').append(op.getValue().remove).append(';'); 546 } 547 return buffer.toString(); 548 } 549 } 550 551 @SuppressWarnings("rawtypes") 552 public String getTransactions() { 553 554 ArrayList<TranInfo> infos = new ArrayList<TranInfo>(); 555 synchronized (inflightTransactions) { 556 if (!inflightTransactions.isEmpty()) { 557 for (Entry<TransactionId, List<Operation>> entry : inflightTransactions.entrySet()) { 558 TranInfo info = new TranInfo(); 559 info.id = entry.getKey(); 560 for (Operation operation : entry.getValue()) { 561 info.track(operation); 562 } 563 infos.add(info); 564 } 565 } 566 } 567 synchronized (preparedTransactions) { 568 if (!preparedTransactions.isEmpty()) { 569 for (Entry<TransactionId, List<Operation>> entry : preparedTransactions.entrySet()) { 570 TranInfo info = new TranInfo(); 571 info.id = entry.getKey(); 572 for (Operation operation : entry.getValue()) { 573 info.track(operation); 574 } 575 infos.add(info); 576 } 577 } 578 } 579 return infos.toString(); 580 } 581 582 /** 583 * Move all the messages that were in the journal into long term storage. We 584 * just replay and do a checkpoint. 
585 * 586 * @throws IOException 587 * @throws IOException 588 * @throws IllegalStateException 589 */ 590 private void recover() throws IllegalStateException, IOException { 591 this.indexLock.writeLock().lock(); 592 try { 593 594 long start = System.currentTimeMillis(); 595 Location producerAuditPosition = recoverProducerAudit(); 596 Location ackMessageFileLocation = recoverAckMessageFileMap(); 597 Location lastIndoubtPosition = getRecoveryPosition(); 598 599 Location recoveryPosition = minimum(producerAuditPosition, ackMessageFileLocation); 600 recoveryPosition = minimum(recoveryPosition, lastIndoubtPosition); 601 602 if (recoveryPosition != null) { 603 int redoCounter = 0; 604 int dataFileRotationTracker = recoveryPosition.getDataFileId(); 605 LOG.info("Recovering from the journal @" + recoveryPosition); 606 while (recoveryPosition != null) { 607 try { 608 JournalCommand<?> message = load(recoveryPosition); 609 metadata.lastUpdate = recoveryPosition; 610 process(message, recoveryPosition, lastIndoubtPosition); 611 redoCounter++; 612 } catch (IOException failedRecovery) { 613 if (isIgnoreMissingJournalfiles()) { 614 LOG.debug("Failed to recover data at position:" + recoveryPosition, failedRecovery); 615 // track this dud location 616 journal.corruptRecoveryLocation(recoveryPosition); 617 } else { 618 throw new IOException("Failed to recover data at position:" + recoveryPosition, failedRecovery); 619 } 620 } 621 recoveryPosition = journal.getNextLocation(recoveryPosition); 622 // hold on to the minimum number of open files during recovery 623 if (recoveryPosition != null && dataFileRotationTracker != recoveryPosition.getDataFileId()) { 624 dataFileRotationTracker = recoveryPosition.getDataFileId(); 625 journal.cleanup(); 626 } 627 if (LOG.isInfoEnabled() && redoCounter % 100000 == 0) { 628 LOG.info("@" + recoveryPosition + ", " + redoCounter + " entries recovered .."); 629 } 630 } 631 if (LOG.isInfoEnabled()) { 632 long end = System.currentTimeMillis(); 633 
LOG.info("Recovery replayed " + redoCounter + " operations from the journal in " + ((end - start) / 1000.0f) + " seconds."); 634 } 635 } 636 637 // We may have to undo some index updates. 638 pageFile.tx().execute(new Transaction.Closure<IOException>() { 639 @Override 640 public void execute(Transaction tx) throws IOException { 641 recoverIndex(tx); 642 } 643 }); 644 645 // rollback any recovered inflight local transactions, and discard any inflight XA transactions. 646 Set<TransactionId> toRollback = new HashSet<TransactionId>(); 647 Set<TransactionId> toDiscard = new HashSet<TransactionId>(); 648 synchronized (inflightTransactions) { 649 for (Iterator<TransactionId> it = inflightTransactions.keySet().iterator(); it.hasNext(); ) { 650 TransactionId id = it.next(); 651 if (id.isLocalTransaction()) { 652 toRollback.add(id); 653 } else { 654 toDiscard.add(id); 655 } 656 } 657 for (TransactionId tx: toRollback) { 658 if (LOG.isDebugEnabled()) { 659 LOG.debug("rolling back recovered indoubt local transaction " + tx); 660 } 661 store(new KahaRollbackCommand().setTransactionInfo(TransactionIdConversion.convertToLocal(tx)), false, null, null); 662 } 663 for (TransactionId tx: toDiscard) { 664 if (LOG.isDebugEnabled()) { 665 LOG.debug("discarding recovered in-flight XA transaction " + tx); 666 } 667 inflightTransactions.remove(tx); 668 } 669 } 670 671 synchronized (preparedTransactions) { 672 for (TransactionId txId : preparedTransactions.keySet()) { 673 LOG.warn("Recovered prepared XA TX: [{}]", txId); 674 } 675 } 676 677 } finally { 678 this.indexLock.writeLock().unlock(); 679 } 680 } 681 682 @SuppressWarnings("unused") 683 private KahaTransactionInfo createLocalTransactionInfo(TransactionId tx) { 684 return TransactionIdConversion.convertToLocal(tx); 685 } 686 687 private Location minimum(Location producerAuditPosition, 688 Location lastIndoubtPosition) { 689 Location min = null; 690 if (producerAuditPosition != null) { 691 min = producerAuditPosition; 692 if 
(lastIndoubtPosition != null && lastIndoubtPosition.compareTo(producerAuditPosition) < 0) { 693 min = lastIndoubtPosition; 694 } 695 } else { 696 min = lastIndoubtPosition; 697 } 698 return min; 699 } 700 701 private Location recoverProducerAudit() throws IOException { 702 if (metadata.producerSequenceIdTrackerLocation != null) { 703 try { 704 KahaProducerAuditCommand audit = (KahaProducerAuditCommand) load(metadata.producerSequenceIdTrackerLocation); 705 ObjectInputStream objectIn = new ObjectInputStream(audit.getAudit().newInput()); 706 int maxNumProducers = getMaxFailoverProducersToTrack(); 707 int maxAuditDepth = getFailoverProducersAuditDepth(); 708 metadata.producerSequenceIdTracker = (ActiveMQMessageAuditNoSync) objectIn.readObject(); 709 metadata.producerSequenceIdTracker.setAuditDepth(maxAuditDepth); 710 metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxNumProducers); 711 return journal.getNextLocation(metadata.producerSequenceIdTrackerLocation); 712 } catch (Exception e) { 713 LOG.warn("Cannot recover message audit", e); 714 return journal.getNextLocation(null); 715 } 716 } else { 717 // got no audit stored so got to recreate via replay from start of the journal 718 return journal.getNextLocation(null); 719 } 720 } 721 722 @SuppressWarnings("unchecked") 723 private Location recoverAckMessageFileMap() throws IOException { 724 if (metadata.ackMessageFileMapLocation != null) { 725 try { 726 KahaAckMessageFileMapCommand audit = (KahaAckMessageFileMapCommand) load(metadata.ackMessageFileMapLocation); 727 ObjectInputStream objectIn = new ObjectInputStream(audit.getAckMessageFileMap().newInput()); 728 metadata.ackMessageFileMap = (Map<Integer, Set<Integer>>) objectIn.readObject(); 729 return journal.getNextLocation(metadata.ackMessageFileMapLocation); 730 } catch (Exception e) { 731 LOG.warn("Cannot recover ackMessageFileMap", e); 732 return journal.getNextLocation(null); 733 } 734 } else { 735 // got no ackMessageFileMap stored so got to 
recreate via replay from start of the journal 736 return journal.getNextLocation(null); 737 } 738 } 739 740 protected void recoverIndex(Transaction tx) throws IOException { 741 long start = System.currentTimeMillis(); 742 // It is possible index updates got applied before the journal updates.. 743 // in that case we need to removed references to messages that are not in the journal 744 final Location lastAppendLocation = journal.getLastAppendLocation(); 745 long undoCounter=0; 746 747 // Go through all the destinations to see if they have messages past the lastAppendLocation 748 for (StoredDestination sd : storedDestinations.values()) { 749 750 final ArrayList<Long> matches = new ArrayList<Long>(); 751 // Find all the Locations that are >= than the last Append Location. 752 sd.locationIndex.visit(tx, new BTreeVisitor.GTEVisitor<Location, Long>(lastAppendLocation) { 753 @Override 754 protected void matched(Location key, Long value) { 755 matches.add(value); 756 } 757 }); 758 759 for (Long sequenceId : matches) { 760 MessageKeys keys = sd.orderIndex.remove(tx, sequenceId); 761 sd.locationIndex.remove(tx, keys.location); 762 sd.messageIdIndex.remove(tx, keys.messageId); 763 metadata.producerSequenceIdTracker.rollback(keys.messageId); 764 undoCounter++; 765 // TODO: do we need to modify the ack positions for the pub sub case? 766 } 767 } 768 769 if( undoCounter > 0 ) { 770 // The rolledback operations are basically in flight journal writes. To avoid getting 771 // these the end user should do sync writes to the journal. 772 if (LOG.isInfoEnabled()) { 773 long end = System.currentTimeMillis(); 774 LOG.info("Rolled back " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds."); 775 } 776 } 777 778 undoCounter = 0; 779 start = System.currentTimeMillis(); 780 781 // Lets be extra paranoid here and verify that all the datafiles being referenced 782 // by the indexes still exists. 
783 784 final SequenceSet ss = new SequenceSet(); 785 for (StoredDestination sd : storedDestinations.values()) { 786 // Use a visitor to cut down the number of pages that we load 787 sd.locationIndex.visit(tx, new BTreeVisitor<Location, Long>() { 788 int last=-1; 789 790 @Override 791 public boolean isInterestedInKeysBetween(Location first, Location second) { 792 if( first==null ) { 793 return !ss.contains(0, second.getDataFileId()); 794 } else if( second==null ) { 795 return true; 796 } else { 797 return !ss.contains(first.getDataFileId(), second.getDataFileId()); 798 } 799 } 800 801 @Override 802 public void visit(List<Location> keys, List<Long> values) { 803 for (Location l : keys) { 804 int fileId = l.getDataFileId(); 805 if( last != fileId ) { 806 ss.add(fileId); 807 last = fileId; 808 } 809 } 810 } 811 812 }); 813 } 814 HashSet<Integer> missingJournalFiles = new HashSet<Integer>(); 815 while (!ss.isEmpty()) { 816 missingJournalFiles.add((int) ss.removeFirst()); 817 } 818 819 for (Entry<Integer, Set<Integer>> entry : metadata.ackMessageFileMap.entrySet()) { 820 missingJournalFiles.add(entry.getKey()); 821 for (Integer i : entry.getValue()) { 822 missingJournalFiles.add(i); 823 } 824 } 825 826 missingJournalFiles.removeAll(journal.getFileMap().keySet()); 827 828 if (!missingJournalFiles.isEmpty()) { 829 LOG.warn("Some journal files are missing: " + missingJournalFiles); 830 } 831 832 ArrayList<BTreeVisitor.Predicate<Location>> knownCorruption = new ArrayList<BTreeVisitor.Predicate<Location>>(); 833 ArrayList<BTreeVisitor.Predicate<Location>> missingPredicates = new ArrayList<BTreeVisitor.Predicate<Location>>(); 834 for (Integer missing : missingJournalFiles) { 835 missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(missing, 0), new Location(missing + 1, 0))); 836 } 837 838 if (checkForCorruptJournalFiles) { 839 Collection<DataFile> dataFiles = journal.getFileMap().values(); 840 for (DataFile dataFile : dataFiles) { 841 int id = 
dataFile.getDataFileId(); 842 // eof to next file id 843 missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, dataFile.getLength()), new Location(id + 1, 0))); 844 Sequence seq = dataFile.getCorruptedBlocks().getHead(); 845 while (seq != null) { 846 BTreeVisitor.BetweenVisitor visitor = new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, (int) seq.getFirst()), new Location(id, (int) seq.getLast() + 1)); 847 missingPredicates.add(visitor); 848 knownCorruption.add(visitor); 849 seq = seq.getNext(); 850 } 851 } 852 } 853 854 if (!missingPredicates.isEmpty()) { 855 for (Entry<String, StoredDestination> sdEntry : storedDestinations.entrySet()) { 856 final StoredDestination sd = sdEntry.getValue(); 857 final LinkedHashMap<Long, Location> matches = new LinkedHashMap<Long, Location>(); 858 sd.locationIndex.visit(tx, new BTreeVisitor.OrVisitor<Location, Long>(missingPredicates) { 859 @Override 860 protected void matched(Location key, Long value) { 861 matches.put(value, key); 862 } 863 }); 864 865 // If some message references are affected by the missing data files... 866 if (!matches.isEmpty()) { 867 868 // We either 'gracefully' recover dropping the missing messages or 869 // we error out. 870 if( ignoreMissingJournalfiles ) { 871 // Update the index to remove the references to the missing data 872 for (Long sequenceId : matches.keySet()) { 873 MessageKeys keys = sd.orderIndex.remove(tx, sequenceId); 874 sd.locationIndex.remove(tx, keys.location); 875 sd.messageIdIndex.remove(tx, keys.messageId); 876 LOG.info("[" + sdEntry.getKey() + "] dropped: " + keys.messageId + " at corrupt location: " + keys.location); 877 undoCounter++; 878 // TODO: do we need to modify the ack positions for the pub sub case? 
879 } 880 } else { 881 LOG.error("[" + sdEntry.getKey() + "] references corrupt locations: " + matches); 882 throw new IOException("Detected missing/corrupt journal files referenced by:[" + sdEntry.getKey() + "] " +matches.size()+" messages affected."); 883 } 884 } 885 } 886 } 887 888 if (!ignoreMissingJournalfiles) { 889 if (!knownCorruption.isEmpty()) { 890 LOG.error("Detected corrupt journal files. " + knownCorruption); 891 throw new IOException("Detected corrupt journal files. " + knownCorruption); 892 } 893 894 if (!missingJournalFiles.isEmpty()) { 895 LOG.error("Detected missing journal files. " + missingJournalFiles); 896 throw new IOException("Detected missing journal files. " + missingJournalFiles); 897 } 898 } 899 900 if( undoCounter > 0 ) { 901 // The rolledback operations are basically in flight journal writes. To avoid getting these the end user 902 // should do sync writes to the journal. 903 if (LOG.isInfoEnabled()) { 904 long end = System.currentTimeMillis(); 905 LOG.info("Detected missing/corrupt journal files. 
Dropped " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }
    }

    // Cursor state for incremental recovery: the next journal record to replay
    // and the last one already replayed.
    private Location nextRecoveryPosition;
    private Location lastRecoveryPosition;

    /**
     * Replays any journal records appended since the last recovery pass into the
     * index. Safe to call repeatedly; each call picks up where the previous one
     * stopped. Runs under the index write lock.
     *
     * @throws IOException on journal read or index update failure
     */
    public void incrementalRecover() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            if( nextRecoveryPosition == null ) {
                if( lastRecoveryPosition==null ) {
                    // First pass: start from the position computed off the stored metadata.
                    nextRecoveryPosition = getRecoveryPosition();
                } else {
                    nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
                }
            }
            while (nextRecoveryPosition != null) {
                lastRecoveryPosition = nextRecoveryPosition;
                metadata.lastUpdate = lastRecoveryPosition;
                JournalCommand<?> message = load(lastRecoveryPosition);
                process(message, lastRecoveryPosition, (IndexAware) null);
                nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** @return the journal location of the most recent update applied to the index. */
    public Location getLastUpdatePosition() throws IOException {
        return metadata.lastUpdate;
    }

    /**
     * Computes where journal replay should begin: the first in-progress
     * transaction if any, otherwise the record after the last indexed update,
     * otherwise the very first record in the journal (also the forced-recovery
     * starting point).
     *
     * @return the first location to replay, or null if the journal is empty
     * @throws IOException on journal access failure
     */
    private Location getRecoveryPosition() throws IOException {
        if (!this.forceRecoverIndex) {
            // If we need to recover the transactions..
            if (metadata.firstInProgressTransactionLocation != null) {
                return metadata.firstInProgressTransactionLocation;
            }
            // Perhaps there were no transactions...
            if( metadata.lastUpdate!=null) {
                // Start replay at the record after the last one recorded in the index file.
                return journal.getNextLocation(metadata.lastUpdate);
            }
        }
        // This loads the first position.
        return journal.getNextLocation(null);
    }

    /**
     * Runs a checkpoint (optionally with journal-file cleanup) unless the store
     * is no longer open. The open check is made under the index write lock; the
     * checkpoint itself takes its own locks. Logs a warning-style info message
     * when the pass exceeds the slow-access threshold.
     */
    protected void checkpointCleanup(final boolean cleanup) throws IOException {
        long start;
        this.indexLock.writeLock().lock();
        try {
            start = System.currentTimeMillis();
            if( !opened.get() ) {
                return;
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
        checkpointUpdate(cleanup);
        long end = System.currentTimeMillis();
        if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Slow KahaDB access: cleanup took " + (end - start));
            }
        }
    }

    /**
     * Serializes a journal command to its framed wire form, prefixed by a single
     * type byte so {@link #load} can reconstruct the right message class.
     */
    public ByteSequence toByteSequence(JournalCommand<?> data) throws IOException {
        int size = data.serializedSizeFramed();
        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
        os.writeByte(data.type().getNumber());
        data.writeFramed(os);
        return os.toByteSequence();
    }

    // /////////////////////////////////////////////////////////////////
    // Methods call by the broker to update and query the store.
    // /////////////////////////////////////////////////////////////////

    /** Stores a command asynchronously with no sync, no index callback and no completion hook. */
    public Location store(JournalCommand<?> data) throws IOException {
        return store(data, false, null, null);
    }

    /** Stores a command asynchronously, invoking the given callback once the journal write completes. */
    public Location store(JournalCommand<?> data, Runnable onJournalStoreComplete) throws IOException {
        return store(data, false, null, null, onJournalStoreComplete);
    }

    /** Stores a command with optional sync, pre-index callback and post-store callback. */
    public Location store(JournalCommand<?> data, boolean sync, IndexAware before, Runnable after) throws IOException {
        return store(data, sync, before, after, null);
    }

    /**
     * All updates are funneled through this method. The updates are converted
     * to a JournalMessage which is logged to the journal and then the data from
     * the JournalMessage is used to update the index just like it would be done
     * during a recovery process.
     */
    public Location store(JournalCommand<?> data, boolean sync, IndexAware before, Runnable after, Runnable onJournalStoreComplete) throws IOException {
        try {
            ByteSequence sequence = toByteSequence(data);

            Location location;
            // Hold the checkpoint read lock so a concurrent checkpoint (which takes the
            // write lock) cannot interleave between the journal append and the index update.
            checkpointLock.readLock().lock();
            try {

                long start = System.currentTimeMillis();
                location = onJournalStoreComplete == null ? journal.write(sequence, sync) : journal.write(sequence, onJournalStoreComplete) ;
                long start2 = System.currentTimeMillis();
                process(data, location, before);

                long end = System.currentTimeMillis();
                if( LOG_SLOW_ACCESS_TIME>0 && end-start > LOG_SLOW_ACCESS_TIME) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("Slow KahaDB access: Journal append took: "+(start2-start)+" ms, Index update took "+(end-start2)+" ms");
                    }
                }

            } finally{
                checkpointLock.readLock().unlock();
            }
            // The after hook runs outside the checkpoint lock.
            if (after != null) {
                after.run();
            }

            // Restart the checkpoint thread if it died while the store is still open.
            if (checkpointThread != null && !checkpointThread.isAlive() && opened.get()) {
                startCheckpoint();
            }
            return location;
        } catch (IOException ioe) {
            // Delegate fatal journal I/O errors to the broker's configured IO-exception handler.
            LOG.error("KahaDB failed to store to Journal, command of type: " + data.type(), ioe);
            brokerService.handleIOException(ioe);
            throw ioe;
        }
    }

    /**
     * Loads a previously stored JournalMessage.
     *
     * @param location the journal position to read
     * @return the decoded journal command
     * @throws IOException if the record cannot be read or its type byte is not a known entry type
     */
    public JournalCommand<?> load(Location location) throws IOException {
        long start = System.currentTimeMillis();
        ByteSequence data = journal.read(location);
        long end = System.currentTimeMillis();
        if( LOG_SLOW_ACCESS_TIME>0 && end-start > LOG_SLOW_ACCESS_TIME) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Slow KahaDB access: Journal read took: "+(end-start)+" ms");
            }
        }
        DataByteArrayInputStream is = new DataByteArrayInputStream(data);
        // First byte is the KahaEntryType tag written by toByteSequence().
        byte readByte = is.readByte();
        KahaEntryType type = KahaEntryType.valueOf(readByte);
        if( type == null ) {
            try {
                is.close();
            } catch (IOException e) {}
            throw new IOException("Could not load journal record. Invalid location: "+location);
        }
        JournalCommand<?> message = (JournalCommand<?>)type.createMessage();
        message.mergeFramed(is);
        return message;
    }

    /**
     * do minimal recovery till we reach the last inDoubtLocation.
     * Records at or past the in-doubt location are fully processed; earlier ones
     * only feed the producer-audit duplicate tracker.
     * @param data the decoded journal command
     * @param location where the command was read from
     * @param inDoubtlocation first location that must be fully replayed (may be null)
     * @throws IOException on index update failure
     */
    void process(JournalCommand<?> data, final Location location, final Location inDoubtlocation) throws IOException {
        if (inDoubtlocation != null && location.compareTo(inDoubtlocation) >= 0) {
            process(data, location, (IndexAware) null);
        } else {
            // just recover producer audit
            data.visit(new Visitor() {
                @Override
                public void visit(KahaAddMessageCommand command) throws IOException {
                    metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());
                }
            });
        }
    }

    // /////////////////////////////////////////////////////////////////
    // Journaled record processing methods. Once the record is journaled,
    // these methods handle applying the index updates.
    // These may be called
    // from the recovery method too so they need to be idempotent
    // /////////////////////////////////////////////////////////////////

    /**
     * Dispatches a journaled command to its type-specific processor. Trace,
     * producer-audit and ack-file-map records only advance the last-update
     * location; all other types mutate the index.
     */
    void process(JournalCommand<?> data, final Location location, final IndexAware onSequenceAssignedCallback) throws IOException {
        data.visit(new Visitor() {
            @Override
            public void visit(KahaAddMessageCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRemoveMessageCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaPrepareCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaCommitCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRollbackCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaRemoveDestinationCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaSubscriptionCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaProducerAuditCommand command) throws IOException {
                processLocation(location);
            }

            @Override
            public void visit(KahaAckMessageFileMapCommand command) throws IOException {
                processLocation(location);
            }

            @Override
            public void visit(KahaTraceCommand command) {
                processLocation(location);
            }

            @Override
            public void visit(KahaUpdateMessageCommand command) throws IOException {
                process(command, location);
            }
        });
    }

    /**
     * Applies an add-message command. Transactional adds are queued as an
     * AddOperation until commit; non-transactional adds update the index
     * immediately under the index write lock, then notify the callback with the
     * assigned sequence while the lock is still held.
     */
    @SuppressWarnings("rawtypes")
    protected void process(final KahaAddMessageCommand command, final Location location, final IndexAware runWithIndexLock) throws IOException {
        if (command.hasTransactionInfo()) {
            List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
            inflightTx.add(new AddOperation(command, location, runWithIndexLock));
        } else {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        long assignedIndex = updateIndex(tx, command, location);
                        if (runWithIndexLock != null) {
                            runWithIndexLock.sequenceAssignedWithIndexLocked(assignedIndex);
                        }
                    }
                });

            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }

    /** Applies an update-message command to the index under the index write lock. */
    @SuppressWarnings("rawtypes")
    protected void process(final KahaUpdateMessageCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /**
     * Applies a remove-message (ack) command. Transactional removes are queued
     * as a RemoveOperation until commit; non-transactional removes update the
     * index immediately under the index write lock.
     */
    @SuppressWarnings("rawtypes")
    protected void process(final KahaRemoveMessageCommand command, final Location location) throws IOException {
        if (command.hasTransactionInfo()) {
            List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
            inflightTx.add(new RemoveOperation(command, location));
        } else {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        updateIndex(tx, command, location);
                    }
                });
            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }

    /** Applies a remove-destination command to the index under the index write lock. */
    protected void process(final KahaRemoveDestinationCommand command, final
Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** Applies a subscription add/remove command to the index under the index write lock. */
    protected void process(final KahaSubscriptionCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** Records the given journal location as the most recent index update (no index mutation). */
    protected void processLocation(final Location location) {
        this.indexLock.writeLock().lock();
        try {
            metadata.lastUpdate = location;
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /**
     * Commits a transaction: drains its queued operations (from the in-flight
     * map, falling back to the prepared map) and executes them against the
     * index in one page-file transaction under the index write lock. If no
     * operations were queued, only the callback is notified (with -1).
     */
    @SuppressWarnings("rawtypes")
    protected void process(KahaCommitCommand command, final Location location, final IndexAware before) throws IOException {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        List<Operation> inflightTx;
        synchronized (inflightTransactions) {
            inflightTx = inflightTransactions.remove(key);
            if (inflightTx == null) {
                inflightTx = preparedTransactions.remove(key);
            }
        }
        if (inflightTx == null) {
            // only non persistent messages in this tx
            if (before != null) {
                before.sequenceAssignedWithIndexLocked(-1);
            }
            return;
        }

        final List<Operation> messagingTx = inflightTx;
        indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    for (Operation op : messagingTx) {
                        op.execute(tx);
                    }
                }
            });
            metadata.lastUpdate = location;
        } finally {
            indexLock.writeLock().unlock();
        }
    }

    /** Moves a transaction's queued operations from the in-flight map to the prepared map (XA prepare). */
    @SuppressWarnings("rawtypes")
    protected void process(KahaPrepareCommand command, Location location) {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        synchronized (inflightTransactions) {
            List<Operation> tx = inflightTransactions.remove(key);
            if (tx != null) {
                preparedTransactions.put(key, tx);
            }
        }
    }

    /**
     * Rolls back a transaction by simply discarding its queued operations;
     * nothing was applied to the index yet, so removal from the maps is the
     * whole rollback.
     */
    @SuppressWarnings("rawtypes")
    protected void process(KahaRollbackCommand command, Location location) throws IOException {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        List<Operation> updates = null;
        synchronized (inflightTransactions) {
            updates = inflightTransactions.remove(key);
            if (updates == null) {
                updates = preparedTransactions.remove(key);
            }
        }
    }

    // /////////////////////////////////////////////////////////////////
    // These methods do the actual index updates.
    // /////////////////////////////////////////////////////////////////

    // Guards every index read/write; checkpoint and store paths take it in write mode.
    protected final ReentrantReadWriteLock indexLock = new ReentrantReadWriteLock();
    // Journal file ids currently being replicated; excluded from GC during checkpoint.
    private final HashSet<Integer> journalFilesBeingReplicated = new HashSet<Integer>();

    /**
     * Adds a message to the three per-destination indexes (order, location,
     * message-id). Duplicate detection: a pre-existing location entry means a
     * journal replay (restore and keep the old sequence); a pre-existing
     * message-id entry means a duplicate send (rejected, entry restored).
     *
     * @return the assigned order-index sequence, or -1 when skipped (topic with
     *         no subscriptions, or duplicate message id)
     */
    long updateIndex(Transaction tx, KahaAddMessageCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);

        // Skip adding the message to the index if this is a topic and there are
        // no subscriptions.
        if (sd.subscriptions != null && sd.subscriptions.isEmpty(tx)) {
            return -1;
        }

        // Add the message.
        int priority = command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY;
        long id = sd.orderIndex.getNextMessageId(priority);
        Long previous = sd.locationIndex.put(tx, location, id);
        if (previous == null) {
            previous = sd.messageIdIndex.put(tx, command.getMessageId(), id);
            if (previous == null) {
                sd.orderIndex.put(tx, priority, id, new MessageKeys(command.getMessageId(), location));
                if (sd.subscriptions != null && !sd.subscriptions.isEmpty(tx)) {
                    addAckLocationForNewMessage(tx, sd, id);
                }
                metadata.lastUpdate = location;
            } else {

                MessageKeys messageKeys = sd.orderIndex.get(tx, previous);
                if (messageKeys != null && messageKeys.location.compareTo(location) < 0) {
                    // If the message ID is indexed, then the broker asked us to store a duplicate before the message was dispatched and acked, we ignore this add attempt
                    LOG.warn("Duplicate message add attempt rejected. Destination: {}://{}, Message id: {}", command.getDestination().getType(), command.getDestination().getName(), command.getMessageId());
                }
                // Undo the tentative puts made above and keep the original entry.
                sd.messageIdIndex.put(tx, command.getMessageId(), previous);
                sd.locationIndex.remove(tx, location);
                id = -1;
            }
        } else {
            // restore the previous value.. Looks like this was a redo of a previously
            // added message. We don't want to assign it a new id as the other indexes would
            // be wrong..
            sd.locationIndex.put(tx, location, previous);
            metadata.lastUpdate = location;
        }
        // record this id in any event, initial send or recovery
        metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());
        return id;
    }

    /** Marks a sequence as pending-add on the destination, if it is currently loaded. */
    void trackPendingAdd(KahaDestination destination, Long seq) {
        StoredDestination sd = storedDestinations.get(key(destination));
        if (sd != null) {
            sd.trackPendingAdd(seq);
        }
    }

    /** Clears a pending-add marker on the destination, if it is currently loaded. */
    void trackPendingAddComplete(KahaDestination destination, Long seq) {
        StoredDestination sd = storedDestinations.get(key(destination));
        if (sd != null) {
            sd.trackPendingAddComplete(seq);
        }
    }

    /**
     * Rewrites an existing message in place: the order index entry is replaced
     * with the new journal location under the same sequence id, and the stale
     * location-index entry is removed. Updates for unknown message ids are
     * rejected with a warning.
     */
    void updateIndex(Transaction tx, KahaUpdateMessageCommand updateMessageCommand, Location location) throws IOException {
        KahaAddMessageCommand command = updateMessageCommand.getMessage();
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);

        Long id = sd.messageIdIndex.get(tx, command.getMessageId());
        if (id != null) {
            MessageKeys previousKeys = sd.orderIndex.put(
                tx,
                command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY,
                id,
                new MessageKeys(command.getMessageId(), location)
            );
            sd.locationIndex.put(tx, location, id);
            // on first update previous is original location, on recovery/replay it may be the updated location
            if(previousKeys != null && !previousKeys.location.equals(location)) {
                sd.locationIndex.remove(tx, previousKeys.location);
            }
            metadata.lastUpdate = location;
        } else {
            LOG.warn("Non existent message update attempt rejected. Destination: {}://{}, Message id: {}", command.getDestination().getType(), command.getDestination().getName(), command.getMessageId());
        }
    }

    /**
     * Processes a message ack. Queue case: removes the message from all three
     * indexes. Topic case: records the subscription's last ack and lets
     * removeAckLocation delete the message once no subscription references it.
     * Either way the ack's journal file is recorded as referencing the
     * message's journal file for GC accounting.
     */
    void updateIndex(Transaction tx, KahaRemoveMessageCommand command, Location ackLocation) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        if (!command.hasSubscriptionKey()) {

            // In the queue case we just remove the message from the index..
            Long sequenceId = sd.messageIdIndex.remove(tx, command.getMessageId());
            if (sequenceId != null) {
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                if (keys != null) {
                    sd.locationIndex.remove(tx, keys.location);
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                    metadata.lastUpdate = ackLocation;
                } else if (LOG.isDebugEnabled()) {
                    LOG.debug("message not found in order index: " + sequenceId  + " for: " + command.getMessageId());
                }
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("message not found in sequence id index: " + command.getMessageId());
            }
        } else {
            // In the topic case we need remove the message once it's been acked
            // by all the subs
            Long sequence = sd.messageIdIndex.get(tx, command.getMessageId());

            // Make sure it's a valid message id...
            if (sequence != null) {
                String subscriptionKey = command.getSubscriptionKey();
                if (command.getAck() != UNMATCHED) {
                    sd.orderIndex.get(tx, sequence);
                    byte priority = sd.orderIndex.lastGetPriority();
                    sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(sequence, priority));
                }

                MessageKeys keys = sd.orderIndex.get(tx, sequence);
                if (keys != null) {
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                }
                // The following method handles deleting un-referenced messages.
                removeAckLocation(tx, sd, subscriptionKey, sequence);
                metadata.lastUpdate = ackLocation;
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("no message sequence exists for id: " + command.getMessageId() + " and sub: " + command.getSubscriptionKey());
            }

        }
    }

    /**
     * Records that the journal file holding an ack references the journal file
     * holding the acked message, so the message's file cannot be GC'd while the
     * ack's file is still live.
     */
    private void recordAckMessageReferenceLocation(Location ackLocation, Location messageLocation) {
        Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(Integer.valueOf(ackLocation.getDataFileId()));
        if (referenceFileIds == null) {
            referenceFileIds = new HashSet<Integer>();
            referenceFileIds.add(messageLocation.getDataFileId());
            metadata.ackMessageFileMap.put(ackLocation.getDataFileId(), referenceFileIds);
        } else {
            Integer id = Integer.valueOf(messageLocation.getDataFileId());
            if (!referenceFileIds.contains(id)) {
                referenceFileIds.add(id);
            }
        }
    }

    /**
     * Destroys a destination: clears and frees every index/page the stored
     * destination owns (including subscription state when present), then
     * removes it from the in-memory and persistent destination maps.
     */
    void updateIndex(Transaction tx, KahaRemoveDestinationCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        sd.orderIndex.remove(tx);

        sd.locationIndex.clear(tx);
        sd.locationIndex.unload(tx);
        tx.free(sd.locationIndex.getPageId());

        sd.messageIdIndex.clear(tx);
        sd.messageIdIndex.unload(tx);
        tx.free(sd.messageIdIndex.getPageId());

        if (sd.subscriptions != null) {
            sd.subscriptions.clear(tx);
            sd.subscriptions.unload(tx);
            tx.free(sd.subscriptions.getPageId());

            sd.subscriptionAcks.clear(tx);
            sd.subscriptionAcks.unload(tx);
            tx.free(sd.subscriptionAcks.getPageId());

            sd.ackPositions.clear(tx);
            sd.ackPositions.unload(tx);
            tx.free(sd.ackPositions.getHeadPageId());

            sd.subLocations.clear(tx);
            sd.subLocations.unload(tx);
            tx.free(sd.subLocations.getHeadPageId());
        }

        String key = key(command.getDestination());
        storedDestinations.remove(key);
        metadata.destinations.remove(tx, key);
    }

    /**
     * Creates or destroys a durable subscription. Creation is idempotent across
     * journal replay (same stored location is ignored); a non-retroactive sub
     * starts acked up to the current head of the order index.
     */
    void updateIndex(Transaction tx, KahaSubscriptionCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        final String subscriptionKey = command.getSubscriptionKey();

        // If set then we are creating it.. otherwise we are destroying the sub
        if (command.hasSubscriptionInfo()) {
            Location existing = sd.subLocations.get(tx, subscriptionKey);
            if (existing != null && existing.compareTo(location) == 0) {
                // replay on recovery, ignore
                LOG.trace("ignoring journal replay of replay of sub from: " + location);
                return;
            }

            sd.subscriptions.put(tx, subscriptionKey, command);
            sd.subLocations.put(tx, subscriptionKey, location);
            long ackLocation=NOT_ACKED;
            if (!command.getRetroactive()) {
                ackLocation = sd.orderIndex.nextMessageId-1;
            } else {
                addAckLocationForRetroactiveSub(tx, sd, subscriptionKey);
            }
            sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(ackLocation));
            sd.subscriptionCache.add(subscriptionKey);
        } else {
            // delete the sub...
            sd.subscriptions.remove(tx, subscriptionKey);
            sd.subLocations.remove(tx, subscriptionKey);
            sd.subscriptionAcks.remove(tx, subscriptionKey);
            sd.subscriptionCache.remove(subscriptionKey);
            removeAckLocationsForSub(tx, sd, subscriptionKey);

            if (sd.subscriptions.isEmpty(tx)) {
                // remove the stored destination
                KahaRemoveDestinationCommand removeDestinationCommand = new KahaRemoveDestinationCommand();
                removeDestinationCommand.setDestination(command.getDestination());
                updateIndex(tx, removeDestinationCommand, null);
            }
        }
    }

    /**
     * Runs a checkpoint inside a page-file transaction, holding the checkpoint
     * write lock (blocking concurrent store() calls) and then the index write
     * lock — in that order.
     */
    private void checkpointUpdate(final boolean cleanup) throws IOException {
        checkpointLock.writeLock().lock();
        try {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        checkpointUpdate(tx, cleanup);
                    }
                });
            } finally {
                this.indexLock.writeLock().unlock();
            }

        } finally {
            checkpointLock.writeLock().unlock();
        }
    }

    /**
     * Persists the metadata (producer audit, ack-file map, in-progress tx range)
     * and, when cleanup is requested, computes which journal data files are no
     * longer referenced by any index, subscription, in-progress transaction or
     * ack and removes them.
     *
     * @param tx the page-file transaction to write metadata under
     * @param cleanup whether to also garbage-collect unreferenced journal files
     * @throws IOException on index or journal failure
     */
    void checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
        LOG.debug("Checkpoint started.");

        // reflect last update exclusive of current checkpoint
        Location lastUpdate = metadata.lastUpdate;

        metadata.state = OPEN_STATE;
        metadata.producerSequenceIdTrackerLocation = checkpointProducerAudit();
        metadata.ackMessageFileMapLocation = checkpointAckMessageFileMap();
        Location[] inProgressTxRange = getInProgressTxLocationRange();
        metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
        tx.store(metadata.page, metadataMarshaller, true);
        pageFile.flush();

        if( cleanup ) {

            // Start by assuming every journal file is a GC candidate, then strike
            // out each one something still references.
            final TreeSet<Integer> completeFileSet = new TreeSet<Integer>(journal.getFileMap().keySet());
            final TreeSet<Integer> gcCandidateSet = new TreeSet<Integer>(completeFileSet);

            if (LOG.isTraceEnabled()) {
                LOG.trace("Last update: " + lastUpdate + ", full gc candidates set: " + gcCandidateSet);
            }

            // Don't GC the file holding the most recent index update.
            if (lastUpdate != null) {
                gcCandidateSet.remove(lastUpdate.getDataFileId());
            }

            // Don't GC files under replication
            if( journalFilesBeingReplicated!=null ) {
                gcCandidateSet.removeAll(journalFilesBeingReplicated);
            }

            if (metadata.producerSequenceIdTrackerLocation != null) {
                int dataFileId = metadata.producerSequenceIdTrackerLocation.getDataFileId();
                if (gcCandidateSet.contains(dataFileId) && gcCandidateSet.first() == dataFileId) {
                    // rewrite so we don't prevent gc
                    metadata.producerSequenceIdTracker.setModified(true);
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("rewriting producerSequenceIdTracker:" + metadata.producerSequenceIdTrackerLocation);
                    }
                }
                gcCandidateSet.remove(dataFileId);
                if (LOG.isTraceEnabled()) {
                    LOG.trace("gc candidates after producerSequenceIdTrackerLocation:" + dataFileId + ", " + gcCandidateSet);
                }
            }

            if (metadata.ackMessageFileMapLocation != null) {
                int dataFileId = metadata.ackMessageFileMapLocation.getDataFileId();
                gcCandidateSet.remove(dataFileId);
                if (LOG.isTraceEnabled()) {
                    LOG.trace("gc candidates after ackMessageFileMapLocation:" + dataFileId + ", " + gcCandidateSet);
                }
            }

            // Don't GC files referenced by in-progress tx
            if (inProgressTxRange[0] != null) {
                for (int pendingTx=inProgressTxRange[0].getDataFileId(); pendingTx <= inProgressTxRange[1].getDataFileId(); pendingTx++) {
                    gcCandidateSet.remove(pendingTx);
                }
            }
            if (LOG.isTraceEnabled()) {
                LOG.trace("gc candidates after tx range:" + Arrays.asList(inProgressTxRange) + ", " + gcCandidateSet);
            }

            // Go through all the destinations to see if any of them can remove GC candidates.
            for (Entry<String, StoredDestination> entry : storedDestinations.entrySet()) {
                if( gcCandidateSet.isEmpty() ) {
                    break;
                }

                // Use a visitor to cut down the number of pages that we load
                entry.getValue().locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
                    int last=-1;
                    @Override
                    public boolean isInterestedInKeysBetween(Location first, Location second) {
                        // Only descend into key ranges that still overlap a surviving candidate;
                        // the open range endpoints themselves are exclusive.
                        if( first==null ) {
                            SortedSet<Integer> subset = gcCandidateSet.headSet(second.getDataFileId()+1);
                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
                                subset.remove(second.getDataFileId());
                            }
                            return !subset.isEmpty();
                        } else if( second==null ) {
                            SortedSet<Integer> subset = gcCandidateSet.tailSet(first.getDataFileId());
                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
                                subset.remove(first.getDataFileId());
                            }
                            return !subset.isEmpty();
                        } else {
                            SortedSet<Integer> subset = gcCandidateSet.subSet(first.getDataFileId(), second.getDataFileId()+1);
                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
                                subset.remove(first.getDataFileId());
                            }
                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
                                subset.remove(second.getDataFileId());
                            }
                            return !subset.isEmpty();
                        }
                    }

                    @Override
                    public void visit(List<Location> keys, List<Long> values) {
                        // Any live message location pins its journal file.
                        for (Location l : keys) {
                            int fileId = l.getDataFileId();
                            if( last != fileId ) {
                                gcCandidateSet.remove(fileId);
                                last = fileId;
                            }
                        }
                    }
                });

                // Durable Subscription
                if (entry.getValue().subLocations != null) {
                    Iterator<Entry<String, Location>> iter = entry.getValue().subLocations.iterator(tx);
                    while (iter.hasNext()) {
                        Entry<String, Location> subscription = iter.next();
                        int dataFileId = subscription.getValue().getDataFileId();

                        // Move subscription along if it has no outstanding messages that need ack'd
                        // and its in the last log file in the journal.
                        if (!gcCandidateSet.isEmpty() && gcCandidateSet.first() == dataFileId) {
                            final StoredDestination destination = entry.getValue();
                            final String subscriptionKey = subscription.getKey();
                            SequenceSet pendingAcks = destination.ackPositions.get(tx, subscriptionKey);

                            // When pending is size one that is the next message Id meaning there
                            // are no pending messages currently.
                            if (pendingAcks == null || pendingAcks.size() <= 1) {
                                if (LOG.isTraceEnabled()) {
                                    LOG.trace("Found candidate for rewrite: {} from file {}", entry.getKey(), dataFileId);
                                }

                                final KahaSubscriptionCommand kahaSub =
                                    destination.subscriptions.get(tx, subscriptionKey);
                                destination.subLocations.put(
                                    tx, subscriptionKey, checkpointSubscriptionCommand(kahaSub));

                                // Skips the remove from candidates if we rewrote the subscription
                                // in order to prevent duplicate subscription commands on recover.
                                // If another subscription is on the same file and isn't rewritten
                                // than it will remove the file from the set.
                                continue;
                            }
                        }

                        gcCandidateSet.remove(dataFileId);
                    }
                }

                if (LOG.isTraceEnabled()) {
                    LOG.trace("gc candidates after dest:" + entry.getKey() + ", " + gcCandidateSet);
                }
            }

            // check we are not deleting file with ack for in-use journal files
            if (LOG.isTraceEnabled()) {
                LOG.trace("gc candidates: " + gcCandidateSet);
                LOG.trace("ackMessageFileMap: " + metadata.ackMessageFileMap);
            }
            boolean ackMessageFileMapMod = false;
            Iterator<Integer> candidates = gcCandidateSet.iterator();
            while (candidates.hasNext()) {
                Integer candidate = candidates.next();
                Set<Integer> referencedFileIds = metadata.ackMessageFileMap.get(candidate);
                if (referencedFileIds != null) {
                    for (Integer referencedFileId : referencedFileIds) {
                        if (completeFileSet.contains(referencedFileId) && !gcCandidateSet.contains(referencedFileId)) {
                            // active file that is not targeted for deletion is referenced so don't delete
                            candidates.remove();
                            break;
                        }
                    }
                    if (gcCandidateSet.contains(candidate)) {
                        ackMessageFileMapMod |= (metadata.ackMessageFileMap.remove(candidate) != null);
                    } else {
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("not removing data file: " + candidate
                                    + " as contained ack(s) refer to referenced file: " + referencedFileIds);
                        }
                    }
                }
            }

            if (!gcCandidateSet.isEmpty()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Cleanup removing the data files: " + gcCandidateSet);
                }
                journal.removeDataFiles(gcCandidateSet);
                // Drop references to the deleted files from the ack map and, if it
                // changed, persist the updated metadata (without another cleanup pass).
                for (Integer candidate : gcCandidateSet) {
                    for (Set<Integer> ackFiles : metadata.ackMessageFileMap.values()) {
                        ackMessageFileMapMod |= ackFiles.remove(candidate);
                    }
                }
                if (ackMessageFileMapMod) {
                    checkpointUpdate(tx, false);
                }
            }
        }

        LOG.debug("Checkpoint done.");
    }

    // Completion callback that does nothing; used where only the write latch matters.
    final Runnable nullCompletionCallback = new Runnable() {
1766 @Override 1767 public void run() { 1768 } 1769 }; 1770 1771 private Location checkpointProducerAudit() throws IOException { 1772 if (metadata.producerSequenceIdTracker == null || metadata.producerSequenceIdTracker.modified()) { 1773 ByteArrayOutputStream baos = new ByteArrayOutputStream(); 1774 ObjectOutputStream oout = new ObjectOutputStream(baos); 1775 oout.writeObject(metadata.producerSequenceIdTracker); 1776 oout.flush(); 1777 oout.close(); 1778 // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false 1779 Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback); 1780 try { 1781 location.getLatch().await(); 1782 } catch (InterruptedException e) { 1783 throw new InterruptedIOException(e.toString()); 1784 } 1785 return location; 1786 } 1787 return metadata.producerSequenceIdTrackerLocation; 1788 } 1789 1790 private Location checkpointAckMessageFileMap() throws IOException { 1791 ByteArrayOutputStream baos = new ByteArrayOutputStream(); 1792 ObjectOutputStream oout = new ObjectOutputStream(baos); 1793 oout.writeObject(metadata.ackMessageFileMap); 1794 oout.flush(); 1795 oout.close(); 1796 // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false 1797 Location location = store(new KahaAckMessageFileMapCommand().setAckMessageFileMap(new Buffer(baos.toByteArray())), nullCompletionCallback); 1798 try { 1799 location.getLatch().await(); 1800 } catch (InterruptedException e) { 1801 throw new InterruptedIOException(e.toString()); 1802 } 1803 return location; 1804 } 1805 1806 private Location checkpointSubscriptionCommand(KahaSubscriptionCommand subscription) throws IOException { 1807 1808 ByteSequence sequence = toByteSequence(subscription); 1809 Location location = journal.write(sequence, nullCompletionCallback) ; 1810 1811 try { 1812 location.getLatch().await(); 1813 } catch (InterruptedException e) { 1814 throw new 
InterruptedIOException(e.toString()); 1815 } 1816 return location; 1817 } 1818 1819 public HashSet<Integer> getJournalFilesBeingReplicated() { 1820 return journalFilesBeingReplicated; 1821 } 1822 1823 // ///////////////////////////////////////////////////////////////// 1824 // StoredDestination related implementation methods. 1825 // ///////////////////////////////////////////////////////////////// 1826 1827 protected final HashMap<String, StoredDestination> storedDestinations = new HashMap<String, StoredDestination>(); 1828 1829 static class MessageKeys { 1830 final String messageId; 1831 final Location location; 1832 1833 public MessageKeys(String messageId, Location location) { 1834 this.messageId=messageId; 1835 this.location=location; 1836 } 1837 1838 @Override 1839 public String toString() { 1840 return "["+messageId+","+location+"]"; 1841 } 1842 } 1843 1844 static protected class MessageKeysMarshaller extends VariableMarshaller<MessageKeys> { 1845 static final MessageKeysMarshaller INSTANCE = new MessageKeysMarshaller(); 1846 1847 @Override 1848 public MessageKeys readPayload(DataInput dataIn) throws IOException { 1849 return new MessageKeys(dataIn.readUTF(), LocationMarshaller.INSTANCE.readPayload(dataIn)); 1850 } 1851 1852 @Override 1853 public void writePayload(MessageKeys object, DataOutput dataOut) throws IOException { 1854 dataOut.writeUTF(object.messageId); 1855 LocationMarshaller.INSTANCE.writePayload(object.location, dataOut); 1856 } 1857 } 1858 1859 class LastAck { 1860 long lastAckedSequence; 1861 byte priority; 1862 1863 public LastAck(LastAck source) { 1864 this.lastAckedSequence = source.lastAckedSequence; 1865 this.priority = source.priority; 1866 } 1867 1868 public LastAck() { 1869 this.priority = MessageOrderIndex.HI; 1870 } 1871 1872 public LastAck(long ackLocation) { 1873 this.lastAckedSequence = ackLocation; 1874 this.priority = MessageOrderIndex.LO; 1875 } 1876 1877 public LastAck(long ackLocation, byte priority) { 1878 
this.lastAckedSequence = ackLocation; 1879 this.priority = priority; 1880 } 1881 1882 @Override 1883 public String toString() { 1884 return "[" + lastAckedSequence + ":" + priority + "]"; 1885 } 1886 } 1887 1888 protected class LastAckMarshaller implements Marshaller<LastAck> { 1889 1890 @Override 1891 public void writePayload(LastAck object, DataOutput dataOut) throws IOException { 1892 dataOut.writeLong(object.lastAckedSequence); 1893 dataOut.writeByte(object.priority); 1894 } 1895 1896 @Override 1897 public LastAck readPayload(DataInput dataIn) throws IOException { 1898 LastAck lastAcked = new LastAck(); 1899 lastAcked.lastAckedSequence = dataIn.readLong(); 1900 if (metadata.version >= 3) { 1901 lastAcked.priority = dataIn.readByte(); 1902 } 1903 return lastAcked; 1904 } 1905 1906 @Override 1907 public int getFixedSize() { 1908 return 9; 1909 } 1910 1911 @Override 1912 public LastAck deepCopy(LastAck source) { 1913 return new LastAck(source); 1914 } 1915 1916 @Override 1917 public boolean isDeepCopySupported() { 1918 return true; 1919 } 1920 } 1921 1922 class StoredDestination { 1923 1924 MessageOrderIndex orderIndex = new MessageOrderIndex(); 1925 BTreeIndex<Location, Long> locationIndex; 1926 BTreeIndex<String, Long> messageIdIndex; 1927 1928 // These bits are only set for Topics 1929 BTreeIndex<String, KahaSubscriptionCommand> subscriptions; 1930 BTreeIndex<String, LastAck> subscriptionAcks; 1931 HashMap<String, MessageOrderCursor> subscriptionCursors; 1932 ListIndex<String, SequenceSet> ackPositions; 1933 ListIndex<String, Location> subLocations; 1934 1935 // Transient data used to track which Messages are no longer needed. 
        // Per-sequence reference counts across subscriptions (rebuilt on load).
        final TreeMap<Long, Long> messageReferences = new TreeMap<Long, Long>();
        // NOTE(review): declared as HashSet but initialized with LinkedHashSet,
        // presumably to keep insertion order — confirm before changing the type.
        final HashSet<String> subscriptionCache = new LinkedHashSet<String>();

        public void trackPendingAdd(Long seq) {
            orderIndex.trackPendingAdd(seq);
        }

        public void trackPendingAddComplete(Long seq) {
            orderIndex.trackPendingAddComplete(seq);
        }

        @Override
        public String toString() {
            return "nextSeq:" + orderIndex.nextMessageId + ",lastRet:" + orderIndex.cursor + ",pending:" + orderIndex.pendingAdditions.size();
        }
    }

    /**
     * (De)serializes StoredDestination index roots. On read, also performs
     * in-place upgrades from older store versions; note the upgrade closures
     * capture {@code dataIn}, so the exact read order is part of the on-disk
     * format and must not be reordered.
     */
    protected class StoredDestinationMarshaller extends VariableMarshaller<StoredDestination> {

        @Override
        public StoredDestination readPayload(final DataInput dataIn) throws IOException {
            final StoredDestination value = new StoredDestination();
            value.orderIndex.defaultPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, dataIn.readLong());
            value.locationIndex = new BTreeIndex<Location, Long>(pageFile, dataIn.readLong());
            value.messageIdIndex = new BTreeIndex<String, Long>(pageFile, dataIn.readLong());

            if (dataIn.readBoolean()) {
                // Topic: subscription-related indexes follow.
                value.subscriptions = new BTreeIndex<String, KahaSubscriptionCommand>(pageFile, dataIn.readLong());
                value.subscriptionAcks = new BTreeIndex<String, LastAck>(pageFile, dataIn.readLong());
                if (metadata.version >= 4) {
                    value.ackPositions = new ListIndex<String, SequenceSet>(pageFile, dataIn.readLong());
                } else {
                    // upgrade
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            LinkedHashMap<String, SequenceSet> temp = new LinkedHashMap<String, SequenceSet>();

                            if (metadata.version >= 3) {
                                // migrate
                                BTreeIndex<Long, HashSet<String>> oldAckPositions =
                                    new BTreeIndex<Long, HashSet<String>>(pageFile, dataIn.readLong());
                                oldAckPositions.setKeyMarshaller(LongMarshaller.INSTANCE);
                                oldAckPositions.setValueMarshaller(HashSetStringMarshaller.INSTANCE);
                                oldAckPositions.load(tx);

                                // Do the initial build of the data in memory before writing into the store
                                // based Ack Positions List to avoid a lot of disk thrashing.
                                Iterator<Entry<Long, HashSet<String>>> iterator = oldAckPositions.iterator(tx);
                                while (iterator.hasNext()) {
                                    Entry<Long, HashSet<String>> entry = iterator.next();

                                    for (String subKey : entry.getValue()) {
                                        SequenceSet pendingAcks = temp.get(subKey);
                                        if (pendingAcks == null) {
                                            pendingAcks = new SequenceSet();
                                            temp.put(subKey, pendingAcks);
                                        }

                                        pendingAcks.add(entry.getKey());
                                    }
                                }
                            }
                            // Now move the pending messages to ack data into the store backed
                            // structure.
                            value.ackPositions = new ListIndex<String, SequenceSet>(pageFile, tx.allocate());
                            value.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
                            value.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
                            value.ackPositions.load(tx);
                            for (String subscriptionKey : temp.keySet()) {
                                value.ackPositions.put(tx, subscriptionKey, temp.get(subscriptionKey));
                            }

                        }
                    });
                }

                if (metadata.version >= 5) {
                    value.subLocations = new ListIndex<String, Location>(pageFile, dataIn.readLong());
                } else {
                    // upgrade
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            value.subLocations = new ListIndex<String, Location>(pageFile, tx.allocate());
                            value.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
                            value.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
                            value.subLocations.load(tx);
                        }
                    });
                }
            }
            if (metadata.version >= 2) {
                value.orderIndex.lowPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, dataIn.readLong());
                value.orderIndex.highPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, dataIn.readLong());
            } else {
                // upgrade
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        value.orderIndex.lowPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
                        value.orderIndex.lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
                        value.orderIndex.lowPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE);
                        value.orderIndex.lowPriorityIndex.load(tx);

                        value.orderIndex.highPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
                        value.orderIndex.highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
                        value.orderIndex.highPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE);
                        value.orderIndex.highPriorityIndex.load(tx);
                    }
                });
            }

            return value;
        }

        @Override
        public void writePayload(StoredDestination value, DataOutput dataOut) throws IOException {
            dataOut.writeLong(value.orderIndex.defaultPriorityIndex.getPageId());
            dataOut.writeLong(value.locationIndex.getPageId());
            dataOut.writeLong(value.messageIdIndex.getPageId());
            if (value.subscriptions != null) {
                // the boolean flag marks this destination as a topic on reload
                dataOut.writeBoolean(true);
                dataOut.writeLong(value.subscriptions.getPageId());
                dataOut.writeLong(value.subscriptionAcks.getPageId());
                dataOut.writeLong(value.ackPositions.getHeadPageId());
                dataOut.writeLong(value.subLocations.getHeadPageId());
            } else {
                dataOut.writeBoolean(false);
            }
            dataOut.writeLong(value.orderIndex.lowPriorityIndex.getPageId());
            dataOut.writeLong(value.orderIndex.highPriorityIndex.getPageId());
        }
    }

    static class KahaSubscriptionCommandMarshaller extends VariableMarshaller<KahaSubscriptionCommand> {
        final static KahaSubscriptionCommandMarshaller INSTANCE = new KahaSubscriptionCommandMarshaller();

        @Override
        public KahaSubscriptionCommand readPayload(DataInput dataIn) throws IOException {
            KahaSubscriptionCommand rc = new KahaSubscriptionCommand();
            // NOTE(review): relies on the concrete DataInput also being an
            // InputStream — confirm callers always pass a stream-backed input.
            rc.mergeFramed((InputStream) dataIn);
            return rc;
        }

        @Override
        public void writePayload(KahaSubscriptionCommand object, DataOutput dataOut) throws IOException {
            object.writeFramed((OutputStream) dataOut);
        }
    }

    /**
     * Returns the destination's index state, loading and caching it on first use.
     */
    protected StoredDestination getStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
        String key = key(destination);
        StoredDestination rc = storedDestinations.get(key);
        if (rc == null) {
            boolean topic = destination.getType() == KahaDestination.DestinationType.TOPIC || destination.getType() == KahaDestination.DestinationType.TEMP_TOPIC;
            rc = loadStoredDestination(tx, key, topic);
            // Cache it. We may want to remove/unload destinations from the
            // cache that are not used for a while
            // to reduce memory usage.
            storedDestinations.put(key, rc);
        }
        return rc;
    }

    /**
     * Like getStoredDestination but never creates a new destination: returns
     * null when the destination is not already present in the metadata index.
     */
    protected StoredDestination getExistingStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
        String key = key(destination);
        StoredDestination rc = storedDestinations.get(key);
        if (rc == null && metadata.destinations.containsKey(tx, key)) {
            rc = getStoredDestination(destination, tx);
        }
        return rc;
    }

    /**
     * Loads (or creates) the index structures for a destination, performing any
     * store-version upgrades, and rebuilds the transient reference counts.
     *
     * @param tx index transaction
     * @param key destination key as produced by key(KahaDestination)
     * @param topic whether subscription-related indexes must be set up
     * @return the loaded destination state
     * @throws IOException on index failure
     */
    private StoredDestination loadStoredDestination(Transaction tx, String key, boolean topic) throws IOException {
        // Try to load the existing indexes..
        StoredDestination rc = metadata.destinations.get(tx, key);
        if (rc == null) {
            // Brand new destination.. allocate indexes for it.
            rc = new StoredDestination();
            rc.orderIndex.allocate(tx);
            rc.locationIndex = new BTreeIndex<Location, Long>(pageFile, tx.allocate());
            rc.messageIdIndex = new BTreeIndex<String, Long>(pageFile, tx.allocate());

            if (topic) {
                rc.subscriptions = new BTreeIndex<String, KahaSubscriptionCommand>(pageFile, tx.allocate());
                rc.subscriptionAcks = new BTreeIndex<String, LastAck>(pageFile, tx.allocate());
                rc.ackPositions = new ListIndex<String, SequenceSet>(pageFile, tx.allocate());
                rc.subLocations = new ListIndex<String, Location>(pageFile, tx.allocate());
            }
            metadata.destinations.put(tx, key, rc);
        }

        // Configure the marshalers and load.
        rc.orderIndex.load(tx);

        // Figure out the next key using the last entry in the destination.
        rc.orderIndex.configureLast(tx);

        rc.locationIndex.setKeyMarshaller(org.apache.activemq.store.kahadb.disk.util.LocationMarshaller.INSTANCE);
        rc.locationIndex.setValueMarshaller(LongMarshaller.INSTANCE);
        rc.locationIndex.load(tx);

        rc.messageIdIndex.setKeyMarshaller(StringMarshaller.INSTANCE);
        rc.messageIdIndex.setValueMarshaller(LongMarshaller.INSTANCE);
        rc.messageIdIndex.load(tx);

        // If it was a topic...
        if (topic) {

            rc.subscriptions.setKeyMarshaller(StringMarshaller.INSTANCE);
            rc.subscriptions.setValueMarshaller(KahaSubscriptionCommandMarshaller.INSTANCE);
            rc.subscriptions.load(tx);

            rc.subscriptionAcks.setKeyMarshaller(StringMarshaller.INSTANCE);
            rc.subscriptionAcks.setValueMarshaller(new LastAckMarshaller());
            rc.subscriptionAcks.load(tx);

            rc.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
            rc.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
            rc.ackPositions.load(tx);

            rc.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
            rc.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
            rc.subLocations.load(tx);

            rc.subscriptionCursors = new HashMap<String, MessageOrderCursor>();

            if (metadata.version < 3) {

                // on upgrade need to fill ackLocation with available messages past last ack
                for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
                    Entry<String, LastAck> entry = iterator.next();
                    for (Iterator<Entry<Long, MessageKeys>> orderIterator =
                            rc.orderIndex.iterator(tx, new MessageOrderCursor(entry.getValue().lastAckedSequence)); orderIterator.hasNext(); ) {
                        Long sequence = orderIterator.next().getKey();
                        addAckLocation(tx, rc, sequence, entry.getKey());
                    }
                    // modify so it is upgraded
                    rc.subscriptionAcks.put(tx, entry.getKey(), entry.getValue());
                }
            }

            // Configure the message references index
            Iterator<Entry<String, SequenceSet>> subscriptions = rc.ackPositions.iterator(tx);
            while (subscriptions.hasNext()) {
                Entry<String, SequenceSet> subscription = subscriptions.next();
                SequenceSet pendingAcks = subscription.getValue();
                if (pendingAcks != null && !pendingAcks.isEmpty()) {
                    Long lastPendingAck = pendingAcks.getTail().getLast();
                    for (Long sequenceId : pendingAcks) {
                        // NOTE(review): new Long(0) is deprecated boxing; prefer
                        // Long.valueOf(0L) when this method is next touched.
                        Long current = rc.messageReferences.get(sequenceId);
                        if (current == null) {
                            current = new Long(0);
                        }

                        // We always add a trailing empty entry for the next position to start from
                        // so we need to ensure we don't count that as a message reference on reload.
                        if (!sequenceId.equals(lastPendingAck)) {
                            current = current.longValue() + 1;
                        }

                        rc.messageReferences.put(sequenceId, current);
                    }
                }
            }

            // Configure the subscription cache
            for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
                Entry<String, LastAck> entry = iterator.next();
                rc.subscriptionCache.add(entry.getKey());
            }

            if (rc.orderIndex.nextMessageId == 0) {
                // check for existing durable sub all acked out - pull next seq from acks as messages are gone
                if (!rc.subscriptionAcks.isEmpty(tx)) {
                    for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext();) {
                        Entry<String, LastAck> entry = iterator.next();
                        rc.orderIndex.nextMessageId =
                                Math.max(rc.orderIndex.nextMessageId, entry.getValue().lastAckedSequence + 1);
                    }
                }
            } else {
                // update based on ackPositions for unmatched, last entry is always the next
                if (!rc.messageReferences.isEmpty()) {
                    Long nextMessageId = (Long) rc.messageReferences.keySet().toArray()[rc.messageReferences.size() - 1];
                    rc.orderIndex.nextMessageId =
                            Math.max(rc.orderIndex.nextMessageId, nextMessageId);
                }
            }
        }

        if (metadata.version < VERSION) {
            // store again after upgrade
            metadata.destinations.put(tx, key, rc);
        }
        return rc;
    }

    /**
     * Records that the given message sequence is pending acknowledgement by the
     * given subscription, bumping the shared per-sequence reference count.
     */
    private void addAckLocation(Transaction tx, StoredDestination sd, Long messageSequence, String subscriptionKey) throws IOException {
        SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey);
        if (sequences == null) {
            sequences = new SequenceSet();
sequences.add(messageSequence); 2248 sd.ackPositions.add(tx, subscriptionKey, sequences); 2249 } else { 2250 sequences.add(messageSequence); 2251 sd.ackPositions.put(tx, subscriptionKey, sequences); 2252 } 2253 2254 Long count = sd.messageReferences.get(messageSequence); 2255 if (count == null) { 2256 count = Long.valueOf(0L); 2257 } 2258 count = count.longValue() + 1; 2259 sd.messageReferences.put(messageSequence, count); 2260 } 2261 2262 // new sub is interested in potentially all existing messages 2263 private void addAckLocationForRetroactiveSub(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException { 2264 SequenceSet allOutstanding = new SequenceSet(); 2265 Iterator<Map.Entry<String, SequenceSet>> iterator = sd.ackPositions.iterator(tx); 2266 while (iterator.hasNext()) { 2267 SequenceSet set = iterator.next().getValue(); 2268 for (Long entry : set) { 2269 allOutstanding.add(entry); 2270 } 2271 } 2272 sd.ackPositions.put(tx, subscriptionKey, allOutstanding); 2273 2274 for (Long ackPosition : allOutstanding) { 2275 Long count = sd.messageReferences.get(ackPosition); 2276 count = count.longValue() + 1; 2277 sd.messageReferences.put(ackPosition, count); 2278 } 2279 } 2280 2281 // on a new message add, all existing subs are interested in this message 2282 private void addAckLocationForNewMessage(Transaction tx, StoredDestination sd, Long messageSequence) throws IOException { 2283 for(String subscriptionKey : sd.subscriptionCache) { 2284 SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey); 2285 if (sequences == null) { 2286 sequences = new SequenceSet(); 2287 sequences.add(new Sequence(messageSequence, messageSequence + 1)); 2288 sd.ackPositions.add(tx, subscriptionKey, sequences); 2289 } else { 2290 sequences.add(new Sequence(messageSequence, messageSequence + 1)); 2291 sd.ackPositions.put(tx, subscriptionKey, sequences); 2292 } 2293 2294 Long count = sd.messageReferences.get(messageSequence); 2295 if (count == null) { 2296 
count = Long.valueOf(0L); 2297 } 2298 count = count.longValue() + 1; 2299 sd.messageReferences.put(messageSequence, count); 2300 sd.messageReferences.put(messageSequence+1, Long.valueOf(0L)); 2301 } 2302 } 2303 2304 private void removeAckLocationsForSub(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException { 2305 if (!sd.ackPositions.isEmpty(tx)) { 2306 SequenceSet sequences = sd.ackPositions.remove(tx, subscriptionKey); 2307 if (sequences == null || sequences.isEmpty()) { 2308 return; 2309 } 2310 2311 ArrayList<Long> unreferenced = new ArrayList<Long>(); 2312 2313 for(Long sequenceId : sequences) { 2314 Long references = sd.messageReferences.get(sequenceId); 2315 if (references != null) { 2316 references = references.longValue() - 1; 2317 2318 if (references.longValue() > 0) { 2319 sd.messageReferences.put(sequenceId, references); 2320 } else { 2321 sd.messageReferences.remove(sequenceId); 2322 unreferenced.add(sequenceId); 2323 } 2324 } 2325 } 2326 2327 for(Long sequenceId : unreferenced) { 2328 // Find all the entries that need to get deleted. 2329 ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<Entry<Long, MessageKeys>>(); 2330 sd.orderIndex.getDeleteList(tx, deletes, sequenceId); 2331 2332 // Do the actual deletes. 2333 for (Entry<Long, MessageKeys> entry : deletes) { 2334 sd.locationIndex.remove(tx, entry.getValue().location); 2335 sd.messageIdIndex.remove(tx, entry.getValue().messageId); 2336 sd.orderIndex.remove(tx, entry.getKey()); 2337 } 2338 } 2339 } 2340 } 2341 2342 /** 2343 * @param tx 2344 * @param sd 2345 * @param subscriptionKey 2346 * @param messageSequence 2347 * @throws IOException 2348 */ 2349 private void removeAckLocation(Transaction tx, StoredDestination sd, String subscriptionKey, Long messageSequence) throws IOException { 2350 // Remove the sub from the previous location set.. 
2351 if (messageSequence != null) { 2352 SequenceSet range = sd.ackPositions.get(tx, subscriptionKey); 2353 if (range != null && !range.isEmpty()) { 2354 range.remove(messageSequence); 2355 if (!range.isEmpty()) { 2356 sd.ackPositions.put(tx, subscriptionKey, range); 2357 } else { 2358 sd.ackPositions.remove(tx, subscriptionKey); 2359 } 2360 2361 // Check if the message is reference by any other subscription. 2362 Long count = sd.messageReferences.get(messageSequence); 2363 if (count != null){ 2364 long references = count.longValue() - 1; 2365 if (references > 0) { 2366 sd.messageReferences.put(messageSequence, Long.valueOf(references)); 2367 return; 2368 } else { 2369 sd.messageReferences.remove(messageSequence); 2370 } 2371 } 2372 2373 // Find all the entries that need to get deleted. 2374 ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<Entry<Long, MessageKeys>>(); 2375 sd.orderIndex.getDeleteList(tx, deletes, messageSequence); 2376 2377 // Do the actual deletes. 2378 for (Entry<Long, MessageKeys> entry : deletes) { 2379 sd.locationIndex.remove(tx, entry.getValue().location); 2380 sd.messageIdIndex.remove(tx, entry.getValue().messageId); 2381 sd.orderIndex.remove(tx, entry.getKey()); 2382 } 2383 } 2384 } 2385 } 2386 2387 public LastAck getLastAck(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException { 2388 return sd.subscriptionAcks.get(tx, subscriptionKey); 2389 } 2390 2391 public long getStoredMessageCount(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException { 2392 SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey); 2393 if (messageSequences != null) { 2394 long result = messageSequences.rangeSize(); 2395 // if there's anything in the range the last value is always the nextMessage marker, so remove 1. 2396 return result > 0 ? 
result - 1 : 0; 2397 } 2398 2399 return 0; 2400 } 2401 2402 protected String key(KahaDestination destination) { 2403 return destination.getType().getNumber() + ":" + destination.getName(); 2404 } 2405 2406 // ///////////////////////////////////////////////////////////////// 2407 // Transaction related implementation methods. 2408 // ///////////////////////////////////////////////////////////////// 2409 @SuppressWarnings("rawtypes") 2410 private final LinkedHashMap<TransactionId, List<Operation>> inflightTransactions = new LinkedHashMap<TransactionId, List<Operation>>(); 2411 @SuppressWarnings("rawtypes") 2412 protected final LinkedHashMap<TransactionId, List<Operation>> preparedTransactions = new LinkedHashMap<TransactionId, List<Operation>>(); 2413 protected final Set<String> ackedAndPrepared = new HashSet<String>(); 2414 protected final Set<String> rolledBackAcks = new HashSet<String>(); 2415 2416 // messages that have prepared (pending) acks cannot be re-dispatched unless the outcome is rollback, 2417 // till then they are skipped by the store. 
    // 'at most once' XA guarantee
    // Records recovered prepared acks so their messages stay hidden from dispatch.
    public void trackRecoveredAcks(ArrayList<MessageAck> acks) {
        this.indexLock.writeLock().lock();
        try {
            for (MessageAck ack : acks) {
                ackedAndPrepared.add(ack.getLastMessageId().toProducerKey());
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    // Clears tracked prepared acks on transaction outcome; on rollback the ids
    // are remembered so the messages can be re-dispatched.
    public void forgetRecoveredAcks(ArrayList<MessageAck> acks, boolean rollback) throws IOException {
        if (acks != null) {
            this.indexLock.writeLock().lock();
            try {
                for (MessageAck ack : acks) {
                    final String id = ack.getLastMessageId().toProducerKey();
                    ackedAndPrepared.remove(id);
                    if (rollback) {
                        rolledBackAcks.add(id);
                    }
                }
            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }

    // Returns (creating on demand) the operation list for an in-flight transaction.
    @SuppressWarnings("rawtypes")
    private List<Operation> getInflightTx(KahaTransactionInfo info) {
        TransactionId key = TransactionIdConversion.convert(info);
        List<Operation> tx;
        synchronized (inflightTransactions) {
            tx = inflightTransactions.get(key);
            if (tx == null) {
                tx = Collections.synchronizedList(new ArrayList<Operation>());
                inflightTransactions.put(key, tx);
            }
        }
        return tx;
    }

    @SuppressWarnings("unused")
    private TransactionId key(KahaTransactionInfo transactionInfo) {
        return TransactionIdConversion.convert(transactionInfo);
    }

    // A journal command paired with the location it was written to; replayed
    // against the index when its transaction completes.
    abstract class Operation <T extends JournalCommand<T>> {
        final T command;
        final Location location;

        public Operation(T command, Location location) {
            this.command = command;
            this.location = location;
        }

        public Location getLocation() {
            return location;
        }

        public T getCommand() {
            return command;
        }

        abstract public void execute(Transaction tx) throws IOException;
    }

    class AddOperation extends Operation<KahaAddMessageCommand> {
        final IndexAware runWithIndexLock;

        public AddOperation(KahaAddMessageCommand command, Location location, IndexAware runWithIndexLock) {
            super(command, location);
            this.runWithIndexLock = runWithIndexLock;
        }

        @Override
        public void execute(Transaction tx) throws IOException {
            long seq = updateIndex(tx, command, location);
            if (runWithIndexLock != null) {
                // notify the caller of the assigned sequence while the index lock is held
                runWithIndexLock.sequenceAssignedWithIndexLocked(seq);
            }
        }

    }

    class RemoveOperation extends Operation<KahaRemoveMessageCommand> {

        public RemoveOperation(KahaRemoveMessageCommand command, Location location) {
            super(command, location);
        }

        @Override
        public void execute(Transaction tx) throws IOException {
            updateIndex(tx, command, location);
        }
    }

    // /////////////////////////////////////////////////////////////////
    // Initialization related implementation methods.
    // /////////////////////////////////////////////////////////////////

    // Builds the index PageFile from the configured tuning options.
    private PageFile createPageFile() throws IOException {
        if (indexDirectory == null) {
            indexDirectory = directory;
        }
        IOHelper.mkdirs(indexDirectory);
        PageFile index = new PageFile(indexDirectory, "db");
        index.setEnableWriteThread(isEnableIndexWriteAsync());
        index.setWriteBatchSize(getIndexWriteBatchSize());
        index.setPageCacheSize(indexCacheSize);
        index.setUseLFRUEviction(isUseIndexLFRUEviction());
        index.setLFUEvictionFactor(getIndexLFUEvictionFactor());
        index.setEnableDiskSyncs(isEnableIndexDiskSyncs());
        index.setEnableRecoveryFile(isEnableIndexRecoveryFile());
        index.setEnablePageCaching(isEnableIndexPageCaching());
        return index;
    }

    // Builds the journal from the configured tuning options.
    private Journal createJournal() throws IOException {
        Journal manager = new Journal();
        manager.setDirectory(directory);
        manager.setMaxFileLength(getJournalMaxFileLength());
        manager.setCheckForCorruptionOnStartup(checkForCorruptJournalFiles);
        // corruption checking requires checksums, so it forces them on
        manager.setChecksum(checksumJournalFiles || checkForCorruptJournalFiles);
        manager.setWriteBatchSize(getJournalMaxWriteBatchSize());
        manager.setArchiveDataLogs(isArchiveDataLogs());
        manager.setSizeAccumulator(journalSize);
        manager.setEnableAsyncDiskSync(isEnableJournalDiskSyncs());
        manager.setPreallocationScope(Journal.PreallocationScope.valueOf(preallocationScope.trim().toUpperCase()));
        manager.setPreallocationStrategy(
                Journal.PreallocationStrategy.valueOf(preallocationStrategy.trim().toUpperCase()));
        if (getDirectoryArchive() != null) {
            IOHelper.mkdirs(getDirectoryArchive());
            manager.setDirectoryArchive(getDirectoryArchive());
        }
        return manager;
    }

    private Metadata createMetadata() {
        Metadata md = new Metadata();
        md.producerSequenceIdTracker.setAuditDepth(getFailoverProducersAuditDepth());
        md.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(getMaxFailoverProducersToTrack());
        return md;
    }

    public int getJournalMaxWriteBatchSize() {
        return journalMaxWriteBatchSize;
    }

    public void setJournalMaxWriteBatchSize(int journalMaxWriteBatchSize) {
        this.journalMaxWriteBatchSize = journalMaxWriteBatchSize;
    }

    public File getDirectory() {
        return directory;
    }

    public void setDirectory(File directory) {
        this.directory = directory;
    }

    public boolean isDeleteAllMessages() {
        return deleteAllMessages;
    }

    public void setDeleteAllMessages(boolean deleteAllMessages) {
        this.deleteAllMessages = deleteAllMessages;
    }

    // NOTE(review): the backing field is (confusingly) named setIndexWriteBatchSize.
    public void setIndexWriteBatchSize(int setIndexWriteBatchSize) {
        this.setIndexWriteBatchSize = setIndexWriteBatchSize;
    }

    public int getIndexWriteBatchSize() {
        return setIndexWriteBatchSize;
    }

    public void setEnableIndexWriteAsync(boolean enableIndexWriteAsync) {
        this.enableIndexWriteAsync = enableIndexWriteAsync;
    }

    boolean isEnableIndexWriteAsync() {
        return enableIndexWriteAsync;
    }

    public boolean isEnableJournalDiskSyncs() {
        return enableJournalDiskSyncs;
    }

    public void setEnableJournalDiskSyncs(boolean syncWrites) {
        this.enableJournalDiskSyncs = syncWrites;
    }

    public long getCheckpointInterval() {
        return checkpointInterval;
    }

    public void setCheckpointInterval(long checkpointInterval) {
        this.checkpointInterval = checkpointInterval;
    }

    public long getCleanupInterval() {
        return cleanupInterval;
    }

    public void setCleanupInterval(long cleanupInterval) {
        this.cleanupInterval = cleanupInterval;
    }

    public void setJournalMaxFileLength(int journalMaxFileLength) {
        this.journalMaxFileLength = journalMaxFileLength;
    }

    public int getJournalMaxFileLength() {
        return journalMaxFileLength;
    }

    public void setMaxFailoverProducersToTrack(int maxFailoverProducersToTrack) {
        this.metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxFailoverProducersToTrack);
    }

    public int getMaxFailoverProducersToTrack() {
        return this.metadata.producerSequenceIdTracker.getMaximumNumberOfProducersToTrack();
    }

    public void setFailoverProducersAuditDepth(int failoverProducersAuditDepth) {
        this.metadata.producerSequenceIdTracker.setAuditDepth(failoverProducersAuditDepth);
    }

    public int getFailoverProducersAuditDepth() {
        return this.metadata.producerSequenceIdTracker.getAuditDepth();
    }

    // Lazily creates the page file on first access.
    public PageFile getPageFile() throws IOException {
        if (pageFile == null) {
            pageFile = createPageFile();
        }
        return pageFile;
    }

    // Lazily creates the journal on first access.
    public Journal getJournal() throws IOException {
        if (journal == null) {
            journal = createJournal();
        }
        return journal;
    }

    protected Metadata getMetadata() {
        return metadata;
    }

    public boolean isFailIfDatabaseIsLocked() {
        return failIfDatabaseIsLocked;
    }

    public void setFailIfDatabaseIsLocked(boolean failIfDatabaseIsLocked) {
        this.failIfDatabaseIsLocked = failIfDatabaseIsLocked;
    }

    public boolean isIgnoreMissingJournalfiles() {
        return ignoreMissingJournalfiles;
    }

    public void setIgnoreMissingJournalfiles(boolean ignoreMissingJournalfiles) {
        this.ignoreMissingJournalfiles = ignoreMissingJournalfiles;
    }

    public int getIndexCacheSize() {
        return indexCacheSize;
    }

    public void setIndexCacheSize(int indexCacheSize) {
        this.indexCacheSize = indexCacheSize;
    }

    public boolean isCheckForCorruptJournalFiles() {
        return checkForCorruptJournalFiles;
    }

    public void setCheckForCorruptJournalFiles(boolean checkForCorruptJournalFiles) {
        this.checkForCorruptJournalFiles = checkForCorruptJournalFiles;
    }

    public boolean isChecksumJournalFiles() {
        return checksumJournalFiles;
    }

    public void setChecksumJournalFiles(boolean checksumJournalFiles) {
        this.checksumJournalFiles = checksumJournalFiles;
    }

    @Override
    public void setBrokerService(BrokerService brokerService) {
        this.brokerService = brokerService;
    }

    /**
     * @return the archiveDataLogs
     */
    public boolean isArchiveDataLogs() {
        return this.archiveDataLogs;
    }

    /**
     * @param archiveDataLogs the archiveDataLogs to set
     */
    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }

    /**
     * @return the directoryArchive
     */
    public File getDirectoryArchive() {
        return this.directoryArchive;
    }

    /**
     * @param directoryArchive the directoryArchive to set
     */
    public void setDirectoryArchive(File directoryArchive)
{ 2739 this.directoryArchive = directoryArchive; 2740 } 2741 2742 public boolean isArchiveCorruptedIndex() { 2743 return archiveCorruptedIndex; 2744 } 2745 2746 public void setArchiveCorruptedIndex(boolean archiveCorruptedIndex) { 2747 this.archiveCorruptedIndex = archiveCorruptedIndex; 2748 } 2749 2750 public float getIndexLFUEvictionFactor() { 2751 return indexLFUEvictionFactor; 2752 } 2753 2754 public void setIndexLFUEvictionFactor(float indexLFUEvictionFactor) { 2755 this.indexLFUEvictionFactor = indexLFUEvictionFactor; 2756 } 2757 2758 public boolean isUseIndexLFRUEviction() { 2759 return useIndexLFRUEviction; 2760 } 2761 2762 public void setUseIndexLFRUEviction(boolean useIndexLFRUEviction) { 2763 this.useIndexLFRUEviction = useIndexLFRUEviction; 2764 } 2765 2766 public void setEnableIndexDiskSyncs(boolean enableIndexDiskSyncs) { 2767 this.enableIndexDiskSyncs = enableIndexDiskSyncs; 2768 } 2769 2770 public void setEnableIndexRecoveryFile(boolean enableIndexRecoveryFile) { 2771 this.enableIndexRecoveryFile = enableIndexRecoveryFile; 2772 } 2773 2774 public void setEnableIndexPageCaching(boolean enableIndexPageCaching) { 2775 this.enableIndexPageCaching = enableIndexPageCaching; 2776 } 2777 2778 public boolean isEnableIndexDiskSyncs() { 2779 return enableIndexDiskSyncs; 2780 } 2781 2782 public boolean isEnableIndexRecoveryFile() { 2783 return enableIndexRecoveryFile; 2784 } 2785 2786 public boolean isEnableIndexPageCaching() { 2787 return enableIndexPageCaching; 2788 } 2789 2790 // ///////////////////////////////////////////////////////////////// 2791 // Internal conversion methods. 
// /////////////////////////////////////////////////////////////////

    /**
     * Tracks the next sequence position to read from each of the three
     * priority indexes (high / default / low) of a destination.
     * A position of 0 means "not yet positioned / start from the beginning".
     */
    class MessageOrderCursor {
        long defaultCursorPosition;
        long lowPriorityCursorPosition;
        long highPriorityCursorPosition;

        MessageOrderCursor() {
        }

        MessageOrderCursor(long position) {
            this.defaultCursorPosition = position;
            this.lowPriorityCursorPosition = position;
            this.highPriorityCursorPosition = position;
        }

        MessageOrderCursor(MessageOrderCursor other) {
            this.defaultCursorPosition = other.defaultCursorPosition;
            this.lowPriorityCursorPosition = other.lowPriorityCursorPosition;
            this.highPriorityCursorPosition = other.highPriorityCursorPosition;
        }

        MessageOrderCursor copy() {
            return new MessageOrderCursor(this);
        }

        void reset() {
            this.defaultCursorPosition = 0;
            this.highPriorityCursorPosition = 0;
            this.lowPriorityCursorPosition = 0;
        }

        // Only advances cursors that have already been positioned (non-zero);
        // a 0 position keeps its "from the beginning" meaning.
        void increment() {
            if (defaultCursorPosition != 0) {
                defaultCursorPosition++;
            }
            if (highPriorityCursorPosition != 0) {
                highPriorityCursorPosition++;
            }
            if (lowPriorityCursorPosition != 0) {
                lowPriorityCursorPosition++;
            }
        }

        @Override
        public String toString() {
            return "MessageOrderCursor:[def:" + defaultCursorPosition
                    + ", low:" + lowPriorityCursorPosition
                    + ", high:" + highPriorityCursorPosition + "]";
        }

        public void sync(MessageOrderCursor other) {
            this.defaultCursorPosition = other.defaultCursorPosition;
            this.lowPriorityCursorPosition = other.lowPriorityCursorPosition;
            this.highPriorityCursorPosition = other.highPriorityCursorPosition;
        }
    }

    /**
     * Orders a destination's messages across up to three BTree indexes, one
     * per priority band. The low/high indexes only exist when the store
     * metadata is at version 2 or later; code below that dereferences them
     * without a null check assumes that version (see hedged notes).
     */
    class MessageOrderIndex {
        static final byte HI = 9;
        static final byte LO = 0;
        static final byte DEF = 4;

        long nextMessageId;
        BTreeIndex<Long, MessageKeys> defaultPriorityIndex;
        BTreeIndex<Long, MessageKeys> lowPriorityIndex;
        BTreeIndex<Long, MessageKeys> highPriorityIndex;
        final MessageOrderCursor cursor = new MessageOrderCursor();
        Long lastDefaultKey;
        Long lastHighKey;
        Long lastLowKey;
        byte lastGetPriority;
        // Guarded by synchronized (pendingAdditions).
        final List<Long> pendingAdditions = new LinkedList<Long>();

        // Removes the key from whichever priority index contains it, trying
        // default first, then high, then low.
        MessageKeys remove(Transaction tx, Long key) throws IOException {
            MessageKeys result = defaultPriorityIndex.remove(tx, key);
            if (result == null && highPriorityIndex != null) {
                result = highPriorityIndex.remove(tx, key);
                if (result == null && lowPriorityIndex != null) {
                    result = lowPriorityIndex.remove(tx, key);
                }
            }
            return result;
        }

        void load(Transaction tx) throws IOException {
            defaultPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
            defaultPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE);
            defaultPriorityIndex.load(tx);
            lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
            lowPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE);
            lowPriorityIndex.load(tx);
            highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
            highPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE);
            highPriorityIndex.load(tx);
        }

        // Low/high indexes are only allocated for store version >= 2.
        void allocate(Transaction tx) throws IOException {
            defaultPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
            if (metadata.version >= 2) {
                lowPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
                highPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
            }
        }

        // Figure out the next key using the last entry in the destination.
        void configureLast(Transaction tx) throws IOException {
            TreeSet<Long> orderedSet = new TreeSet<Long>();

            addLast(orderedSet, highPriorityIndex, tx);
            addLast(orderedSet, defaultPriorityIndex, tx);
            addLast(orderedSet, lowPriorityIndex, tx);

            if (!orderedSet.isEmpty()) {
                nextMessageId = orderedSet.last() + 1;
            }
        }

        // Adds the index's last key (if any) to the ordered set; null-safe.
        private void addLast(TreeSet<Long> orderedSet, BTreeIndex<Long, MessageKeys> index, Transaction tx) throws IOException {
            if (index != null) {
                Entry<Long, MessageKeys> lastEntry = index.getLast(tx);
                if (lastEntry != null) {
                    orderedSet.add(lastEntry.getKey());
                }
            }
        }

        // Drops all entries and rebuilds empty indexes.
        void clear(Transaction tx) throws IOException {
            this.remove(tx);
            this.resetCursorPosition();
            this.allocate(tx);
            this.load(tx);
            this.configureLast(tx);
        }

        // Frees the index pages; the instance is unusable until allocate/load.
        void remove(Transaction tx) throws IOException {
            defaultPriorityIndex.clear(tx);
            defaultPriorityIndex.unload(tx);
            tx.free(defaultPriorityIndex.getPageId());
            if (lowPriorityIndex != null) {
                lowPriorityIndex.clear(tx);
                lowPriorityIndex.unload(tx);
                tx.free(lowPriorityIndex.getPageId());
            }
            if (highPriorityIndex != null) {
                highPriorityIndex.clear(tx);
                highPriorityIndex.unload(tx);
                tx.free(highPriorityIndex.getPageId());
            }
        }

        void resetCursorPosition() {
            this.cursor.reset();
            lastDefaultKey = null;
            lastHighKey = null;
            lastLowKey = null;
        }

        // Positions all three cursors just past the given sequence.
        void setBatch(Transaction tx, Long sequence) throws IOException {
            if (sequence != null) {
                // Was "new Long(sequence.longValue() + 1)" — deprecated boxing
                // constructor; plain primitive arithmetic is equivalent.
                final long nextPosition = sequence.longValue() + 1;
                lastDefaultKey = sequence;
                cursor.defaultCursorPosition = nextPosition;
                lastHighKey = sequence;
                cursor.highPriorityCursorPosition = nextPosition;
                lastLowKey = sequence;
                cursor.lowPriorityCursorPosition = nextPosition;
            }
        }

        void setBatch(Transaction tx, LastAck last) throws IOException {
            setBatch(tx, last.lastAckedSequence);
            // All cursors can still be 0 here (e.g. the delegated call was a
            // no-op or positioned at zero); in that case position only the
            // bands at or above the acked priority.
            if (cursor.defaultCursorPosition == 0
                    && cursor.highPriorityCursorPosition == 0
                    && cursor.lowPriorityCursorPosition == 0) {
                long next = last.lastAckedSequence + 1;
                switch (last.priority) {
                    case DEF:
                        cursor.defaultCursorPosition = next;
                        cursor.highPriorityCursorPosition = next;
                        break;
                    case HI:
                        cursor.highPriorityCursorPosition = next;
                        break;
                    case LO:
                        cursor.lowPriorityCursorPosition = next;
                        cursor.defaultCursorPosition = next;
                        cursor.highPriorityCursorPosition = next;
                        break;
                }
            }
        }

        // Commits the per-iteration "last key" markers into the cursor.
        void stoppedIterating() {
            if (lastDefaultKey != null) {
                cursor.defaultCursorPosition = lastDefaultKey.longValue() + 1;
            }
            if (lastHighKey != null) {
                cursor.highPriorityCursorPosition = lastHighKey.longValue() + 1;
            }
            if (lastLowKey != null) {
                cursor.lowPriorityCursorPosition = lastLowKey.longValue() + 1;
            }
            lastDefaultKey = null;
            lastHighKey = null;
            lastLowKey = null;
        }

        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes, Long sequenceId)
                throws IOException {
            if (defaultPriorityIndex.containsKey(tx, sequenceId)) {
                getDeleteList(tx, deletes, defaultPriorityIndex, sequenceId);
            } else if (highPriorityIndex != null && highPriorityIndex.containsKey(tx, sequenceId)) {
                getDeleteList(tx, deletes, highPriorityIndex, sequenceId);
            } else if (lowPriorityIndex != null && lowPriorityIndex.containsKey(tx, sequenceId)) {
                getDeleteList(tx, deletes, lowPriorityIndex, sequenceId);
            }
        }

        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes,
                BTreeIndex<Long, MessageKeys> index, Long sequenceId) throws IOException {

            // NOTE(review): next() is called without hasNext(); callers reach
            // here only after containsKey(sequenceId), so the iterator
            // presumably has at least one entry — confirm before reuse.
            Iterator<Entry<Long, MessageKeys>> iterator = index.iterator(tx, sequenceId, null);
            deletes.add(iterator.next());
        }

        // The priority argument is currently unused; ids are global across bands.
        long getNextMessageId(int priority) {
            return nextMessageId++;
        }

        // NOTE(review): unlike remove(), this dereferences the high/low
        // indexes without null checks — assumes store version >= 2; confirm
        // with callers on version-1 stores.
        MessageKeys get(Transaction tx, Long key) throws IOException {
            MessageKeys result = defaultPriorityIndex.get(tx, key);
            if (result == null) {
                result = highPriorityIndex.get(tx, key);
                if (result == null) {
                    result = lowPriorityIndex.get(tx, key);
                    lastGetPriority = LO;
                } else {
                    lastGetPriority = HI;
                }
            } else {
                lastGetPriority = DEF;
            }
            return result;
        }

        MessageKeys put(Transaction tx, int priority, Long key, MessageKeys value) throws IOException {
            if (priority == javax.jms.Message.DEFAULT_PRIORITY) {
                return defaultPriorityIndex.put(tx, key, value);
            } else if (priority > javax.jms.Message.DEFAULT_PRIORITY) {
                return highPriorityIndex.put(tx, key, value);
            } else {
                return lowPriorityIndex.put(tx, key, value);
            }
        }

        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx) throws IOException {
            return new MessageOrderIterator(tx, cursor, this);
        }

        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx, MessageOrderCursor m) throws IOException {
            return new MessageOrderIterator(tx, m, this);
        }

        public byte lastGetPriority() {
            return lastGetPriority;
        }

        public boolean alreadyDispatched(Long sequence) {
            return (cursor.highPriorityCursorPosition > 0 && cursor.highPriorityCursorPosition >= sequence) ||
                    (cursor.defaultCursorPosition > 0 && cursor.defaultCursorPosition >= sequence) ||
                    (cursor.lowPriorityCursorPosition > 0 && cursor.lowPriorityCursorPosition >= sequence);
        }

        public void trackPendingAdd(Long seq) {
            synchronized (pendingAdditions) {
                pendingAdditions.add(seq);
            }
        }

        public void trackPendingAddComplete(Long seq) {
            synchronized (pendingAdditions) {
                pendingAdditions.remove(seq);
            }
        }

        // Oldest in-flight add, or null if none; used to cap iteration.
        public Long minPendingAdd() {
            synchronized (pendingAdditions) {
                if (!pendingAdditions.isEmpty()) {
                    return pendingAdditions.get(0);
                } else {
                    return null;
                }
            }
        }

        /**
         * Iterates entries in priority order: high, then default, then low.
         * Records the last key returned per band so stoppedIterating() can
         * advance the cursor. remove() is unsupported.
         */
        class MessageOrderIterator implements Iterator<Entry<Long, MessageKeys>> {
            Iterator<Entry<Long, MessageKeys>> currentIterator;
            final Iterator<Entry<Long, MessageKeys>> highIterator;
            final Iterator<Entry<Long, MessageKeys>> defaultIterator;
            final Iterator<Entry<Long, MessageKeys>> lowIterator;

            MessageOrderIterator(Transaction tx, MessageOrderCursor m, MessageOrderIndex messageOrderIndex) throws IOException {
                // Entries at or beyond the oldest pending add are excluded.
                Long pendingAddLimiter = messageOrderIndex.minPendingAdd();
                this.defaultIterator = defaultPriorityIndex.iterator(tx, m.defaultCursorPosition, pendingAddLimiter);
                if (highPriorityIndex != null) {
                    this.highIterator = highPriorityIndex.iterator(tx, m.highPriorityCursorPosition, pendingAddLimiter);
                } else {
                    this.highIterator = null;
                }
                if (lowPriorityIndex != null) {
                    this.lowIterator = lowPriorityIndex.iterator(tx, m.lowPriorityCursorPosition, pendingAddLimiter);
                } else {
                    this.lowIterator = null;
                }
            }

            @Override
            public boolean hasNext() {
                if (currentIterator == null) {
                    if (highIterator != null) {
                        if (highIterator.hasNext()) {
                            currentIterator = highIterator;
                            return currentIterator.hasNext();
                        }
                        if (defaultIterator.hasNext()) {
                            currentIterator = defaultIterator;
                            return currentIterator.hasNext();
                        }
                        if (lowIterator.hasNext()) {
                            currentIterator = lowIterator;
                            return currentIterator.hasNext();
                        }
                        return false;
                    } else {
                        // No priority indexes: only the default band exists.
                        currentIterator = defaultIterator;
                        return currentIterator.hasNext();
                    }
                }
                if (highIterator != null) {
                    if (currentIterator.hasNext()) {
                        return true;
                    }
                    // Fall through to the next lower band when the current
                    // one is exhausted.
                    if (currentIterator == highIterator) {
                        if (defaultIterator.hasNext()) {
                            currentIterator = defaultIterator;
                            return currentIterator.hasNext();
                        }
                        if (lowIterator.hasNext()) {
                            currentIterator = lowIterator;
                            return currentIterator.hasNext();
                        }
                        return false;
                    }

                    if (currentIterator == defaultIterator) {
                        if (lowIterator.hasNext()) {
                            currentIterator = lowIterator;
                            return currentIterator.hasNext();
                        }
                        return false;
                    }
                }
                return currentIterator.hasNext();
            }

            @Override
            public Entry<Long, MessageKeys> next() {
                Entry<Long, MessageKeys> result = currentIterator.next();
                if (result != null) {
                    Long key = result.getKey();
                    if (highIterator != null) {
                        if (currentIterator == defaultIterator) {
                            lastDefaultKey = key;
                        } else if (currentIterator == highIterator) {
                            lastHighKey = key;
                        } else {
                            lastLowKey = key;
                        }
                    } else {
                        lastDefaultKey = key;
                    }
                }
                return result;
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }

        }
    }

    /**
     * Marshals a HashSet of strings via Java serialization, length-prefixed
     * so the payload can be skipped or read back exactly.
     */
    private static class HashSetStringMarshaller extends VariableMarshaller<HashSet<String>> {
        final static HashSetStringMarshaller INSTANCE = new HashSetStringMarshaller();

        @Override
        public void writePayload(HashSet<String> object, DataOutput dataOut) throws IOException {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            ObjectOutputStream oout = new ObjectOutputStream(baos);
            oout.writeObject(object);
            oout.flush();
            oout.close();
            byte[] data = baos.toByteArray();
            dataOut.writeInt(data.length);
            dataOut.write(data);
        }

        @Override
        @SuppressWarnings("unchecked")
        public HashSet<String> readPayload(DataInput dataIn) throws IOException {
            int dataLen = dataIn.readInt();
            byte[] data = new byte[dataLen];
            dataIn.readFully(data);
            ByteArrayInputStream bais = new ByteArrayInputStream(data);
            ObjectInputStream oin = new ObjectInputStream(bais);
            try {
                return (HashSet<String>) oin.readObject();
            } catch (ClassNotFoundException cfe) {
                // Chain the cause via the IOException(String, Throwable)
                // constructor instead of a separate initCause() call.
                throw new IOException("Failed to read HashSet<String>: " + cfe, cfe);
            }
        }
    }

    public File getIndexDirectory() {
        return indexDirectory;
    }

    public void setIndexDirectory(File indexDirectory) {
        this.indexDirectory = indexDirectory;
    }

    /**
     * Callback invoked while the index lock is held, once a journal sequence
     * has been assigned.
     */
    interface IndexAware {
        public void sequenceAssignedWithIndexLocked(long index);
    }

    public String getPreallocationScope() {
        return preallocationScope;
    }

    public void setPreallocationScope(String preallocationScope) {
        this.preallocationScope = preallocationScope;
    }

    public String getPreallocationStrategy() {
        return preallocationStrategy;
    }

    public void setPreallocationStrategy(String preallocationStrategy) {
        this.preallocationStrategy = preallocationStrategy;
    }
}