/*
 * Copyright 2000-2016 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.openapi.vfs.newvfs.persistent;

import com.intellij.openapi.Forceable;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ApplicationNamesInfo;
import com.intellij.openapi.application.PathManager;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.io.BufferExposingByteArrayOutputStream;
import com.intellij.openapi.util.io.ByteSequence;
import com.intellij.openapi.util.io.FileAttributes;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.vfs.newvfs.FileAttribute;
import com.intellij.openapi.vfs.newvfs.impl.FileNameCache;
import com.intellij.util.ArrayUtil;
import com.intellij.util.BitUtil;
import com.intellij.util.CompressionUtil;
import com.intellij.util.SystemProperties;
import com.intellij.util.concurrency.AppExecutorUtil;
import com.intellij.util.containers.ConcurrentIntObjectMap;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.containers.IntArrayList;
import com.intellij.util.io.*;
import com.intellij.util.io.DataOutputStream;
import com.intellij.util.io.storage.*;
import gnu.trove.TIntArrayList;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.awt.*;
import java.io.*;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.locks.ReentrantReadWriteLock;

@SuppressWarnings({"PointlessArithmeticExpression", "HardCodedStringLiteral"})
public class FSRecords implements Forceable {
  private static final Logger LOG = Logger.getInstance("#com.intellij.vfs.persistent.FSRecords");
  public static final boolean weHaveContentHashes = SystemProperties.getBooleanProperty("idea.share.contents", true);
  private static final boolean lazyVfsDataCleaning = SystemProperties.getBooleanProperty("idea.lazy.vfs.data.cleaning", true);
  static final boolean backgroundVfsFlush = SystemProperties.getBooleanProperty("idea.background.vfs.flush", true);
  public static final boolean persistentAttributesList = SystemProperties.getBooleanProperty("idea.persistent.attr.list", true);
  private static final boolean inlineAttributes = SystemProperties.getBooleanProperty("idea.inline.vfs.attributes", true);
  static final boolean bulkAttrReadSupport = SystemProperties.getBooleanProperty("idea.bulk.attr.read", false);
  static final boolean useSnappyForCompression = SystemProperties.getBooleanProperty("idea.use.snappy.for.vfs", false);
  private static final boolean useSmallAttrTable = SystemProperties.getBooleanProperty("idea.use.small.attr.table.for.vfs", true);
  static final String VFS_FILES_EXTENSION = System.getProperty("idea.vfs.files.extension", ".dat");
  private static final int VERSION = 21 + (weHaveContentHashes ? 0x10 : 0) + (IOUtil.ourByteBuffersUseNativeByteOrder ? 0x37 : 0) +
                                     (persistentAttributesList ? 31 : 0) + (bulkAttrReadSupport ? 0x27 : 0) + (inlineAttributes ? 0x31 : 0) +
                                     (useSnappyForCompression ? 0x7f : 0) + (useSmallAttrTable ? 0x31 : 0) +
                                     (PersistentHashMapValueStorage.COMPRESSION_ENABLED ? 21 : 0);
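  // Each storage-affecting feature flag above is folded into VERSION, so flipping any of them
  // (almost always) changes the persisted version and forces a full VFS rebuild on the next
  // startup via the getVersion() != VERSION check in DbConnection.init().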
  private static final int PARENT_OFFSET = 0;
  private static final int PARENT_SIZE = 4;
  private static final int NAME_OFFSET = PARENT_OFFSET + PARENT_SIZE;
  private static final int NAME_SIZE = 4;
  private static final int FLAGS_OFFSET = NAME_OFFSET + NAME_SIZE;
  private static final int FLAGS_SIZE = 4;
  private static final int ATTR_REF_OFFSET = FLAGS_OFFSET + FLAGS_SIZE;
  private static final int ATTR_REF_SIZE = 4;
  private static final int CONTENT_OFFSET = ATTR_REF_OFFSET + ATTR_REF_SIZE;
  private static final int CONTENT_SIZE = 4;
  private static final int TIMESTAMP_OFFSET = CONTENT_OFFSET + CONTENT_SIZE;
  private static final int TIMESTAMP_SIZE = 8;
  private static final int MOD_COUNT_OFFSET = TIMESTAMP_OFFSET + TIMESTAMP_SIZE;
  private static final int MOD_COUNT_SIZE = 4;
  private static final int LENGTH_OFFSET = MOD_COUNT_OFFSET + MOD_COUNT_SIZE;
  private static final int LENGTH_SIZE = 8;

  private static final int RECORD_SIZE = LENGTH_OFFSET + LENGTH_SIZE;
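  // Resulting fixed-size record layout (RECORD_SIZE = 40 bytes):
  //   [ 0.. 3] parent id     [ 4.. 7] name id     [ 8..11] flags      [12..15] attribute ref
  //   [16..19] content ref   [20..27] timestamp   [28..31] mod count  [32..39] length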
  private static final byte[] ZEROES = new byte[RECORD_SIZE];

  private static final int HEADER_VERSION_OFFSET = 0;
  //private static final int HEADER_RESERVED_4BYTES_OFFSET = 4; // reserved
  private static final int HEADER_GLOBAL_MOD_COUNT_OFFSET = 8;
  private static final int HEADER_CONNECTION_STATUS_OFFSET = 12;
  private static final int HEADER_TIMESTAMP_OFFSET = 16;
  private static final int HEADER_SIZE = HEADER_TIMESTAMP_OFFSET + 8;

  private static final int CONNECTED_MAGIC = 0x12ad34e4;
  private static final int SAFELY_CLOSED_MAGIC = 0x1f2f3f4f;
  private static final int CORRUPTED_MAGIC = 0xabcf7f7f;
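  // Connection-status lifecycle: CONNECTED_MAGIC is written while the storage is open and dirty
  // (markDirty), SAFELY_CLOSED_MAGIC on a clean shutdown (setCurrentVersion/markClean), and
  // CORRUPTED_MAGIC once an error has been handled; anything but SAFELY_CLOSED_MAGIC found at
  // startup makes DbConnection.init() throw and rebuild the storage.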
  private static final FileAttribute ourChildrenAttr = new FileAttribute("FsRecords.DIRECTORY_CHILDREN");

  private static final ReentrantReadWriteLock.ReadLock r;
  private static final ReentrantReadWriteLock.WriteLock w;

  private static volatile int ourLocalModificationCount;
  private static volatile boolean ourIsDisposed;

  private static final int FREE_RECORD_FLAG = 0x100;
  private static final int ALL_VALID_FLAGS = PersistentFS.ALL_VALID_FLAGS | FREE_RECORD_FLAG;
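  // FREE_RECORD_FLAG (bit 8) presumably lives outside PersistentFS's own flag bits, so OR-ing it
  // into ALL_VALID_FLAGS keeps freed records distinguishable from any live flag combination.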
  static {
    //noinspection ConstantConditions
    assert HEADER_SIZE <= RECORD_SIZE;

    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    r = lock.readLock();
    w = lock.writeLock();
  }
  static void writeAttributesToRecord(int id, int parentId, @NotNull FileAttributes attributes, @NotNull String name) {
      setTimestamp(id, attributes.lastModified);
      setLength(id, attributes.isDirectory() ? -1L : attributes.length);

      setFlags(id, (attributes.isDirectory() ? PersistentFS.IS_DIRECTORY_FLAG : 0) |
                   (attributes.isWritable() ? 0 : PersistentFS.IS_READ_ONLY) |
                   (attributes.isSymLink() ? PersistentFS.IS_SYMLINK : 0) |
                   (attributes.isSpecial() ? PersistentFS.IS_SPECIAL : 0) |
                   (attributes.isHidden() ? PersistentFS.IS_HIDDEN : 0), true);
      setParent(id, parentId);
    catch (Throwable e) {
      DbConnection.handleError(e);
  static void requestVfsRebuild(Throwable e) {
    //noinspection ThrowableResultOfMethodCallIgnored
    DbConnection.handleError(e);

  static File basePath() {
    return new File(DbConnection.getCachesDir());

  public static class DbConnection {
    private static boolean ourInitialized;
    private static final ConcurrentMap<String, Integer> myAttributeIds = ContainerUtil.newConcurrentMap();

    private static PersistentStringEnumerator myNames;
    private static Storage myAttributes;
    private static RefCountingStorage myContents;
    private static ResizeableMappedFile myRecords;
    private static PersistentBTreeEnumerator<byte[]> myContentHashesEnumerator;
    private static final VfsDependentEnum<String> myAttributesList =
      new VfsDependentEnum<>("attrib", EnumeratorStringDescriptor.INSTANCE, 1);
    private static final TIntArrayList myFreeRecords = new TIntArrayList();

    private static boolean myDirty;
    private static ScheduledFuture<?> myFlushingFuture;
    private static boolean myCorrupted;

    private static final AttrPageAwareCapacityAllocationPolicy REASONABLY_SMALL = new AttrPageAwareCapacityAllocationPolicy();
    public static void connect() {
      if (!ourInitialized) {
        ourInitialized = true;
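    // Records 0 (the header) and 1 (the root) are reserved, which is why the scan for
    // reusable records below starts at id 2.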
    private static void scanFreeRecords() {
      final int fileLength = (int)myRecords.length();
      LOG.assertTrue(fileLength % RECORD_SIZE == 0, "invalid file size: " + fileLength);

      int count = fileLength / RECORD_SIZE;
      for (int n = 2; n < count; n++) {
        if (BitUtil.isSet(getFlags(n), FREE_RECORD_FLAG)) {
          myFreeRecords.add(n);
    static int getFreeRecord() {
      if (myFreeRecords.isEmpty()) return 0;
      return myFreeRecords.remove(myFreeRecords.size() - 1);

    private static void createBrokenMarkerFile(@Nullable Throwable reason) {
      final File brokenMarker = getCorruptionMarkerFile();

      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      try (PrintStream stream = new PrintStream(out)) {
        new Exception().printStackTrace(stream);
        if (reason != null) {
          stream.print("\nReason:\n");
          reason.printStackTrace(stream);

      LOG.info("Creating VFS corruption marker; Trace=\n" + out);

      try (FileWriter writer = new FileWriter(brokenMarker)) {
        writer.write("These files are corrupted and must be rebuilt from scratch on next startup");
      catch (IOException e) {

    private static File getCorruptionMarkerFile() {
      return new File(basePath(), "corruption.marker");
    private static void init() {
      final File basePath = basePath().getAbsoluteFile();

      final File namesFile = new File(basePath, "names" + VFS_FILES_EXTENSION);
      final File attributesFile = new File(basePath, "attrib" + VFS_FILES_EXTENSION);
      final File contentsFile = new File(basePath, "content" + VFS_FILES_EXTENSION);
      final File contentsHashesFile = new File(basePath, "contentHashes" + VFS_FILES_EXTENSION);
      final File recordsFile = new File(basePath, "records" + VFS_FILES_EXTENSION);

      final File vfsDependentEnumBaseFile = VfsDependentEnum.getBaseFile();
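      // Five cooperating storages make up the persistent VFS:
      //   names         - enumerator mapping file names to int ids
      //   attrib        - per-file attribute pages
      //   content       - ref-counted file contents
      //   contentHashes - content digest -> content record id (only when weHaveContentHashes)
      //   records       - the fixed-size table with one RECORD_SIZE slot per file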
      if (!namesFile.exists()) {
        invalidateIndex("'" + namesFile.getPath() + "' does not exist");

      if (getCorruptionMarkerFile().exists()) {
        invalidateIndex("corruption marker found");
        throw new IOException("Corruption marker file found");

      PagedFileStorage.StorageLockContext storageLockContext = new PagedFileStorage.StorageLockContext(false);
      myNames = new PersistentStringEnumerator(namesFile, storageLockContext);

      myAttributes = new Storage(attributesFile.getPath(), REASONABLY_SMALL) {
        protected AbstractRecordsTable createRecordsTable(PagePool pool, File recordsFile) throws IOException {
          return inlineAttributes && useSmallAttrTable
                 ? new CompactRecordsTable(recordsFile, pool, false)
                 : super.createRecordsTable(pool, recordsFile);

      myContents = new RefCountingStorage(contentsFile.getPath(), CapacityAllocationPolicy.FIVE_PERCENT_FOR_GROWTH, useSnappyForCompression) {
        protected ExecutorService createExecutor() {
          return AppExecutorUtil.createBoundedApplicationPoolExecutor("FSRecords pool", 1);
      }; // sources usually zipped with 4x ratio
      myContentHashesEnumerator = weHaveContentHashes ? new ContentHashesUtil.HashEnumerator(contentsHashesFile, storageLockContext) : null;
      boolean aligned = PagedFileStorage.BUFFER_SIZE % RECORD_SIZE == 0;
      assert aligned; // for performance
      myRecords = new ResizeableMappedFile(recordsFile, 20 * 1024, storageLockContext,
                                           PagedFileStorage.BUFFER_SIZE, aligned, IOUtil.ourByteBuffersUseNativeByteOrder);

      if (myRecords.length() == 0) {
        cleanRecord(0); // Clean header
        cleanRecord(1); // Create root record

      if (getVersion() != VERSION) {
        throw new IOException("FS repository version mismatch");

      if (myRecords.getInt(HEADER_CONNECTION_STATUS_OFFSET) != SAFELY_CLOSED_MAGIC) {
        throw new IOException("FS repository wasn't safely shut down");

      catch (Exception e) { // IOException, IllegalArgumentException
        LOG.info("Filesystem storage is corrupted or does not exist. [Re]Building. Reason: " + e.getMessage());

          boolean deleted = FileUtil.delete(getCorruptionMarkerFile());
          deleted &= IOUtil.deleteAllFilesStartingWith(namesFile);
          deleted &= AbstractStorage.deleteFiles(attributesFile.getPath());
          deleted &= AbstractStorage.deleteFiles(contentsFile.getPath());
          deleted &= IOUtil.deleteAllFilesStartingWith(contentsHashesFile);
          deleted &= IOUtil.deleteAllFilesStartingWith(recordsFile);
          deleted &= IOUtil.deleteAllFilesStartingWith(vfsDependentEnumBaseFile);

            throw new IOException("Cannot delete filesystem storage files");

        catch (final IOException e1) {
          final Runnable warnAndShutdown = () -> {
            if (ApplicationManager.getApplication().isUnitTestMode()) {
              //noinspection CallToPrintStackTrace
              e1.printStackTrace();

              final String message = "Files in " + basePath.getPath() + " are locked.\n" +
                                     ApplicationNamesInfo.getInstance().getProductName() + " will not be able to start up.";
              if (!ApplicationManager.getApplication().isHeadlessEnvironment()) {
                JOptionPane.showMessageDialog(JOptionPane.getRootFrame(), message, "Fatal Error", JOptionPane.ERROR_MESSAGE);

                //noinspection UseOfSystemOutOrSystemErr
                System.err.println(message);

            Runtime.getRuntime().halt(1);

          if (EventQueue.isDispatchThread()) {
            warnAndShutdown.run();

            //noinspection SSBasedInspection
            SwingUtilities.invokeLater(warnAndShutdown);

          throw new RuntimeException("Can't rebuild filesystem storage", e1);
    private static void invalidateIndex(String reason) {
      LOG.info("Marking VFS as corrupted: " + reason);
      final File indexRoot = PathManager.getIndexRoot();
      if (indexRoot.exists()) {
        final String[] children = indexRoot.list();
        if (children != null && children.length > 0) {
          // create index corruption marker only if index directory exists and is non-empty
          // It is incorrect to consider non-existing indices "corrupted"
          FileUtil.createIfDoesntExist(new File(PathManager.getIndexRoot(), "corruption.marker"));

    private static String getCachesDir() {
      String dir = System.getProperty("caches_dir");
      return dir == null ? PathManager.getSystemPath() + "/caches/" : dir;
    private static void markDirty() {
        myRecords.putInt(HEADER_CONNECTION_STATUS_OFFSET, CONNECTED_MAGIC);

    private static void setupFlushing() {
      if (!backgroundVfsFlush)
        return;

      myFlushingFuture = FlushingDaemon.everyFiveSeconds(new Runnable() {
        private int lastModCount;

          if (lastModCount == ourLocalModificationCount) {
          lastModCount = ourLocalModificationCount;

    public static void force() {

    private static void doForce() {
      if (myNames != null) {
        myAttributes.force();

      if (myContentHashesEnumerator != null) myContentHashesEnumerator.force();

    private static void flush() {
      if (!isDirty() || HeavyProcessLatch.INSTANCE.isRunning()) return;

        if (myFlushingFuture == null) {
          return; // avoid NPE when close has already taken place

    public static boolean isDirty() {
      return myDirty || myNames.isDirty() || myAttributes.isDirty() || myContents.isDirty() || myRecords.isDirty() ||
             myContentHashesEnumerator != null && myContentHashesEnumerator.isDirty();
    private static int getVersion() {
      final int recordsVersion = myRecords.getInt(HEADER_VERSION_OFFSET);
      if (myAttributes.getVersion() != recordsVersion || myContents.getVersion() != recordsVersion) return -1;

      return recordsVersion;

    public static long getTimestamp() {
      return myRecords.getLong(HEADER_TIMESTAMP_OFFSET);

    private static void setCurrentVersion() {
      myRecords.putInt(HEADER_VERSION_OFFSET, VERSION);
      myRecords.putLong(HEADER_TIMESTAMP_OFFSET, System.currentTimeMillis());
      myAttributes.setVersion(VERSION);
      myContents.setVersion(VERSION);
      myRecords.putInt(HEADER_CONNECTION_STATUS_OFFSET, SAFELY_CLOSED_MAGIC);

    static void cleanRecord(int id) {
      myRecords.put(id * RECORD_SIZE, ZEROES, 0, RECORD_SIZE);

    public static PersistentStringEnumerator getNames() {
      return myNames;
    private static void closeFiles() throws IOException {
      if (myFlushingFuture != null) {
        myFlushingFuture.cancel(false);
        myFlushingFuture = null;

      if (myNames != null) {

      if (myAttributes != null) {
        Disposer.dispose(myAttributes);

      if (myContents != null) {
        Disposer.dispose(myContents);

      if (myContentHashesEnumerator != null) {
        myContentHashesEnumerator.close();
        myContentHashesEnumerator = null;

      if (myRecords != null) {

      ourInitialized = false;

    private static void markClean() {
        myRecords.putInt(HEADER_CONNECTION_STATUS_OFFSET, myCorrupted ? CORRUPTED_MAGIC : SAFELY_CLOSED_MAGIC);

    private static final int RESERVED_ATTR_ID = bulkAttrReadSupport ? 1 : 0;
    private static final int FIRST_ATTR_ID_OFFSET = bulkAttrReadSupport ? RESERVED_ATTR_ID : 0;
    private static int getAttributeId(@NotNull String attId) throws IOException {
      if (persistentAttributesList) {
        return myAttributesList.getId(attId) + FIRST_ATTR_ID_OFFSET;
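      // The fallback below memoizes name -> id race-free: putIfAbsent either publishes our
      // freshly enumerated id or returns the id a concurrent thread published first.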
      Integer integer = myAttributeIds.get(attId);
      if (integer != null) return integer.intValue();
      int enumeratedId = myNames.enumerate(attId);
      integer = myAttributeIds.putIfAbsent(attId, enumeratedId);
      return integer == null ? enumeratedId : integer.intValue();
    private static void handleError(@NotNull Throwable e) throws RuntimeException, Error {
      if (!ourIsDisposed) {
        // No need to forcibly mark VFS corrupted if it is already shut down
        if (!myCorrupted && w.tryLock()) { // avoid deadlock if r lock is occupied by current thread
          createBrokenMarkerFile(e);

      if (e instanceof Error) throw (Error)e;
      if (e instanceof RuntimeException) throw (RuntimeException)e;
      throw new RuntimeException(e);
  private static class AttrPageAwareCapacityAllocationPolicy extends CapacityAllocationPolicy {
    boolean myAttrPageRequested;

    public int calculateCapacity(int requiredLength) { // 20% for growth
      return Math.max(myAttrPageRequested ? 8 : 32, Math.min((int)(requiredLength * 1.2), (requiredLength / 1024 + 1) * 1024));
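    // E.g. requiredLength = 100 -> max(32, min(120, 1024)) = 120 bytes, while
    // requiredLength = 5000 -> max(32, min(6000, 5120)) = 5120, i.e. whole KBs with ~20% headroom.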
  public static void connect() {
    DbConnection.connect();

  public static long getCreationTimestamp() {
      return DbConnection.getTimestamp();

  private static ResizeableMappedFile getRecords() {
    return DbConnection.myRecords;

  private static PersistentBTreeEnumerator<byte[]> getContentHashesEnumerator() {
    return DbConnection.myContentHashesEnumerator;

  private static RefCountingStorage getContentStorage() {
    return DbConnection.myContents;

  private static Storage getAttributesStorage() {
    return DbConnection.myAttributes;

  public static PersistentStringEnumerator getNames() {
    return DbConnection.getNames();

  // todo: Address / capacity store in records table, size store with payload
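  // New records are served from the free list when possible; otherwise the records file is
  // grown by exactly one RECORD_SIZE slot (asserted below).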
  public static int createRecord() {
      DbConnection.markDirty();

      final int free = DbConnection.getFreeRecord();
        final int fileLength = length();
        LOG.assertTrue(fileLength % RECORD_SIZE == 0);
        int newRecord = fileLength / RECORD_SIZE;
        DbConnection.cleanRecord(newRecord);
        assert fileLength + RECORD_SIZE == length();

        if (lazyVfsDataCleaning) deleteContentAndAttributes(free);
        DbConnection.cleanRecord(free);

    catch (Throwable e) {
      DbConnection.handleError(e);

  private static int length() {
    return (int)getRecords().length();

  public static int getMaxId() {
      return length() / RECORD_SIZE;
  static void deleteRecordRecursively(int id) {
      if (lazyVfsDataCleaning) {
        markAsDeletedRecursively(id);
      doDeleteRecursively(id);
    catch (Throwable e) {
      DbConnection.handleError(e);

  private static void markAsDeletedRecursively(final int id) {
    for (int subrecord : list(id)) {
      markAsDeletedRecursively(subrecord);

  private static void markAsDeleted(final int id) {
      DbConnection.markDirty();
      addToFreeRecordsList(id);
    catch (Throwable e) {
      DbConnection.handleError(e);

  private static void doDeleteRecursively(final int id) {
    for (int subrecord : list(id)) {
      doDeleteRecursively(subrecord);

  private static void deleteRecord(final int id) {
      DbConnection.markDirty();
      deleteContentAndAttributes(id);

      DbConnection.cleanRecord(id);
      addToFreeRecordsList(id);
    catch (Throwable e) {
      DbConnection.handleError(e);
  private static void deleteContentAndAttributes(int id) throws IOException {
    int contentPage = getContentRecordId(id);
    if (contentPage != 0) {
      if (weHaveContentHashes) {
        getContentStorage().releaseRecord(contentPage, false);
        getContentStorage().releaseRecord(contentPage);

    int attPage = getAttributeRecordId(id);
      final DataInputStream attStream = getAttributesStorage().readStream(attPage);
      if (bulkAttrReadSupport) skipRecordHeader(attStream, DbConnection.RESERVED_ATTR_ID, id);

      while (attStream.available() > 0) {
        DataInputOutputUtil.readINT(attStream); // attribute ID
        int attAddressOrSize = DataInputOutputUtil.readINT(attStream);

        if (inlineAttributes) {
          if (attAddressOrSize < MAX_SMALL_ATTR_SIZE) {
            attStream.skipBytes(attAddressOrSize);
          attAddressOrSize -= MAX_SMALL_ATTR_SIZE;
        getAttributesStorage().deleteRecord(attAddressOrSize);
      getAttributesStorage().deleteRecord(attPage);
  private static void addToFreeRecordsList(int id) {
    // DbConnection.addFreeRecord(id); // important! Do not add fileId to free list until restart
    setFlags(id, FREE_RECORD_FLAG, false);
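  // Root children are persisted as (nameId, fileId) pairs and ordinary children as bare file
  // ids, both delta-compressed; the readers below reconstruct absolute ids by accumulating
  // the deltas (see saveNameIdSequenceWithDeltas for the writer side).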
  static int[] listRoots() {
      final DataInputStream input = readAttribute(1, ourChildrenAttr);
      if (input == null) return ArrayUtil.EMPTY_INT_ARRAY;

        final int count = DataInputOutputUtil.readINT(input);
        int[] result = ArrayUtil.newIntArray(count);

        for (int i = 0; i < count; i++) {
          DataInputOutputUtil.readINT(input); // Name
          prevId = result[i] = DataInputOutputUtil.readINT(input) + prevId; // Id
    catch (Throwable e) {
      DbConnection.handleError(e);

  public void force() {
    DbConnection.force();

  public boolean isDirty() {
    return DbConnection.isDirty();
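  // Writer side of the delta compression: e.g. names = {5, 9, 9}, ids = {12, 40, 41} is stored as
  // 3, (5, 12), (4, 28), (0, 1); small deltas keep the variable-length INT encoding compact.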
  private static void saveNameIdSequenceWithDeltas(int[] names, int[] ids, DataOutputStream output) throws IOException {
    DataInputOutputUtil.writeINT(output, names.length);

    for (int i = 0; i < names.length; i++) {
      DataInputOutputUtil.writeINT(output, names[i] - prevNameId);
      DataInputOutputUtil.writeINT(output, ids[i] - prevId);

      prevNameId = names[i];
  static int findRootRecord(@NotNull String rootUrl) {
      DbConnection.markDirty();
      final int root = getNames().enumerate(rootUrl);

      final DataInputStream input = readAttribute(1, ourChildrenAttr);
      int[] names = ArrayUtil.EMPTY_INT_ARRAY;
      int[] ids = ArrayUtil.EMPTY_INT_ARRAY;

          final int count = DataInputOutputUtil.readINT(input);
          names = ArrayUtil.newIntArray(count);
          ids = ArrayUtil.newIntArray(count);

          for (int i = 0; i < count; i++) {
            final int name = DataInputOutputUtil.readINT(input) + prevNameId;
            final int id = DataInputOutputUtil.readINT(input) + prevId;

            prevNameId = names[i] = name;
            prevId = ids[i] = id;

      try (DataOutputStream output = writeAttribute(1, ourChildrenAttr)) {
        int index = Arrays.binarySearch(ids, id);
        ids = ArrayUtil.insert(ids, -index - 1, id);
        names = ArrayUtil.insert(names, -index - 1, root);

        saveNameIdSequenceWithDeltas(names, ids, output);

    catch (Throwable e) {
      DbConnection.handleError(e);
  static void deleteRootRecord(int id) {
      DbConnection.markDirty();
      final DataInputStream input = readAttribute(1, ourChildrenAttr);
      assert input != null;

        int count = DataInputOutputUtil.readINT(input);

        names = ArrayUtil.newIntArray(count);
        ids = ArrayUtil.newIntArray(count);

        for (int i = 0; i < count; i++) {
          names[i] = DataInputOutputUtil.readINT(input) + prevNameId;
          ids[i] = DataInputOutputUtil.readINT(input) + prevId;

          prevNameId = names[i];

      final int index = ArrayUtil.find(ids, id);

      names = ArrayUtil.remove(names, index);
      ids = ArrayUtil.remove(ids, index);

      try (DataOutputStream output = writeAttribute(1, ourChildrenAttr)) {
        saveNameIdSequenceWithDeltas(names, ids, output);

    catch (Throwable e) {
      DbConnection.handleError(e);
  public static int[] list(int id) {
      final DataInputStream input = readAttribute(id, ourChildrenAttr);
      if (input == null) return ArrayUtil.EMPTY_INT_ARRAY;

        final int count = DataInputOutputUtil.readINT(input);
        final int[] result = ArrayUtil.newIntArray(count);

        for (int i = 0; i < count; i++) {
          prevId = result[i] = DataInputOutputUtil.readINT(input) + prevId;
    catch (Throwable e) {
      DbConnection.handleError(e);
      return ArrayUtil.EMPTY_INT_ARRAY;
  public static class NameId {
    public static final NameId[] EMPTY_ARRAY = new NameId[0];
    public final int id;
    public final CharSequence name;
    public final int nameId;

    public NameId(int id, int nameId, @NotNull CharSequence name) {
      this.id = id;
      this.nameId = nameId;
      this.name = name;
    }

    public String toString() {
      return name + " (" + id + ")";
  public static NameId[] listAll(int parentId) {
      final DataInputStream input = readAttribute(parentId, ourChildrenAttr);
      if (input == null) return NameId.EMPTY_ARRAY;

        int count = DataInputOutputUtil.readINT(input);
        NameId[] result = count == 0 ? NameId.EMPTY_ARRAY : new NameId[count];
        int prevId = parentId;
        for (int i = 0; i < count; i++) {
          int id = DataInputOutputUtil.readINT(input) + prevId;

          int nameId = getNameId(id);
          result[i] = new NameId(id, nameId, FileNameCache.getVFileName(nameId));
    catch (Throwable e) {
      DbConnection.handleError(e);
      return NameId.EMPTY_ARRAY;

  static boolean wereChildrenAccessed(int id) {
      return findAttributePage(id, ourChildrenAttr, false) != 0;
    catch (Throwable e) {
      DbConnection.handleError(e);
  public static void updateList(int id, @NotNull int[] childIds) {
    Arrays.sort(childIds);
      DbConnection.markDirty();
      try (DataOutputStream record = writeAttribute(id, ourChildrenAttr)) {
        DataInputOutputUtil.writeINT(record, childIds.length);

        for (int childId : childIds) {
          assert childId > 0 : childId;
            LOG.error("Cyclic parent child relations");

            int delta = childId - prevId;
            DataInputOutputUtil.writeINT(record, delta);
    catch (Throwable e) {
      DbConnection.handleError(e);
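  // Every change bumps both the global and the per-record mod counts, and the new count is
  // propagated up the whole parent chain; the depth counter below only guards against cycles.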
  private static void incModCount(int id) {
    DbConnection.markDirty();
    ourLocalModificationCount++;
    final int count = getModCount() + 1;
    getRecords().putInt(HEADER_GLOBAL_MOD_COUNT_OFFSET, count);

    while (parent != 0) {
      setModCount(parent, count);
      parent = getParent(parent);
      if (depth-- == 0) {
        LOG.error("Cyclic parent child relation? file: " + getName(id));

  static int getLocalModCount() {
    return ourLocalModificationCount; // This is volatile, only modified under Application.runWriteAction() lock.

  public static int getModCount() {
      return getRecords().getInt(HEADER_GLOBAL_MOD_COUNT_OFFSET);
  public static int getParent(int id) {
      final int parentId = getRecordInt(id, PARENT_OFFSET);
      if (parentId == id) {
        LOG.error("Cyclic parent child relations in the database. id = " + id);

    catch (Throwable e) {
      DbConnection.handleError(e);

  // returns id, parent(id), parent(parent(id)), ... (already cached id or rootId)
  public static TIntArrayList getParents(int id, @NotNull ConcurrentIntObjectMap<?> idCache) {
    TIntArrayList result = new TIntArrayList(10);
        if (idCache.containsKey(id)) {

        parentId = getRecordInt(id, PARENT_OFFSET);
        if (parentId == id || result.size() % 128 == 0 && result.contains(parentId)) {
          LOG.error("Cyclic parent child relations in the database. id = " + parentId);

      } while (parentId != 0);
    catch (Throwable e) {
      DbConnection.handleError(e);

  public static void setParent(int id, int parentId) {
    if (id == parentId) {
      LOG.error("Cyclic parent/child relations");

      putRecordInt(id, PARENT_OFFSET, parentId);
    catch (Throwable e) {
      DbConnection.handleError(e);
  public static int getNameId(int id) {
      return getRecordInt(id, NAME_OFFSET);
    catch (Throwable e) {
      DbConnection.handleError(e);

  public static int getNameId(String name) {
      return getNames().enumerate(name);
    catch (Throwable e) {
      DbConnection.handleError(e);

  public static String getName(int id) {
    return getNameSequence(id).toString();

  public static CharSequence getNameSequence(int id) {
      final int nameId = getRecordInt(id, NAME_OFFSET);
      return nameId == 0 ? "" : FileNameCache.getVFileName(nameId);
    catch (Throwable e) {
      DbConnection.handleError(e);

  public static String getNameByNameId(int nameId) {
      return nameId != 0 ? getNames().valueOf(nameId) : "";
    catch (Throwable e) {
      DbConnection.handleError(e);

  public static void setName(int id, @NotNull String name) {
      int nameId = getNames().enumerate(name);
      putRecordInt(id, NAME_OFFSET, nameId);
    catch (Throwable e) {
      DbConnection.handleError(e);
  public static int getFlags(int id) {
      return getRecordInt(id, FLAGS_OFFSET);

  public static void setFlags(int id, int flags, final boolean markAsChange) {
      putRecordInt(id, FLAGS_OFFSET, flags);
    catch (Throwable e) {
      DbConnection.handleError(e);

  public static long getLength(int id) {
      return getRecords().getLong(getOffset(id, LENGTH_OFFSET));

  public static void setLength(int id, long len) {
      getRecords().putLong(getOffset(id, LENGTH_OFFSET), len);
    catch (Throwable e) {
      DbConnection.handleError(e);

  public static long getTimestamp(int id) {
      return getRecords().getLong(getOffset(id, TIMESTAMP_OFFSET));

  public static void setTimestamp(int id, long value) {
      getRecords().putLong(getOffset(id, TIMESTAMP_OFFSET), value);
    catch (Throwable e) {
      DbConnection.handleError(e);
  static int getModCount(int id) {
      return getRecordInt(id, MOD_COUNT_OFFSET);

  private static void setModCount(int id, int value) {
    putRecordInt(id, MOD_COUNT_OFFSET, value);

  private static int getContentRecordId(int fileId) {
    return getRecordInt(fileId, CONTENT_OFFSET);

  private static void setContentRecordId(int id, int value) {
    putRecordInt(id, CONTENT_OFFSET, value);

  private static int getAttributeRecordId(int id) {
    return getRecordInt(id, ATTR_REF_OFFSET);

  private static void setAttributeRecordId(int id, int value) {
    putRecordInt(id, ATTR_REF_OFFSET, value);
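  // Each per-file field lives at a fixed offset inside the file's RECORD_SIZE slot, so a field is
  // addressed as id * RECORD_SIZE + fieldOffset; e.g. the flags of file 3 sit at 3 * 40 + 8 = 128.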
  private static int getRecordInt(int id, int offset) {
    return getRecords().getInt(getOffset(id, offset));

  private static void putRecordInt(int id, int offset, int value) {
    getRecords().putInt(getOffset(id, offset), value);

  private static int getOffset(int id, int offset) {
    return id * RECORD_SIZE + offset;
  public static DataInputStream readContent(int fileId) {
      checkFileIsValid(fileId);

      page = getContentRecordId(fileId);
      if (page == 0) return null;

      return doReadContentById(page);
    catch (Throwable e) {
      DbConnection.handleError(e);

  static DataInputStream readContentById(int contentId) {
      return doReadContentById(contentId);
    catch (Throwable e) {
      DbConnection.handleError(e);

  private static DataInputStream doReadContentById(int contentId) throws IOException {
    DataInputStream stream = getContentStorage().readStream(contentId);
    if (useSnappyForCompression) {
      byte[] bytes = CompressionUtil.readCompressed(stream);
      stream = new DataInputStream(new ByteArrayInputStream(bytes));
  public static DataInputStream readAttributeWithLock(int fileId, FileAttribute att) {
      DataInputStream stream = readAttribute(fileId, att);
      if (stream != null && att.isVersioned()) {
          int actualVersion = DataInputOutputUtil.readINT(stream);
          if (actualVersion != att.getVersion()) {
        catch (IOException e) {
    catch (Throwable e) {
      DbConnection.handleError(e);
  // should be called under r or w lock
  private static DataInputStream readAttribute(int fileId, FileAttribute attribute) throws IOException {
    checkFileIsValid(fileId);

    int recordId = getAttributeRecordId(fileId);
    if (recordId == 0) return null;
    int encodedAttrId = DbConnection.getAttributeId(attribute.getId());

    Storage storage = getAttributesStorage();

    try (DataInputStream attrRefs = storage.readStream(recordId)) {
      if (bulkAttrReadSupport) skipRecordHeader(attrRefs, DbConnection.RESERVED_ATTR_ID, fileId);

      while (attrRefs.available() > 0) {
        final int attIdOnPage = DataInputOutputUtil.readINT(attrRefs);
        final int attrAddressOrSize = DataInputOutputUtil.readINT(attrRefs);

        if (attIdOnPage != encodedAttrId) {
          if (inlineAttributes && attrAddressOrSize < MAX_SMALL_ATTR_SIZE) {
            attrRefs.skipBytes(attrAddressOrSize);

          if (inlineAttributes && attrAddressOrSize < MAX_SMALL_ATTR_SIZE) {
            byte[] b = new byte[attrAddressOrSize];
            attrRefs.readFully(b);
            return new DataInputStream(new ByteArrayInputStream(b));

          page = inlineAttributes ? attrAddressOrSize - MAX_SMALL_ATTR_SIZE : attrAddressOrSize;

    DataInputStream stream = getAttributesStorage().readStream(page);
    if (bulkAttrReadSupport) skipRecordHeader(stream, encodedAttrId, fileId);
  // Vfs small attrs: store inline:
  // file's AttrId -> [size, capacity] attr record (RESERVED_ATTR_ID fileId)? (attrId ((smallAttrSize smallAttrData) | (attr record)) )
  // other attr record: (AttrId, fileId) ? attrData
  private static final int MAX_SMALL_ATTR_SIZE = 64;
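  // In other words: a value shorter than MAX_SMALL_ATTR_SIZE is inlined into the file's directory
  // page as (attrId, size, bytes), while a larger value stores
  // (attrId, dedicatedRecordId + MAX_SMALL_ATTR_SIZE) there instead; that is why readers compare
  // the second int against MAX_SMALL_ATTR_SIZE and subtract it to recover the out-of-line record id.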
  private static int findAttributePage(int fileId, FileAttribute attr, boolean toWrite) throws IOException {
    checkFileIsValid(fileId);

    int recordId = getAttributeRecordId(fileId);
    int encodedAttrId = DbConnection.getAttributeId(attr.getId());
    boolean directoryRecord = false;

    Storage storage = getAttributesStorage();

    if (recordId == 0) {
      if (!toWrite) return 0;

      recordId = storage.createNewRecord();
      setAttributeRecordId(fileId, recordId);
      directoryRecord = true;

    try (DataInputStream attrRefs = storage.readStream(recordId)) {
      if (bulkAttrReadSupport) skipRecordHeader(attrRefs, DbConnection.RESERVED_ATTR_ID, fileId);

      while (attrRefs.available() > 0) {
        final int attIdOnPage = DataInputOutputUtil.readINT(attrRefs);
        final int attrAddressOrSize = DataInputOutputUtil.readINT(attrRefs);

        if (attIdOnPage == encodedAttrId) {
          if (inlineAttributes) {
            return attrAddressOrSize < MAX_SMALL_ATTR_SIZE ? -recordId : attrAddressOrSize - MAX_SMALL_ATTR_SIZE;

          return attrAddressOrSize;

        if (inlineAttributes && attrAddressOrSize < MAX_SMALL_ATTR_SIZE) {
          attrRefs.skipBytes(attrAddressOrSize);

      Storage.AppenderStream appender = storage.appendStream(recordId);
      if (bulkAttrReadSupport) {
        if (directoryRecord) {
          DataInputOutputUtil.writeINT(appender, DbConnection.RESERVED_ATTR_ID);
          DataInputOutputUtil.writeINT(appender, fileId);

      DataInputOutputUtil.writeINT(appender, encodedAttrId);
      int attrAddress = storage.createNewRecord();
      DataInputOutputUtil.writeINT(appender, inlineAttributes ? attrAddress + MAX_SMALL_ATTR_SIZE : attrAddress);
      DbConnection.REASONABLY_SMALL.myAttrPageRequested = true;

      DbConnection.REASONABLY_SMALL.myAttrPageRequested = false;
  private static void skipRecordHeader(DataInputStream refs, int expectedRecordTag, int expectedFileId) throws IOException {
    int attId = DataInputOutputUtil.readINT(refs); // attrId
    assert attId == expectedRecordTag || expectedRecordTag == 0;
    int fileId = DataInputOutputUtil.readINT(refs); // fileId
    assert expectedFileId == fileId || expectedFileId == 0;

  private static void writeRecordHeader(int recordTag, int fileId, DataOutputStream appender) throws IOException {
    DataInputOutputUtil.writeINT(appender, recordTag);
    DataInputOutputUtil.writeINT(appender, fileId);
  private static void checkFileIsValid(int fileId) {
    assert fileId > 0 : fileId;
    // TODO: this assertion is temporary; remove it once the bug it is chasing has been caught
    if (!lazyVfsDataCleaning) {
      assert !BitUtil.isSet(getFlags(fileId), FREE_RECORD_FLAG) : "Accessing attribute of a deleted page: " + fileId + ":" + getName(fileId);
  static int acquireFileContent(int fileId) {
      int record = getContentRecordId(fileId);
      if (record > 0) getContentStorage().acquireRecord(record);
    catch (Throwable e) {
      DbConnection.handleError(e);

  static void releaseContent(int contentId) {
      RefCountingStorage contentStorage = getContentStorage();
      if (weHaveContentHashes) {
        contentStorage.releaseRecord(contentId, false);
        contentStorage.releaseRecord(contentId);
    catch (Throwable e) {
      DbConnection.handleError(e);

  public static int getContentId(int fileId) {
      return getContentRecordId(fileId);
    catch (Throwable e) {
      DbConnection.handleError(e);

  static DataOutputStream writeContent(int fileId, boolean readOnly) {
    return new ContentOutputStream(fileId, readOnly);

  private static final MessageDigest myDigest = ContentHashesUtil.createHashDigest();
  static void writeContent(int fileId, ByteSequence bytes, boolean readOnly) {
      new ContentOutputStream(fileId, readOnly).writeBytes(bytes);
    catch (Throwable e) {
      DbConnection.handleError(e);

  static int storeUnlinkedContent(byte[] bytes) {
      if (weHaveContentHashes) {
        recordId = findOrCreateContentRecord(bytes, 0, bytes.length);
        if (recordId > 0) return recordId;
        recordId = -recordId;
        recordId = getContentStorage().acquireNewRecord();

      AbstractStorage.StorageDataOutput output = getContentStorage().writeStream(recordId, true);
      output.write(bytes);
    catch (IOException e) {
      DbConnection.handleError(e);
  public static DataOutputStream writeAttribute(final int fileId, @NotNull FileAttribute att) {
    DataOutputStream stream = new AttributeOutputStream(fileId, att);
    if (att.isVersioned()) {
        DataInputOutputUtil.writeINT(stream, att.getVersion());
      catch (IOException e) {
        throw new RuntimeException(e);
  private static class ContentOutputStream extends DataOutputStream {
    final int myFileId;
    final boolean myFixedSize;

    private ContentOutputStream(final int fileId, boolean readOnly) {
      super(new BufferExposingByteArrayOutputStream());
      myFileId = fileId;
      myFixedSize = readOnly;
    }

    public void close() throws IOException {
        final BufferExposingByteArrayOutputStream _out = (BufferExposingByteArrayOutputStream)out;
        writeBytes(new ByteSequence(_out.getInternalBuffer(), 0, _out.size()));
      catch (Throwable e) {
        DbConnection.handleError(e);

    public void writeBytes(ByteSequence bytes) throws IOException {
      RefCountingStorage contentStorage = getContentStorage();
        incModCount(myFileId);

        checkFileIsValid(myFileId);

        final boolean fixedSize;
        if (weHaveContentHashes) {
          page = findOrCreateContentRecord(bytes.getBytes(), bytes.getOffset(), bytes.getLength());

          incModCount(myFileId);
          checkFileIsValid(myFileId);

          setContentRecordId(myFileId, page > 0 ? page : -page);

          if (page > 0) return;

          page = getContentRecordId(myFileId);
          if (page == 0 || contentStorage.getRefCount(page) > 1) {
            page = contentStorage.acquireNewRecord();
            setContentRecordId(myFileId, page);

          fixedSize = myFixedSize;

        if (useSnappyForCompression) {
          BufferExposingByteArrayOutputStream out = new BufferExposingByteArrayOutputStream();
          DataOutputStream outputStream = new DataOutputStream(out);
          byte[] rawBytes = bytes.getBytes();
          if (bytes.getOffset() != 0) {
            rawBytes = new byte[bytes.getLength()];
            System.arraycopy(bytes.getBytes(), bytes.getOffset(), rawBytes, 0, bytes.getLength());

          CompressionUtil.writeCompressed(outputStream, rawBytes, bytes.getLength());
          outputStream.close();
          bytes = new ByteSequence(out.getInternalBuffer(), 0, out.size());

        contentStorage.writeBytes(page, bytes, fixedSize);
  private static final boolean DO_HARD_CONSISTENCY_CHECK = false;
  private static final boolean DUMP_STATISTICS = weHaveContentHashes; // TODO: remove once not needed
  private static long totalContents;
  private static long totalReuses;
  private static long time;
  private static int contents;
  private static int reuses;
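  // Content de-duplication: the digest below covers a length prefix, a NUL separator and the
  // content bytes, and is then run through the hashes enumerator. An id not larger than the
  // enumerator's previous largest id means this content is already stored and is merely
  // ref-counted again; a brand-new id must match the freshly acquired content record.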
  private static int findOrCreateContentRecord(byte[] bytes, int offset, int length) throws IOException {
    assert weHaveContentHashes;

    long started = DUMP_STATISTICS ? System.nanoTime() : 0;
    myDigest.update(String.valueOf(length - offset).getBytes(Charset.defaultCharset()));
    myDigest.update("\0".getBytes(Charset.defaultCharset()));
    myDigest.update(bytes, offset, length);
    byte[] digest = myDigest.digest();
    long done = DUMP_STATISTICS ? System.nanoTime() - started : 0;

    totalContents += length;

    if (DUMP_STATISTICS && (contents & 0x3FFF) == 0) {
      LOG.info("Contents:" + contents + " of " + totalContents + ", reuses:" + reuses + " of " + totalReuses + " for " + time / 1000000);

    PersistentBTreeEnumerator<byte[]> hashesEnumerator = getContentHashesEnumerator();
    final int largestId = hashesEnumerator.getLargestId();
    int page = hashesEnumerator.enumerate(digest);

    if (page <= largestId) {
      getContentStorage().acquireRecord(page);
      totalReuses += length;

      if (DO_HARD_CONSISTENCY_CHECK) {
        DataInputStream stream = doReadContentById(page);

        for (int c = 0; c < length; ++c) {
          if (stream.available() == 0) {

          if (bytes[i++] != stream.readByte()) {

        if (stream.available() > 0) {

      int newRecord = getContentStorage().acquireNewRecord();
      if (page != newRecord) {
        assert false : "Unexpected content storage modification";

      if (DO_HARD_CONSISTENCY_CHECK) {
        if (hashesEnumerator.enumerate(digest) != page) {

        byte[] bytes1 = hashesEnumerator.valueOf(page);
        if (!Arrays.equals(digest, bytes1)) {
  private static class AttributeOutputStream extends DataOutputStream {
    private final FileAttribute myAttribute;
    private final int myFileId;

    private AttributeOutputStream(final int fileId, @NotNull FileAttribute attribute) {
      super(new BufferExposingByteArrayOutputStream());
      myFileId = fileId;
      myAttribute = attribute;
    }

    public void close() throws IOException {
        final BufferExposingByteArrayOutputStream _out = (BufferExposingByteArrayOutputStream)out;

        if (inlineAttributes && _out.size() < MAX_SMALL_ATTR_SIZE) {
          rewriteDirectoryRecordWithAttrContent(_out);
          incModCount(myFileId);

          incModCount(myFileId);
          int page = findAttributePage(myFileId, myAttribute, true);
          if (inlineAttributes && page < 0) {
            rewriteDirectoryRecordWithAttrContent(new BufferExposingByteArrayOutputStream());
            page = findAttributePage(myFileId, myAttribute, true);

          if (bulkAttrReadSupport) {
            BufferExposingByteArrayOutputStream stream = new BufferExposingByteArrayOutputStream();
            writeRecordHeader(DbConnection.getAttributeId(myAttribute.getId()), myFileId, this);
            write(_out.getInternalBuffer(), 0, _out.size());
            getAttributesStorage().writeBytes(page, new ByteSequence(stream.getInternalBuffer(), 0, stream.size()), myAttribute.isFixedSize());

            getAttributesStorage().writeBytes(page, new ByteSequence(_out.getInternalBuffer(), 0, _out.size()), myAttribute.isFixedSize());
      catch (Throwable e) {
        DbConnection.handleError(e);
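    // Rewrites the file's whole directory record: every other attribute's entry is copied over
    // unchanged, while this attribute's inline payload is replaced (or dropped when _out is empty).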
    void rewriteDirectoryRecordWithAttrContent(BufferExposingByteArrayOutputStream _out) throws IOException {
      int recordId = getAttributeRecordId(myFileId);
      assert inlineAttributes;
      int encodedAttrId = DbConnection.getAttributeId(myAttribute.getId());

      Storage storage = getAttributesStorage();
      BufferExposingByteArrayOutputStream unchangedPreviousDirectoryStream = null;
      boolean directoryRecord = false;

      if (recordId == 0) {
        recordId = storage.createNewRecord();
        setAttributeRecordId(myFileId, recordId);
        directoryRecord = true;

        DataInputStream attrRefs = storage.readStream(recordId);

        DataOutputStream dataStream = null;

          final int remainingAtStart = attrRefs.available();
          if (bulkAttrReadSupport) {
            unchangedPreviousDirectoryStream = new BufferExposingByteArrayOutputStream();
            dataStream = new DataOutputStream(unchangedPreviousDirectoryStream);
            int attId = DataInputOutputUtil.readINT(attrRefs);
            assert attId == DbConnection.RESERVED_ATTR_ID;
            int fileId = DataInputOutputUtil.readINT(attrRefs);
            assert myFileId == fileId;

            writeRecordHeader(attId, fileId, dataStream);

          while (attrRefs.available() > 0) {
            final int attIdOnPage = DataInputOutputUtil.readINT(attrRefs);
            final int attrAddressOrSize = DataInputOutputUtil.readINT(attrRefs);

            if (attIdOnPage != encodedAttrId) {
              if (dataStream == null) {
                unchangedPreviousDirectoryStream = new BufferExposingByteArrayOutputStream();
                dataStream = new DataOutputStream(unchangedPreviousDirectoryStream);

              DataInputOutputUtil.writeINT(dataStream, attIdOnPage);
              DataInputOutputUtil.writeINT(dataStream, attrAddressOrSize);

              if (attrAddressOrSize < MAX_SMALL_ATTR_SIZE) {
                byte[] b = new byte[attrAddressOrSize];
                attrRefs.readFully(b);
                dataStream.write(b);

              if (attrAddressOrSize < MAX_SMALL_ATTR_SIZE) {
                if (_out.size() == attrAddressOrSize) {
                  // update in place when the new attr has the same size
                  int remaining = attrRefs.available();
                  storage.replaceBytes(recordId, remainingAtStart - remaining, new ByteSequence(_out.getInternalBuffer(), 0, _out.size()));

                attrRefs.skipBytes(attrAddressOrSize);

          if (dataStream != null) dataStream.close();

      AbstractStorage.StorageDataOutput directoryStream = storage.writeStream(recordId);
      if (directoryRecord) {
        if (bulkAttrReadSupport) writeRecordHeader(DbConnection.RESERVED_ATTR_ID, myFileId, directoryStream);

      if (unchangedPreviousDirectoryStream != null) {
        directoryStream.write(unchangedPreviousDirectoryStream.getInternalBuffer(), 0, unchangedPreviousDirectoryStream.size());

      if (_out.size() > 0) {
        DataInputOutputUtil.writeINT(directoryStream, encodedAttrId);
        DataInputOutputUtil.writeINT(directoryStream, _out.size());
        directoryStream.write(_out.getInternalBuffer(), 0, _out.size());

      directoryStream.close();
  public static void dispose() {
      DbConnection.force();
      DbConnection.closeFiles();
    catch (Throwable e) {
      DbConnection.handleError(e);

      ourIsDisposed = true;

  public static void invalidateCaches() {
    DbConnection.createBrokenMarkerFile(null);

  static void checkSanity() {
    long t = System.currentTimeMillis();

      final int fileLength = length();
      assert fileLength % RECORD_SIZE == 0;
      int recordCount = fileLength / RECORD_SIZE;

      IntArrayList usedAttributeRecordIds = new IntArrayList();
      IntArrayList validAttributeIds = new IntArrayList();
      for (int id = 2; id < recordCount; id++) {
        int flags = getFlags(id);
        LOG.assertTrue((flags & ~ALL_VALID_FLAGS) == 0, "Invalid flags: 0x" + Integer.toHexString(flags) + ", id: " + id);
        if (BitUtil.isSet(flags, FREE_RECORD_FLAG)) {
          LOG.assertTrue(DbConnection.myFreeRecords.contains(id), "Record, marked free, not in free list: " + id);

          LOG.assertTrue(!DbConnection.myFreeRecords.contains(id), "Record, not marked free, in free list: " + id);
          checkRecordSanity(id, recordCount, usedAttributeRecordIds, validAttributeIds);

    t = System.currentTimeMillis() - t;
    LOG.info("Sanity check took " + t + " ms");
  private static void checkRecordSanity(final int id, final int recordCount, final IntArrayList usedAttributeRecordIds,
                                        final IntArrayList validAttributeIds) {
    int parentId = getParent(id);
    assert parentId >= 0 && parentId < recordCount;
    if (parentId > 0 && getParent(parentId) > 0) {
      int parentFlags = getFlags(parentId);
      assert !BitUtil.isSet(parentFlags, FREE_RECORD_FLAG) : parentId + ": " + Integer.toHexString(parentFlags);
      assert BitUtil.isSet(parentFlags, PersistentFS.IS_DIRECTORY_FLAG) : parentId + ": " + Integer.toHexString(parentFlags);

    String name = getName(id);
    LOG.assertTrue(parentId == 0 || !name.isEmpty(), "File with empty name found under " + getName(parentId) + ", id=" + id);

    checkContentsStorageSanity(id);
    checkAttributesStorageSanity(id, usedAttributeRecordIds, validAttributeIds);

    long length = getLength(id);
    assert length >= -1 : "Invalid file length found for " + name + ": " + length;

  private static void checkContentsStorageSanity(int id) {
    int recordId = getContentRecordId(id);
    assert recordId >= 0;
      getContentStorage().checkSanity(recordId);
  private static void checkAttributesStorageSanity(int id, IntArrayList usedAttributeRecordIds, IntArrayList validAttributeIds) {
    int attributeRecordId = getAttributeRecordId(id);

    assert attributeRecordId >= 0;
    if (attributeRecordId > 0) {
        checkAttributesSanity(attributeRecordId, usedAttributeRecordIds, validAttributeIds);
      catch (IOException ex) {
        DbConnection.handleError(ex);

  private static void checkAttributesSanity(final int attributeRecordId, final IntArrayList usedAttributeRecordIds,
                                            final IntArrayList validAttributeIds) throws IOException {
    assert !usedAttributeRecordIds.contains(attributeRecordId);
    usedAttributeRecordIds.add(attributeRecordId);

    try (DataInputStream dataInputStream = getAttributesStorage().readStream(attributeRecordId)) {
      if (bulkAttrReadSupport) skipRecordHeader(dataInputStream, 0, 0);

      while (dataInputStream.available() > 0) {
        int attId = DataInputOutputUtil.readINT(dataInputStream);

        if (!validAttributeIds.contains(attId)) {
          assert persistentAttributesList || !getNames().valueOf(attId).isEmpty();
          validAttributeIds.add(attId);

        int attDataRecordIdOrSize = DataInputOutputUtil.readINT(dataInputStream);

        if (inlineAttributes) {
          if (attDataRecordIdOrSize < MAX_SMALL_ATTR_SIZE) {
            dataInputStream.skipBytes(attDataRecordIdOrSize);
          else attDataRecordIdOrSize -= MAX_SMALL_ATTR_SIZE;

        assert !usedAttributeRecordIds.contains(attDataRecordIdOrSize);
        usedAttributeRecordIds.add(attDataRecordIdOrSize);

        getAttributesStorage().checkSanity(attDataRecordIdOrSize);
  public static void handleError(Throwable e) throws RuntimeException, Error {
    DbConnection.handleError(e);

  public interface BulkAttrReadCallback {
    boolean accepts(int fileId);
    boolean execute(int fileId, DataInputStream is);
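  // Bulk reading scans every attribute page once and feeds matching (fileId, stream) pairs to
  // the callback: accepts() pre-filters by file id, and execute()'s boolean result is apparently
  // meant to stop the scan early (see the "todo" marks below where it is still ignored).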
  // custom DataInput implementation instead of DataInputStream (without extra allocations) (api change)
  // store each attr in separate file: pro: read only affected data, easy versioning

  public static void readAttributeInBulk(FileAttribute attr, BulkAttrReadCallback callback) throws IOException {
    String attrId = attr.getId();
    int encodedAttrId = DbConnection.getAttributeId(attrId);
    synchronized (attrId) {
      Storage storage = getAttributesStorage();
      RecordIterator recordIterator = storage.recordIterator();
      while (recordIterator.hasNextRecordId()) {
        int recordId = recordIterator.nextRecordId();
        DataInputStream stream = storage.readStream(recordId);

        int currentAttrId = DataInputOutputUtil.readINT(stream);
        int fileId = DataInputOutputUtil.readINT(stream);
        if (!callback.accepts(fileId)) continue;

        if (currentAttrId == DbConnection.RESERVED_ATTR_ID) {
          if (!inlineAttributes) continue;

          while (stream.available() > 0) {
            int directoryAttrId = DataInputOutputUtil.readINT(stream);
            int directoryAttrAddressOrSize = DataInputOutputUtil.readINT(stream);

            if (directoryAttrId != encodedAttrId) {
              if (directoryAttrAddressOrSize < MAX_SMALL_ATTR_SIZE) stream.skipBytes(directoryAttrAddressOrSize);

              if (directoryAttrAddressOrSize < MAX_SMALL_ATTR_SIZE) {
                byte[] b = new byte[directoryAttrAddressOrSize];
                stream.readFully(b);
                DataInputStream inlineAttrStream = new DataInputStream(new ByteArrayInputStream(b));
                int version = DataInputOutputUtil.readINT(inlineAttrStream);
                if (version != attr.getVersion()) continue;
                boolean result = callback.execute(fileId, inlineAttrStream); // todo

        } else if (currentAttrId == encodedAttrId) {
          int version = DataInputOutputUtil.readINT(stream);
          if (version != attr.getVersion()) continue;

          boolean result = callback.execute(fileId, stream); // todo