Mirror of https://github.com/eclipse-cdt/cdt, synced 2025-08-11 10:15:39 +02:00
See 180164, reduce amount of disk-write operations when indexing.
parent 155cfd6798
commit 1def7fbf69

14 changed files with 192 additions and 73 deletions
@@ -249,7 +249,7 @@ public class DBTest extends BaseTestCase {
         IString biss = db.newString(b);
         IString aisc = db.newString(acs);
         IString bisc = db.newString(bcs);
-        db.setReadOnly();
+        db.setReadOnly(true);
 
         assertSignEquals(expected, aiss.compare(bcs, caseSensitive));
         assertSignEquals(expected, aiss.compare(biss, caseSensitive));
@@ -66,7 +66,7 @@ public class PDOMBugsTest extends BaseTestCase {
             wpdom.setProperty("c", "e");
             assertEquals("e", wpdom.getProperty("c"));
         } finally {
-            pdom.releaseWriteLock(0);
+            pdom.releaseWriteLock(0, true);
         }
     }
 
@@ -67,9 +67,17 @@ public interface IWritableIndex extends IIndex {
 
     /**
      * Releases a write lock, reestablishing a certain amount of read locks.
+     * Fully equivalent to <code>releaseWriteLock(int, true)</code>.
      */
     void releaseWriteLock(int establishReadLockCount);
 
+    /**
+     * Releases a write lock, reestablishing a certain amount of read locks.
+     * @param establishReadLockCount amount of read-locks to establish.
+     * @param flushDatabase when true the changes are flushed to disk.
+     */
+    void releaseWriteLock(int establishReadLockCount, boolean flushDatabase);
+
     /**
      * Resets the counters for cache-hits
      */
@@ -90,4 +98,9 @@ public interface IWritableIndex extends IIndex {
      * no writable fragment.
      */
     IWritableIndexFragment getPrimaryWritableFragment();
+
+    /**
+     * Flushes all caches to the disk.
+     */
+    void flush() throws CoreException;
 }
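
Taken together, the non-flushing releaseWriteLock overload and the new flush() define the batching pattern the rest of this commit exploits. A minimal usage sketch, not part of the commit: `index` stands for any IWritableIndex, acquireWriteLock(int) is assumed from the existing interface, and writeFile() is a hypothetical helper.

    // Perform many write operations without flushing, then flush once at the end.
    for (int i= 0; i < files.length; i++) {
        index.acquireWriteLock(0);
        try {
            writeFile(index, files[i]);         // hypothetical write operation
        } finally {
            index.releaseWriteLock(0, false);   // unlock, but keep caches dirty
        }
    }
    index.flush();                              // one disk flush for the whole batch

This is the shape of the PDOMIndexerTask.parseTUs() change near the end of this commit.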
@@ -60,8 +60,10 @@ public interface IWritableIndexFragment extends IIndexFragment {
 
     /**
      * Releases a write lock, reestablishing a certain amount of read locks.
+     * @param establishReadLockCount amount of read-locks to establish
+     * @param flush if <code>true</code> changes are flushed to disk
      */
-    void releaseWriteLock(int establishReadLockCount);
+    void releaseWriteLock(int establishReadLockCount, boolean flush);
 
     /**
      * Write the key, value mapping to the fragment properties. If a mapping for the
@@ -72,4 +74,9 @@ public interface IWritableIndexFragment extends IIndexFragment {
      * @throws NullPointerException if key is null
      */
     public void setProperty(String propertyName, String value) throws CoreException;
+
+    /**
+     * Flushes caches to disk.
+     */
+    void flush() throws CoreException;
 }
@@ -113,24 +113,37 @@ public class WritableCIndex extends CIndex implements IWritableIndex {
                 // rollback
                 fIsWriteLocked= false;
                 while (--i >= 0) {
-                    fWritableFragments[i].releaseWriteLock(giveupReadlockCount);
+                    fWritableFragments[i].releaseWriteLock(giveupReadlockCount, false);
                 }
             }
         }
     }
 
     public synchronized void releaseWriteLock(int establishReadlockCount) {
+        releaseWriteLock(establishReadlockCount, true);
+    }
+
+    public synchronized void releaseWriteLock(int establishReadlockCount, boolean flush) {
         assert fIsWriteLocked: "No write lock to be released"; //$NON-NLS-1$
         assert establishReadlockCount == getReadLockCount(): "Unexpected read lock is not allowed"; //$NON-NLS-1$
 
         fIsWriteLocked= false;
-        int i= 0;
-        for (i = 0; i < fWritableFragments.length; i++) {
-            fWritableFragments[i].releaseWriteLock(establishReadlockCount);
+        for (int i = 0; i < fWritableFragments.length; i++) {
+            fWritableFragments[i].releaseWriteLock(establishReadlockCount, flush);
         }
     }
 
     public IWritableIndexFragment getPrimaryWritableFragment() {
         return fWritableFragments.length > 0 ? fWritableFragments[0] : null;
     }
+
+    public void flush() throws CoreException {
+        assert !fIsWriteLocked;
+        int i= 0;
+        for (i = 0; i < fWritableFragments.length; i++) {
+            fWritableFragments[i].flush();
+        }
+    }
+
 }
@@ -564,12 +564,12 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
     }
 
     final public void releaseWriteLock() {
-        releaseWriteLock(0);
+        releaseWriteLock(0, true);
     }
 
-    public void releaseWriteLock(int establishReadLocks) {
+    public void releaseWriteLock(int establishReadLocks, boolean flush) {
         try {
-            db.setReadOnly();
+            db.setReadOnly(flush);
         } catch (CoreException e) {
             CCorePlugin.log(e);
         }
@@ -737,4 +737,8 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
     public void resetCacheCounters() {
         db.resetCacheCounters();
     }
+
+    public void flush() throws CoreException {
+        db.flush();
+    }
 }
@@ -1037,8 +1037,8 @@ public class PDOMManager implements IWritableIndexManager, IListener {
             PDOM pdom= (PDOM) getPDOM(cproject);
             pdom.acquireWriteLock();
             try {
+                pdom.flush();
                 Database db= pdom.getDB();
-                db.flushDirtyChunks();
                 FileChannel from= db.getChannel();
                 FileChannel to = new FileOutputStream(targetLocation).getChannel();
                 from.transferTo(0, from.size(), to);
@@ -108,6 +108,17 @@ abstract public class PDOMWriter {
      */
     protected abstract IIndexFileLocation findLocation(String absolutePath);
 
+    /**
+     * Fully equivalent to
+     * <code>addSymbols(IASTTranslationUnit, IWritableIndex, int, true, int, IProgressMonitor)</code>.
+     * @since 4.0
+     */
+    public void addSymbols(IASTTranslationUnit ast,
+            IWritableIndex index, int readlockCount,
+            int configHash, IProgressMonitor pm) throws InterruptedException, CoreException {
+        addSymbols(ast, index, readlockCount, true, configHash, pm);
+    }
+
     /**
      * Extracts symbols from the given ast and adds them to the index. It will
      * make calls to
@@ -115,10 +126,14 @@ abstract public class PDOMWriter {
      * {@link #postAddToIndex(IIndexFileLocation, IIndexFile)},
      * {@link #getLastModified(IIndexFileLocation)} and
      * {@link #findLocation(String)} to obtain further information.
+     *
+     * When flushIndex is set to <code>false</code>, you must make sure to flush the
+     * index after your last write operation.
      * @since 4.0
      */
-    public void addSymbols(IASTTranslationUnit ast, IWritableIndex index, int readlockCount, int configHash,
-            IProgressMonitor pm) throws InterruptedException, CoreException {
+    public void addSymbols(IASTTranslationUnit ast,
+            IWritableIndex index, int readlockCount, boolean flushIndex,
+            int configHash, IProgressMonitor pm) throws InterruptedException, CoreException {
         final Map symbolMap= new HashMap();
         try {
             HashSet contextIncludes= new HashSet();
@@ -187,7 +202,7 @@ abstract public class PDOMWriter {
                 }
             }
         } finally {
-            index.releaseWriteLock(readlockCount);
+            index.releaseWriteLock(readlockCount, flushIndex);
         }
         fStatistics.fAddToIndexTime+= System.currentTimeMillis()-start;
     }
@@ -282,7 +282,7 @@ public class TeamPDOMImportOperation implements IWorkspaceRunnable {
             }
         }
         finally {
-            pdom.releaseWriteLock(giveupReadlocks);
+            pdom.releaseWriteLock(giveupReadlocks, true);
         }
     }
 
@@ -28,14 +28,18 @@ final class Chunk {
 
     boolean fCacheHitFlag= false;
     boolean fDirty= false;
-    boolean fWritable= false;
+    boolean fLocked= false;    // locked chunks must not be released from cache.
     int fCacheIndex= -1;
 
-    Chunk(Database db, int sequenceNumber) throws CoreException {
+    Chunk(Database db, int sequenceNumber) {
         fDatabase= db;
         fBuffer= ByteBuffer.allocate(Database.CHUNK_SIZE);
         fSequenceNumber= sequenceNumber;
+    }
+
+    void read() throws CoreException {
         try {
+            fBuffer.position(0);
             fDatabase.getChannel().read(fBuffer, fSequenceNumber*Database.CHUNK_SIZE);
         } catch (IOException e) {
             throw new CoreException(new DBStatus(e));
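
Moving the disk read out of the constructor splits chunk creation into two distinct paths. A sketch, illustrative rather than part of the commit, using the package-private members from the hunk above:

    // Path 1: cache miss on an existing chunk - allocate, then read from disk.
    Chunk existing= new Chunk(db, index);
    existing.read();               // explicit I/O step, may throw CoreException

    // Path 2: fresh chunk appended to the database - no I/O at all yet;
    // it starts out dirty and reaches the disk only on a later flush.
    Chunk fresh= new Chunk(db, n);
    fresh.fDirty= true;

Path 2 is what the rewritten Database.createNewChunk() below relies on; previously the constructor always read, so even brand-new chunks involved file traffic.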
@@ -30,9 +30,9 @@ public final class ChunkCache {
         fPageTable= new Chunk[computeLength(maxSize)];
     }
 
-    public synchronized void add(Chunk chunk, boolean writable) {
-        if (writable) {
-            chunk.fWritable= true;
+    public synchronized void add(Chunk chunk, boolean locked) {
+        if (locked) {
+            chunk.fLocked= true;
         }
         if (chunk.fCacheIndex >= 0) {
             chunk.fCacheHitFlag= true;
@@ -95,7 +95,7 @@ public class Database {
                 setWritable();
                 createNewChunk();
                 setVersion(version);
-                setReadOnly();
+                setReadOnly(true);
             }
         } catch (IOException e) {
             throw new CoreException(new DBStatus(e));
@@ -158,24 +158,25 @@ public class Database {
 
         // for performance reasons try to find chunk and mark it without
         // synchronizing. This means that we might pick up a chunk that
-        // has been paged out, which is ok.
-        // Furthermore the hitflag may not be seen by the clock-alorithm,
+        // has been paged out, which is fine.
+        // Furthermore the hit-flag may not be seen by the clock-algorithm,
         // which might lead to the eviction of a chunk. With the next
         // cache failure we are in sync again, though.
         Chunk chunk = chunks[index];
-        if (chunk != null && chunk.fWritable == fWritable) {
+        if (chunk != null && (chunk.fLocked || !fWritable)) {
             chunk.fCacheHitFlag= true;
             cacheHits++;
             return chunk;
         }
 
         // here is the safe code that has to be performed if we cannot
-        // get ahold of the chunk.
+        // get hold of the chunk.
         synchronized(fCache) {
             chunk= chunks[index];
             if (chunk == null) {
                 cacheMisses++;
                 chunk = chunks[index] = new Chunk(this, index);
+                chunk.read();
             }
             else {
                 cacheHits++;
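
The comments above refer to the clock (second-chance) eviction driven by fCacheHitFlag; the cache's eviction code is not part of this hunk. A minimal sketch of such an algorithm, assuming a page table like ChunkCache's (field and method names here are illustrative, not the real ones):

    private int fClockHand;    // sweeps circularly over the page table

    private int findSlotToEvict(Chunk[] pageTable) {
        while (true) {
            Chunk candidate= pageTable[fClockHand];
            int slot= fClockHand;
            fClockHand= (fClockHand + 1) % pageTable.length;
            if (candidate == null || !candidate.fCacheHitFlag) {
                return slot;                   // empty or not recently used: take it
            }
            candidate.fCacheHitFlag= false;    // recently used: grant a second chance
        }
    }

Because the fast path sets fCacheHitFlag without synchronization, the sweep may occasionally miss a hit and evict a recently used chunk; as the comment notes, the next cache miss brings it back, so the race costs only performance, never correctness.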
@@ -239,18 +240,16 @@ public class Database {
     }
 
     private int createNewChunk() throws CoreException {
-        try {
-            Chunk[] oldtoc = chunks;
-            int n = oldtoc.length;
-            int offset = n * CHUNK_SIZE;
-            file.seek(offset);
-            file.write(new byte[CHUNK_SIZE]);
-            chunks = new Chunk[n + 1];
-            System.arraycopy(oldtoc, 0, chunks, 0, n);
-            return offset;
-        } catch (IOException e) {
-            throw new CoreException(new DBStatus(e));
-        }
+        Chunk[] oldtoc = chunks;
+        int n = oldtoc.length;
+        int offset = n * CHUNK_SIZE;
+        chunks = new Chunk[n + 1];
+        System.arraycopy(oldtoc, 0, chunks, 0, n);
+        final Chunk chunk= new Chunk(this, n);
+        chunk.fDirty= true;
+        chunks[n]= chunk;
+        fCache.add(chunk, true);
+        return offset;
     }
 
     private int getFirstBlock(int blocksize) throws CoreException {
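
This rewrite of createNewChunk() is the core of the disk-write reduction: previously every new chunk was materialized immediately with a seek and a CHUNK_SIZE zero-fill write, so the file grew synchronously while indexing; now the chunk exists only in memory, marked dirty and added to the cache as locked, and is written out when the database is flushed or switched back to read-only.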
@@ -390,12 +389,12 @@ public class Database {
     /**
      * Closes the database.
      * <p>
-     * The behaviour of any further calls to the Database is undefined
+     * The behavior of any further calls to the Database is undefined
      * @throws IOException
      * @throws CoreException
      */
     public void close() throws CoreException {
-        setReadOnly();
+        setReadOnly(true);
         removeChunksFromCache();
         chunks= new Chunk[0];
         try {
@@ -415,9 +414,10 @@ public class Database {
     /**
      * Called from any thread via the cache, protected by {@link #fCache}.
      */
-    void releaseChunk(Chunk chunk) {
-        if (!chunk.fWritable)
+    void releaseChunk(final Chunk chunk) {
+        if (!chunk.fLocked) {
             chunks[chunk.fSequenceNumber]= null;
+        }
     }
 
     /**
@@ -432,26 +432,65 @@ public class Database {
         fWritable= true;
     }
 
-    public void setReadOnly() throws CoreException {
+    public void setReadOnly(final boolean flush) throws CoreException {
         if (fWritable) {
             fWritable= false;
-            flushDirtyChunks();
+
+            ArrayList dirtyChunks= new ArrayList();
+            synchronized (fCache) {
+                for (int i= chunks.length-1; i >= 0 ; i--) {
+                    Chunk chunk= chunks[i];
+                    if (chunk != null) {
+                        if (chunk.fCacheIndex < 0) {
+                            chunk.fLocked= false;
+                            chunks[i]= null;
+                            if (chunk.fDirty) {
+                                dirtyChunks.add(chunk);
+                            }
+                        }
+                        else if (chunk.fLocked) {
+                            if (!chunk.fDirty) {
+                                chunk.fLocked= false;
+                            }
+                            else if (flush) {
+                                chunk.fLocked= false;
+                                dirtyChunks.add(chunk);
+                            }
+                        }
+                        else if (flush && chunk.fDirty) {
+                            dirtyChunks.add(chunk);
+                        }
+                    }
+                }
+            }
+
+            if (!dirtyChunks.isEmpty()) {
+                for (Iterator it = dirtyChunks.iterator(); it.hasNext();) {
+                    Chunk chunk = (Chunk) it.next();
+                    chunk.flush();
+                }
+            }
         }
     }
 
-    public void flushDirtyChunks() throws CoreException {
+    public void flush() throws CoreException {
+        if (fWritable) {
+            try {
+                setReadOnly(true);
+            }
+            finally {
+                setWritable();
+            }
+            return;
+        }
+
+        // be careful as other readers may access chunks concurrently
         ArrayList dirtyChunks= new ArrayList();
         synchronized (fCache) {
-            for (int i = 0; i < chunks.length; i++) {
+            for (int i= chunks.length-1; i >= 0 ; i--) {
                 Chunk chunk= chunks[i];
-                if (chunk != null && chunk.fWritable) {
-                    chunk.fWritable= false;
-                    if (chunk.fCacheIndex < 0) {
-                        chunks[i]= null;
-                    }
-                    if (chunk.fDirty) {
-                        dirtyChunks.add(chunk);
-                    }
+                if (chunk != null && chunk.fDirty) {
+                    dirtyChunks.add(chunk);
                 }
             }
         }
@@ -462,6 +501,17 @@ public class Database {
                 chunk.flush();
             }
         }
+
+        // only after the chunks are flushed we may unlock and release them.
+        synchronized (fCache) {
+            for (Iterator it = dirtyChunks.iterator(); it.hasNext();) {
+                Chunk chunk = (Chunk) it.next();
+                chunk.fLocked= false;
+                if (chunk.fCacheIndex < 0) {
+                    chunks[chunk.fSequenceNumber]= null;
+                }
+            }
+        }
     }
 
     public void resetCacheCounters() {
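
setReadOnly(boolean) and flush() above share one locking discipline: collect the dirty chunks while holding the cache lock, do the actual writes without it, and (in flush()) re-acquire the lock to unlock and release what was written. A condensed sketch of the pattern; collectDirtyChunks() and releaseFlushed() are hypothetical helpers standing in for the inlined loops:

    ArrayList dirty;
    synchronized (fCache) {
        dirty= collectDirtyChunks();      // phase 1: gather under the cache lock
    }
    for (Iterator it= dirty.iterator(); it.hasNext();) {
        ((Chunk) it.next()).flush();      // phase 2: disk I/O with no lock held,
    }                                     // so concurrent readers are not blocked
    synchronized (fCache) {
        releaseFlushed(dirty);            // phase 3: unlock, drop evicted chunks
    }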
@@ -69,18 +69,22 @@ public class GeneratePDOM implements ISafeRunnable {
         try {
             CCoreInternals.getPDOMManager().exportProjectPDOM(cproject, targetLocation, converter);
             WritablePDOM exportedPDOM= new WritablePDOM(targetLocation, converter, LanguageManager.getInstance().getPDOMLinkageFactoryMappings());
 
-            exportedPDOM.acquireWriteLock(0);
             try {
-                Map exportProperties= pm.getExportProperties();
-                if(exportProperties!=null) {
-                    for(Iterator i = exportProperties.entrySet().iterator(); i.hasNext(); ) {
-                        Map.Entry entry = (Map.Entry) i.next();
-                        exportedPDOM.setProperty((String) entry.getKey(), (String) entry.getValue());
-                    }
-                }
-            } finally {
-                exportedPDOM.releaseWriteLock(0);
+                exportedPDOM.acquireWriteLock(0);
+                try {
+                    Map exportProperties= pm.getExportProperties();
+                    if(exportProperties!=null) {
+                        for(Iterator i = exportProperties.entrySet().iterator(); i.hasNext(); ) {
+                            Map.Entry entry = (Map.Entry) i.next();
+                            exportedPDOM.setProperty((String) entry.getKey(), (String) entry.getValue());
+                        }
+                    }
+                } finally {
+                    exportedPDOM.releaseWriteLock(0, true);
+                }
+            }
+            finally {
+                exportedPDOM.close();
             }
         } catch(InterruptedException ie) {
             String msg= MessageFormat.format(Messages.GeneratePDOM_GenericGenerationFailed, new Object[] {ie.getMessage()});
@@ -176,6 +176,15 @@ public abstract class PDOMIndexerTask extends PDOMWriter implements IPDOMIndexer
      * @since 4.0
      */
     protected void parseTUs(IWritableIndex index, int readlockCount, Collection sources, Collection headers, IProgressMonitor monitor) throws CoreException, InterruptedException {
+        try {
+            internalParseTUs(index, readlockCount, sources, headers, monitor);
+        }
+        finally {
+            index.flush();
+        }
+    }
+
+    private void internalParseTUs(IWritableIndex index, int readlockCount, Collection sources, Collection headers, IProgressMonitor monitor) throws CoreException, InterruptedException {
         int options= 0;
         if (checkProperty(IndexerPreferences.KEY_SKIP_ALL_REFERENCES)) {
             options |= AbstractLanguage.OPTION_SKIP_FUNCTION_BODIES;
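
With this wrapper, each translation unit is indexed under a write lock that is released without flushing (note the flushIndex=false arguments in the two addSymbols calls below), and the single index.flush() in the finally block writes everything out once per parseTUs invocation instead of once per file.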
@@ -273,7 +282,7 @@ public abstract class PDOMIndexerTask extends PDOMWriter implements IPDOMIndexer
                 IASTTranslationUnit ast= createAST(tu, scanner, options, pm);
                 fStatistics.fParsingTime += System.currentTimeMillis()-start;
                 if (ast != null) {
-                    addSymbols(ast, index, readlockCount, configHash, pm);
+                    addSymbols(ast, index, readlockCount, false, configHash, pm);
                 }
             }
         }
@@ -330,7 +339,7 @@ public abstract class PDOMIndexerTask extends PDOMWriter implements IPDOMIndexer
 
         fStatistics.fParsingTime += System.currentTimeMillis()-start;
         if (ast != null) {
-            addSymbols(ast, index, readlockCount, 0, pm);
+            addSymbols(ast, index, readlockCount, false, 0, pm);
             updateInfo(-1, +1, 0);
         }
     }