
Fix for 170542: file access by the index database no longer uses memory-mapped files.

Markus Schorn 2007-03-23 10:52:10 +00:00
parent 31f276dafa
commit d09020f2e4
17 changed files with 540 additions and 268 deletions

TeamSharedIndexTest.java

@ -90,7 +90,7 @@ public class TeamSharedIndexTest extends IndexTestBase {
private ICProject recreateProject(final String prjName) throws Exception {
final boolean[] changed= {false};
IElementChangedListener waitListener= new IElementChangedListener() {
final IElementChangedListener listener = new IElementChangedListener() {
public void elementChanged(ElementChangedEvent event) {
synchronized (changed) {
changed[0]= true;
@ -98,23 +98,27 @@ public class TeamSharedIndexTest extends IndexTestBase {
}
}
};
CoreModel.getDefault().addElementChangedListener(waitListener);
final IWorkspace workspace = ResourcesPlugin.getWorkspace();
final IProject prjHandle= workspace.getRoot().getProject(prjName);
workspace.run(new IWorkspaceRunnable() {
public void run(IProgressMonitor monitor) throws CoreException {
IProjectDescription desc= IDEWorkbenchPlugin.getPluginWorkspace().newProjectDescription(prjName);
prjHandle.create(desc, NPM);
prjHandle.open(0, NPM);
}
}, null);
synchronized(changed) {
if (!changed[0]) {
changed.wait(INDEXER_WAIT_TIME);
assertTrue(changed[0]);
CoreModel.getDefault().addElementChangedListener(listener);
try {
final IProject prjHandle= workspace.getRoot().getProject(prjName);
workspace.run(new IWorkspaceRunnable() {
public void run(IProgressMonitor monitor) throws CoreException {
IProjectDescription desc= IDEWorkbenchPlugin.getPluginWorkspace().newProjectDescription(prjName);
prjHandle.create(desc, NPM);
prjHandle.open(0, NPM);
}
}, null);
synchronized(changed) {
if (!changed[0]) {
changed.wait(INDEXER_WAIT_TIME);
assertTrue(changed[0]);
}
}
}
CoreModel.getDefault().removeElementChangedListener(waitListener);
finally {
CoreModel.getDefault().removeElementChangedListener(listener);
}
fPDOMManager.joinIndexer(INDEXER_WAIT_TIME, NPM);
return CoreModel.getDefault().create(workspace.getRoot().getProject(prjName));
}
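The reworked recreateProject() above registers the listener, runs the workspace operation, and removes the listener in a finally block, so a timeout or a failed assertion can no longer leak the listener. A minimal, self-contained sketch of the underlying wait-on-flag pattern (hypothetical class, not CDT code):

public class WaitForFlagSketch {
    private final Object lock = new Object();
    private boolean changed;

    // Called by the asynchronous notifier (the element-changed listener above).
    public void notifyChanged() {
        synchronized (lock) {
            changed = true;
            lock.notifyAll();
        }
    }

    // Waits until the flag is set or the timeout elapses; returns the flag.
    public boolean await(long timeoutMillis) throws InterruptedException {
        synchronized (lock) {
            if (!changed) {
                lock.wait(timeoutMillis);
            }
            return changed;
        }
    }
}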

BTreeTests.java

@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2006 Symbian Software Systems and others.
* Copyright (c) 2006, 2007 Symbian Software Systems and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
@ -7,6 +7,7 @@
*
* Contributors:
* Symbian - Initial implementation
* Markus Schorn (Wind River Systems)
*******************************************************************************/
package org.eclipse.cdt.internal.pdom.tests;
@ -22,6 +23,7 @@ import junit.framework.Test;
import org.eclipse.cdt.core.testplugin.util.BaseTestCase;
import org.eclipse.cdt.internal.core.pdom.db.BTree;
import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
import org.eclipse.cdt.internal.core.pdom.db.Database;
import org.eclipse.cdt.internal.core.pdom.db.IBTreeComparator;
import org.eclipse.cdt.internal.core.pdom.db.IBTreeVisitor;
@ -50,7 +52,7 @@ public class BTreeTests extends BaseTestCase {
// and invoke it multiple times per Junit test
protected void init(int degree) throws Exception {
dbFile = File.createTempFile("pdomtest", "db");
db = new Database(dbFile);
db = new Database(dbFile, new ChunkCache(), 0);
rootRecord = Database.DATA_AREA;
comparator = new BTMockRecordComparator();
btree = new BTree(db, rootRecord, degree, comparator);

DBPropertiesTests.java

@ -7,6 +7,7 @@
*
* Contributors:
* Andrew Ferguson (Symbian) - Initial implementation
* Markus Schorn (Wind River Systems)
*******************************************************************************/
package org.eclipse.cdt.internal.pdom.tests;
@ -17,6 +18,7 @@ import java.util.Properties;
import junit.framework.Test;
import org.eclipse.cdt.core.testplugin.util.BaseTestCase;
import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
import org.eclipse.cdt.internal.core.pdom.db.DBProperties;
import org.eclipse.cdt.internal.core.pdom.db.Database;
import org.eclipse.core.runtime.CoreException;
@ -35,7 +37,7 @@ public class DBPropertiesTests extends BaseTestCase {
protected void setUp() throws Exception {
dbLoc = File.createTempFile("test", "db");
dbLoc.deleteOnExit();
db = new Database(dbLoc);
db = new Database(dbLoc, new ChunkCache(), 0);
}
protected void tearDown() throws Exception {

DBTest.java

@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2005, 2006 QNX Software Systems
* Copyright (c) 2005, 2007 QNX Software Systems
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
@ -8,6 +8,7 @@
* Contributors:
* QNX Software Systems - initial API and implementation
* Andrew Ferguson (Symbian)
* Markus Schorn (Wind River Systems)
*******************************************************************************/
package org.eclipse.cdt.internal.pdom.tests;
@ -19,6 +20,7 @@ import junit.framework.Test;
import org.eclipse.cdt.core.testplugin.CTestPlugin;
import org.eclipse.cdt.core.testplugin.util.BaseTestCase;
import org.eclipse.cdt.internal.core.pdom.db.BTree;
import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
import org.eclipse.cdt.internal.core.pdom.db.Database;
import org.eclipse.cdt.internal.core.pdom.db.IBTreeComparator;
import org.eclipse.cdt.internal.core.pdom.db.IBTreeVisitor;
@ -32,9 +34,10 @@ public class DBTest extends BaseTestCase {
protected void setUp() throws Exception {
super.setUp();
db = new Database(getTestDir().append(getName()+System.currentTimeMillis()+".dat").toFile());
db = new Database(getTestDir().append(getName()+System.currentTimeMillis()+".dat").toFile(),
new ChunkCache(), 0);
}
public static Test suite() {
return suite(DBTest.class);
}
@ -48,9 +51,11 @@ public class DBTest extends BaseTestCase {
}
protected void tearDown() throws Exception {
db.close();
if(!db.getLocation().delete()) {
db.getLocation().deleteOnExit();
}
db= null;
}
public void testBlockSizeAndFirstBlock() throws Exception {
@ -118,7 +123,7 @@ public class DBTest extends BaseTestCase {
// Tests inserting and retrieving strings
File f = getTestDir().append("testStrings.dat").toFile();
f.delete();
final Database db = new Database(f);
final Database db = new Database(f, new ChunkCache(), 0);
String[] names = {
"ARLENE",

PDOMBugsTest.java

@ -7,6 +7,7 @@
*
* Contributors:
* Andrew Ferguson (Symbian) - Initial implementation
* Markus Schorn (Wind River Systems)
*******************************************************************************/
package org.eclipse.cdt.internal.pdom.tests;
@ -25,6 +26,7 @@ import org.eclipse.cdt.internal.core.index.IIndexFragment;
import org.eclipse.cdt.internal.core.index.IWritableIndexFragment;
import org.eclipse.cdt.internal.core.pdom.PDOM;
import org.eclipse.cdt.internal.core.pdom.WritablePDOM;
import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.runtime.NullProgressMonitor;
@ -91,7 +93,7 @@ public class PDOMBugsTest extends BaseTestCase {
IIndexLocationConverter cvr= new ResourceContainerRelativeLocationConverter(cproject.getProject());
CCoreInternals.getPDOMManager().exportProjectPDOM(cproject, tmp, cvr);
IWritableIndexFragment pdom = new WritablePDOM(tmp, cvr);
IWritableIndexFragment pdom = new WritablePDOM(tmp, cvr, new ChunkCache());
pdom.acquireReadLock();
try {
String id= pdom.getProperty(IIndexFragment.PROPERTY_FRAGMENT_ID);

CModelManager.java

@ -1181,14 +1181,14 @@ public class CModelManager implements IResourceChangeListener, ICDescriptorListe
// stop the binary runner for this project
removeBinaryRunner(project);
// stop indexing jobs for this project
CCoreInternals.getPDOMManager().deleteProject(create(project), delta);
CCoreInternals.getPDOMManager().deleteProject(create(project));
}
private void closing(IProject project, IResourceDelta delta) {
// stop the binary runner for this project
removeBinaryRunner(project);
// stop indexing jobs for this project
CCoreInternals.getPDOMManager().removeProject(create(project));
CCoreInternals.getPDOMManager().closeProject(create(project));
}
}

Messages.java

@ -16,6 +16,7 @@ import org.eclipse.osgi.util.NLS;
public class Messages extends NLS {
private static final String BUNDLE_NAME = "org.eclipse.cdt.internal.core.pdom.messages"; //$NON-NLS-1$
public static String Checksums_taskComputeChecksums;
public static String PDOMManager_ClosePDOMJob;
public static String PDOMManager_ExistingFileCollides;
public static String PDOMManager_indexMonitorDetail;
public static String PDOMManager_JoinIndexerTask;

PDOM.java

@ -14,7 +14,6 @@
package org.eclipse.cdt.internal.core.pdom;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
@ -48,6 +47,7 @@ import org.eclipse.cdt.internal.core.index.IIndexFragmentFile;
import org.eclipse.cdt.internal.core.index.IIndexFragmentInclude;
import org.eclipse.cdt.internal.core.index.IIndexFragmentName;
import org.eclipse.cdt.internal.core.pdom.db.BTree;
import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
import org.eclipse.cdt.internal.core.pdom.db.DBProperties;
import org.eclipse.cdt.internal.core.pdom.db.Database;
import org.eclipse.cdt.internal.core.pdom.db.IBTreeVisitor;
@ -114,23 +114,22 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
private IIndexLocationConverter locationConverter;
public PDOM(File dbPath, IIndexLocationConverter locationConverter) throws CoreException {
loadDatabase(dbPath);
this(dbPath, locationConverter, ChunkCache.getSharedInstance());
}
public PDOM(File dbPath, IIndexLocationConverter locationConverter, ChunkCache cache) throws CoreException {
loadDatabase(dbPath, cache);
this.locationConverter = locationConverter;
}
private void loadDatabase(File dbPath) throws CoreException {
private void loadDatabase(File dbPath, ChunkCache cache) throws CoreException {
fPath= dbPath;
boolean exists= fPath.exists();
db = new Database(fPath);
db = new Database(fPath, cache, VERSION);
fileIndex= null; // holds on to the database, so clear it.
if (exists) {
int version= db.getVersion();
if (version == VERSION) {
readLinkages();
}
}
else {
db.setVersion(VERSION);
int version= db.getVersion();
if (version == VERSION) {
readLinkages();
}
}
@ -138,9 +137,8 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
return locationConverter;
}
public boolean versionMismatch() {
public boolean versionMismatch() throws CoreException {
if (db.getVersion() != VERSION) {
db.setVersion(VERSION);
return true;
} else
return false;
@ -231,8 +229,8 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
// Zero out the File Index and Linkages
clearFileIndex();
db.setVersion(VERSION);
db.putInt(PROPERTIES, 0);
db.putInt(LINKAGES, 0);
fLinkageIDCache.clear();
}
@ -242,10 +240,10 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
fLinkageIDCache.clear();
try {
db.close();
} catch (IOException e) {
} catch (CoreException e) {
CCorePlugin.log(e);
}
loadDatabase(file);
loadDatabase(file, db.getChunkCache());
oldFile.delete();
}
@ -541,6 +539,7 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
while (lockCount != 0 || waitingReaders > 0)
mutex.wait();
--lockCount;
db.setWritable();
}
}
@ -549,6 +548,11 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
}
public void releaseWriteLock(int establishReadLocks) {
try {
db.setReadOnly();
} catch (CoreException e) {
CCorePlugin.log(e);
}
assert lockCount == -1;
lastWriteAccess= System.currentTimeMillis();
synchronized (mutex) {
@ -684,5 +688,10 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
public String getProperty(String propertyName) throws CoreException {
return new DBProperties(db, PROPERTIES).getProperty(propertyName);
}
public void close() throws CoreException {
fLinkageIDCache.clear();
db.close();
}
}
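PDOM now has an explicit lifecycle: it can be constructed with a dedicated ChunkCache (the two-argument constructor falls back to the shared instance) and should be closed when no longer needed. A hedged usage sketch combining the new constructor with the locking calls shown in PDOMBugsTest above; the converter argument and error handling are assumed:

import java.io.File;

import org.eclipse.cdt.core.index.IIndexLocationConverter;
import org.eclipse.cdt.internal.core.index.IIndexFragment;
import org.eclipse.cdt.internal.core.pdom.PDOM;
import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;

class PDOMLifecycleSketch {
    static String readFragmentId(File dbFile, IIndexLocationConverter converter) throws Exception {
        // A private 4 MiB cache instead of ChunkCache.getSharedInstance().
        PDOM pdom = new PDOM(dbFile, converter, new ChunkCache(4 * 1024 * 1024));
        try {
            pdom.acquireReadLock();
            try {
                return pdom.getProperty(IIndexFragment.PROPERTY_FRAGMENT_ID);
            } finally {
                pdom.releaseReadLock();
            }
        } finally {
            pdom.close(); // clears the linkage cache and closes the database
        }
    }
}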

PDOMManager.java

@ -28,6 +28,7 @@ import java.util.Map;
import java.util.Properties;
import org.eclipse.cdt.core.CCorePlugin;
import org.eclipse.cdt.core.CCorePreferenceConstants;
import org.eclipse.cdt.core.dom.IPDOM;
import org.eclipse.cdt.core.dom.IPDOMIndexer;
import org.eclipse.cdt.core.dom.IPDOMIndexerTask;
@ -49,6 +50,7 @@ import org.eclipse.cdt.internal.core.index.IndexFactory;
import org.eclipse.cdt.internal.core.index.IndexerStateEvent;
import org.eclipse.cdt.internal.core.index.provider.IndexProviderManager;
import org.eclipse.cdt.internal.core.pdom.PDOM.IListener;
import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
import org.eclipse.cdt.internal.core.pdom.dom.PDOMFile;
import org.eclipse.cdt.internal.core.pdom.dom.PDOMProjectIndexLocationConverter;
import org.eclipse.cdt.internal.core.pdom.indexer.IndexerPreferences;
@ -59,7 +61,6 @@ import org.eclipse.cdt.internal.core.pdom.indexer.nulli.PDOMNullIndexer;
import org.eclipse.core.resources.IFolder;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.resources.IResourceDelta;
import org.eclipse.core.resources.ResourcesPlugin;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IConfigurationElement;
@ -70,10 +71,13 @@ import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.ListenerList;
import org.eclipse.core.runtime.OperationCanceledException;
import org.eclipse.core.runtime.Platform;
import org.eclipse.core.runtime.Preferences;
import org.eclipse.core.runtime.QualifiedName;
import org.eclipse.core.runtime.SafeRunner;
import org.eclipse.core.runtime.Status;
import org.eclipse.core.runtime.SubProgressMonitor;
import org.eclipse.core.runtime.Preferences.IPropertyChangeListener;
import org.eclipse.core.runtime.Preferences.PropertyChangeEvent;
import org.eclipse.core.runtime.jobs.ISchedulingRule;
import org.eclipse.core.runtime.jobs.Job;
import org.eclipse.core.runtime.preferences.IEclipsePreferences.IPreferenceChangeListener;
@ -154,6 +158,8 @@ public class PDOMManager implements IWritableIndexManager, IListener {
// the model listener is attached outside of the job in
// order to avoid a race condition where it goes unnoticed
// that new projects are being created
initializeDatabaseCache();
final CoreModel model = CoreModel.getDefault();
model.addElementChangedListener(fCModelListener);
@ -164,6 +170,32 @@ public class PDOMManager implements IWritableIndexManager, IListener {
}
}
private void initializeDatabaseCache() {
adjustCacheSize();
CCorePlugin.getDefault().getPluginPreferences().addPropertyChangeListener(
new IPropertyChangeListener() {
public void propertyChange(PropertyChangeEvent event) {
String prop= event.getProperty();
if (prop.equals(CCorePreferenceConstants.INDEX_DB_CACHE_SIZE_PCT) ||
prop.equals(CCorePreferenceConstants.MAX_INDEX_DB_CACHE_SIZE_MB)) {
adjustCacheSize();
}
}
}
);
}
protected void adjustCacheSize() {
final Preferences prefs= CCorePlugin.getDefault().getPluginPreferences();
int cachePct= prefs.getInt(CCorePreferenceConstants.INDEX_DB_CACHE_SIZE_PCT);
int cacheMax= prefs.getInt(CCorePreferenceConstants.MAX_INDEX_DB_CACHE_SIZE_MB);
cachePct= Math.max(1, Math.min(50, cachePct)); // 1%-50%
cacheMax= Math.max(1, cacheMax); // >= 1mb
long m1= Runtime.getRuntime().maxMemory()/100L * cachePct;
long m2= Math.min(m1, cacheMax * 1024L * 1024L);
ChunkCache.getSharedInstance().setMaxSize(m2);
}
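To make the clamping concrete, here is a self-contained worked example of the arithmetic above, using the defaults declared later in CCorePreferenceConstants (10% of the heap, capped at 64 MB) and an assumed 256 MB maximum heap:

public class CacheSizeExample {
    public static void main(String[] args) {
        long maxMemory = 256L * 1024 * 1024;              // pretend -Xmx256m
        int cachePct = Math.max(1, Math.min(50, 10));     // clamp to 1..50 -> 10
        int cacheMax = Math.max(1, 64);                   // at least 1 MB  -> 64
        long m1 = maxMemory / 100L * cachePct;            // 10% of the heap
        long m2 = Math.min(m1, cacheMax * 1024L * 1024L); // cap at 64 MB
        System.out.println(m2);                           // 26843540 bytes, ~25.6 MB
    }
}

The result is passed to ChunkCache.getSharedInstance().setMaxSize(...), which converts the byte budget into a whole number of chunk-sized cache slots.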
public IndexProviderManager getIndexProviderManager() {
return manager;
}
@ -527,28 +559,53 @@ public class PDOMManager implements IWritableIndexManager, IListener {
}
}
}
public void removeProject(ICProject project) {
public void deleteProject(ICProject cproject) {
removeProject(cproject, true);
}
public void closeProject(ICProject cproject) {
removeProject(cproject, false);
}
private void removeProject(ICProject project, final boolean delete) {
IPDOMIndexer indexer= getIndexer(project);
if (indexer != null) {
stopIndexer(indexer);
}
unregisterPreferenceListener(project);
}
WritablePDOM pdom= null;
synchronized (fProjectToPDOM) {
IProject rproject= project.getProject();
pdom = (WritablePDOM) fProjectToPDOM.remove(rproject);
}
public void deleteProject(ICProject cproject, IResourceDelta delta) {
// Project is about to be deleted. Stop all indexing tasks for it
IPDOMIndexer indexer = getIndexer(cproject);
if (indexer != null) {
stopIndexer(indexer);
}
unregisterPreferenceListener(cproject);
// remove entry for project from PDOM map
synchronized (fProjectToPDOM) {
IProject project= cproject.getProject();
fProjectToPDOM.remove(project);
}
if (pdom != null) {
final WritablePDOM finalpdom= pdom;
Job job= new Job(Messages.PDOMManager_ClosePDOMJob) {
protected IStatus run(IProgressMonitor monitor) {
try {
finalpdom.acquireWriteLock();
try {
finalpdom.close();
if (delete) {
finalpdom.getDB().getLocation().delete();
}
} catch (CoreException e) {
CCorePlugin.log(e);
}
finally {
finalpdom.releaseWriteLock();
}
} catch (InterruptedException e) {
}
return Status.OK_STATUS;
}
};
job.setSystem(true);
job.schedule();
}
}
private void stopIndexer(IPDOMIndexer indexer) {
@ -872,23 +929,26 @@ public class PDOMManager implements IWritableIndexManager, IListener {
// overwrite internal location representations
final WritablePDOM newPDOM = new WritablePDOM(targetLocation, pdom.getLocationConverter());
newPDOM.acquireWriteLock();
try {
List notConverted= newPDOM.rewriteLocations(newConverter);
// remove content where converter returns null
for(Iterator i = notConverted.iterator(); i.hasNext(); ) {
PDOMFile file = (PDOMFile) i.next();
file.clear();
newPDOM.acquireWriteLock();
try {
List notConverted= newPDOM.rewriteLocations(newConverter);
// remove content where converter returns null
for(Iterator i = notConverted.iterator(); i.hasNext(); ) {
PDOMFile file = (PDOMFile) i.next();
file.clear();
}
// ensure fragment id has a sensible value, in case callees do not
// overwrite their own values
String oldId= pdom.getProperty(IIndexFragment.PROPERTY_FRAGMENT_ID);
newPDOM.setProperty(IIndexFragment.PROPERTY_FRAGMENT_ID, "exported."+oldId); //$NON-NLS-1$
} finally {
newPDOM.releaseWriteLock();
}
// ensure fragment id has a sensible value, in case callees do not
// overwrite their own values
String oldId= pdom.getProperty(IIndexFragment.PROPERTY_FRAGMENT_ID);
newPDOM.setProperty(IIndexFragment.PROPERTY_FRAGMENT_ID, "exported."+oldId); //$NON-NLS-1$
} finally {
newPDOM.releaseWriteLock();
newPDOM.close();
}
} catch(IOException ioe) {
throw new CoreException(CCorePlugin.createStatus(ioe.getMessage()));

TeamPDOMExportOperation.java

@ -96,8 +96,13 @@ public class TeamPDOMExportOperation implements IWorkspaceRunnable {
// create checksums
PDOM pdom= new PDOM(tmpPDOM, converter);
monitor.setTaskName(Messages.Checksums_taskComputeChecksums);
createChecksums(fProject, pdom, tmpChecksums, subMonitor(monitor, 94));
try {
monitor.setTaskName(Messages.Checksums_taskComputeChecksums);
createChecksums(fProject, pdom, tmpChecksums, subMonitor(monitor, 94));
}
finally {
pdom.close();
}
// create archive
createArchive(tmpPDOM, tmpChecksums);
@ -143,11 +148,6 @@ public class TeamPDOMExportOperation implements IWorkspaceRunnable {
}
finally {
pdom.releaseReadLock();
try {
pdom.getDB().close();
} catch (IOException e) {
CCorePlugin.log(e);
}
}
int i=0;
IWorkspaceRoot root= ResourcesPlugin.getWorkspace().getRoot();

WritablePDOM.java

@ -26,6 +26,7 @@ import org.eclipse.cdt.core.index.IIndexFileLocation;
import org.eclipse.cdt.core.index.IIndexLocationConverter;
import org.eclipse.cdt.internal.core.index.IIndexFragmentFile;
import org.eclipse.cdt.internal.core.index.IWritableIndexFragment;
import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
import org.eclipse.cdt.internal.core.pdom.db.DBProperties;
import org.eclipse.cdt.internal.core.pdom.db.IBTreeVisitor;
import org.eclipse.cdt.internal.core.pdom.dom.PDOMBinding;
@ -36,8 +37,13 @@ import org.eclipse.core.runtime.CoreException;
public class WritablePDOM extends PDOM implements IWritableIndexFragment {
public WritablePDOM(File dbPath, IIndexLocationConverter locationConverter) throws CoreException {
super(dbPath, locationConverter);
this(dbPath, locationConverter, ChunkCache.getSharedInstance());
}
public WritablePDOM(File dbPath, IIndexLocationConverter locationConverter, ChunkCache cache) throws CoreException {
super(dbPath, locationConverter, cache);
}
public IIndexFragmentFile addFile(IIndexFileLocation location) throws CoreException {
return super.addFile(location);
}

Chunk.java

@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2005, 2006 QNX Software Systems and others.
* Copyright (c) 2005, 2007 QNX Software Systems and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
@ -13,113 +13,111 @@
package org.eclipse.cdt.internal.core.pdom.db;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel.MapMode;
import java.util.Set;
import java.nio.ByteBuffer;
import org.eclipse.core.runtime.CoreException;
/**
* @author Doug Schaefer
*
* Caches the content of a piece of the database.
*/
public class Chunk {
final class Chunk {
final private ByteBuffer fBuffer;
private MappedByteBuffer buffer;
final Database fDatabase;
final int fSequenceNumber;
// Cache info
private Database db;
int index;
Chunk(RandomAccessFile file, int offset) throws CoreException {
boolean fCacheHitFlag= false;
boolean fDirty= false;
boolean fWritable= false;
int fCacheIndex= -1;
Chunk(Database db, int sequenceNumber) throws CoreException {
fDatabase= db;
fBuffer= ByteBuffer.allocate(Database.CHUNK_SIZE);
fSequenceNumber= sequenceNumber;
try {
index = offset / Database.CHUNK_SIZE;
buffer = file.getChannel().map(MapMode.READ_WRITE, offset, Database.CHUNK_SIZE);
fDatabase.getChannel().read(fBuffer, fSequenceNumber*Database.CHUNK_SIZE);
} catch (IOException e) {
throw new CoreException(new DBStatus(e));
}
}
void flush() throws CoreException {
try {
fBuffer.position(0);
fDatabase.getChannel().write(fBuffer, fSequenceNumber*Database.CHUNK_SIZE);
} catch (IOException e) {
throw new CoreException(new DBStatus(e));
}
fDirty= false;
}
public void putByte(int offset, byte value) {
buffer.put(offset % Database.CHUNK_SIZE, value);
fDirty= true;
fBuffer.put(offset % Database.CHUNK_SIZE, value);
}
public byte getByte(int offset) {
return buffer.get(offset % Database.CHUNK_SIZE);
return fBuffer.get(offset % Database.CHUNK_SIZE);
}
public byte[] getBytes(int offset, int length) {
byte[] bytes = new byte[length];
buffer.position(offset % Database.CHUNK_SIZE);
buffer.get(bytes, 0, length);
fBuffer.position(offset % Database.CHUNK_SIZE);
fBuffer.get(bytes, 0, length);
return bytes;
}
public void putBytes(int offset, byte[] bytes) {
buffer.position(offset % Database.CHUNK_SIZE);
buffer.put(bytes, 0, bytes.length);
fDirty= true;
fBuffer.position(offset % Database.CHUNK_SIZE);
fBuffer.put(bytes, 0, bytes.length);
}
public void putInt(int offset, int value) {
buffer.putInt(offset % Database.CHUNK_SIZE, value);
fDirty= true;
fBuffer.putInt(offset % Database.CHUNK_SIZE, value);
}
public int getInt(int offset) {
return buffer.getInt(offset % Database.CHUNK_SIZE);
return fBuffer.getInt(offset % Database.CHUNK_SIZE);
}
public void putShort(int offset, short value) {
buffer.putShort(offset % Database.CHUNK_SIZE, value);
fDirty= true;
fBuffer.putShort(offset % Database.CHUNK_SIZE, value);
}
public short getShort(int offset) {
return buffer.getShort(offset % Database.CHUNK_SIZE);
return fBuffer.getShort(offset % Database.CHUNK_SIZE);
}
public long getLong(int offset) {
return buffer.getLong(offset % Database.CHUNK_SIZE);
return fBuffer.getLong(offset % Database.CHUNK_SIZE);
}
public void putLong(int offset, long value) {
buffer.putLong(offset % Database.CHUNK_SIZE, value);
fDirty= true;
fBuffer.putLong(offset % Database.CHUNK_SIZE, value);
}
public void putChar(int offset, char value) {
buffer.putChar(offset % Database.CHUNK_SIZE, value);
fDirty= true;
fBuffer.putChar(offset % Database.CHUNK_SIZE, value);
}
public char getChar(int offset) {
return buffer.getChar(offset % Database.CHUNK_SIZE);
return fBuffer.getChar(offset % Database.CHUNK_SIZE);
}
public void getCharArray(int offset, char[] result) {
buffer.position(offset % Database.CHUNK_SIZE);
buffer.asCharBuffer().get(result);
fBuffer.position(offset % Database.CHUNK_SIZE);
fBuffer.asCharBuffer().get(result);
}
void clear(int offset, int length) {
buffer.position(offset % Database.CHUNK_SIZE);
buffer.put(new byte[length]);
}
/**
* Allow this Chunk to be reclaimed. Objects allocated by this Chunk
* may be registered with a ReferenceQueue to allow for notification
* on deallocation. References registered with the queue are added to
* the Set references.
*
* @param queue ReferenceQueue to register allocated objects with, or
* null if notification is not required.
* @param references Populated with references which were registered
* with the queue.
*/
void reclaim(ReferenceQueue queue, Set references) {
if (queue != null) {
references.add(new WeakReference(buffer, queue));
}
buffer = null;
fDirty= true;
fBuffer.position(offset % Database.CHUNK_SIZE);
fBuffer.put(new byte[length]);
}
}
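This is the core of the fix for 170542: a chunk is now an ordinary heap ByteBuffer that is explicitly read from and written to the file at fSequenceNumber * CHUNK_SIZE, instead of a MappedByteBuffer whose mapping can only be released by the garbage collector. A minimal sketch of the pattern, with a hypothetical file name and chunk size:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class ChunkIOSketch {
    static final int CHUNK_SIZE = 4096;

    public static void main(String[] args) throws IOException {
        try (RandomAccessFile file = new RandomAccessFile("sketch.dat", "rw")) {
            FileChannel channel = file.getChannel();
            int sequenceNumber = 0;

            ByteBuffer buffer = ByteBuffer.allocate(CHUNK_SIZE);
            channel.read(buffer, (long) sequenceNumber * CHUNK_SIZE);  // load chunk

            buffer.putInt(0, 42);                  // mutate in memory (fDirty above)
            buffer.position(0);
            channel.write(buffer, (long) sequenceNumber * CHUNK_SIZE); // flush()
        }
    }
}

Because nothing is memory-mapped, truncating or deleting the database file no longer has to wait for buffers to be garbage-collected, which is exactly what the old Database.truncate() further down had to orchestrate with a ReferenceQueue.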

ChunkCache.java

@ -0,0 +1,134 @@
/*******************************************************************************
* Copyright (c) 2007 Wind River Systems, Inc. and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Markus Schorn - initial API and implementation
*******************************************************************************/
package org.eclipse.cdt.internal.core.pdom.db;
public final class ChunkCache {
private static ChunkCache sSharedInstance= new ChunkCache();
private Chunk[] fPageTable;
private boolean fTableIsFull= false;
private int fPointer= 0;
public static ChunkCache getSharedInstance() {
return sSharedInstance;
}
public ChunkCache() {
this(5*1024*1024);
}
public ChunkCache(long maxSize) {
fPageTable= new Chunk[computeLength(maxSize)];
}
public synchronized void add(Chunk chunk, boolean writable) {
if (writable) {
chunk.fWritable= true;
}
if (chunk.fCacheIndex >= 0) {
chunk.fCacheHitFlag= true;
return;
}
if (fTableIsFull) {
evictChunk();
chunk.fCacheIndex= fPointer;
fPageTable[fPointer]= chunk;
}
else {
chunk.fCacheIndex= fPointer;
fPageTable[fPointer]= chunk;
fPointer++;
if (fPointer == fPageTable.length) {
fPointer= 0;
fTableIsFull= true;
}
}
}
/**
* Evicts a chunk from the page table and the chunk table.
* After this method returns, {@link #fPointer} will contain
* the index of the evicted chunk within the page table.
*/
private void evictChunk() {
/*
* Use the CLOCK algorithm to determine which chunk to evict.
* i.e., if the chunk in the current slot of the page table has been
* recently referenced (i.e. the reference flag is set), unset the
* reference flag and move to the next slot. Otherwise, evict the
* chunk in the current slot.
*/
while (true) {
Chunk chunk = fPageTable[fPointer];
if (chunk.fCacheHitFlag) {
chunk.fCacheHitFlag= false;
fPointer= (fPointer + 1) % fPageTable.length;
} else {
chunk.fDatabase.releaseChunk(chunk);
chunk.fCacheIndex= -1;
fPageTable[fPointer] = null;
return;
}
}
}
public synchronized void remove(Chunk chunk) {
final int idx= chunk.fCacheIndex;
if (idx >= 0) {
if (fTableIsFull) {
fPointer= fPageTable.length-1;
fTableIsFull= false;
}
else {
fPointer--;
}
chunk.fCacheIndex= -1;
final Chunk move= fPageTable[fPointer];
fPageTable[idx]= move;
move.fCacheIndex= idx;
fPageTable[fPointer]= null;
}
}
/**
* Clears the page table and changes it to hold chunks with
* maximum total memory of <code>maxSize</code>.
* @param maxSize the total size of the chunks in bytes.
*/
public synchronized void setMaxSize(long maxSize) {
final int newLength= computeLength(maxSize);
final int oldLength= fTableIsFull ? fPageTable.length : fPointer;
if (newLength > oldLength) {
Chunk[] newTable= new Chunk[newLength];
System.arraycopy(fPageTable, 0, newTable, 0, oldLength);
fTableIsFull= false;
fPointer= oldLength;
}
else {
for (int i=newLength; i<oldLength; i++) {
final Chunk chunk= fPageTable[i];
chunk.fDatabase.releaseChunk(chunk);
chunk.fCacheIndex= -1;
}
Chunk[] newTable= new Chunk[newLength];
System.arraycopy(fPageTable, 0, newTable, 0, newLength);
fTableIsFull= true;
fPointer= 0;
}
}
private int computeLength(long maxSize) {
long maxLength= Math.min(maxSize/Database.CHUNK_SIZE, Integer.MAX_VALUE);
return Math.max(1, (int)maxLength);
}
}
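evictChunk() implements the classic CLOCK (second-chance) replacement policy: every slot carries a reference bit, and the hand sweeps the table, clearing set bits until it finds a slot whose bit is already clear. A self-contained toy version over plain page numbers, to make the mechanics easy to follow (illustrative only, not the CDT class):

import java.util.Arrays;

public class ClockSketch {
    private final int[] slots;        // page table, -1 means empty
    private final boolean[] hitFlag;  // "recently referenced" bit per slot
    private int hand;                 // the clock pointer
    private int filled;

    ClockSketch(int capacity) {
        slots = new int[capacity];
        hitFlag = new boolean[capacity];
        Arrays.fill(slots, -1);
    }

    // Reference a page: mark it on a hit, otherwise insert it, evicting if full.
    void reference(int page) {
        for (int i = 0; i < filled; i++) {
            if (slots[i] == page) { hitFlag[i] = true; return; } // cache hit
        }
        if (filled < slots.length) {  // table not yet full
            slots[filled++] = page;
            return;
        }
        while (hitFlag[hand]) {       // second chance: clear bit, advance hand
            hitFlag[hand] = false;
            hand = (hand + 1) % slots.length;
        }
        slots[hand] = page;           // evict the victim, install the new page
        hand = (hand + 1) % slots.length;
    }

    public static void main(String[] args) {
        ClockSketch c = new ClockSketch(3);
        c.reference(1); c.reference(2); c.reference(3);
        c.reference(1);  // marks page 1 as recently used
        c.reference(4);  // evicts page 2, the first slot without a hit flag
        System.out.println(Arrays.toString(c.slots)); // [1, 4, 3]
    }
}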

Database.java

@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2005, 2006 QNX Software Systems and others.
* Copyright (c) 2005, 2007 QNX Software Systems and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
@ -16,9 +16,9 @@ package org.eclipse.cdt.internal.core.pdom.db;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.ref.ReferenceQueue;
import java.util.HashSet;
import java.util.Set;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Iterator;
import org.eclipse.cdt.core.CCorePlugin;
import org.eclipse.core.runtime.CoreException;
@ -61,10 +61,12 @@ public class Database {
private final File location;
private final RandomAccessFile file;
Chunk[] toc;
private boolean fWritable= false;
private Chunk[] chunks;
private long malloced;
private long freed;
private ChunkCache fCache;
// public for tests only, you shouldn't need these
public static final int VERSION_OFFSET = 0;
@ -78,32 +80,36 @@ public class Database {
public static final int MAX_SIZE = CHUNK_SIZE - 4; // Room for overhead
public Database(File location) throws CoreException {
public Database(File location, ChunkCache cache, int version) throws CoreException {
try {
this.location = location;
this.file = new RandomAccessFile(location, "rw"); //$NON-NLS-1$
fCache= cache;
// Allocate chunk table, make sure we have at least one
long nChunks = file.length() / CHUNK_SIZE;
chunks = new Chunk[(int)nChunks];
if (nChunks == 0) {
file.seek(0);
file.write(new byte[CHUNK_SIZE]); // the header chunk
++nChunks;
setWritable();
createNewChunk();
setVersion(version);
setReadOnly();
}
toc = new Chunk[(int)nChunks];
toc[0] = new Chunk(file, 0);
} catch (IOException e) {
throw new CoreException(new DBStatus(e));
}
}
public int getVersion() {
return toc[0].getInt(0);
public FileChannel getChannel() {
return file.getChannel();
}
public void setVersion(int version) {
toc[0].putInt(0, version);
public int getVersion() throws CoreException {
return getChunk(0).getInt(0);
}
public void setVersion(int version) throws CoreException {
getChunk(0).putInt(0, version);
}
/**
@ -111,93 +117,66 @@ public class Database {
* @throws CoreException
*/
public void clear(long timeout) throws CoreException {
// Clear out the memory headers
toc[0].clear(4, DATA_AREA - 4);
int version= getVersion();
removeChunksFromCache();
if (!truncate(timeout)) {
// Truncation timed out so the database size couldn't be changed.
// The best we can do is mark all chunks as unallocated blocks.
// clear out memory headers
Chunk header= getChunk(0);
setVersion(version);
header.clear(4, DATA_AREA - 4);
chunks = new Chunk[] {header};
// Since the block list grows at the head, add all non-header
// chunks backwards to ensure list of blocks is ordered first
// to last.
for (int block = (toc.length - 1) * CHUNK_SIZE; block > 0; block -= CHUNK_SIZE) {
addBlock(getChunk(block), CHUNK_SIZE, block);
}
try {
getChannel().truncate(CHUNK_SIZE);
}
catch (IOException e) {
CCorePlugin.log(e);
}
malloced = freed = 0;
}
/**
* Truncate the database as small as possible to reclaim disk space.
* This method returns false if truncation does not succeed within the
* given timeout period (in milliseconds). A timeout of 0 will cause
* this method to block until the database is successfully truncated.
*
* @param timeout maximum amount of milliseconds to wait before giving up;
* 0 means wait indefinitely.
* @return true if truncation succeeds; false if the operation times out.
* @throws CoreException if an IO error occurs during truncation
*/
private boolean truncate(long timeout) throws CoreException {
// Queue all the chunks to be reclaimed.
ReferenceQueue queue = new ReferenceQueue();
Set references = new HashSet();
int totalChunks = toc.length;
for (int i = 0; i < toc.length; i++) {
if (toc[i] != null) {
toc[i].reclaim(queue, references);
toc[i] = null;
private void removeChunksFromCache() {
synchronized (fCache) {
for (int i = 0; i < chunks.length; i++) {
Chunk chunk= chunks[i];
if (chunk != null) {
fCache.remove(chunk);
chunks[i]= null;
}
}
}
System.gc();
try {
// Wait for each chunk to be reclaimed.
int totalReclaimed = references.size();
while (totalReclaimed > 0) {
queue.remove(timeout);
totalReclaimed--;
}
// Truncate everything but the header chunk.
try {
file.getChannel().truncate(CHUNK_SIZE);
// Reinitialize header chunk.
toc = new Chunk[] { new Chunk(file, 0) };
return true;
} catch (IOException e) {
// Bug 168420:
// Truncation failed so we'll reuse the existing
// file.
toc = new Chunk[totalChunks];
toc[0] = new Chunk(file, 0);
return false;
}
}
catch (InterruptedException e) {
// Truncation took longer than we wanted, so we'll
// reinitialize the header chunk and leave the file
// size alone.
toc[0] = new Chunk(file, 0);
return false;
}
}
/**
* Return the Chunk that contains the given offset.
*
* @param offset
* @return
* @throws CoreException
*/
public Chunk getChunk(int offset) throws CoreException {
int index = offset / CHUNK_SIZE;
Chunk chunk = toc[index];
if (chunk == null) {
chunk = toc[index] = new Chunk(file, index * CHUNK_SIZE);
// for performance reasons, try to find the chunk and mark it without
// synchronizing. This means that we might pick up a chunk that
// has been paged out, which is ok.
// Furthermore the hit flag may not be seen by the clock algorithm,
// which might lead to the eviction of a chunk. With the next
// cache miss we are in sync again, though.
Chunk chunk = chunks[index];
if (chunk != null && chunk.fWritable == fWritable) {
chunk.fCacheHitFlag= true;
return chunk;
}
return chunk;
// here is the safe code that has to be performed if we cannot
// get ahold of the chunk.
synchronized(fCache) {
chunk= chunks[index];
if (chunk == null) {
chunk = chunks[index] = new Chunk(this, index);
}
fCache.add(chunk, fWritable);
return chunk;
}
}
/**
@ -229,11 +208,10 @@ public class Database {
// get the block
Chunk chunk;
if (freeblock == 0) {
// Out of memory, allocate a new chunk
int i = createChunk();
chunk = toc[i];
freeblock = i * CHUNK_SIZE;
// allocate a new chunk
freeblock= createNewChunk();
blocksize = CHUNK_SIZE;
chunk = getChunk(freeblock);
} else {
chunk = getChunk(freeblock);
removeBlock(chunk, blocksize, freeblock);
@ -254,28 +232,27 @@ public class Database {
return freeblock + 4;
}
private int createChunk() throws CoreException {
private int createNewChunk() throws CoreException {
try {
Chunk[] oldtoc = toc;
Chunk[] oldtoc = chunks;
int n = oldtoc.length;
int offset = n * CHUNK_SIZE;
file.seek(offset);
file.write(new byte[CHUNK_SIZE]);
toc = new Chunk[n + 1];
System.arraycopy(oldtoc, 0, toc, 0, n);
toc[n] = new Chunk(file, offset);
return n;
chunks = new Chunk[n + 1];
System.arraycopy(oldtoc, 0, chunks, 0, n);
return offset;
} catch (IOException e) {
throw new CoreException(new DBStatus(e));
}
}
private int getFirstBlock(int blocksize) {
return toc[0].getInt((blocksize / MIN_SIZE) * INT_SIZE);
private int getFirstBlock(int blocksize) throws CoreException {
return getChunk(0).getInt((blocksize / MIN_SIZE) * INT_SIZE);
}
private void setFirstBlock(int blocksize, int block) {
toc[0].putInt((blocksize / MIN_SIZE) * INT_SIZE, block);
private void setFirstBlock(int blocksize, int block) throws CoreException {
getChunk(0).putInt((blocksize / MIN_SIZE) * INT_SIZE, block);
}
private void removeBlock(Chunk chunk, int blocksize, int block) throws CoreException {
@ -321,53 +298,43 @@ public class Database {
}
public void putByte(int offset, byte value) throws CoreException {
Chunk chunk = getChunk(offset);
chunk.putByte(offset, value);
getChunk(offset).putByte(offset, value);
}
public byte getByte(int offset) throws CoreException {
Chunk chunk = getChunk(offset);
return chunk.getByte(offset);
return getChunk(offset).getByte(offset);
}
public void putInt(int offset, int value) throws CoreException {
Chunk chunk = getChunk(offset);
chunk.putInt(offset, value);
getChunk(offset).putInt(offset, value);
}
public int getInt(int offset) throws CoreException {
Chunk chunk = getChunk(offset);
return chunk.getInt(offset);
return getChunk(offset).getInt(offset);
}
public void putShort(int offset, short value) throws CoreException {
Chunk chunk = getChunk(offset);
chunk.putShort(offset, value);
getChunk(offset).putShort(offset, value);
}
public short getShort(int offset) throws CoreException {
Chunk chunk = getChunk(offset);
return chunk.getShort(offset);
return getChunk(offset).getShort(offset);
}
public void putLong(int offset, long value) throws CoreException {
Chunk chunk= getChunk(offset);
chunk.putLong(offset, value);
getChunk(offset).putLong(offset, value);
}
public long getLong(int offset) throws CoreException {
Chunk chunk = getChunk(offset);
return chunk.getLong(offset);
return getChunk(offset).getLong(offset);
}
public void putChar(int offset, char value) throws CoreException {
Chunk chunk = getChunk(offset);
chunk.putChar(offset, value);
getChunk(offset).putChar(offset, value);
}
public char getChar(int offset) throws CoreException {
Chunk chunk = getChunk(offset);
return chunk.getChar(offset);
return getChunk(offset).getChar(offset);
}
public IString newString(String string) throws CoreException {
@ -392,15 +359,15 @@ public class Database {
return new ShortString(this, offset);
}
public int getNumChunks() {
return toc.length;
public int getChunkCount() {
return chunks.length;
}
public void reportFreeBlocks() throws CoreException {
System.out.println("Allocated size: " + toc.length * CHUNK_SIZE); //$NON-NLS-1$
System.out.println("Allocated size: " + chunks.length * CHUNK_SIZE); //$NON-NLS-1$
System.out.println("malloc'ed: " + malloced); //$NON-NLS-1$
System.out.println("free'd: " + freed); //$NON-NLS-1$
System.out.println("wasted: " + (toc.length * CHUNK_SIZE - (malloced - freed))); //$NON-NLS-1$
System.out.println("wasted: " + (chunks.length * CHUNK_SIZE - (malloced - freed))); //$NON-NLS-1$
System.out.println("Free blocks"); //$NON-NLS-1$
for (int bs = MIN_SIZE; bs <= CHUNK_SIZE; bs += MIN_SIZE) {
int count = 0;
@ -413,15 +380,23 @@ public class Database {
System.out.println("Block size: " + bs + "=" + count); //$NON-NLS-1$ //$NON-NLS-2$
}
}
/**
* Closes the database, releasing the file lock. This is public for testing purposes only.
* Closes the database.
* <p>
* The behaviour of any further calls to the Database is undefined
* @throws IOException
* @throws CoreException
*/
public void close() throws IOException {
file.close();
public void close() throws CoreException {
setReadOnly();
removeChunksFromCache();
chunks= new Chunk[0];
try {
file.close();
} catch (IOException e) {
throw new CoreException(new DBStatus(e));
}
}
/**
@ -430,4 +405,56 @@ public class Database {
public File getLocation() {
return location;
}
/**
* Called from any thread via the cache, protected by {@link #fCache}.
*/
void releaseChunk(Chunk chunk) {
if (!chunk.fWritable)
chunks[chunk.fSequenceNumber]= null;
}
/**
* Returns the cache used for this database.
* @since 4.0
*/
public ChunkCache getChunkCache() {
return fCache;
}
public void setWritable() {
fWritable= true;
}
public void setReadOnly() throws CoreException {
if (fWritable) {
fWritable= false;
flushDirtyChunks();
}
}
public void flushDirtyChunks() throws CoreException {
ArrayList dirtyChunks= new ArrayList();
synchronized (fCache) {
for (int i = 0; i < chunks.length; i++) {
Chunk chunk= chunks[i];
if (chunk != null && chunk.fWritable) {
chunk.fWritable= false;
if (chunk.fCacheIndex < 0) {
chunks[i]= null;
}
if (chunk.fDirty) {
dirtyChunks.add(chunk);
}
}
}
}
if (!dirtyChunks.isEmpty()) {
for (Iterator it = dirtyChunks.iterator(); it.hasNext();) {
Chunk chunk = (Chunk) it.next();
chunk.flush();
}
}
}
}
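Putting the new Database surface together: chunks picked up between setWritable() and setReadOnly() are writable, and setReadOnly() is what forces dirty chunks to disk via flushDirtyChunks(). A hedged lifecycle sketch; the write to DATA_AREA is purely illustrative:

import java.io.File;

import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
import org.eclipse.cdt.internal.core.pdom.db.Database;
import org.eclipse.core.runtime.CoreException;

class DatabaseLifecycleSketch {
    static void writeAndClose(File location) throws CoreException {
        // A fresh file is created with one header chunk and the given version.
        Database db = new Database(location, new ChunkCache(), 1);
        db.setWritable();                  // chunks loaded from here on are writable
        db.putInt(Database.DATA_AREA, 42); // marks the owning chunk dirty
        db.setReadOnly();                  // flushes all dirty chunks to disk
        db.close();                        // evicts cached chunks, closes the file
    }
}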

messages.properties

@ -12,6 +12,7 @@ WritablePDOM_error_unknownLinkage=AST specifies unknown linkage ''{0}''
PDOMManager_notifyJob_label=Notify Index Change Listeners
PDOMManager_JoinIndexerTask=Join Indexer
PDOMManager_StartJob_name=Initialize Indexing
PDOMManager_ClosePDOMJob=Close database
PDOMManager_notifyTask_message=Notify Listeners
PDOMManager_indexMonitorDetail={0}/{1} sources, {2} headers
PDOMManager_ExistingFileCollides=A pdom already exists at location {0}

CCorePreferenceConstants.java

@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2000, 2005 QNX Software Systems and others.
* Copyright (c) 2000, 2007 QNX Software Systems and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
@ -7,6 +7,7 @@
*
* Contributors:
* QNX Software Systems - Initial API and implementation
* Markus Schorn (Wind River Systems)
*******************************************************************************/
package org.eclipse.cdt.core;
@ -44,5 +45,24 @@ public class CCorePreferenceConstants {
* Default code formatter
*/
public static final String DEFAULT_CODE_FORMATTER = CCorePlugin.PLUGIN_ID + ".defaultCodeFormatter"; //$NON-NLS-1$
/**
* Cache size for the index in percentage of max memory.
*/
public static final String INDEX_DB_CACHE_SIZE_PCT = CCorePlugin.PLUGIN_ID + ".indexDBCacheSizePct"; //$NON-NLS-1$
/**
* Default cache size of the index-db in percentage of max memory.
*/
public static final String DEFAULT_INDEX_DB_CACHE_SIZE_PCT = "10"; //$NON-NLS-1$
/**
* Absolute maximum size of the index-db in megabytes.
*/
public static final String MAX_INDEX_DB_CACHE_SIZE_MB = CCorePlugin.PLUGIN_ID + ".maxIndexDBCacheSizeMB"; //$NON-NLS-1$
/**
* Default absolute maximum size of the index-db in megabytes.
*/
public static final String DEFAULT_MAX_INDEX_DB_CACHE_SIZE_MB = "64"; //$NON-NLS-1$
}

CCorePreferenceInitializer.java

@ -42,8 +42,9 @@ public class CCorePreferenceInitializer extends AbstractPreferenceInitializer {
defaultOptionsMap.put(CCorePreferenceConstants.TRANSLATION_TASK_TAGS, CCorePreferenceConstants.DEFAULT_TASK_TAG);
defaultOptionsMap.put(CCorePreferenceConstants.TRANSLATION_TASK_PRIORITIES, CCorePreferenceConstants.DEFAULT_TASK_PRIORITY);
defaultOptionsMap.put(CCorePreferenceConstants.CODE_FORMATTER, CCorePreferenceConstants.DEFAULT_CODE_FORMATTER);
defaultOptionsMap.put(CCorePreferenceConstants.INDEX_DB_CACHE_SIZE_PCT, CCorePreferenceConstants.DEFAULT_INDEX_DB_CACHE_SIZE_PCT);
defaultOptionsMap.put(CCorePreferenceConstants.MAX_INDEX_DB_CACHE_SIZE_MB, CCorePreferenceConstants.DEFAULT_MAX_INDEX_DB_CACHE_SIZE_MB);
// Store default values to default preferences
IEclipsePreferences defaultPreferences = ((IScopeContext) new DefaultScope()).getNode(CCorePlugin.PLUGIN_ID);
for (Iterator iter = defaultOptionsMap.entrySet().iterator(); iter.hasNext();) {