Generate indexes in memory

Victor Shcherb 2012-10-22 00:57:52 +02:00
parent 35f6a1757a
commit b6a7320aa6
6 changed files with 60 additions and 20 deletions


@@ -38,7 +38,7 @@ public class DataTileManager<T> {
 	@SuppressWarnings("rawtypes")
 	public int getObjectsCount(){
 		int x = 0;
-		for(List s : objects.values()){
+		for(List s : objects.valueCollection()){
 			x += s.size();
 		}
 		return x;
@@ -53,7 +53,7 @@ public class DataTileManager<T> {
 	@SuppressWarnings({ "rawtypes", "unchecked" })
 	public List<T> getAllObjects(){
 		List<T> l = new ArrayList<T>();
-		for(List s : objects.values()){
+		for(List s : objects.valueCollection()){
 			l.addAll(s);
 		}
 		return l;

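The change above swaps values() for valueCollection(), which suggests the objects field behind DataTileManager moved to a primitive-keyed GNU Trove map; the concrete map type is not shown in this diff, so the following is only a minimal sketch of the new iteration style under that assumption.

import gnu.trove.map.hash.TLongObjectHashMap;

import java.util.ArrayList;
import java.util.List;

public class ValueCollectionSketch {
	public static void main(String[] args) {
		// Assumption: the backing map is a Trove TLongObjectHashMap rather than a java.util.Map.
		TLongObjectHashMap<List<String>> objects = new TLongObjectHashMap<List<String>>();
		List<String> tile = new ArrayList<String>();
		tile.add("node");
		objects.put(12345L, tile);

		// Trove maps expose their values as a Collection via valueCollection(),
		// which is why the for-each loops in the hunks above change.
		int count = 0;
		for (List<String> s : objects.valueCollection()) {
			count += s.size();
		}
		System.out.println(count); // prints 1
	}
}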

@@ -20,7 +20,8 @@ public enum DBDialect {
 	DERBY,
 	H2,
 	NOSQL,
-	SQLITE;
+	SQLITE,
+	SQLITE_IN_MEMORY;
 	public void deleteTableIfExists(String table, Statement stat) throws SQLException {
 		if(this == DERBY){
@@ -87,14 +88,15 @@
 				throw new SQLException(status.ToString());
 			}
 			return dbAccessor;
-		} else if (DBDialect.SQLITE == this) {
+		} else if (DBDialect.SQLITE == this || DBDialect.SQLITE_IN_MEMORY == this) {
 			try {
 				Class.forName("org.sqlite.JDBC");
 			} catch (ClassNotFoundException e) {
 				log.error("Illegal configuration", e);
 				throw new IllegalStateException(e);
 			}
-			Connection connection = DriverManager.getConnection("jdbc:sqlite:" + fileName);
+			Connection connection = DriverManager.getConnection("jdbc:sqlite:" + (DBDialect.SQLITE_IN_MEMORY == this? ":memory:":
+					fileName));
 			Statement statement = connection.createStatement();
 			statement.executeUpdate("PRAGMA synchronous = 0");
 			//no journaling, saves some I/O access, but database can go corrupt

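The new SQLITE_IN_MEMORY dialect only changes the JDBC URL: ":memory:" is used in place of the file name, so the whole database lives in RAM for the lifetime of the connection. A minimal, standalone sketch of that idea with the Xerial SQLite driver follows; the table and rows are illustrative and not part of the commit.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class InMemorySqliteSketch {
	public static void main(String[] args) throws Exception {
		Class.forName("org.sqlite.JDBC");
		// ":memory:" instead of a file name keeps the database entirely in RAM;
		// it disappears when the connection is closed.
		Connection connection = DriverManager.getConnection("jdbc:sqlite::memory:");
		Statement statement = connection.createStatement();
		statement.executeUpdate("PRAGMA synchronous = 0"); // same pragma as in DBDialect
		statement.executeUpdate("CREATE TABLE node(id INTEGER PRIMARY KEY, lat DOUBLE, lon DOUBLE)");
		statement.executeUpdate("INSERT INTO node VALUES (1, 52.5, 13.4)");
		ResultSet rs = statement.executeQuery("SELECT count(*) FROM node");
		rs.next();
		System.out.println(rs.getInt(1)); // prints 1
		rs.close();
		statement.close();
		connection.close();
	}
}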

@@ -699,7 +699,7 @@ public class IndexCreator {
 				deleteDatabaseIndexes);
 		indexMapCreator.commitAndCloseFiles(getRTreeMapIndexNonPackFileName(), getRTreeMapIndexPackFileName(),
 				deleteDatabaseIndexes);
-		indexRouteCreator.commitAndCloseFiles(getRTreeRouteIndexPackFileName(), getRTreeRouteIndexPackFileName(),
+		indexRouteCreator.commitAndCloseFiles(getRTreeRouteIndexNonPackFileName(), getRTreeRouteIndexPackFileName(),
 				deleteDatabaseIndexes);
 		if (mapConnection != null) {
@@ -741,10 +741,10 @@
 	public static void main(String[] args) throws IOException, SAXException, SQLException, InterruptedException {
 		long time = System.currentTimeMillis();
 		IndexCreator creator = new IndexCreator(new File("/home/victor/projects/OsmAnd/data/osm-gen/")); //$NON-NLS-1$
-		// creator.setIndexMap(true);
-		// creator.setIndexAddress(true);
-		// creator.setIndexPOI(true);
-		// creator.setIndexTransport(true);
+		creator.setIndexMap(true);
+		creator.setIndexAddress(true);
+		creator.setIndexPOI(true);
+		creator.setIndexTransport(true);
 		creator.setIndexRouting(true);
 		// creator.deleteDatabaseIndexes = false;


@@ -330,17 +330,17 @@ public class IndexRouteCreator extends AbstractIndexPartCreator {
 		if (rte != null) {
 			RandomAccessFile file = rte.getFileHdr().getFile();
 			file.close();
-			if (rTreeMapIndexNonPackFileName != null) {
-				File f = new File(rTreeMapIndexNonPackFileName);
-				if (f.exists() && deleteDatabaseIndexes) {
-					f.delete();
-				}
+		}
+		if (rTreeMapIndexNonPackFileName != null) {
+			File f = new File(rTreeMapIndexNonPackFileName);
+			if (f.exists() && deleteDatabaseIndexes) {
+				f.delete();
 			}
-			if (rTreeMapIndexPackFileName != null) {
-				File f = new File(rTreeMapIndexPackFileName);
-				if (f.exists() && deleteDatabaseIndexes) {
-					f.delete();
-				}
+		}
+		if (rTreeMapIndexPackFileName != null) {
+			File f = new File(rTreeMapIndexPackFileName);
+			if (f.exists() && deleteDatabaseIndexes) {
+				f.delete();
 			}
 		}
 	}

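The hunk above moves the two temporary-file cleanup blocks out of the if (rte != null) guard, so the rtree pack and non-pack files are removed even when no tree was opened. Both blocks follow the same null-check/exists/delete pattern; the helper below is only a hypothetical illustration of that shared pattern, not code from the commit.

import java.io.File;

public class IndexFileCleanupSketch {
	// Hypothetical helper mirroring the repeated cleanup pattern; the name and
	// the example paths are illustrative only.
	static void deleteIfRequested(String fileName, boolean deleteDatabaseIndexes) {
		if (fileName != null) {
			File f = new File(fileName);
			if (f.exists() && deleteDatabaseIndexes) {
				f.delete();
			}
		}
	}

	public static void main(String[] args) {
		deleteIfRequested("/tmp/route.rtree.npack", true);
		deleteIfRequested("/tmp/route.rtree.pack", true);
	}
}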

@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<batch_process>
+	<process_attributes mapZooms="" renderingTypesFile="" zoomWaySmoothness="2"
+		osmDbDialect="sqlite_in_memory" mapDbDialect="sqlite_in_memory"/>
+	<!-- There are 3 subprocesses:
+		1. Download fresh osm files from the servers to 'directory_for_osm_files' (overriding existing files).
+		2. Generate index files from all files in 'directory_for_osm_files' and put all indexes into 'directory_for_index_files'.
+		3. Upload index files from 'directory_for_index_files' to googlecode.
+		If 'directory_for_uploaded_files' is specified, all uploaded files will be moved into it.
+		All these subprocesses can be run independently, so you can create some files, check them and only then upload them to googlecode,
+		or you can upload any file you already have to googlecode (just put it into 'directory_for_index_files').
+	-->
+	<!-- zoomWaySmoothness - 1-4, typical mapZooms - 8-10;11-12;13-14;15 -->
+	<process directory_for_osm_files=".work/osm" directory_for_index_files="/var/lib/jenkins/indexes" directory_for_generation=".work"
+		skipExistingIndexesAt="/var/lib/jenkins/indexes/uploaded" indexPOI="true" indexMap="true" indexRouting="true" indexTransport="true" indexAddress="true">
+		<!-- Add wget="C:/Program Files/GNUWin32/bin/wget.exe" to the process element to use wget for downloads.
+			On Linux systems, if wget is on your path it can be wget="wget", or you can point to your own wget script:
+			wget="/path/to/script/wget.sh".
+			By default wget is run with &-&-read-timeout=5, which prevents downloads from the cloudmade/geofabrik servers from hanging.
+		-->
+	</process>
+</batch_process>

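The osmDbDialect and mapDbDialect attributes above carry the new dialect as a lower-case string; presumably the batch creator resolves it against the DBDialect enum extended earlier in this commit. The parsing code is not part of the diff, so the sketch below only illustrates one plausible mapping.

public class DialectLookupSketch {
	// Local copy of the constants shown in the DBDialect diff, for illustration only.
	enum DBDialect { DERBY, H2, NOSQL, SQLITE, SQLITE_IN_MEMORY }

	public static void main(String[] args) {
		String attr = "sqlite_in_memory"; // value taken from the batch XML above
		// Assumption: the attribute is resolved case-insensitively against the enum names.
		DBDialect dialect = DBDialect.valueOf(attr.toUpperCase());
		System.out.println(dialect); // prints SQLITE_IN_MEMORY
	}
}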

@@ -0,0 +1,14 @@
+# remove backup and create new backup
+# we should not rm, just do incremental updates for now! rm -rf backup
+# remove all previous files
+mkdir ~/indexes
+mkdir ~/indexes/uploaded
+rm -rf .work
+mkdir .work
+mkdir .work/osm
+if [ -z "$INDEXES_FILE" ]; then INDEXES_FILE="build-scripts/regions/indexes.xml"; echo "$INDEXES_FILE"; fi
+echo "Running java net.osmand.data.index.IndexBatchCreator with $INDEXES_FILE"
+java -XX:+UseParallelGC -Xmx8096M -Xmn512M -Djava.util.logging.config.file=build-scripts/batch-logging.properties -cp "DataExtractionOSM/OsmAndMapCreator.jar:DataExtractionOSM/lib/*.jar" net.osmand.data.index.IndexBatchCreator build-scripts/indexes-batch-generate-inmem.xml "$INDEXES_FILE"