fix performance
git-svn-id: https://osmand.googlecode.com/svn/trunk@610 e29c36b1-1cfa-d876-8d93-3434fc2bb7b8
parent ec94b03c1a
commit 42483aad80

3 changed files with 37 additions and 17 deletions
@@ -9,7 +9,10 @@ import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;

import net.osmand.Algoritms;
import net.osmand.LogUtil;
import net.osmand.osm.MapRenderingTypes;
import net.osmand.osm.MapUtils;

@@ -23,6 +26,8 @@ public class BinaryMapIndexReader {
private List<MapRoot> mapIndexes = new ArrayList<MapRoot>();
private CodedInputStreamRAF codedIS;

private final static Log log = LogUtil.getLog(BinaryMapIndexReader.class);

@@ -189,6 +194,8 @@ public class BinaryMapIndexReader {
}
}
}
log.info("Search is done. Visit " + req.numberOfVisitedObjects + " objects. Read " + req.numberOfAcceptedObjects + " objects.");
log.info("Read " + req.numberOfReadSubtrees + " subtrees. Go through " + req.numberOfAcceptedSubtrees + " subtrees.");
return req.getSearchResults();
}

@@ -200,7 +207,7 @@ public class BinaryMapIndexReader {
int cleft = 0;
int ctop = 0;
int cbottom = 0;

req.numberOfReadSubtrees++;
while(true){
int t = codedIS.readTag();
int tag = WireFormat.getTagFieldNumber(t);
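As an aside, WireFormat.getTagFieldNumber(t) above simply strips the three wire-type bits from a protobuf tag; a small illustration of that fixed relation (the values below are made up for the example, not taken from this commit):

    // protobuf encodes a tag as (fieldNumber << 3) | wireType
    int t = (5 << 3) | 2;   // field number 5, length-delimited wire type
    int field = t >>> 3;    // 5, the same result WireFormat.getTagFieldNumber(t) returns
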
@@ -209,6 +216,8 @@ public class BinaryMapIndexReader {
// coordinates are init
if(cright < req.left || cleft > req.right || ctop > req.bottom || cbottom < req.top){
return;
} else {
req.numberOfAcceptedSubtrees++;
}
}
switch (tag) {
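The condition above is the subtree pruning test this commit instruments: a subtree whose bounding box falls entirely outside the requested rectangle is skipped, and only intersecting subtrees count towards numberOfAcceptedSubtrees. A minimal standalone sketch of the same test (the helper name and its parameters are illustrative, not part of the commit; coordinates are assumed to grow rightwards and downwards as in the code above):

    // returns true when the subtree box [cleft..cright] x [ctop..cbottom]
    // overlaps the query box [left..right] x [top..bottom]
    static boolean subtreeIntersectsQuery(int left, int right, int top, int bottom,
            int cleft, int cright, int ctop, int cbottom) {
        return !(cright < left || cleft > right || ctop > bottom || cbottom < top);
    }
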
@@ -306,6 +315,7 @@ public class BinaryMapIndexReader {
int maxX = 0;
int minY = Integer.MAX_VALUE;
int maxY = 0;
req.numberOfVisitedObjects++;
while(codedIS.getBytesUntilLimit() > 0){
int x = codedIS.readSInt32() + px;
int y = codedIS.readSInt32() + py;
@@ -334,6 +344,7 @@ public class BinaryMapIndexReader {
codedIS.skipRawBytes(codedIS.getBytesUntilLimit());
return null;
}
req.numberOfAcceptedObjects++;

BinaryMapDataObject dataObject = new BinaryMapDataObject();
dataObject.coordinates = req.cacheCoordinates.toArray();
@@ -479,6 +490,12 @@ public class BinaryMapIndexReader {
List<BinaryMapDataObject> searchResults = new ArrayList<BinaryMapDataObject>();
TIntArrayList cacheCoordinates = new TIntArrayList();

// TRACE INFO
int numberOfVisitedObjects = 0;
int numberOfAcceptedObjects = 0;
int numberOfReadSubtrees = 0;
int numberOfAcceptedSubtrees = 0;

protected SearchRequest(){
}

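These four fields are the trace counters the commit adds to SearchRequest; they are what the two log.info lines earlier in this diff report once a search finishes. A hypothetical helper that formats the same summary (formatTrace is not part of the commit, it is only shown here to make the counters' meaning explicit):

    // mirrors the wording of the two log.info calls added in this commit
    static String formatTrace(int visitedObjects, int acceptedObjects,
            int readSubtrees, int acceptedSubtrees) {
        return "Search is done. Visit " + visitedObjects + " objects. Read " + acceptedObjects + " objects. "
                + "Read " + readSubtrees + " subtrees. Go through " + acceptedSubtrees + " subtrees.";
    }
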
@@ -1480,11 +1480,11 @@ public class IndexCreator {

for (int i = 0; i < parent.getTotalElements(); i++) {
Rect re = e[i].getRect();
if(e[i].getElementType() == rtree.Node.LEAF_NODE){
if (e[i].getElementType() == rtree.Node.LEAF_NODE) {
long id = ((LeafElement) e[i]).getPtr();
selectData.setLong(1, id);
ResultSet rs = selectData.executeQuery();
if(rs.next()){
if (rs.next()) {
writer.writeMapData(id, rs.getBytes(IndexBinaryMapRenderObject.NODES.ordinal()+ 1),
rs.getBytes(IndexBinaryMapRenderObject.TYPES.ordinal()+ 1), rs.getString(IndexBinaryMapRenderObject.NAME.ordinal()+ 1),
rs.getInt(IndexBinaryMapRenderObject.HIGHWAY.ordinal()+ 1), rs.getBytes(IndexBinaryMapRenderObject.RESTRICTIONS.ordinal()+ 1));
@@ -1819,6 +1819,7 @@ public class IndexCreator {
if (indexMap) {
progress.setGeneralProgress("[95 of 100]");
progress.startTask("Serializing map data...", -1);
assert rtree.Node.MAX < 50 : "It is better for performance";
try {
for (int i = 0; i < MAP_ZOOMS.length-1; i++) {
mapTree[i].flush();
@@ -1958,11 +1959,11 @@ public class IndexCreator {
// creator.setIndexPOI(true);
// creator.setIndexTransport(true);

creator.setNodesDBFile(new File("e:/Information/OSM maps/osmand/minsk.tmp.odb"));
creator.generateIndexes(new File("e:/Information/OSM maps/belarus osm/minsk.osm"), new ConsoleProgressImplementation(3), null);
// creator.setNodesDBFile(new File("e:/Information/OSM maps/osmand/minsk.tmp.odb"));
// creator.generateIndexes(new File("e:/Information/OSM maps/belarus osm/minsk.osm"), new ConsoleProgressImplementation(3), null);

// creator.setNodesDBFile(new File("e:/Information/OSM maps/osmand/belarus_nodes.tmp.odb"));
// creator.generateIndexes(new File("e:/Information/OSM maps/belarus osm/belarus.osm.bz2"), new ConsoleProgressImplementation(3), null);
creator.setNodesDBFile(new File("e:/Information/OSM maps/osmand/belarus_nodes.tmp.odb"));
creator.generateIndexes(new File("e:/Information/OSM maps/belarus osm/belarus.osm.bz2"), new ConsoleProgressImplementation(3), null);

// creator.setNodesDBFile(new File("e:/Information/OSM maps/osmand/ams.tmp.odb"));
// creator.generateIndexes(new File("e:/Information/OSM maps/osm_map/ams_part_map.osm"), new ConsoleProgressImplementation(3), null);

@@ -33,9 +33,11 @@ import java.util.Arrays;
public class Node implements Cloneable //can be made abstract if leaf and non leaf required
{
/**max no. of entries in a node*/
public final static int MAX = 169;//84;//101; //50;//testing 3
// public final static int MAX = 169;//84;//101; //50;//testing 3
public final static int MAX = 40;//84;//101; //50;//testing 3
/**min. no. of entries in a node*/
public final static int MIN= 84;//51; //25;//testing 2
// public final static int MIN= 84;//51; //25;//testing 2
public final static int MIN= 20;//51; //25;//testing 2
/**The size of the cache.<br>
Minimum cache size is 50% of total no. of elements (1lakh records has 597 nodes).
<br>Maximum cache size should be 70%, beyound that there may not be major improvements but the
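This is the change the commit message refers to most directly: the R-tree node capacity in the rtree library drops from MAX = 169 / MIN = 84 to MAX = 40 / MIN = 20, which matches the assert rtree.Node.MAX < 50 added in IndexCreator above and keeps MIN at half of MAX, the largest minimum fill an R-tree allows. A hypothetical sanity check expressing those two relations (not code from the commit):

    // MAX kept small so leaf nodes stay cheap to read during map search,
    // MIN kept at half of MAX (the usual R-tree minimum fill)
    static void checkNodeConstants() {
        assert rtree.Node.MAX < 50 : "It is better for performance";
        assert rtree.Node.MIN * 2 <= rtree.Node.MAX;
    }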