Region File better????

-Moved most of the file accessing of Region to RegionFile
-Disabled most of the header checks except the length check (they will
be back soon)
-Max chunk size arbitrarily raised to 4MiB because I wanted sectors
longer than 16B
-Sectors now have a mandatory 1B header that identifies it
	-0 is a null sector, it ends every chunk
	-1 is a normal data sector; it has a "parity" byte that makes sure it
is reading chunks in linear order (fun fact: it isn't at the moment)
	-2 is a jump to a different location. This isn't implemented well yet
	-3 will be a "bulk data" sector. Multiple chunks with identical data
can point here. Probably only useful when it is easily identifiable,
like multiple chunks being one entire block, like air.
-Removed all chunk length references as I think they do not make sense
when it can use different sectors for non-data purposes.
This commit is contained in:
opfromthestart 2021-09-09 19:58:44 -04:00
parent c5dfe3d0b7
commit 46bcb85044
2 changed files with 229 additions and 131 deletions

View File

@ -17,8 +17,6 @@
*/ */
package ru.windcorp.progressia.test.region; package ru.windcorp.progressia.test.region;
import static ru.windcorp.progressia.test.region.TestWorldDiskIO.REGION_DIAMETER;
import java.io.BufferedInputStream; import java.io.BufferedInputStream;
import java.io.BufferedOutputStream; import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream; import java.io.ByteArrayInputStream;
@ -27,15 +25,10 @@ import java.io.DataInputStream;
import java.io.DataOutputStream; import java.io.DataOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.RandomAccessFile; import java.io.RandomAccessFile;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.zip.DeflaterOutputStream; import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream; import java.util.zip.InflaterInputStream;
import org.apache.logging.log4j.LogManager;
import glm.vec._3.i.Vec3i; import glm.vec._3.i.Vec3i;
import ru.windcorp.progressia.common.state.IOContext; import ru.windcorp.progressia.common.state.IOContext;
import ru.windcorp.progressia.common.world.DecodingException; import ru.windcorp.progressia.common.world.DecodingException;
@ -50,55 +43,37 @@ public class Region {
private static final boolean RESET_CORRUPTED = true; private static final boolean RESET_CORRUPTED = true;
// 1 MiB
private static final int MAX_CHUNK_SIZE = 1024 * 1024;
private static final int SECTORS_BYTES = Short.BYTES;
private static final int SECTOR_SIZE = MAX_CHUNK_SIZE >> (SECTORS_BYTES*8);
public int loadedChunks; public int loadedChunks;
private static final int DEFINITION_SIZE = Integer.BYTES + Short.BYTES;
private static final int HEADER_SIZE = DEFINITION_SIZE * REGION_DIAMETER * REGION_DIAMETER * REGION_DIAMETER;
private AtomicBoolean isUsing = new AtomicBoolean(false); private AtomicBoolean isUsing = new AtomicBoolean(false);
private AtomicBoolean isClosed = new AtomicBoolean(false); private AtomicBoolean isClosed = new AtomicBoolean(false);
private final RandomAccessFile file; private final RegionFile file;
private final ChunkMap<Integer> offsets = ChunkMaps.newHashMap(); private final ChunkMap<Integer> offsets = ChunkMaps.newHashMap();
private final ChunkMap<Integer> lengths = ChunkMaps.newHashMap();
public Region(RandomAccessFile file) throws IOException { public Region(RandomAccessFile file) throws IOException {
this.file = file; this.file = new RegionFile(file);
try { try {
confirmHeaderHealth(); this.file.confirmHeaderHealth(offsets);
} catch (IOException e) { } catch (IOException e) {
TestWorldDiskIO.LOG.debug("Uh the file broke"); TestWorldDiskIO.LOG.debug("Uh the file broke");
if (RESET_CORRUPTED) { if (RESET_CORRUPTED) {
byte headerBytes[] = new byte[HEADER_SIZE]; this.file.makeHeader();
Arrays.fill(headerBytes, (byte) 0);
try {
file.write(headerBytes);
} catch (IOException e1) {
e.addSuppressed(e1);
throw e;
}
} }
} }
} }
public RandomAccessFile getFile() { public RegionFile getFile() {
return file; return file;
} }
public void close() throws IOException { public void close() throws IOException {
this.file.close(); this.file.close();
isClosed.lazySet(true);; isClosed.lazySet(true);
} }
public int getOffset(Vec3i chunkLoc) { public int getOffset(Vec3i chunkLoc) {
@ -113,18 +88,6 @@ public class Region {
offsets.put(pos, offset); offsets.put(pos, offset);
} }
public int getLength(Vec3i chunkLoc) {
return lengths.get(chunkLoc);
}
public boolean hasLength(Vec3i pos) {
return lengths.containsKey(pos);
}
public void putLength(Vec3i pos, int length) {
lengths.put(pos, length);
}
public AtomicBoolean isClosed() public AtomicBoolean isClosed()
{ {
return isClosed; return isClosed;
@ -135,68 +98,18 @@ public class Region {
return isUsing; return isUsing;
} }
private void confirmHeaderHealth() throws IOException {
Set<Integer> used = new HashSet<Integer>();
int maxUsed = 0;
final int chunksPerRegion = REGION_DIAMETER * REGION_DIAMETER * REGION_DIAMETER;
file.seek(0);
if (file.length() < HEADER_SIZE) {
throw new IOException("File is too short to contain a header");
}
for (int i = 0; i < chunksPerRegion; i++) {
int offset = file.readInt();
int sectorLength = file.readShort();
if (sectorLength == 0) {
continue;
}
Vec3i pos = new Vec3i();
pos.x = i / REGION_DIAMETER / REGION_DIAMETER;
pos.y = (i / REGION_DIAMETER) % REGION_DIAMETER;
pos.z = i % REGION_DIAMETER;
offsets.put(pos, offset);
lengths.put(pos, sectorLength);
if (offset+sectorLength > maxUsed)
{
maxUsed = offset + sectorLength;
}
for (int sector = 0; sector < sectorLength; sector++) {
if (!used.add(offset + sector)) {
throw new IOException("A sector is used twice");
}
}
}
LogManager.getLogger("Region").debug("Efficiency of {}", (double) used.size()/maxUsed);
}
public void save(DefaultChunkData chunk, Server server) throws IOException { public void save(DefaultChunkData chunk, Server server) throws IOException {
isUsing.set(true); isUsing.set(true);
Vec3i pos = TestWorldDiskIO.getInRegionCoords(chunk.getPosition()); Vec3i pos = TestWorldDiskIO.getInRegionCoords(chunk.getPosition());
int definitionOffset = DEFINITION_SIZE * (pos.z + REGION_DIAMETER * (pos.y + REGION_DIAMETER * pos.x));
if (!hasOffset(pos)) { if (!hasOffset(pos)) {
allocateChunk(definitionOffset, pos); putOffset(pos, file.allocateChunk(pos));
} }
int dataOffset = getOffset(pos); int dataOffset = getOffset(pos);
byte[] buffer = saveToBuffer(chunk, server); byte[] buffer = saveToBuffer(chunk, server);
if (hasLength(pos) && buffer.length > getLength(pos)*SECTOR_SIZE )
{
byte emptyBuffer[] = new byte[getLength(pos)*SECTOR_SIZE];
writeBuffer(emptyBuffer, definitionOffset, dataOffset, pos);
allocateChunk(definitionOffset, pos);
dataOffset = getOffset(pos);
}
writeBuffer(buffer, definitionOffset, dataOffset, pos); file.writeBuffer(buffer, dataOffset, pos);
isUsing.set(false); isUsing.set(false);
} }
@ -216,29 +129,7 @@ public class Region {
return arrayStream.toByteArray(); return arrayStream.toByteArray();
} }
private void writeBuffer(byte[] buffer, int definitionOffset, int dataOffset, Vec3i pos) throws IOException {
file.seek(HEADER_SIZE + SECTOR_SIZE * dataOffset);
file.write(buffer);
file.seek(definitionOffset + Integer.BYTES);
int sectors = (int) buffer.length / SECTOR_SIZE + 1;
file.writeShort(sectors);
putLength(pos, sectors);
}
private void allocateChunk(int definitionOffset, Vec3i pos) throws IOException {
int outputLen = (int) file.length();
int dataOffset = (int) (outputLen - HEADER_SIZE) / SECTOR_SIZE + 1;
file.seek(definitionOffset);
file.writeInt(dataOffset);
file.setLength(HEADER_SIZE + dataOffset * SECTOR_SIZE);
putOffset(pos, dataOffset);
}
public DefaultChunkData load(Vec3i chunkPos, DefaultWorldData world, Server server) public DefaultChunkData load(Vec3i chunkPos, DefaultWorldData world, Server server)
throws IOException, throws IOException,
@ -246,17 +137,15 @@ public class Region {
isUsing.set(true); isUsing.set(true);
int dataOffset = 0; int dataOffset = 0;
int sectorLength = 0;
Vec3i pos = TestWorldDiskIO.getInRegionCoords(chunkPos); Vec3i pos = TestWorldDiskIO.getInRegionCoords(chunkPos);
if (hasOffset(pos)) { if (hasOffset(pos)) {
dataOffset = getOffset(pos); dataOffset = getOffset(pos);
sectorLength = getLength(pos);
} else { } else {
return null; return null;
} }
byte[] buffer = readBuffer(dataOffset, sectorLength); byte[] buffer = file.readBuffer(dataOffset);
DefaultChunkData result = loadFromBuffer(buffer, chunkPos, world, server); DefaultChunkData result = loadFromBuffer(buffer, chunkPos, world, server);
isUsing.set(false); isUsing.set(false);
return result; return result;
@ -278,12 +167,4 @@ public class Region {
TestWorldDiskIO.readGenerationHint(result, dataStream, server); TestWorldDiskIO.readGenerationHint(result, dataStream, server);
return result; return result;
} }
private byte[] readBuffer(int dataOffset, int sectorLength) throws IOException {
file.seek(HEADER_SIZE + SECTOR_SIZE * dataOffset);
byte buffer[] = new byte[SECTOR_SIZE * sectorLength];
file.read(buffer);
return buffer;
}
} }

View File

@ -0,0 +1,217 @@
package ru.windcorp.progressia.test.region;
import static ru.windcorp.progressia.test.region.TestWorldDiskIO.REGION_DIAMETER;

import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.logging.log4j.LogManager;

import glm.vec._3.i.Vec3i;

import ru.windcorp.progressia.common.world.generic.ChunkMap;
/**Backend for the .progressia_region file.
* Use similarly to a file object
*
*/
public class RegionFile {
// 4 MiB
private static final int MAX_CHUNK_SIZE = 4 * 1024 * 1024;
private static final int SECTORS_BYTES = Short.BYTES;
private static final int SECTOR_SIZE = MAX_CHUNK_SIZE >> (SECTORS_BYTES*8);
private static final int SECTOR_HEADER_LENGTH = 1;
final byte endBytes[] = new byte[SECTOR_SIZE];
private Map<Integer, Vec3i> isFilledMap = new HashMap(); // TODO ill do this at the same time I finish the new confirmheaderhealth
public enum SectorType
{
Ending (0), // Just an empty block
Data (1), // has a byte counting up in position 1, and then
PartitionLink (2),
BulkSaved (3); // TODO implement this
private final byte data;
SectorType(int i)
{
this.data = (byte) i;
}
}
private static final int DEFINITION_SIZE = Integer.BYTES;
private static final int HEADER_SIZE = DEFINITION_SIZE * REGION_DIAMETER * REGION_DIAMETER * REGION_DIAMETER;
private final RandomAccessFile file;
public RegionFile(RandomAccessFile inFile)
{
file = inFile;
}
public void confirmHeaderHealth(ChunkMap<Integer> offsets) throws IOException {
Set<Integer> used = new HashSet<Integer>();
int maxUsed = 0;
final int chunksPerRegion = REGION_DIAMETER * REGION_DIAMETER * REGION_DIAMETER;
file.seek(0);
if (file.length() < HEADER_SIZE) {
throw new IOException("File is too short to contain a header");
}
// for (int i = 0; i < chunksPerRegion; i++) { // TODO ill make the rest in a bit
// int offset = file.readInt();
//
// if (offset == 0) {
// continue;
// }
//
// Vec3i pos = new Vec3i();
// pos.x = i / REGION_DIAMETER / REGION_DIAMETER;
// pos.y = (i / REGION_DIAMETER) % REGION_DIAMETER;
// pos.z = i % REGION_DIAMETER;
//
// offsets.put(pos, offset);
//
// boolean shouldEnd = false;
// while (!shouldEnd)
// {
// if (offset > maxUsed)
// {
// maxUsed = offset;
// }
//
// if (!used.add(offset)) {
// throw new IOException("A sector is used twice");
// }
//
// }
// }
LogManager.getLogger("Region").debug("Efficiency of {}", (double) used.size()/maxUsed);
}
public void makeHeader() throws IOException
{
file.seek(0);
for (int i=0;i<HEADER_SIZE;i++)
{
file.write(0);
}
}
public void writeBuffer(byte[] buffer, int dataOffset, Vec3i pos) throws IOException {
file.seek(HEADER_SIZE + SECTOR_SIZE * dataOffset);
int loc=0;
byte tempBuffer[] = new byte[SECTOR_SIZE];
byte counter = 0;
boolean isDone = false;
while (!isDone)
{
tempBuffer[0] = 1;
tempBuffer[1] = counter;
counter++;
for (int i=0;i<(SECTOR_SIZE-SECTOR_HEADER_LENGTH-1);i++)
{
if (loc*(SECTOR_SIZE-SECTOR_HEADER_LENGTH-1) + i<buffer.length)
{
tempBuffer[i+SECTOR_HEADER_LENGTH+1] = buffer[loc*(SECTOR_SIZE-SECTOR_HEADER_LENGTH-1) + i];
}
else
{
isDone = true;
break;
}
}
loc++;
if (file.getFilePointer()<256)
LogManager.getLogger("Region").debug("at {}, ({},{},{}), {}", file.getFilePointer(),pos.x,pos.y,pos.z, dataOffset);
file.write(tempBuffer);
}
file.write(endBytes);
}
public int allocateChunk( Vec3i pos) throws IOException {
int definitionOffset = DEFINITION_SIZE * (pos.z + REGION_DIAMETER * (pos.y + REGION_DIAMETER * pos.x));
int outputLen = (int) file.length();
int dataOffset = (int) (outputLen - HEADER_SIZE) / SECTOR_SIZE + 1;
file.seek(definitionOffset);
file.writeInt(dataOffset);
file.setLength(HEADER_SIZE + dataOffset * SECTOR_SIZE);
return dataOffset;
}
public byte[] readBuffer(int dataOffset) throws IOException {
file.seek(HEADER_SIZE + SECTOR_SIZE * dataOffset);
int bufferPos = 0;
byte buffer[] = new byte[SECTOR_SIZE*16];
byte tempBuffer[] = new byte[SECTOR_SIZE];
boolean reachedEnd = false;
byte counter = 0;
while (!reachedEnd)
{
int bytesRead = file.read(tempBuffer, 0, SECTOR_SIZE);
if (bytesRead==0)
{
reachedEnd = true;
continue;
}
if (tempBuffer[0] == SectorType.Data.data)
{
if (tempBuffer[1] != counter)
{
throw new IOException("Sectors were read out of order\nExpected chunk number "+Byte.toString(counter)+" but encountered number " + Byte.toString(tempBuffer[1]));
}
counter++;
if (buffer.length - bufferPos < SECTOR_SIZE-SECTOR_HEADER_LENGTH-1)
{
byte newBuffer[] = new byte[buffer.length + SECTOR_SIZE*16];
for (int i=0;i<buffer.length;i++) // TODO dedicated copy, java-y at least
{
newBuffer[i] = buffer[i];
}
buffer = newBuffer;
}
for (int i=0;i<SECTOR_SIZE-SECTOR_HEADER_LENGTH-1;i++)
{
buffer[bufferPos+i] = tempBuffer[i+2];
}
bufferPos += SECTOR_SIZE-SECTOR_HEADER_LENGTH-1;
}
else if (tempBuffer[0] == SectorType.Ending.data)
{
reachedEnd = true;
}
else if (tempBuffer[0] == SectorType.PartitionLink.data)
{
int newOffset = ((tempBuffer[4]*256 + tempBuffer[3])*256 + tempBuffer[2])*256 + tempBuffer[1];
file.seek(HEADER_SIZE + SECTOR_SIZE * newOffset);
}
else
{
throw new IOException("Invalid sector ID.");
}
}
return buffer;
}
public void close() throws IOException {
file.close();
}
}