2019-11-01 18:11:05 +00:00
package com.boydti.fawe.jnbt.anvil ;
2019-11-06 09:29:20 +00:00
import com.boydti.fawe.FaweCache ;
2019-11-02 07:07:40 +00:00
import com.boydti.fawe.beta.Trimable ;
2019-11-06 10:38:36 +00:00
import com.boydti.fawe.beta.implementation.IChunkExtent ;
2019-11-06 09:29:20 +00:00
import com.boydti.fawe.beta.implementation.processors.ExtentBatchProcessorHolder ;
2019-11-01 18:11:05 +00:00
import com.boydti.fawe.object.RunnableVal4 ;
import com.boydti.fawe.object.io.BufferedRandomAccessFile ;
import com.boydti.fawe.object.io.FastByteArrayInputStream ;
import com.boydti.fawe.util.MainUtil ;
import com.boydti.fawe.util.MathMan ;
2019-11-06 09:29:20 +00:00
import com.sk89q.jnbt.CompoundTag ;
2019-11-01 18:11:05 +00:00
import com.sk89q.jnbt.NBTInputStream ;
2019-11-06 09:29:20 +00:00
import com.sk89q.worldedit.WorldEditException ;
2021-03-29 13:29:16 +00:00
import com.sk89q.worldedit.internal.util.LogManagerCompat ;
2019-11-06 09:29:20 +00:00
import com.sk89q.worldedit.math.BlockVector3 ;
2019-11-01 18:11:05 +00:00
import com.sk89q.worldedit.world.World ;
import it.unimi.dsi.fastutil.ints.Int2ObjectMap ;
import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap ;
2019-11-10 11:38:45 +00:00
import it.unimi.dsi.fastutil.io.FastByteArrayOutputStream ;
2021-03-29 13:29:16 +00:00
import org.apache.logging.log4j.Logger ;
2020-07-14 02:50:59 +00:00
2019-11-01 18:11:05 +00:00
import java.io.BufferedInputStream ;
import java.io.File ;
import java.io.FileNotFoundException ;
import java.io.IOException ;
2019-11-10 11:38:45 +00:00
import java.io.InputStream ;
2019-11-01 18:11:05 +00:00
import java.io.RandomAccessFile ;
import java.lang.reflect.Field ;
import java.util.ArrayList ;
import java.util.Arrays ;
import java.util.List ;
import java.util.concurrent.ForkJoinPool ;
2019-11-10 11:38:45 +00:00
import java.util.concurrent.ForkJoinTask ;
import java.util.concurrent.Future ;
2019-11-01 18:11:05 +00:00
import java.util.concurrent.TimeUnit ;
2019-11-02 07:07:40 +00:00
import java.util.function.Consumer ;
2020-01-03 17:02:18 +00:00
import java.util.stream.IntStream ;
2019-11-10 11:38:45 +00:00
import java.util.zip.Deflater ;
2019-11-01 18:11:05 +00:00
import java.util.zip.Inflater ;
import java.util.zip.InflaterInputStream ;
/**
 * Chunk format: http://minecraft.gamepedia.com/Chunk_format#Entity_format
 * e.g., `.Level.Entities.#` (starts with a `.` as the root tag is unnamed)
 *
 * <p>Note: This class isn't thread safe. You can use it in an async thread, but not
 * multiple threads at the same time.</p>
 */
2019-11-06 10:38:36 +00:00
public class MCAFile extends ExtentBatchProcessorHolder implements Trimable , IChunkExtent {
2019-11-01 18:11:05 +00:00
2021-03-29 13:29:16 +00:00
private static final Logger LOGGER = LogManagerCompat . getLogger ( ) ;
2019-11-01 18:11:05 +00:00
private static Field fieldBuf2 ;
static {
try {
fieldBuf2 = InflaterInputStream . class . getDeclaredField ( " buf " ) ;
fieldBuf2 . setAccessible ( true ) ;
} catch ( Throwable e ) {
e . printStackTrace ( ) ;
}
}
2019-11-02 07:07:40 +00:00
private final ForkJoinPool pool ;
private final byte [ ] locations ;
private boolean readLocations ;
private File file ;
2019-11-01 18:11:05 +00:00
private RandomAccessFile raf ;
2019-11-02 07:07:40 +00:00
2019-11-01 18:11:05 +00:00
private boolean deleted ;
2020-10-05 17:41:41 +00:00
private int X ;
private int Z ;
2019-11-02 07:07:40 +00:00
private MCAChunk [ ] chunks ;
private boolean [ ] chunkInitialized ;
2019-11-02 07:40:11 +00:00
private Object [ ] locks ;
2019-11-10 11:38:45 +00:00
private Deflater deflater = new Deflater ( 1 , false ) ;
2019-11-01 18:11:05 +00:00
2019-11-02 07:07:40 +00:00
public MCAFile ( ForkJoinPool pool ) {
this . pool = pool ;
this . locations = new byte [ 4096 ] ;
this . chunks = new MCAChunk [ 32 * 32 ] ;
this . chunkInitialized = new boolean [ this . chunks . length ] ;
2019-11-02 07:40:11 +00:00
this . locks = new Object [ this . chunks . length ] ;
for ( int i = 0 ; i < locks . length ; i + + ) {
locks [ i ] = new Object ( ) ;
}
2019-11-02 07:07:40 +00:00
}
@Override
public boolean trim ( boolean aggressive ) {
boolean hasChunk = false ;
for ( int i = 0 ; i < chunkInitialized . length ; i + + ) {
if ( ! chunkInitialized [ i ] ) {
chunks [ i ] = null ;
} else {
hasChunk = true ;
}
2019-11-01 18:11:05 +00:00
}
2019-11-02 07:07:40 +00:00
return ! hasChunk ;
}
public MCAFile init ( File file ) throws FileNotFoundException {
2019-11-01 18:11:05 +00:00
String [ ] split = file . getName ( ) . split ( " \\ . " ) ;
2019-11-02 07:40:11 +00:00
int X = Integer . parseInt ( split [ 1 ] ) ;
int Z = Integer . parseInt ( split [ 2 ] ) ;
2019-11-02 07:07:40 +00:00
return init ( file , X , Z ) ;
2019-11-01 18:11:05 +00:00
}
2019-11-02 07:07:40 +00:00
public MCAFile init ( File file , int mcrX , int mcrZ ) throws FileNotFoundException {
if ( raf ! = null ) {
2019-11-02 11:13:42 +00:00
flush ( true ) ;
2019-11-02 07:40:11 +00:00
for ( int i = 0 ; i < 4096 ; i + + ) {
locations [ i ] = 0 ;
}
try {
raf . close ( ) ;
} catch ( IOException e ) {
e . printStackTrace ( ) ;
2019-11-02 07:07:40 +00:00
}
2019-11-02 07:40:11 +00:00
raf = null ;
2019-11-02 07:07:40 +00:00
}
2019-11-02 07:40:11 +00:00
deleted = false ;
Arrays . fill ( chunkInitialized , false ) ;
readLocations = false ;
this . X = mcrX ;
this . Z = mcrZ ;
2019-11-01 18:11:05 +00:00
this . file = file ;
2019-11-02 07:07:40 +00:00
if ( ! file . exists ( ) ) {
throw new FileNotFoundException ( file . getName ( ) ) ;
}
return this ;
}
public MCAFile init ( World world , int mcrX , int mcrZ ) throws FileNotFoundException {
return init ( new File ( world . getStoragePath ( ) . toFile ( ) , File . separator + " regions " + File . separator + " r. " + mcrX + " . " + mcrZ + " .mca " ) ) ;
}
2019-11-06 09:29:20 +00:00
@Override
public BlockVector3 getMinimumPoint ( ) {
return BlockVector3 . at ( this . X < < 9 , 0 , this . Z < < 9 ) ;
}
@Override
public BlockVector3 getMaximumPoint ( ) {
2020-01-29 20:01:38 +00:00
return BlockVector3 . at ( ( this . X < < 9 ) + 511 , FaweCache . IMP . WORLD_MAX_Y , ( this . Z < < 9 ) + 511 ) ;
2019-11-06 09:29:20 +00:00
}
@Override
public boolean setTile ( int x , int y , int z , CompoundTag tile ) throws WorldEditException {
// final IChunk chunk = getChunk(x >> 4, z >> 4);
// return chunk.setTile(x & 15, y, z & 15, tile);
return false ;
}
2019-11-02 07:07:40 +00:00
public int getIndex ( int chunkX , int chunkZ ) {
return ( ( chunkX & 31 ) < < 2 ) + ( ( chunkZ & 31 ) < < 7 ) ;
}
private RandomAccessFile getRaf ( ) throws FileNotFoundException {
if ( this . raf = = null ) {
this . raf = new RandomAccessFile ( file , " rw " ) ;
}
return this . raf ;
}
private void readHeader ( ) throws IOException {
if ( ! readLocations ) {
readLocations = true ;
getRaf ( ) ;
if ( raf . length ( ) < 8192 ) {
raf . setLength ( 8192 ) ;
} else {
raf . seek ( 0 ) ;
raf . readFully ( locations ) ;
}
}
2019-11-01 18:11:05 +00:00
}
public void clear ( ) {
if ( raf ! = null ) {
try {
raf . close ( ) ;
} catch ( IOException e ) {
e . printStackTrace ( ) ;
}
}
2019-11-02 07:40:11 +00:00
deleted = false ;
readLocations = false ;
Arrays . fill ( chunkInitialized , false ) ;
2019-11-01 18:11:05 +00:00
}
public void setDeleted ( boolean deleted ) {
this . deleted = deleted ;
}
public boolean isDeleted ( ) {
return deleted ;
}
public int getX ( ) {
return X ;
}
public int getZ ( ) {
return Z ;
}
public RandomAccessFile getRandomAccessFile ( ) {
return raf ;
}
public File getFile ( ) {
return file ;
}
public MCAChunk getCachedChunk ( int cx , int cz ) {
2019-11-02 07:07:40 +00:00
int pair = getIndex ( cx , cz ) ;
MCAChunk chunk = chunks [ pair ] ;
if ( chunk ! = null & & chunkInitialized [ pair ] ) {
return chunk ;
2019-11-01 18:11:05 +00:00
}
2019-11-02 07:07:40 +00:00
return null ;
2019-11-01 18:11:05 +00:00
}
public void setChunk ( MCAChunk chunk ) {
int cx = chunk . getX ( ) ;
int cz = chunk . getZ ( ) ;
2019-11-02 07:07:40 +00:00
int pair = getIndex ( cx , cz ) ;
chunks [ pair ] = chunk ;
2019-11-01 18:11:05 +00:00
}
2019-11-06 10:38:36 +00:00
@Override
public MCAChunk getOrCreateChunk ( int chunkX , int chunkZ ) {
try {
return getChunk ( chunkX , chunkZ ) ;
} catch ( IOException e ) {
// TODO generate?
return null ;
}
}
2019-11-01 18:11:05 +00:00
public MCAChunk getChunk ( int cx , int cz ) throws IOException {
2019-11-02 07:07:40 +00:00
int pair = getIndex ( cx , cz ) ;
MCAChunk chunk = chunks [ pair ] ;
if ( chunk = = null ) {
2019-11-02 07:40:11 +00:00
Object lock = locks [ pair ] ;
synchronized ( lock ) {
chunk = chunks [ pair ] ;
if ( chunk = = null ) {
chunk = new MCAChunk ( ) ;
chunk . setPosition ( cx , cz ) ;
chunks [ pair ] = chunk ;
}
}
2019-11-02 07:07:40 +00:00
} else if ( chunkInitialized [ pair ] ) {
return chunk ;
}
2019-11-02 07:40:11 +00:00
synchronized ( chunk ) {
if ( ! chunkInitialized [ pair ] ) {
readChunk ( chunk , pair ) ;
chunkInitialized [ pair ] = true ;
}
}
2019-11-02 07:07:40 +00:00
return chunk ;
2019-11-01 18:11:05 +00:00
}
2019-11-02 07:07:40 +00:00
private MCAChunk readChunk ( MCAChunk chunk , int i ) throws IOException {
2019-11-01 18:11:05 +00:00
int offset = ( ( ( locations [ i ] & 0xFF ) < < 16 ) + ( ( locations [ i + 1 ] & 0xFF ) < < 8 ) + ( ( locations [ i + 2 ] & 0xFF ) ) ) < < 12 ;
if ( offset = = 0 ) {
return null ;
}
2019-11-02 07:07:40 +00:00
int size = ( locations [ i + 3 ] & 0xFF ) < < 12 ;
try ( NBTInputStream nis = getChunkIS ( offset ) ) {
chunk . read ( nis , false ) ;
2019-11-01 18:11:05 +00:00
}
2020-10-06 18:44:33 +00:00
//TODO multithreaded
2019-11-01 18:11:05 +00:00
return chunk ;
}
/ * *
* CX , CZ , OFFSET , SIZE
*
* @param onEach
* @throws IOException
* /
public void forEachSortedChunk ( RunnableVal4 < Integer , Integer , Integer , Integer > onEach ) throws IOException {
char [ ] offsets = new char [ ( int ) ( raf . length ( ) / 4096 ) - 2 ] ;
Arrays . fill ( offsets , Character . MAX_VALUE ) ;
char i = 0 ;
for ( int z = 0 ; z < 32 ; z + + ) {
for ( int x = 0 ; x < 32 ; x + + , i + = 4 ) {
int offset = ( ( ( locations [ i ] & 0xFF ) < < 16 ) + ( ( locations [ i + 1 ] & 0xFF ) < < 8 ) + ( ( locations [ i + 2 ] & 0xFF ) ) ) - 2 ;
int size = locations [ i + 3 ] & 0xFF ;
if ( size ! = 0 ) {
if ( offset < offsets . length ) {
offsets [ offset ] = i ;
} else {
2021-03-29 13:29:16 +00:00
LOGGER . debug ( " Ignoring invalid offset " + offset ) ;
2019-11-01 18:11:05 +00:00
}
}
}
}
for ( i = 0 ; i < offsets . length ; i + + ) {
int index = offsets [ i ] ;
if ( index ! = Character . MAX_VALUE ) {
int offset = i + 2 ;
int size = locations [ index + 3 ] & 0xFF ;
int index2 = index > > 2 ;
int x = ( index2 ) & 31 ;
int z = ( index2 ) > > 5 ;
onEach . run ( x , z , offset < < 12 , size < < 12 ) ;
}
}
}
/ * *
* @param onEach cx , cz , offset , size
* /
public void forEachChunk ( RunnableVal4 < Integer , Integer , Integer , Integer > onEach ) {
int i = 0 ;
for ( int z = 0 ; z < 32 ; z + + ) {
for ( int x = 0 ; x < 32 ; x + + , i + = 4 ) {
int offset = ( ( ( locations [ i ] & 0xFF ) < < 16 ) + ( ( locations [ i + 1 ] & 0xFF ) < < 8 ) + ( ( locations [ i + 2 ] & 0xFF ) ) ) ;
int size = locations [ i + 3 ] & 0xFF ;
if ( size ! = 0 ) {
onEach . run ( x , z , offset < < 12 , size < < 12 ) ;
}
}
}
}
2019-11-02 07:07:40 +00:00
public void forEachChunk ( Consumer < MCAChunk > onEach ) {
2019-11-01 18:11:05 +00:00
int i = 0 ;
for ( int z = 0 ; z < 32 ; z + + ) {
for ( int x = 0 ; x < 32 ; x + + , i + = 4 ) {
int offset = ( ( ( locations [ i ] & 0xFF ) < < 16 ) + ( ( locations [ i + 1 ] & 0xFF ) < < 8 ) + ( ( locations [ i + 2 ] & 0xFF ) ) ) ;
int size = locations [ i + 3 ] & 0xFF ;
if ( size ! = 0 ) {
try {
2019-11-02 07:07:40 +00:00
onEach . accept ( getChunk ( x , z ) ) ;
2020-10-05 17:41:41 +00:00
} catch ( Throwable ignored ) {
2019-11-01 18:11:05 +00:00
}
}
}
}
}
public int getOffset ( int cx , int cz ) {
2019-11-02 07:07:40 +00:00
int i = getIndex ( cx , cz ) ;
2019-11-01 18:11:05 +00:00
int offset = ( ( ( locations [ i ] & 0xFF ) < < 16 ) + ( ( locations [ i + 1 ] & 0xFF ) < < 8 ) + ( ( locations [ i + 2 ] & 0xFF ) ) ) ;
return offset < < 12 ;
}
public int getSize ( int cx , int cz ) {
2019-11-02 07:07:40 +00:00
int i = getIndex ( cx , cz ) ;
2019-11-01 18:11:05 +00:00
return ( locations [ i + 3 ] & 0xFF ) < < 12 ;
}
2019-11-10 11:38:45 +00:00
public FastByteArrayInputStream getChunkCompressedBytes ( int offset ) throws IOException {
2019-11-01 18:11:05 +00:00
if ( offset = = 0 ) {
return null ;
}
synchronized ( raf ) {
raf . seek ( offset ) ;
int size = raf . readInt ( ) ;
int compression = raf . read ( ) ;
2020-01-29 20:01:38 +00:00
byte [ ] data = FaweCache . IMP . BYTE_BUFFER_VAR . get ( size ) ;
2019-11-10 11:38:45 +00:00
raf . readFully ( data , 0 , size ) ;
FastByteArrayInputStream result = new FastByteArrayInputStream ( data , 0 , size ) ;
return result ;
2019-11-01 18:11:05 +00:00
}
}
private NBTInputStream getChunkIS ( int offset ) throws IOException {
try {
2019-11-10 11:38:45 +00:00
return getChunkIS ( getChunkCompressedBytes ( offset ) ) ;
2019-11-01 18:11:05 +00:00
} catch ( IllegalAccessException unlikely ) {
unlikely . printStackTrace ( ) ;
return null ;
}
}
2019-11-10 11:38:45 +00:00
private NBTInputStream getChunkIS ( InputStream is ) throws IllegalAccessException {
InflaterInputStream iis = new InflaterInputStream ( is , new Inflater ( ) , 1 ) ;
2020-01-29 20:01:38 +00:00
fieldBuf2 . set ( iis , FaweCache . IMP . BYTE_BUFFER_8192 . get ( ) ) ;
2019-11-10 11:38:45 +00:00
BufferedInputStream bis = new BufferedInputStream ( iis ) ;
NBTInputStream nis = new NBTInputStream ( bis ) ;
return nis ;
2019-11-01 18:11:05 +00:00
}
/ * *
* @param onEach chunk
* /
2019-11-02 07:07:40 +00:00
public void forEachCachedChunk ( Consumer < MCAChunk > onEach ) {
for ( int i = 0 ; i < chunks . length ; i + + ) {
MCAChunk chunk = chunks [ i ] ;
if ( chunk ! = null & & this . chunkInitialized [ i ] ) {
onEach . accept ( chunk ) ;
2019-11-01 18:11:05 +00:00
}
}
}
public List < MCAChunk > getCachedChunks ( ) {
2020-01-03 17:02:18 +00:00
int size = ( int ) IntStream . range ( 0 , chunks . length )
. filter ( i - > chunks [ i ] ! = null & & this . chunkInitialized [ i ] ) . count ( ) ;
2019-11-02 07:07:40 +00:00
ArrayList < MCAChunk > list = new ArrayList < > ( size ) ;
for ( int i = 0 ; i < chunks . length ; i + + ) {
MCAChunk chunk = chunks [ i ] ;
if ( chunk ! = null & & this . chunkInitialized [ i ] ) {
list . add ( chunk ) ;
}
2019-11-01 18:11:05 +00:00
}
2019-11-02 07:07:40 +00:00
return list ;
2019-11-01 18:11:05 +00:00
}
2019-11-10 11:38:45 +00:00
private FastByteArrayOutputStream toBytes ( MCAChunk chunk ) throws IOException {
2019-11-01 18:11:05 +00:00
if ( chunk . isDeleted ( ) ) {
return null ;
}
2020-01-29 20:01:38 +00:00
byte [ ] writeBuffer = FaweCache . IMP . BYTE_BUFFER_VAR . get ( 4096 ) ;
2019-11-10 11:38:45 +00:00
FastByteArrayOutputStream uncompressed = chunk . toBytes ( writeBuffer ) ;
if ( uncompressed . array . length > writeBuffer . length ) {
2020-01-29 20:01:38 +00:00
FaweCache . IMP . BYTE_BUFFER_VAR . set ( uncompressed . array ) ;
2019-11-01 18:11:05 +00:00
}
2019-11-10 11:38:45 +00:00
writeBuffer = uncompressed . array ;
2020-01-29 20:01:38 +00:00
byte [ ] buffer = FaweCache . IMP . BYTE_BUFFER_8192 . get ( ) ;
2019-11-10 11:38:45 +00:00
int length = uncompressed . length ;
uncompressed . reset ( ) ;
// cheat, reusing the same buffer to read/write
int compressedLength = MainUtil . compress ( uncompressed . array , length , buffer , uncompressed , deflater ) ;
return uncompressed ;
2019-11-01 18:11:05 +00:00
}
2019-11-10 11:38:45 +00:00
private void writeSafe ( RandomAccessFile raf , int offset , byte [ ] data , int length ) throws IOException {
int len = length + 5 ;
2019-11-01 18:11:05 +00:00
raf . seek ( offset ) ;
if ( raf . length ( ) - offset < len ) {
raf . setLength ( ( ( offset + len + 4095 ) / 4096 ) * 4096 ) ;
}
// Length of remaining data
2019-11-10 11:38:45 +00:00
raf . writeInt ( length + 1 ) ;
2019-11-01 18:11:05 +00:00
// Compression type
raf . write ( 2 ) ;
2019-11-10 11:38:45 +00:00
raf . write ( data , 0 , length ) ;
2019-11-01 18:11:05 +00:00
}
private void writeHeader ( RandomAccessFile raf , int cx , int cz , int offsetMedium , int sizeByte , boolean writeTime ) throws IOException {
2019-11-02 07:07:40 +00:00
int i = getIndex ( cx , cz ) ;
2019-11-01 18:11:05 +00:00
locations [ i ] = ( byte ) ( offsetMedium > > 16 ) ;
locations [ i + 1 ] = ( byte ) ( offsetMedium > > 8 ) ;
locations [ i + 2 ] = ( byte ) ( offsetMedium ) ;
locations [ i + 3 ] = ( byte ) sizeByte ;
raf . seek ( i ) ;
raf . write ( ( offsetMedium > > 16 ) ) ;
raf . write ( ( offsetMedium > > 8 ) ) ;
raf . write ( ( offsetMedium > > 0 ) ) ;
raf . write ( sizeByte ) ;
raf . seek ( i + 4096 ) ;
if ( offsetMedium = = 0 & & sizeByte = = 0 ) {
raf . writeInt ( 0 ) ;
} else {
raf . writeInt ( ( int ) ( System . currentTimeMillis ( ) / 1000L ) ) ;
}
}
2019-11-02 11:13:42 +00:00
public void close ( ) {
2020-10-05 17:41:41 +00:00
if ( raf = = null ) {
return ;
}
2019-11-01 18:11:05 +00:00
synchronized ( raf ) {
if ( raf ! = null ) {
2019-11-02 11:13:42 +00:00
flush ( true ) ;
2019-11-01 18:11:05 +00:00
try {
raf . close ( ) ;
} catch ( IOException e ) {
e . printStackTrace ( ) ;
}
raf = null ;
}
}
}
public boolean isModified ( ) {
if ( isDeleted ( ) ) {
return true ;
}
2019-11-02 07:40:11 +00:00
for ( int i = 0 ; i < chunks . length ; i + + ) {
MCAChunk chunk = chunks [ i ] ;
if ( chunk ! = null & & this . chunkInitialized [ i ] ) {
if ( chunk . isModified ( ) | | chunk . isDeleted ( ) ) {
return true ;
2019-11-01 18:11:05 +00:00
}
}
}
return false ;
}
/ * *
* Write the chunk to the file
2019-11-02 07:40:11 +00:00
* @param wait - If the flush method needs to wait for the pool
2019-11-01 18:11:05 +00:00
* /
2019-11-02 07:40:11 +00:00
public void flush ( boolean wait ) {
2019-11-01 18:11:05 +00:00
synchronized ( raf ) {
// If the file is marked as deleted, nothing is written
if ( isDeleted ( ) ) {
clear ( ) ;
file . delete ( ) ;
return ;
}
// Chunks that need to be relocated
Int2ObjectOpenHashMap < byte [ ] > relocate = new Int2ObjectOpenHashMap < > ( ) ;
// The position of each chunk
final Int2ObjectOpenHashMap < Integer > offsetMap = new Int2ObjectOpenHashMap < > ( ) ; // Offset -> <byte cx, byte cz, short size>
// The data of each modified chunk
2019-11-10 11:38:45 +00:00
final Int2ObjectOpenHashMap < Future < byte [ ] > > compressedMap = new Int2ObjectOpenHashMap < > ( ) ;
2019-11-01 18:11:05 +00:00
// The data of each chunk that needs to be moved
2019-11-10 11:38:45 +00:00
final Int2ObjectOpenHashMap < Future < byte [ ] > > append = new Int2ObjectOpenHashMap < > ( ) ;
2019-11-01 18:11:05 +00:00
// Get the current time for the chunk timestamp
long now = System . currentTimeMillis ( ) ;
// Load the chunks into the append or compressed map
2019-11-02 07:40:11 +00:00
final ForkJoinPool finalPool = this . pool ;
2019-11-10 11:38:45 +00:00
boolean modified = false ;
for ( int i = 0 ; i < chunks . length ; i + + ) {
if ( this . chunkInitialized [ i ] ) {
MCAChunk chunk = chunks [ i ] ;
if ( chunk ! = null & & chunk . isModified ( ) & & ! chunk . isDeleted ( ) ) {
modified = true ;
ForkJoinTask < byte [ ] > future = pool . submit ( ( ) - > {
FastByteArrayOutputStream compressed = toBytes ( chunk ) ;
return Arrays . copyOf ( compressed . array , compressed . length ) ;
2019-11-01 18:11:05 +00:00
} ) ;
}
}
2019-11-10 11:38:45 +00:00
}
//
// forEachCachedChunk(chunk -> {
// if (chunk.isModified() || chunk.isDeleted()) {
// modified[0] = true;
// chunk.setLastUpdate(now);
// if (!chunk.isDeleted()) {
// MCAFile.this.pool.submit(() -> {
// try {
// byte[] compressed = toBytes(chunk);
// int pair = MathMan.pair((short) (chunk.getX() & 31), (short) (chunk.getZ() & 31));
// Int2ObjectOpenHashMap map;
// if (getOffset(chunk.getX(), chunk.getZ()) == 0) {
// map = append;
// } else {
// map = compressedMap;
// }
// synchronized (map) {
// map.put(pair, compressed);
// }
// } catch (Throwable e) {
// e.printStackTrace();
// }
// });
// }
// }
// });
if ( ! modified ) {
// Not modified, do nothing
return ;
}
// If any changes were detected
file . setLastModified ( now ) ;
// Load the offset data into the offset map
forEachChunk ( new RunnableVal4 < Integer , Integer , Integer , Integer > ( ) {
@Override
public void run ( Integer cx , Integer cz , Integer offset , Integer size ) {
short pair1 = MathMan . pairByte ( ( byte ) ( cx & 31 ) , ( byte ) ( cz & 31 ) ) ;
short pair2 = ( short ) ( size > > 12 ) ;
offsetMap . put ( ( int ) offset , ( Integer ) MathMan . pair ( pair1 , pair2 ) ) ;
}
2019-11-02 07:40:11 +00:00
} ) ;
2019-11-01 18:11:05 +00:00
2019-11-10 11:38:45 +00:00
int start = 8192 ;
int written = start ;
int end = 8192 ;
int nextOffset = 8192 ;
try {
for ( int count = 0 ; count < offsetMap . size ( ) ; count + + ) {
// Get the previous position of the next chunk
Integer loc = offsetMap . get ( nextOffset ) ;
while ( loc = = null ) {
nextOffset + = 4096 ;
loc = offsetMap . get ( nextOffset ) ;
2019-11-01 18:11:05 +00:00
}
2019-11-10 11:38:45 +00:00
int offset = nextOffset ;
// Get the x/z from the paired location
short cxz = MathMan . unpairX ( loc ) ;
int cx = MathMan . unpairShortX ( cxz ) ;
int cz = MathMan . unpairShortY ( cxz ) ;
// Get the size from the pair
int size = MathMan . unpairY ( loc ) < < 12 ;
nextOffset + = size ;
end = Math . min ( start + size , end ) ;
int pair = getIndex ( cx , cz ) ;
2019-11-10 14:19:36 +00:00
Future < byte [ ] > future = null ;
2019-11-10 11:38:45 +00:00
byte [ ] newBytes = relocate . get ( pair ) ;
int newBytesLength = 0 ;
// newBytes is null if the chunk isn't modified or marked for moving
if ( newBytes = = null ) {
MCAChunk cached = getCachedChunk ( cx , cz ) ;
// If the previous offset marks the current write position (start) then we only write the header
if ( offset = = start ) {
if ( cached = = null | | ! cached . isModified ( ) ) {
writeHeader ( raf , cx , cz , start > > 12 , size > > 12 , true ) ;
start + = size ;
written = start + size ;
continue ;
2019-11-01 18:11:05 +00:00
} else {
2019-11-10 11:38:45 +00:00
future = compressedMap . get ( pair ) ;
2019-11-01 18:11:05 +00:00
}
2019-11-10 11:38:45 +00:00
} else {
// The chunk needs to be moved, fetch the data if necessary
future = compressedMap . get ( pair ) ;
if ( future = = null ) {
if ( cached = = null | | ! cached . isDeleted ( ) ) {
FastByteArrayInputStream result = getChunkCompressedBytes ( getOffset ( cx , cz ) ) ;
2019-11-10 14:19:36 +00:00
newBytes = result . array ;
newBytesLength = result . length ;
2019-11-01 18:11:05 +00:00
}
}
}
2019-11-10 14:19:36 +00:00
} else {
newBytesLength = newBytes . length ;
2019-11-10 11:38:45 +00:00
}
if ( future ! = null ) {
newBytes = future . get ( ) ;
newBytesLength = newBytes . length ;
}
2019-11-01 18:11:05 +00:00
2019-11-10 11:38:45 +00:00
if ( newBytes = = null ) {
writeHeader ( raf , cx , cz , 0 , 0 , false ) ;
continue ;
2019-11-01 18:11:05 +00:00
}
2019-11-10 11:38:45 +00:00
// The length to be written (compressed data + 5 byte chunk header)
int len = newBytesLength + 5 ;
int oldSize = ( size + 4095 ) > > 12 ;
int newSize = ( len + 4095 ) > > 12 ;
int nextOffset2 = end ;
// If the current write position (start) + length of data to write (len) are longer than the position of the next chunk, we need to move the next chunks
while ( start + len > end ) {
Integer nextLoc = offsetMap . get ( nextOffset2 ) ;
if ( nextLoc ! = null ) {
short nextCXZ = MathMan . unpairX ( nextLoc ) ;
int nextCX = MathMan . unpairShortX ( nextCXZ ) ;
int nextCZ = MathMan . unpairShortY ( nextCXZ ) ;
MCAChunk cached = getCachedChunk ( nextCX , nextCZ ) ;
if ( cached = = null | | ! cached . isModified ( ) ) {
FastByteArrayInputStream tmp = getChunkCompressedBytes ( nextOffset2 ) ;
byte [ ] nextBytes = Arrays . copyOf ( tmp . array , tmp . length ) ;
relocate . put ( MathMan . pair ( ( short ) ( nextCX & 31 ) , ( short ) ( nextCZ & 31 ) ) , nextBytes ) ;
}
int nextSize = MathMan . unpairY ( nextLoc ) < < 12 ;
end + = nextSize ;
nextOffset2 + = nextSize ;
} else {
end + = 4096 ;
nextOffset2 + = 4096 ;
2019-11-01 18:11:05 +00:00
}
}
2019-11-10 11:38:45 +00:00
// Write the chunk + chunk header
writeSafe ( raf , start , newBytes , newBytesLength ) ;
// Write the location data (beginning of file)
writeHeader ( raf , cx , cz , start > > 12 , newSize , true ) ;
written = start + newBytesLength + 5 ;
start + = newSize < < 12 ;
}
// Write all the chunks which need to be appended
if ( ! append . isEmpty ( ) ) {
for ( Int2ObjectMap . Entry < Future < byte [ ] > > entry : append . int2ObjectEntrySet ( ) ) {
int pair = entry . getIntKey ( ) ;
short cx = MathMan . unpairX ( pair ) ;
short cz = MathMan . unpairY ( pair ) ;
byte [ ] bytes = entry . getValue ( ) . get ( ) ;
int len = bytes . length + 5 ;
int newSize = ( len + 4095 ) > > 12 ;
writeSafe ( raf , start , bytes , bytes . length ) ;
writeHeader ( raf , cx , cz , start > > 12 , newSize , true ) ;
written = start + bytes . length + 5 ;
start + = newSize < < 12 ;
2019-11-01 18:11:05 +00:00
}
}
2019-11-10 11:38:45 +00:00
// Round the file length, since the vanilla server doesn't like it for some reason
raf . setLength ( 4096 * ( ( written + 4095 ) / 4096 ) ) ;
if ( raf instanceof BufferedRandomAccessFile ) {
( ( BufferedRandomAccessFile ) raf ) . flush ( ) ;
2019-11-01 18:11:05 +00:00
}
2019-11-10 11:38:45 +00:00
raf . close ( ) ;
} catch ( Throwable e ) {
e . printStackTrace ( ) ;
}
if ( wait ) {
pool . awaitQuiescence ( Long . MAX_VALUE , TimeUnit . MILLISECONDS ) ;
2019-11-01 18:11:05 +00:00
}
}
}
2019-11-11 21:02:28 +00:00
}