Mirror of https://github.com/plexusorg/Plex-FAWE.git (synced 2024-11-16 17:16:11 +00:00)
Cleanup multiple batch usage for chunk deletion.
commit 8fcc22c21d (parent d763ab374c)
@@ -133,17 +133,13 @@ public class ChunkCommands {
         newBatch.worldPath = worldDir.toAbsolutePath().normalize().toString();
         newBatch.backup = true;
         final Region selection = session.getSelection(player.getWorld());
-        int chunkCount;
         if (selection instanceof CuboidRegion) {
             newBatch.minChunk = BlockVector2.at(selection.getMinimumPoint().getBlockX() >> 4, selection.getMinimumPoint().getBlockZ() >> 4);
             newBatch.maxChunk = BlockVector2.at(selection.getMaximumPoint().getBlockX() >> 4, selection.getMaximumPoint().getBlockZ() >> 4);
-            final BlockVector2 dist = newBatch.maxChunk.subtract(newBatch.minChunk).add(1, 1);
-            chunkCount = dist.getBlockX() * dist.getBlockZ();
         } else {
             // this has a possibility to OOM for very large selections still
             Set<BlockVector2> chunks = selection.getChunks();
             newBatch.chunks = new ArrayList<>(chunks);
-            chunkCount = chunks.size();
         }
         if (beforeTime != null) {
             newBatch.deletionPredicates = new ArrayList<>();
@@ -161,9 +157,15 @@ public class ChunkCommands {
             throw new StopExecutionException(TextComponent.of("Failed to write chunk list: " + e.getMessage()));
         }
 
-        player.print(String.format("%d chunk(s) have been marked for deletion and will be deleted the next time the server starts.", chunkCount));
-        player.print(TextComponent.of("You can mark more chunks for deletion, or to stop the server now, run: ", TextColor.LIGHT_PURPLE)
-                .append(TextComponent.of("/stop", TextColor.AQUA).clickEvent(ClickEvent.of(ClickEvent.Action.SUGGEST_COMMAND, "/stop"))));
+        player.print(String.format("%d chunk(s) have been marked for deletion the next time the server starts.",
+                newBatch.getChunkCount()));
+        if (currentInfo.batches.size() > 1) {
+            player.printDebug(String.format("%d chunks total marked for deletion. (May have overlaps).",
+                    currentInfo.batches.stream().mapToInt(ChunkDeletionInfo.ChunkBatch::getChunkCount).sum()));
+        }
+        player.print(TextComponent.of("You can mark more chunks for deletion, or to stop now, run: ", TextColor.LIGHT_PURPLE)
+                .append(TextComponent.of("/stop", TextColor.AQUA)
+                        .clickEvent(ClickEvent.of(ClickEvent.Action.SUGGEST_COMMAND, "/stop"))));
     }
 
 }
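Note on the message changes above: the local chunkCount variable removed in the first hunk is replaced by newBatch.getChunkCount() (added to ChunkDeletionInfo.ChunkBatch in the last hunk below), and when more than one batch is already queued the command also prints a debug total summed across all batches. A minimal sketch of that summation on its own, assuming a ChunkDeletionInfo whose batches list is populated:

    // Per-batch counts can overlap, so this total may exceed the number of distinct chunks.
    int total = currentInfo.batches.stream()
            .mapToInt(ChunkDeletionInfo.ChunkBatch::getChunkCount)
            .sum();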
@@ -70,7 +70,7 @@ public final class ChunkDeleter {
 
     public static void writeInfo(ChunkDeletionInfo info, Path chunkFile) throws IOException, JsonIOException {
         String json = chunkDeleterGson.toJson(info, new TypeToken<ChunkDeletionInfo>() {}.getType());
-        try (BufferedWriter writer = Files.newBufferedWriter(chunkFile, StandardOpenOption.CREATE)) {
+        try (BufferedWriter writer = Files.newBufferedWriter(chunkFile)) {
             writer.write(json);
         }
     }
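The writeInfo change above drops the explicit StandardOpenOption.CREATE. Calling Files.newBufferedWriter with no options uses the defaults CREATE, TRUNCATE_EXISTING and WRITE, whereas passing only CREATE opens the file without truncating it, so rewriting a shorter chunk list could leave stale JSON after the new content. A small self-contained sketch of the difference (the temporary file and JSON strings are made up for illustration):

    import java.io.BufferedWriter;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    public class WriterOptionsDemo {
        public static void main(String[] args) throws IOException {
            Path file = Files.createTempFile("chunk-info", ".json");
            Files.writeString(file, "{\"old\":\"much longer content\"}");

            // Only CREATE: no truncation, so the shorter JSON is followed by leftover old bytes.
            try (BufferedWriter w = Files.newBufferedWriter(file, StandardOpenOption.CREATE)) {
                w.write("{\"new\":1}");
            }
            System.out.println(Files.readString(file));

            // No options: CREATE + TRUNCATE_EXISTING + WRITE, so the file holds exactly the new JSON.
            try (BufferedWriter w = Files.newBufferedWriter(file)) {
                w.write("{\"new\":1}");
            }
            System.out.println(Files.readString(file));
        }
    }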
@@ -130,10 +130,14 @@ public final class ChunkDeleter {
     }
 
     private boolean runBatch(ChunkDeletionInfo.ChunkBatch chunkBatch) {
-        logger.debug("Processing deletion batch.");
+        int chunkCount = chunkBatch.getChunkCount();
+        logger.debug("Processing deletion batch with {} chunks.", chunkCount);
         final Map<Path, Stream<BlockVector2>> regionToChunkList = groupChunks(chunkBatch);
         BiPredicate<RegionAccess, BlockVector2> predicate = createPredicates(chunkBatch.deletionPredicates);
         shouldPreload = chunkBatch.chunks == null;
+        deletionsRequested += chunkCount;
+        debugRate = chunkCount / 10;
+
         return regionToChunkList.entrySet().stream().allMatch(entry -> {
             Path regionPath = entry.getKey();
             if (!Files.exists(regionPath)) return true;
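In runBatch above, the per-batch bookkeeping is now derived once from chunkBatch.getChunkCount() instead of being updated inside groupChunks (those updates are removed in the next two hunks). Because debugRate = chunkCount / 10 is integer division, any batch with fewer than ten chunks sets it to zero, which is why the progress check later gains a zero guard. A sketch of that bookkeeping in isolation, with the deleter's int fields assumed:

    int chunkCount = chunkBatch.getChunkCount();
    deletionsRequested += chunkCount; // running total across every batch in this run
    debugRate = chunkCount / 10;      // integer division: 0 when the batch has fewer than 10 chunks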
@@ -152,8 +156,6 @@ public final class ChunkDeleter {
     private Map<Path, Stream<BlockVector2>> groupChunks(ChunkDeletionInfo.ChunkBatch chunkBatch) {
         Path worldPath = Paths.get(chunkBatch.worldPath);
         if (chunkBatch.chunks != null) {
-            deletionsRequested += chunkBatch.chunks.size();
-            debugRate = chunkBatch.chunks.size() / 10;
             return chunkBatch.chunks.stream()
                     .collect(Collectors.groupingBy(RegionFilePos::new))
                     .entrySet().stream().collect(Collectors.toMap(
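The kept lines above show the grouping idiom groupChunks still uses: group the chunk list by region file position, then re-collect the groups into a map from region file to a stream of chunk positions. A standalone sketch of that two-step stream collection, with a made-up RegionPos stand-in for RegionFilePos (chunks map 32x32 onto region files):

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class GroupingDemo {
        // Hypothetical stand-in for RegionFilePos.
        record RegionPos(int x, int z) {
            RegionPos(int[] chunk) { this(chunk[0] >> 5, chunk[1] >> 5); }
            String fileName() { return "r." + x + "." + z + ".mca"; }
        }

        public static void main(String[] args) {
            List<int[]> chunks = List.of(new int[]{0, 0}, new int[]{31, 31}, new int[]{32, 0});
            Map<String, Stream<int[]>> grouped = chunks.stream()
                    .collect(Collectors.groupingBy(RegionPos::new))   // Map<RegionPos, List<int[]>>
                    .entrySet().stream().collect(Collectors.toMap(
                            e -> e.getKey().fileName(),
                            e -> e.getValue().stream()));
            grouped.forEach((file, s) -> System.out.println(file + " -> " + s.count() + " chunk(s)"));
        }
    }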
@@ -193,10 +195,6 @@ public final class ChunkDeleter {
                 groupedChunks.put(regionPath, stream);
             }
         }
-        final BlockVector2 dist = maxChunk.subtract(minChunk).add(1, 1);
-        final int batchSize = dist.getBlockX() * dist.getBlockZ();
-        debugRate = batchSize / 10;
-        this.deletionsRequested += batchSize;
         return groupedChunks;
     }
 }
@@ -256,7 +254,7 @@ public final class ChunkDeleter {
                 if (deletionPredicate.test(region, chunk)) {
                     region.deleteChunk(chunk);
                     totalChunksDeleted++;
-                    if (totalChunksDeleted % debugRate == 0) {
+                    if (debugRate != 0 && totalChunksDeleted % debugRate == 0) {
                         logger.debug("Deleted {} chunks so far.", totalChunksDeleted);
                     }
                 } else {
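The debugRate != 0 guard added above prevents an ArithmeticException: with debugRate computed as chunkCount / 10 in runBatch, any batch of fewer than ten chunks makes the modulo a division by zero. A tiny illustration with hypothetical numbers:

    int debugRate = 7 / 10;          // 0 for a 7-chunk batch
    int totalChunksDeleted = 3;
    // Unguarded, this would throw java.lang.ArithmeticException: / by zero
    // if (totalChunksDeleted % debugRate == 0) { ... }
    if (debugRate != 0 && totalChunksDeleted % debugRate == 0) {
        System.out.println("Deleted " + totalChunksDeleted + " chunks so far.");
    }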
@@ -38,6 +38,12 @@ public class ChunkDeletionInfo {
         public List<BlockVector2> chunks;
         public BlockVector2 minChunk;
         public BlockVector2 maxChunk;
+
+        public int getChunkCount() {
+            if (chunks != null) return chunks.size();
+            final BlockVector2 dist = maxChunk.subtract(minChunk).add(1, 1);
+            return dist.getBlockX() * dist.getBlockZ();
+        }
     }
 
     public static class DeletionPredicate {
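The new getChunkCount() above covers both batch shapes: an explicit chunk list returns its size, while a cuboid batch computes an inclusive extent. A worked example with hypothetical coordinates (assumes com.sk89q.worldedit.math.BlockVector2):

    BlockVector2 minChunk = BlockVector2.at(0, 0);
    BlockVector2 maxChunk = BlockVector2.at(3, 2);
    BlockVector2 dist = maxChunk.subtract(minChunk).add(1, 1); // (4, 3): the +1 makes the range inclusive
    int count = dist.getBlockX() * dist.getBlockZ();           // 12 chunks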