pub mod entry;
mod allocation;
mod allocator;
mod layer;
pub use allocation::Allocation;
pub use entry::Entry;
pub use layer::Layer;
use allocator::Allocator;
/// Side length, in pixels, of every (square) layer of the atlas texture.
pub const SIZE: u32 = 2048;
/// A texture atlas that packs many images into the layers of a single
/// 2D array texture, growing the texture by whole layers as needed.
#[derive(Debug)]
pub struct Atlas {
// The GPU texture backing all layers of the atlas.
texture: wgpu::Texture,
// Cached default view over `texture`; handed out by `view()` and
// recreated whenever the atlas grows.
texture_view: wgpu::TextureView,
// CPU-side occupancy bookkeeping: one `Layer` per array layer of `texture`.
layers: Vec<Layer>,
}
impl Atlas {
/// Creates a new, empty [`Atlas`] backed by a freshly allocated
/// `SIZE` x `SIZE` texture with two (initially empty) array layers.
pub fn new(device: &wgpu::Device) -> Self {
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: None,
        size: wgpu::Extent3d {
            width: SIZE,
            height: SIZE,
            depth: 1,
        },
        array_layer_count: 2,
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Bgra8UnormSrgb,
        // The atlas is sampled by shaders and is both source and
        // destination of copies (uploads and layer-growing blits).
        usage: wgpu::TextureUsage::COPY_DST
            | wgpu::TextureUsage::COPY_SRC
            | wgpu::TextureUsage::SAMPLED,
    });

    // The view is created before `texture` is moved into the struct.
    Atlas {
        texture_view: texture.create_default_view(),
        layers: vec![Layer::Empty, Layer::Empty],
        texture,
    }
}
/// Returns a view over the whole atlas texture, suitable for binding.
pub fn view(&self) -> &wgpu::TextureView {
&self.texture_view
}
/// Returns the current number of array layers in the atlas.
pub fn layer_count(&self) -> usize {
self.layers.len()
}
/// Uploads a `width` x `height` image (`data`, 4 bytes per pixel) into
/// the atlas, returning the [`Entry`] describing where it landed, or
/// `None` when no space could be allocated.
pub fn upload(
    &mut self,
    width: u32,
    height: u32,
    data: &[u8],
    device: &wgpu::Device,
    encoder: &mut wgpu::CommandEncoder,
) -> Option<Entry> {
    // Reserve space first; if that pushed new layers, grow the backing
    // texture before recording any copy commands into it.
    let layers_before = self.layers.len();
    let entry = self.allocate(width, height)?;
    let added_layers = self.layers.len() - layers_before;
    self.grow(added_layers, device, encoder);

    log::info!("Allocated atlas entry: {:?}", entry);

    // Stage the pixel data in a copy-source buffer.
    let buffer =
        device.create_buffer_with_data(data, wgpu::BufferUsage::COPY_SRC);

    match &entry {
        Entry::Contiguous(allocation) => {
            self.upload_allocation(&buffer, width, height, 0, allocation, encoder);
        }
        Entry::Fragmented { fragments, .. } => {
            for fragment in fragments {
                // Each fragment starts at its own pixel offset inside the
                // source image (rows are `width` pixels, 4 bytes each).
                let (x, y) = fragment.position;
                let offset = (y * width + x) as usize * 4;

                self.upload_allocation(
                    &buffer,
                    width,
                    height,
                    offset,
                    &fragment.allocation,
                    encoder,
                );
            }
        }
    }

    log::info!("Current atlas: {:?}", self);

    Some(entry)
}
/// Releases the atlas space held by `entry`, deallocating every
/// underlying allocation (one for contiguous entries, several for
/// fragmented ones).
pub fn remove(&mut self, entry: &Entry) {
    log::info!("Removing atlas entry: {:?}", entry);

    match entry {
        Entry::Contiguous(allocation) => self.deallocate(allocation),
        Entry::Fragmented { fragments, .. } => {
            fragments
                .iter()
                .for_each(|fragment| self.deallocate(&fragment.allocation));
        }
    }
}
/// Reserves a `width` x `height` region in the atlas, pushing new
/// layers onto `self.layers` when needed (the caller is responsible for
/// growing the GPU texture to match). Returns `None` if allocation fails.
///
/// Fix over the previous version: when an oversized (fragmented)
/// allocation fails partway through, the fragments already reserved are
/// now rolled back instead of being leaked forever.
fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
    // Exact layer-sized request: claim a whole layer.
    if width == SIZE && height == SIZE {
        let mut empty_layers = self
            .layers
            .iter_mut()
            .enumerate()
            .filter(|(_, layer)| layer.is_empty());

        if let Some((i, layer)) = empty_layers.next() {
            *layer = Layer::Full;
            return Some(Entry::Contiguous(Allocation::Full { layer: i }));
        }

        self.layers.push(Layer::Full);
        return Some(Entry::Contiguous(Allocation::Full {
            layer: self.layers.len() - 1,
        }));
    }

    // Oversized request: split it into SIZE x SIZE (or smaller) tiles and
    // allocate each tile recursively.
    if width > SIZE || height > SIZE {
        let mut fragments = Vec::new();
        let mut y = 0;

        while y < height {
            // Shadowed: the tile height for this row.
            let height = std::cmp::min(height - y, SIZE);
            let mut x = 0;
            while x < width {
                // Shadowed: the tile width for this column.
                let width = std::cmp::min(width - x, SIZE);

                let allocation = match self.allocate(width, height) {
                    Some(allocation) => allocation,
                    None => {
                        // Roll back the fragments reserved so far, so a
                        // failed oversized allocation does not leak
                        // atlas space.
                        for fragment in fragments {
                            self.deallocate(&fragment.allocation);
                        }

                        return None;
                    }
                };

                // Tiles are at most SIZE x SIZE, so the recursive call
                // always yields a contiguous allocation.
                if let Entry::Contiguous(allocation) = allocation {
                    fragments.push(entry::Fragment {
                        position: (x, y),
                        allocation,
                    });
                }

                x += width;
            }

            y += height;
        }

        return Some(Entry::Fragmented {
            size: (width, height),
            fragments,
        });
    }

    // Regular request: try to fit it on an existing layer first.
    for (i, layer) in self.layers.iter_mut().enumerate() {
        match layer {
            Layer::Empty => {
                let mut allocator = Allocator::new(SIZE);
                if let Some(region) = allocator.allocate(width, height) {
                    *layer = Layer::Busy(allocator);
                    return Some(Entry::Contiguous(Allocation::Partial {
                        region,
                        layer: i,
                    }));
                }
            }
            Layer::Busy(allocator) => {
                if let Some(region) = allocator.allocate(width, height) {
                    return Some(Entry::Contiguous(Allocation::Partial {
                        region,
                        layer: i,
                    }));
                }
            }
            _ => {}
        }
    }

    // No existing layer had room: add a brand-new one.
    let mut allocator = Allocator::new(SIZE);
    if let Some(region) = allocator.allocate(width, height) {
        self.layers.push(Layer::Busy(allocator));
        return Some(Entry::Contiguous(Allocation::Partial {
            region,
            layer: self.layers.len() - 1,
        }));
    }

    // Not even a fresh layer can hold the request.
    None
}
/// Returns the space held by `allocation` to the atlas bookkeeping.
fn deallocate(&mut self, allocation: &Allocation) {
    log::info!("Deallocating atlas: {:?}", allocation);

    match allocation {
        // A fully-claimed layer simply becomes empty again.
        Allocation::Full { layer } => {
            self.layers[*layer] = Layer::Empty;
        }
        Allocation::Partial { layer, region } => {
            let slot = &mut self.layers[*layer];

            if let Layer::Busy(allocator) = slot {
                allocator.deallocate(region);

                // Demote the layer to `Empty` once its allocator holds
                // no live regions.
                if allocator.is_empty() {
                    *slot = Layer::Empty;
                }
            }
        }
    }
}
/// Records a buffer-to-texture copy for a single allocation: reads from
/// `buffer` at `offset` with the source image's row pitch, and writes
/// into the allocation's layer/position within the atlas texture.
fn upload_allocation(
    &mut self,
    buffer: &wgpu::Buffer,
    image_width: u32,
    image_height: u32,
    offset: usize,
    allocation: &Allocation,
    encoder: &mut wgpu::CommandEncoder,
) {
    let (x, y) = allocation.position();
    let (width, height) = allocation.size();

    encoder.copy_buffer_to_texture(
        wgpu::BufferCopyView {
            buffer,
            offset: offset as u64,
            // 4 bytes per pixel; the pitch is that of the whole source
            // image, not of this (possibly smaller) allocation.
            bytes_per_row: 4 * image_width,
            rows_per_image: image_height,
        },
        wgpu::TextureCopyView {
            texture: &self.texture,
            array_layer: allocation.layer() as u32,
            mip_level: 0,
            origin: wgpu::Origin3d { x, y, z: 0 },
        },
        wgpu::Extent3d {
            width,
            height,
            depth: 1,
        },
    );
}
/// Grows the backing texture by `amount` array layers: allocates a new
/// texture sized to the current layer count, copies every pre-existing
/// non-empty layer across, and swaps it (plus a fresh view) in.
fn grow(
    &mut self,
    amount: usize,
    device: &wgpu::Device,
    encoder: &mut wgpu::CommandEncoder,
) {
    // Nothing was added; keep the current texture.
    if amount == 0 {
        return;
    }

    let new_texture = device.create_texture(&wgpu::TextureDescriptor {
        label: None,
        size: wgpu::Extent3d {
            width: SIZE,
            height: SIZE,
            depth: 1,
        },
        array_layer_count: self.layers.len() as u32,
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Bgra8UnormSrgb,
        usage: wgpu::TextureUsage::COPY_DST
            | wgpu::TextureUsage::COPY_SRC
            | wgpu::TextureUsage::SAMPLED,
    });

    // Only layers that existed before the growth hold pixel data worth
    // preserving; the trailing `amount` layers are brand new.
    let old_layer_count = self.layers.len() - amount;

    for (i, layer) in self.layers.iter().take(old_layer_count).enumerate() {
        // Empty layers have nothing to copy.
        if layer.is_empty() {
            continue;
        }

        encoder.copy_texture_to_texture(
            wgpu::TextureCopyView {
                texture: &self.texture,
                array_layer: i as u32,
                mip_level: 0,
                origin: wgpu::Origin3d { x: 0, y: 0, z: 0 },
            },
            wgpu::TextureCopyView {
                texture: &new_texture,
                array_layer: i as u32,
                mip_level: 0,
                origin: wgpu::Origin3d { x: 0, y: 0, z: 0 },
            },
            wgpu::Extent3d {
                width: SIZE,
                height: SIZE,
                depth: 1,
            },
        );
    }

    self.texture = new_texture;
    self.texture_view = self.texture.create_default_view();
}
}