When I try to create a ComputeBuffer of bools…
new ComputeBuffer(len, sizeof(bool));
…it complains that the stride is invalid because 1 byte is not a multiple of 4.
Invalid stride 1 for Compute Buffer - must be greater than 0, less or equal to 2048 and a multiple of 4.
Do I really need to wrap my bool inside a struct? Or just use int instead?
Yes, you can use int. If you’re at all concerned about memory or bandwidth, you probably want to use a bit array; you can pack 32 bools into a single int. In C# that would look something like…
/// <summary>
/// Packs booleans into a uint[] (32 flags per element) so the backing array
/// can be uploaded to a ComputeBuffer with a valid 4-byte stride.
/// "Unchecked" because the indexer performs no bounds checking of its own
/// beyond what the backing array enforces.
/// </summary>
struct UncheckedBitArray
{
    private readonly uint[] _array;

    /// <param name="length">Number of bits to store; must be non-negative.</param>
    /// <exception cref="System.ArgumentOutOfRangeException">length is negative.</exception>
    public UncheckedBitArray(int length)
    {
        // Bug fix: a negative length previously produced a negative array size
        // and a confusing OverflowException; fail fast with a clear exception.
        if (length < 0)
            throw new System.ArgumentOutOfRangeException(nameof(length));
        // One uint per 32 bits, rounded up. Note length == 0 still allocates
        // one word, matching the original capacity formula.
        _array = new uint[(length - 1) / 32 + 1];
    }

    /// <summary>Gets or sets the bit at <paramref name="index"/>.</summary>
    public bool this[int index]
    {
        get => (_array[index / 32] & (1U << (index % 32))) != 0;
        set
        {
            if (value) _array[index / 32] |= 1U << (index % 32);
            else _array[index / 32] &= ~(1U << (index % 32));
        }
    }
}
You can send the backing uint array to a compute shader via a ComputeBuffer and unpack the bits there in the same way.
1 Like
… and in the compute shader (HLSL), these functions might be useful:
// Extract byte `index` (0 = least significant, valid range 0-3) from a
// packed 32-bit unsigned integer.
uint ReadByteFromUint(uint u32, uint index)
{
    uint shift = index << 3u;          // index * 8 bits
    return (u32 >> shift) & 0xFFu;
}
// Extract bit `index` (valid range 0-7) from a single byte value,
// returning 0u or 1u.
uint ReadBitFromByte(uint byte, uint index)
{
    return (byte & (1u << index)) != 0u ? 1u : 0u;
}
// Write `bit` (must be 0u or 1u) into position `index` (0-7) of `byte`,
// returning the updated byte; all other bits are preserved.
uint WriteBitToByte (uint bit, uint byte, uint index)
{
    uint mask = 1u << index;
    uint cleared = byte & ~mask;       // zero the target bit first
    return cleared | (bit << index);
}
// Write `byte` into byte slot `index` (0 = least significant, valid range
// 0-3) of a packed 32-bit unsigned integer, preserving the other bytes.
uint WriteByteToUint(uint byte, uint u32, uint index)
{
    uint shift = index << 3u;          // index * 8 bits
    uint kept = u32 & ~(255u << shift); // clear the target byte slot
    return kept | (byte << shift);
}
// Returns the number of set bits (population count, 0..32) in a 32-bit uint.
// Classic SWAR popcount ("Bit Twiddling Hacks" / Hacker's Delight).
// NOTE(review): on Shader Model 5+ the countbits() intrinsic does this natively.
uint BitCount(uint i)
{
    // Fold each 2-bit pair into a count of its set bits (0..2).
    i = i - ((i >> 1u) & 0x55555555u);
    // Sum adjacent 2-bit counts into per-nibble counts (0..4).
    i = (i & 0x33333333u) + ((i >> 2u) & 0x33333333u);
    // Sum nibbles into bytes, then multiply by 0x01010101 so the top byte
    // accumulates the total of all four bytes; shift it down to return it.
    return (((i + (i >> 4u)) & 0x0F0F0F0Fu) * 0x01010101u) >> 24u;
}
// Returns the bitwise OR of two 32-bit uints (union of set bits).
// (Original comment said "comparison" — this is an OR, not a comparison.)
uint BitwiseOr(uint x, uint y)
{
    return x | y;
}
// Returns the bitwise AND of two 32-bit uints (intersection of set bits).
// (Original comment said "addition" — this is an AND, not an addition.)
uint BitwiseAnd(uint x, uint y)
{
    return x & y;
}
On the C# side:
/// <summary>
/// Copies a BitArray into a byte[] (8 bits per byte, rounded up), ready for
/// further packing into uints for a ComputeBuffer.
/// </summary>
/// <param name="bits">Bits to copy; must not be null.</param>
/// <returns>Byte array of ceil(Length / 8) bytes; empty for an empty BitArray.</returns>
byte[] BitArrayToBytes(BitArray bits)
{
    if (bits == null) throw new System.ArgumentNullException(nameof(bits));
    // Bug fix: the (Length - 1) / 8 + 1 formula allocated one spurious
    // byte when the BitArray was empty.
    if (bits.Length == 0) return new byte[0];
    byte[] bytes = new byte[(bits.Length - 1) / 8 + 1];
    bits.CopyTo(bytes, 0);
    return bytes;
}
/// <summary>Wraps a byte[] in a BitArray (8 bits per byte, LSB first).</summary>
BitArray BytesToBitArray(byte[] bytes) => new BitArray(bytes);
/// <summary>
/// Packs a byte[] into a uint[] in native (platform) byte order. The result
/// is rounded up to whole uints; when the length is not a multiple of 4 the
/// trailing bytes of the last uint are zero.
/// </summary>
/// <param name="bytes">Bytes to pack; must not be null.</param>
uint[] BytesToUints(byte[] bytes)
{
    if (bytes == null) throw new System.ArgumentNullException(nameof(bytes));
    // Bug fix: bytes.Length / 4 truncated, so BlockCopy threw
    // ArgumentException (destination too small) for any length that was
    // not a multiple of 4. Round up instead.
    uint[] uints = new uint[(bytes.Length + 3) / 4];
    System.Buffer.BlockCopy(bytes, 0, uints, 0, bytes.Length);
    return uints;
}
/// <summary>
/// Formats uints as comma-terminated values, 8 per output line, for debug
/// printing (e.g. "1,2,3,4,5,6,7,8,").
/// </summary>
/// <param name="uints">Values to format.</param>
/// <returns>One string per group of up to 8 values, including the final
/// partial group.</returns>
string[] PrintUints(uint[] uints)
{
    List<string> lines = new List<string>();
    string line = "";
    for (int i = 0; i < uints.Length; i++)
    {
        line = line + uints[i].ToString() + ",";
        if ((i + 1) % 8 == 0)
        {
            lines.Add(line);
            line = "";
        }
    }
    // Bug fix: the original dropped the trailing line whenever
    // uints.Length was not a multiple of 8 (e.g. 3 values -> empty result).
    if (line.Length > 0)
    {
        lines.Add(line);
    }
    return lines.ToArray();
}
2 Likes