-
Notifications
You must be signed in to change notification settings - Fork 3
Image filter (Bun)
In this example we're going to build a server-side app that applies a filter to an image. It'll be a real server this time, one that accepts requests from the browser and sends different images based on parameters in the URL.
First, we initialize the project:
mkdir filter
cd filter
bun init
bun init helps you get started with a minimal project and tries to guess sensible defaults. Press ^C anytime to quit
package name (filter):
entry point (index.ts): src/index.js
Then we install modules that we need:
bun install fastify sharp bun-zigar
bunx bun-zigar preload
mkdir zig img
We'll be using Fastify, a modern alternative to Express.js, and Sharp, a popular image processing library.
After creating the basic skeleton, add index.js
:
import Fastify from 'fastify';
import Sharp from 'sharp';
import { fileURLToPath } from 'url';
const fastify = Fastify();
// GET / — renders a test page containing one <img> tag per parameter set.
// Each tag's URL embeds the resize/filter parameters as base64url-encoded
// JSON, to be decoded by the /img/:name/:filter/:base64 route.
fastify.get('/', (req, reply) => {
  const name = 'sample';
  const filter = 'sepia';
  const tags = [
    { width: 150, height: 100, intensity: 0.0 },
    { width: 150, height: 100, intensity: 0.3 },
    { width: 300, height: 300, intensity: 0.2 },
    { width: 300, height: 300, intensity: 0.4 },
    { width: 400, height: 400, intensity: 0.3 },
    { width: 500, height: 200, intensity: 0.5 },
  ].map((params) => {
    const json = JSON.stringify(params);
    // base64url avoids '/' and '+': a '/' in standard base64 output would
    // split the path segment and break the :base64 route parameter.
    // Buffer.from(str, 'base64') on the decoding side accepts both alphabets.
    const base64 = Buffer.from(json).toString('base64url');
    const url = `img/${name}/${filter}/${base64}`;
    return `<p><img src="${url}"></p>`;
  });
  reply.type('text/html');
  return `
  <!doctype html>
  <html lang="en">
    <head>
      <meta charset="UTF-8" />
      <title>Image filter test</title>
    </head>
    <body>${tags.join('')}</body>
  </html>`;
});
// GET /img/:name/:filter/:base64 — loads img/<name>.png, resizes it per the
// base64-encoded JSON parameters, and returns the result as a JPEG.
fastify.get('/img/:name/:filter/:base64', async (req, reply) => {
  const { name, filter, base64 } = req.params;
  // name (and, once filtering is added, filter) end up in filesystem /
  // import paths; reject anything that could escape the intended
  // directories (e.g. "../../etc/passwd")
  if (!/^[\w-]+$/.test(name) || !/^[\w-]+$/.test(filter)) {
    reply.code(404);
    return 'Not found';
  }
  // decode the parameter object from the URL; Buffer.from(str, 'base64')
  // accepts both the standard and the url-safe base64 alphabet
  const json = Buffer.from(base64, 'base64');
  const params = JSON.parse(json);
  // resolve img/<name>.png relative to this source file
  const url = new URL(`../img/${name}.png`, import.meta.url);
  const path = fileURLToPath(url);
  // width/height drive the resize; whatever remains (e.g. intensity) is
  // destined for the filter step added later in the tutorial
  const { width, height, ...filterParams } = params;
  // open image, resize it, and get raw RGBA data
  const inputImage = Sharp(path).ensureAlpha().resize(width, height);
  const { data, info } = await inputImage.raw().toBuffer({ resolveWithObject: true });
  // place raw data into new image and output it as JPEG
  const outputImage = Sharp(data, { raw: info });
  reply.type('image/jpeg');
  return outputImage.jpeg().toBuffer();
});
// start the server; top-level await works because this file is an ES module
const address = await fastify.listen({ port: 3000 });
console.log(`Listening at ${address}`);
The root route /
maps to an HTML page with a number of <img>
tags referencing images at
different settings. The handler of /img/:name/:filter/:base64
generates these images. It
decompresses the source image, resizes it, and then obtains the raw pixel data. It then immediately
saves the data as a JPEG image. We'll add the filtering step after we've verified that the basic
code works.
Finally, download the following image into img
as sample.png
(or choose an image of your own):
We are ready to start the server:
bun src/index.js
When you open the link, you should see the following:
Okay, now it's the time to implement the image filtering functionality. Save the following code
as sepia.zig
in the zig
directory:
// Pixel Bender kernel "Sepia" (translated using pb2zig)
const std = @import("std");
pub const kernel = struct {
// kernel information
pub const namespace = "AIF";
pub const vendor = "Adobe Systems";
pub const version = 2;
pub const description = "a variable sepia filter";
// single tunable parameter: strength of the sepia tint (0.0 = plain grayscale)
pub const parameters = .{
.intensity = .{
.type = f32,
.minValue = 0.0,
.maxValue = 1.0,
.defaultValue = 0.0,
},
};
// one 4-channel (RGBA) input image and one 4-channel output image
pub const inputImages = .{
.src = .{ .channels = 4 },
};
pub const outputImages = .{
.dst = .{ .channels = 4 },
};
// generic kernel instance type
fn Instance(comptime InputStruct: type, comptime OutputStruct: type, comptime ParameterStruct: type) type {
return struct {
params: ParameterStruct,
input: InputStruct,
output: OutputStruct,
// coordinates of the pixel currently being evaluated (advanced by the driver loop)
outputCoord: @Vector(2, u32) = @splat(0),
// output pixel
dst: @Vector(4, f32) = undefined,
// functions defined in kernel
// Computes one output pixel at outputCoord: sample source, convert
// RGBA -> YIQ, overwrite the chroma (I = intensity, Q = 0), convert back.
pub fn evaluatePixel(self: *@This()) void {
const intensity = self.params.intensity;
const src = self.input.src;
const dst = self.output.dst;
self.dst = @splat(0.0);
var rgbaColor: @Vector(4, f32) = undefined;
var yiqaColor: @Vector(4, f32) = undefined;
// RGB -> YIQ conversion matrix; stored with rows/columns swapped,
// which @"M * V" compensates for by transposing before multiplying
const YIQMatrix: [4]@Vector(4, f32) = .{
.{
0.299,
0.596,
0.212,
0.0,
},
.{
0.587,
-0.275,
-0.523,
0.0,
},
.{
0.114,
-0.321,
0.311,
0.0,
},
.{ 0.0, 0.0, 0.0, 1.0 },
};
// YIQ -> RGB inverse conversion matrix, same storage convention
const inverseYIQ: [4]@Vector(4, f32) = .{
.{ 1.0, 1.0, 1.0, 0.0 },
.{
0.956,
-0.272,
-1.1,
0.0,
},
.{
0.621,
-0.647,
1.7,
0.0,
},
.{ 0.0, 0.0, 0.0, 1.0 },
};
rgbaColor = src.sampleNearest(self.outCoord());
yiqaColor = @"M * V"(YIQMatrix, rgbaColor);
// replace the chroma channels: I becomes the sepia intensity, Q is zeroed
yiqaColor[1] = intensity;
yiqaColor[2] = 0.0;
self.dst = @"M * V"(inverseYIQ, yiqaColor);
dst.setPixel(self.outputCoord[0], self.outputCoord[1], self.dst);
}
// center of the current output pixel in image coordinates (Pixel Bender convention)
pub fn outCoord(self: *@This()) @Vector(2, f32) {
return .{ @as(f32, @floatFromInt(self.outputCoord[0])) + 0.5, @as(f32, @floatFromInt(self.outputCoord[1])) + 0.5 };
}
};
}
// kernel instance creation function
pub fn create(input: anytype, output: anytype, params: anytype) Instance(@TypeOf(input), @TypeOf(output), @TypeOf(params)) {
return .{
.input = input,
.output = output,
.params = params,
};
}
// built-in Pixel Bender functions
// Matrix * vector product: transposes m1 into t1, then dot-products each
// transposed row with v2. The transpose undoes the swapped storage of the
// matrices defined in evaluatePixel above.
// NOTE(review): @typeInfo field spelling (.Array) is pre-0.13 Zig; confirm
// the toolchain version this tutorial targets before upgrading.
fn @"M * V"(m1: anytype, v2: anytype) @TypeOf(v2) {
const ar = @typeInfo(@TypeOf(m1)).Array;
var t1: @TypeOf(m1) = undefined;
inline for (m1, 0..) |column, c| {
inline for (0..ar.len) |r| {
t1[r][c] = column[r];
}
}
var result: @TypeOf(v2) = undefined;
inline for (t1, 0..) |column, c| {
result[c] = @reduce(.Add, column * v2);
}
return result;
}
};
// concrete u8/RGBA input, output, and parameter types derived from the
// kernel description above
pub const Input = KernelInput(u8, kernel);
pub const Output = KernelOutput(u8, kernel);
pub const Parameters = KernelParameters(kernel);
// Runs the kernel over the full image height and returns the output image(s).
// Fails only if `allocator` cannot provide the output buffers.
pub fn createOutput(allocator: std.mem.Allocator, width: u32, height: u32, input: Input, params: Parameters) !Output {
return createPartialOutput(allocator, width, height, 0, height, input, params);
}
// Runs the kernel over `count` rows starting at row `start`, allocating only
// enough pixels for those rows (allows the work to be split into chunks).
pub fn createPartialOutput(allocator: std.mem.Allocator, width: u32, height: u32, start: u32, count: u32, input: Input, params: Parameters) !Output {
var output: Output = undefined;
// allocate a buffer for every output image declared by the kernel;
// `offset` maps full-image coordinates into this partial buffer
inline for (std.meta.fields(Output)) |field| {
const ImageT = @TypeOf(@field(output, field.name));
@field(output, field.name) = .{
.data = try allocator.alloc(ImageT.Pixel, count * width),
.width = width,
.height = height,
.offset = start * width,
};
}
var instance = kernel.create(input, output, params);
// some pb2zig kernels precompute derived values; call the hook if present
if (@hasDecl(@TypeOf(instance), "evaluateDependents")) {
instance.evaluateDependents();
}
// iterate row-major over the requested rows, one pixel at a time
const end = start + count;
instance.outputCoord[1] = start;
while (instance.outputCoord[1] < end) : (instance.outputCoord[1] += 1) {
instance.outputCoord[0] = 0;
while (instance.outputCoord[0] < width) : (instance.outputCoord[0] += 1) {
instance.evaluatePixel();
}
}
return output;
}
const ColorSpace = enum { srgb, @"display-p3" };
// Image type used by the kernel: a view over pixel data with `len` logical
// color channels, stored as 4-wide vectors of T. `writable` selects between
// a const slice (kernel inputs) and a mutable slice (kernel outputs).
pub fn Image(comptime T: type, comptime len: comptime_int, comptime writable: bool) type {
    return struct {
        pub const Pixel = @Vector(4, T);
        pub const FPixel = @Vector(len, f32);
        pub const channels = len;
        data: if (writable) []Pixel else []const Pixel,
        width: u32,
        height: u32,
        colorSpace: ColorSpace = .srgb,
        // index (in pixels) of this buffer's first pixel within the full
        // image; nonzero when the buffer holds a partial row range
        offset: usize = 0,
        // clamp every lane of v to [min, max]
        fn constrain(v: anytype, min: f32, max: f32) @TypeOf(v) {
            const lower: @TypeOf(v) = @splat(min);
            const upper: @TypeOf(v) = @splat(max);
            const v2 = @select(f32, v > lower, v, lower);
            return @select(f32, v2 < upper, v2, upper);
        }
        // narrow a stored 4-wide float pixel to the kernel's len-wide pixel
        fn pbPixelFromFloatPixel(pixel: Pixel) FPixel {
            if (len == 4) {
                return pixel;
            }
            const mask: @Vector(len, i32) = switch (len) {
                1 => .{0},
                2 => .{ 0, 3 },
                3 => .{ 0, 1, 2 },
                else => @compileError("Unsupported number of channels: " ++ len),
            };
            return @shuffle(f32, pixel, undefined, mask);
        }
        // widen a len-wide kernel pixel back to a 4-wide storage pixel,
        // synthesizing alpha = 1 where the source has no alpha channel
        fn floatPixelFromPBPixel(pixel: FPixel) Pixel {
            if (len == 4) {
                return pixel;
            }
            const alpha: @Vector(1, T) = if (len == 1 or len == 3) .{1} else undefined;
            // FIX: the shuffle result is the 4-wide Pixel, so the mask must
            // have 4 elements regardless of `len`. It was declared as
            // @Vector(len, i32), which cannot hold the 4-element arms and
            // fails to compile for len 1..3 (cf. intPixelFromPBPixel below,
            // which already uses 4-element masks).
            const mask: @Vector(4, i32) = switch (len) {
                1 => .{ 0, 0, 0, -1 },
                2 => .{ 0, 0, 0, 1 },
                3 => .{ 0, 1, 2, -1 },
                else => @compileError("Unsupported number of channels: " ++ len),
            };
            return @shuffle(T, pixel, alpha, mask);
        }
        // convert an integer storage pixel to normalized floats in [0, 1]
        fn pbPixelFromIntPixel(pixel: Pixel) FPixel {
            const numerator: FPixel = switch (len) {
                1 => @floatFromInt(@shuffle(T, pixel, undefined, @Vector(1, i32){0})),
                2 => @floatFromInt(@shuffle(T, pixel, undefined, @Vector(2, i32){ 0, 3 })),
                3 => @floatFromInt(@shuffle(T, pixel, undefined, @Vector(3, i32){ 0, 1, 2 })),
                4 => @floatFromInt(pixel),
                else => @compileError("Unsupported number of channels: " ++ len),
            };
            const denominator: FPixel = @splat(@floatFromInt(std.math.maxInt(T)));
            return numerator / denominator;
        }
        // convert normalized floats back to the integer storage pixel,
        // clamping to the representable range and forcing alpha opaque
        // where the kernel pixel carries no alpha
        fn intPixelFromPBPixel(pixel: FPixel) Pixel {
            const max: f32 = @floatFromInt(std.math.maxInt(T));
            const multiplier: FPixel = @splat(max);
            const product: FPixel = constrain(pixel * multiplier, 0, max);
            const maxAlpha: @Vector(1, f32) = .{std.math.maxInt(T)};
            return switch (len) {
                1 => @intFromFloat(@shuffle(f32, product, maxAlpha, @Vector(4, i32){ 0, 0, 0, -1 })),
                2 => @intFromFloat(@shuffle(f32, product, undefined, @Vector(4, i32){ 0, 0, 0, 1 })),
                3 => @intFromFloat(@shuffle(f32, product, maxAlpha, @Vector(4, i32){ 0, 1, 2, -1 })),
                4 => @intFromFloat(product),
                else => @compileError("Unsupported number of channels: " ++ len),
            };
        }
        // read the pixel at (x, y), translated through `offset`, as floats
        fn getPixel(self: @This(), x: u32, y: u32) FPixel {
            const index = (y * self.width) + x - self.offset;
            const src_pixel = self.data[index];
            const pixel: FPixel = switch (@typeInfo(T)) {
                .Float => pbPixelFromFloatPixel(src_pixel),
                .Int => pbPixelFromIntPixel(src_pixel),
                else => @compileError("Unsupported type: " ++ @typeName(T)),
            };
            return pixel;
        }
        // write the pixel at (x, y); compiles to a no-op on read-only images
        fn setPixel(self: @This(), x: u32, y: u32, pixel: FPixel) void {
            if (comptime !writable) {
                return;
            }
            const index = (y * self.width) + x - self.offset;
            const dst_pixel: Pixel = switch (@typeInfo(T)) {
                .Float => floatPixelFromPBPixel(pixel),
                .Int => intPixelFromPBPixel(pixel),
                else => @compileError("Unsupported type: " ++ @typeName(T)),
            };
            self.data[index] = dst_pixel;
        }
        fn pixelSize(self: @This()) @Vector(2, f32) {
            _ = self;
            return .{ 1, 1 };
        }
        fn pixelAspectRatio(self: @This()) f32 {
            _ = self;
            return 1;
        }
        // fetch the pixel at integer-valued coord, or transparent black
        // when coord lies outside the image bounds
        inline fn getPixelAt(self: @This(), coord: @Vector(2, f32)) FPixel {
            const left_top: @Vector(2, f32) = .{ 0, 0 };
            const bottom_right: @Vector(2, f32) = .{ @floatFromInt(self.width - 1), @floatFromInt(self.height - 1) };
            if (@reduce(.And, coord >= left_top) and @reduce(.And, coord <= bottom_right)) {
                const ic: @Vector(2, u32) = @intFromFloat(coord);
                return self.getPixel(ic[0], ic[1]);
            } else {
                return @splat(0);
            }
        }
        // nearest-neighbor sampling
        fn sampleNearest(self: @This(), coord: @Vector(2, f32)) FPixel {
            return self.getPixelAt(@floor(coord));
        }
        // bilinear sampling: weighted blend of the 2x2 neighborhood around coord
        fn sampleLinear(self: @This(), coord: @Vector(2, f32)) FPixel {
            const c = coord - @as(@Vector(2, f32), @splat(0.5));
            const c0 = @floor(c);
            const f0 = c - c0;
            const f1 = @as(@Vector(2, f32), @splat(1)) - f0;
            const w: @Vector(4, f32) = .{
                f1[0] * f1[1],
                f0[0] * f1[1],
                f1[0] * f0[1],
                f0[0] * f0[1],
            };
            const p00 = self.getPixelAt(c0);
            const p01 = self.getPixelAt(c0 + @as(@Vector(2, f32), .{ 0, 1 }));
            const p10 = self.getPixelAt(c0 + @as(@Vector(2, f32), .{ 1, 0 }));
            const p11 = self.getPixelAt(c0 + @as(@Vector(2, f32), .{ 1, 1 }));
            var result: FPixel = undefined;
            comptime var i = 0;
            inline while (i < len) : (i += 1) {
                const p: @Vector(4, f32) = .{ p00[i], p10[i], p01[i], p11[i] };
                result[i] = @reduce(.Add, p * w);
            }
            return result;
        }
    };
}
// Builds a struct type with one read-only Image(T, channels, false) field
// per entry of Kernel.inputImages, named after that entry (e.g. `src`).
pub fn KernelInput(comptime T: type, comptime Kernel: type) type {
const input_fields = std.meta.fields(@TypeOf(Kernel.inputImages));
comptime var struct_fields: [input_fields.len]std.builtin.Type.StructField = undefined;
inline for (input_fields, 0..) |field, index| {
const input = @field(Kernel.inputImages, field.name);
const ImageT = Image(T, input.channels, false);
// fields default to an undefined image; the caller supplies real data
const default_value: ImageT = undefined;
struct_fields[index] = .{
.name = field.name,
.type = ImageT,
.default_value = @ptrCast(&default_value),
.is_comptime = false,
.alignment = @alignOf(ImageT),
};
}
return @Type(.{
.Struct = .{
.layout = .auto,
.fields = &struct_fields,
.decls = &.{},
.is_tuple = false,
},
});
}
// Builds a struct type with one writable Image(T, channels, true) field
// per entry of Kernel.outputImages, named after that entry (e.g. `dst`).
// Mirrors KernelInput except that the images are mutable.
pub fn KernelOutput(comptime T: type, comptime Kernel: type) type {
const output_fields = std.meta.fields(@TypeOf(Kernel.outputImages));
comptime var struct_fields: [output_fields.len]std.builtin.Type.StructField = undefined;
inline for (output_fields, 0..) |field, index| {
const output = @field(Kernel.outputImages, field.name);
const ImageT = Image(T, output.channels, true);
// fields default to an undefined image; createPartialOutput fills them in
const default_value: ImageT = undefined;
struct_fields[index] = .{
.name = field.name,
.type = ImageT,
.default_value = @ptrCast(&default_value),
.is_comptime = false,
.alignment = @alignOf(ImageT),
};
}
return @Type(.{
.Struct = .{
.layout = .auto,
.fields = &struct_fields,
.decls = &.{},
.is_tuple = false,
},
});
}
// Builds a struct type with one field per entry of Kernel.parameters, using
// the parameter's declared `type` and its `defaultValue` (or a zero/false
// fallback) as the field default.
pub fn KernelParameters(comptime Kernel: type) type {
const param_fields = std.meta.fields(@TypeOf(Kernel.parameters));
comptime var struct_fields: [param_fields.len]std.builtin.Type.StructField = undefined;
inline for (param_fields, 0..) |field, index| {
const param = @field(Kernel.parameters, field.name);
// pick the declared defaultValue when present, otherwise a neutral value
const default_value: ?*const anyopaque = get_def: {
const value: param.type = if (@hasField(@TypeOf(param), "defaultValue"))
param.defaultValue
else switch (@typeInfo(param.type)) {
.Int, .Float => 0,
.Bool => false,
.Vector => @splat(0),
else => @compileError("Unrecognized parameter type: " ++ @typeName(param.type)),
};
break :get_def @ptrCast(&value);
};
struct_fields[index] = .{
.name = field.name,
.type = param.type,
.default_value = default_value,
.is_comptime = false,
.alignment = @alignOf(param.type),
};
}
return @Type(.{
.Struct = .{
.layout = .auto,
.fields = &struct_fields,
.decls = &.{},
.is_tuple = false,
},
});
}
The above code was translated from a Pixel Bender filter using pb2zig. Consult the intro page for an explanation of how it works.
After creating the Zig file, insert the following lines into the image route handler, right after
the call to inputImage.raw().toBuffer()
:
// push data through filter
const { createOutput } = await import(`../zig/${filter}.zig`);
const input = {
src: {
data,
width: info.width,
height: info.height,
}
};
const output = createOutput(info.width, info.height, input, filterParams);
const { dst } = output;
createOutput()
has the following declaration:
pub fn createOutput(
allocator: std.mem.Allocator,
width: u32,
height: u32,
input: Input,
params: Parameters,
) !Output
allocator
is automatically provided by Zigar. width
and height
come from the object returned
by Sharp. filterParams
is what remains after width
and height
have been taken out from the
params
object, i.e. { intensity: [number] }
.
Input
is a parameterized type:
pub const Input = KernelInput(u8, kernel);
Which expands to:
pub const Input = struct {
src: Image(u8, 4, false);
};
Then further to:
pub const Input = struct {
src: struct {
pub const Pixel = @Vector(4, u8);
pub const FPixel = @Vector(4, f32);
pub const channels = 4;
data: []const Pixel,
width: u32,
height: u32,
colorSpace: ColorSpace = .srgb,
offset: usize = 0,
};
};
So input.src.data
is a slice pointer to four-wide u8
vectors, with each vector representing the
RGBA value of a pixel. Zigar can automatically cast the Buffer
we received from Sharp into the
target type. That's why the initializer for the argument input
is simply:
const input = {
src: {
data,
width: info.width,
height: info.height,
}
};
Like Input
, Output
is a parameterized type. It too can potentially contain multiple images. In
this case (and most cases), there's only one:
pub const Output = struct {
dst: {
pub const Pixel = @Vector(4, u8);
pub const FPixel = @Vector(4, f32);
pub const channels = 4;
data: []Pixel,
width: u32,
height: u32,
colorSpace: ColorSpace = .srgb,
offset: usize = 0,
},
};
dst.data
points to memory allocated from allocator
. To get a raw buffer for Sharp, we can
access dst.data.dataView.buffer
. Sharp also accepts Uint8Array
as raw data, so we do the
following instead:
// place raw data into new image and output it as JPEG
const outputImage = Sharp(dst.data.typedArray, { raw: info });
Restart the server after making the needed changes. You should now see the following in the browser:
Follow the same steps as described in the hello world example. First change the import statement:
const { createOutput } = await import(`../lib/${filter}.zigar`);
Then create bun-zigar.toml
:
# optimization mode passed to the Zig compiler
optimize = "ReleaseSmall"
# maps the import path used by JavaScript to the Zig source file
[sourceFiles]
"lib/sepia.zigar" = "zig/sepia.zig"
# platforms/architectures to prebuild shared libraries for
[[targets]]
platform = "linux"
arch = "x64"
[[targets]]
platform = "linux"
arch = "arm64"
[[targets]]
platform = "linux-musl"
arch = "x64"
[[targets]]
platform = "linux-musl"
arch = "arm64"
And build the libraries:
bunx bun-zigar build
You can find the complete source code for this example here.
Finally, we have an actual server-side app. And it does something cool! A major advantage of using Zig for a task like image processing is that the same code can be deployed on the browser too. Consult the Vite or Webpack version of this example to learn how to do it.
The image filter employed for this example is very rudimentary. Check out pb2zig's project page to see more advanced code.
That's it for now. I hope this tutorial is enough to get you started with using Zigar.