I would expect C to be way faster than Node.js, but in my application it isn't.
What am I trying to do?
A simple software shader animation that writes to the Linux framebuffer:
In C (out of desperation) I have everything in main:
// anime.c
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <linux/fb.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <math.h>
#include <string.h>

#define BYTE 8
#define MAX_BYTE 255

int main()
{
    struct fb_var_screeninfo vinfo;
    struct fb_fix_screeninfo finfo;
    int fb_fd = open("/dev/fb0", O_RDWR);
    if (fb_fd == -1)
    {
        perror("Error: cannot open framebuffer device");
        exit(1);
    }
    if (ioctl(fb_fd, FBIOGET_VSCREENINFO, &vinfo) == -1)
    {
        perror("Error reading variable information");
        exit(2);
    }
    int w = vinfo.xres;
    int h = vinfo.yres;
    int color_channels = vinfo.bits_per_pixel / BYTE; /* the loop below assumes 32 bpp (BGRA) */
    long screen_size = w * h * color_channels;
    /* map the framebuffer directly into this process's address space */
    uint8_t *buffer = (uint8_t *)mmap(0, screen_size, PROT_READ | PROT_WRITE, MAP_SHARED, fb_fd, 0);
    if (buffer == MAP_FAILED)
    {
        perror("Error mapping framebuffer device to memory");
        exit(3);
    }
    double t = 0;
    while (t < 10)
    {
        /* render one frame, writing every pixel straight into the mapped framebuffer */
        for (long k = 0; k < screen_size; k += color_channels)
        {
            uint32_t i = k / (color_channels * w); /* row    */
            uint32_t j = (k / color_channels) % w; /* column */
            uint32_t x = j;
            uint32_t y = h - 1 - i; /* flip so y grows upward */
            double px = (double)x * t / w;
            double py = (double)y * t / h;
            uint8_t r = (uint8_t)(MAX_BYTE * fmod(px, 1.0));
            uint8_t g = (uint8_t)(MAX_BYTE * fmod(py, 1.0));
            uint8_t b = 0;
            buffer[k] = b;
            buffer[k + 1] = g;
            buffer[k + 2] = r;
            buffer[k + 3] = MAX_BYTE;
        }
        t = t + 0.05;
        usleep(50); /* 50 microseconds, not milliseconds */
    }
    munmap(buffer, screen_size);
    close(fb_fd);
    return 0;
}
I compile it with gcc anime.c -o anime -lm and run it with sudo ./anime.
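Note that this build line has no optimization flag, so gcc compiles at -O0; gcc -O2 anime.c -o anime -lm would be a fairer baseline. To put a number on "slower", here is a minimal per-frame timing sketch, assuming POSIX clock_gettime is available (now_ms is a hypothetical helper, not part of anime.c):

/* timing sketch; now_ms is a made-up helper for illustration */
#include <time.h>
#include <stdio.h>

static double now_ms(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1e3 + ts.tv_nsec * 1e-6;
}

/* inside the while (t < 10) loop, around the per-pixel for loop: */
double start = now_ms();
/* ... render one frame ... */
printf("frame took %.2f ms\n", now_ms() - start);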
In node.js:
// anime.mjs
import fs from 'node:fs';

const MAX_BYTE = 255;
const framebufferPath = '/dev/fb0';

const Screen = () => {
    const ans = {};
    ans.width = 2560;
    ans.height = 1440;
    ans.bytesPerPixel = 4;
    ans.channels = 4;
    ans.size = ans.width * ans.height * ans.bytesPerPixel;
    // the frame is rendered into an ordinary heap Buffer, not the framebuffer itself
    ans.buffer = Buffer.allocUnsafe(ans.size);
    ans.fb = fs.openSync(framebufferPath, 'r+');
    ans.map = (lambda, time) => {
        const channels = ans.bytesPerPixel;
        const w = ans.width;
        const h = ans.height;
        for (let k = 0; k < ans.size; k += channels) {
            const i = Math.floor(k / (channels * w)); // row (floor to mirror C's integer division)
            const j = (k / channels) % w;             // column
            const x = j;
            const y = h - 1 - i; // flip so y grows upward
            const color = lambda(x, y, time);
            if (!color) continue;
            ans.buffer[k] = MAX_BYTE * color[2];
            ans.buffer[k + 1] = MAX_BYTE * color[1];
            ans.buffer[k + 2] = MAX_BYTE * color[0];
            ans.buffer[k + 3] = MAX_BYTE;
        }
        return ans.paint();
    };
    ans.paint = () => {
        // push the whole frame to the framebuffer in a single write
        fs.writeSync(ans.fb, ans.buffer, 0, ans.size, 0);
        return ans;
    };
    return ans;
};

const screen = Screen();

const render = (x, y, t) => {
    const px = (x * t) / screen.width;
    const py = (y * t) / screen.height;
    return [px % 1, py % 1, 0];
};

const play = ({ oldTime, time }) => {
    const newTime = new Date().getTime();
    const dt = (newTime - oldTime) * 1e-3;
    screen.map(render, time);
    if (time > 10) return;
    // schedule the next frame (Node clamps the missing delay to 1 ms)
    setTimeout(() => play({ oldTime: newTime, time: time + dt }));
};

play({ oldTime: new Date().getTime(), time: 0 });
I run it using sudo node anime.mjs.
The performance of the C version is worse than the Node version. What am I missing?
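One asymmetry between the two programs is worth noting: the C loop stores every byte straight into the mmap'd framebuffer, which the kernel may map uncached or write-combined, while the Node loop renders into an ordinary heap Buffer and pushes the finished frame with a single writeSync. As a sketch only (an assumption about the cause, not a verified fix), here is the C loop restructured the same way, reusing the variables from anime.c above:

/* Sketch: render into a plain heap buffer, then copy the finished
   frame to the mapped framebuffer in one memcpy per frame, which is
   closer to what the Node version does with its single writeSync. */
uint8_t *back = malloc(screen_size);
double t = 0;
while (t < 10)
{
    for (long k = 0; k < screen_size; k += color_channels)
    {
        uint32_t i = k / (color_channels * w);
        uint32_t j = (k / color_channels) % w;
        double px = (double)j * t / w;
        double py = (double)(h - 1 - i) * t / h;
        back[k] = 0;                                       /* blue  */
        back[k + 1] = (uint8_t)(MAX_BYTE * fmod(py, 1.0)); /* green */
        back[k + 2] = (uint8_t)(MAX_BYTE * fmod(px, 1.0)); /* red   */
        back[k + 3] = MAX_BYTE;                            /* alpha */
    }
    memcpy(buffer, back, screen_size); /* one bulk write per frame */
    t = t + 0.05;
    usleep(50);
}
free(back);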