Make the hashing buffer reusable per worker instead of calling malloc for every file

This commit is contained in:
2026-03-08 10:59:13 +01:00
parent c846952cbf
commit 2fc9bf31b0
2 changed files with 9 additions and 7 deletions

View File

@@ -375,7 +375,7 @@ void scan_folder_windows_parallel(const char *base, DirQueue *q) {
}
// ----------------------------- Hashing helpers -----------------------------
static void xxh3_hash_file_stream(const char *path, char *out_hex) {
static void xxh3_hash_file_stream(const char *path, char *out_hex, BYTE *buf) {
// compute XXH3_128 over file. POSIX and Windows use standard reads in this
// helper.
// On Windows try to use overlapped synchronous chunked reads for higher
@@ -391,7 +391,7 @@ static void xxh3_hash_file_stream(const char *path, char *out_hex) {
XXH3_state_t state;
XXH3_128bits_reset(&state);
BYTE *buf = (BYTE *)malloc(READ_BLOCK);
// BYTE *buf = (BYTE *)malloc(READ_BLOCK);
DWORD read = 0;
BOOL ok;
while (ReadFile(hFile, buf, READ_BLOCK, &read, NULL) && read > 0) {
@@ -400,7 +400,7 @@ static void xxh3_hash_file_stream(const char *path, char *out_hex) {
}
h = XXH3_128bits_digest(&state);
CloseHandle(hFile);
free(buf);
// free(buf);
snprintf(out_hex, HASH_STRLEN, "%016llx%016llx", (unsigned long long)h.high64,
(unsigned long long)h.low64);
}
@@ -411,6 +411,7 @@ static DWORD WINAPI hash_worker(LPVOID arg) {
WorkerContext *ctx = (WorkerContext *)arg;
MPMCQueue *q = ctx->queue;
mem_arena *local_arena = ctx->arena;
BYTE *buf = (BYTE *)malloc(READ_BLOCK);
for (;;) {
FileEntry *fe = mpmc_pop(q);
@@ -418,7 +419,7 @@ static DWORD WINAPI hash_worker(LPVOID arg) {
break;
char hash[HASH_STRLEN];
xxh3_hash_file_stream(fe->path, hash);
xxh3_hash_file_stream(fe->path, hash, buf);
char created[32], modified[32];
format_time(fe->created_time, created, sizeof(created));
@@ -440,6 +441,7 @@ static DWORD WINAPI hash_worker(LPVOID arg) {
free(fe->path);
free(fe);
}
free(buf);
return 0;
}