From 360af4cf6f18469df97c11af4cd5696e0ca8b3ef Mon Sep 17 00:00:00 2001
From: Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>
Date: Thu, 9 Oct 2014 22:44:29 -0400
Subject: [PATCH] sd-journal: do not reset sd_j_enumerate_unique position on
 error

systemctl would call sd_j_enumerate_unique() interleaved with
sd_j_next(). But the latter can remove a file if it detects an
error in it. In those circumstances sd_j_enumerate_unique() would
restart with the first file in the hashmap. With many corrupted
files, sd_j_enumerate_unique() might iterate over the list multiple
times.

Avoid this by jumping to the next file in the unique list if
possible, or otherwise setting a flag that tells
sd_j_enumerate_unique() that it is done.
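
To make the failure mode concrete, here is a minimal sketch of that
interleaved call pattern using the public sd-journal API; the
"_BOOT_ID" field and the loop shape are illustrative assumptions,
not a literal excerpt from systemctl:

#include <stdio.h>
#include <systemd/sd-journal.h>

int main(void) {
        sd_journal *j;
        const void *data;
        size_t length;
        int r;

        r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);
        if (r < 0)
                return 1;

        r = sd_journal_query_unique(j, "_BOOT_ID");
        if (r < 0) {
                sd_journal_close(j);
                return 1;
        }

        /* Each step of the unique enumeration... */
        while ((r = sd_journal_enumerate_unique(j, &data, &length)) > 0) {
                printf("%.*s\n", (int) length, (const char *) data);

                /* ...interleaved with cursor movement. sd_journal_next() may
                 * drop a corrupted file from j->files; before this patch that
                 * reset the unique iteration back to the first file. */
                sd_journal_next(j);
        }

        sd_journal_close(j);
        return r < 0;
}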
---
 src/journal/journal-internal.h |  4 ++++
 src/journal/sd-journal.c       | 18 ++++++++++++------
 2 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/src/journal/journal-internal.h b/src/journal/journal-internal.h
index 2f1f7fc771..e591fb61f8 100644
--- a/src/journal/journal-internal.h
+++ b/src/journal/journal-internal.h
@@ -124,6 +124,10 @@ struct sd_journal {
 
         bool on_network;
         bool no_new_files;
+        bool unique_file_lost; /* File we were iterating over got
+                                  removed, and there were no more
+                                  files, so sd_j_enumerate_unique
+                                  will return a value equal to 0. */
 
         size_t data_threshold;
 
diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c
index 1fc9f01d0a..b72a0867e7 100644
--- a/src/journal/sd-journal.c
+++ b/src/journal/sd-journal.c
@@ -1375,8 +1375,11 @@ static void remove_file_real(sd_journal *j, JournalFile *f) {
         }
 
         if (j->unique_file == f) {
-                j->unique_file = NULL;
+                /* Jump to the next unique_file or NULL if that one was last */
+                j->unique_file = hashmap_next(j->files, j->unique_file->path);
                 j->unique_offset = 0;
+                if (!j->unique_file)
+                        j->unique_file_lost = true;
         }
 
         journal_file_close(f);
@@ -2490,6 +2493,7 @@ _public_ int sd_journal_query_unique(sd_journal *j, const char *field) {
         j->unique_field = f;
         j->unique_file = NULL;
         j->unique_offset = 0;
+        j->unique_file_lost = false;
 
         return 0;
 }
@@ -2506,9 +2510,13 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_
         k = strlen(j->unique_field);
 
         if (!j->unique_file) {
+                if (j->unique_file_lost)
+                        return 0;
+
                 j->unique_file = hashmap_first(j->files);
                 if (!j->unique_file)
                         return 0;
+
                 j->unique_offset = 0;
         }
 
@@ -2538,13 +2546,10 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_
 
         /* We reached the end of the list? Then start again, with the next file */
         if (j->unique_offset == 0) {
-                JournalFile *n;
-
-                n = hashmap_next(j->files, j->unique_file->path);
-                if (!n)
+                j->unique_file = hashmap_next(j->files, j->unique_file->path);
+                if (!j->unique_file)
                         return 0;
 
-                j->unique_file = n;
                 continue;
         }
 
@@ -2632,6 +2637,7 @@ _public_ void sd_journal_restart_unique(sd_journal *j) {
 
         j->unique_file = NULL;
         j->unique_offset = 0;
+        j->unique_file_lost = false;
 }
 
 _public_ int sd_journal_reliable_fd(sd_journal *j) {
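
After this change, a consumer that loses every journal file
mid-enumeration sees sd_journal_enumerate_unique() return 0 (end of
iteration) rather than a restarted walk, and
sd_journal_restart_unique() clears unique_file_lost so a fresh pass
begins from hashmap_first() again. A hedged sketch of the consumer
side (process() is a hypothetical callback, not part of the API):

#include <systemd/sd-journal.h>

/* Hypothetical consumer callback, for illustration only. */
static void process(const void *data, size_t length);

static void dump_unique(sd_journal *j) {
        const void *data;
        size_t length;

        /* SD_JOURNAL_FOREACH_UNIQUE() calls sd_journal_restart_unique()
         * and loops while sd_journal_enumerate_unique() returns > 0. If
         * every file is removed while the loop runs, unique_file_lost
         * makes the next enumerate call return 0 and the loop ends. */
        SD_JOURNAL_FOREACH_UNIQUE(j, data, length)
                process(data, length);
}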