import libdnf-0.63.0-11.el8

CentOS Sources 2022-09-27 16:49:13 -04:00 committed by Stepan Oksanichenko
parent 76bf697c75
commit 030eb8f1a1
16 changed files with 1479 additions and 2 deletions


@@ -0,0 +1,107 @@
From 7d8f9cfcdf7725fef2c99ecb2dedcdff1e9506d7 Mon Sep 17 00:00:00 2001
From: Jaroslav Rohel <jrohel@redhat.com>
Date: Wed, 13 Apr 2022 12:26:10 +0200
Subject: [PATCH 26/34] context: Substitute all repository config options
(RhBug:2076853)
It also solves the problem: Substitution of variables in `baseurl`
does not work in microdnf and PackageKit unless `metalink` or `mirrorlist`
is set at the same time.
---
libdnf/dnf-repo.cpp | 34 +++++++++++++++++++++++++---------
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/libdnf/dnf-repo.cpp b/libdnf/dnf-repo.cpp
index 710045fb..9d42e3e3 100644
--- a/libdnf/dnf-repo.cpp
+++ b/libdnf/dnf-repo.cpp
@@ -83,6 +83,7 @@ typedef struct
LrHandle *repo_handle;
LrResult *repo_result;
LrUrlVars *urlvars;
+ bool unit_test_mode; /* ugly hack for unit tests */
} DnfRepoPrivate;
G_DEFINE_TYPE_WITH_PRIVATE(DnfRepo, dnf_repo, G_TYPE_OBJECT)
@@ -847,8 +848,11 @@ dnf_repo_conf_reset(libdnf::ConfigRepo &config)
/* Loads repository configuration from GKeyFile */
static void
-dnf_repo_conf_from_gkeyfile(libdnf::ConfigRepo &config, const char *repoId, GKeyFile *gkeyFile)
+dnf_repo_conf_from_gkeyfile(DnfRepo *repo, const char *repoId, GKeyFile *gkeyFile)
{
+ DnfRepoPrivate *priv = GET_PRIVATE(repo);
+ auto & config = *priv->repo->getConfig();
+
// Reset to the initial state before reloading the configuration.
dnf_repo_conf_reset(config);
@@ -883,20 +887,31 @@ dnf_repo_conf_from_gkeyfile(libdnf::ConfigRepo &config, const char *repoId, GKey
// list can be ['value1', 'value2, value3'] therefore we first join
// to have 'value1, value2, value3'
g_autofree gchar * tmp_strval = g_strjoinv(",", list);
+
+ // Substitute vars.
+ g_autofree gchar *subst_value = dnf_repo_substitute(repo, tmp_strval);
+
+ if (strcmp(key, "baseurl") == 0 && strstr(tmp_strval, "file://$testdatadir") != NULL) {
+ priv->unit_test_mode = true;
+ }
+
try {
- optionItem.newString(libdnf::Option::Priority::REPOCONFIG, tmp_strval);
+ optionItem.newString(libdnf::Option::Priority::REPOCONFIG, subst_value);
} catch (const std::exception & ex) {
- g_debug("Invalid configuration value: %s = %s in %s; %s", key, value.c_str(), repoId, ex.what());
+ g_debug("Invalid configuration value: %s = %s in %s; %s", key, subst_value, repoId, ex.what());
}
}
} else {
-
// process other (non list) options
+
+ // Substitute vars.
+ g_autofree gchar *subst_value = dnf_repo_substitute(repo, value.c_str());
+
try {
- optionItem.newString(libdnf::Option::Priority::REPOCONFIG, value);
+ optionItem.newString(libdnf::Option::Priority::REPOCONFIG, subst_value);
} catch (const std::exception & ex) {
- g_debug("Invalid configuration value: %s = %s in %s; %s", key, value.c_str(), repoId, ex.what());
+ g_debug("Invalid configuration value: %s = %s in %s; %s", key, subst_value, repoId, ex.what());
}
}
@@ -950,7 +965,7 @@ dnf_repo_set_keyfile_data(DnfRepo *repo, gboolean reloadFromGKeyFile, GError **e
// Reload repository configuration from keyfile.
if (reloadFromGKeyFile) {
- dnf_repo_conf_from_gkeyfile(*conf, repoId, priv->keyfile);
+ dnf_repo_conf_from_gkeyfile(repo, repoId, priv->keyfile);
dnf_repo_apply_setopts(*conf, repoId);
}
@@ -996,8 +1011,9 @@ dnf_repo_set_keyfile_data(DnfRepo *repo, gboolean reloadFromGKeyFile, GError **e
g_autofree gchar *url = NULL;
url = lr_prepend_url_protocol(baseurls[0]);
if (url != NULL && strncasecmp(url, "file://", 7) == 0) {
- if (g_strstr_len(url, -1, "$testdatadir") == NULL)
+ if (!priv->unit_test_mode) {
priv->kind = DNF_REPO_KIND_LOCAL;
+ }
g_free(priv->location);
g_free(priv->keyring);
priv->location = dnf_repo_substitute(repo, url + 7);
@@ -1224,7 +1240,7 @@ dnf_repo_setup(DnfRepo *repo, GError **error) try
auto repoId = priv->repo->getId().c_str();
auto conf = priv->repo->getConfig();
- dnf_repo_conf_from_gkeyfile(*conf, repoId, priv->keyfile);
+ dnf_repo_conf_from_gkeyfile(repo, repoId, priv->keyfile);
dnf_repo_apply_setopts(*conf, repoId);
auto sslverify = conf->sslverify().getValue();
--
2.31.1
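
The patch routes every repository option value through dnf_repo_substitute() before it is stored, so variables work in `baseurl` even when no `metalink` or `mirrorlist` is configured. The following minimal sketch shows what this kind of `$var` substitution amounts to; the `substitute_vars` helper and the variable map are hypothetical, for illustration only, and are not libdnf's implementation.

```
// Minimal sketch of "$var" substitution applied to a repository option value;
// substitute_vars() and the variable map are illustrative, not libdnf's code.
#include <cctype>
#include <cstddef>
#include <iostream>
#include <map>
#include <string>

static std::string substitute_vars(const std::string & value,
                                   const std::map<std::string, std::string> & vars)
{
    std::string out;
    for (std::size_t i = 0; i < value.size(); ) {
        if (value[i] == '$') {
            std::size_t j = i + 1;
            while (j < value.size() &&
                   (std::isalnum(static_cast<unsigned char>(value[j])) || value[j] == '_'))
                ++j;
            auto it = vars.find(value.substr(i + 1, j - i - 1));
            if (it != vars.end()) {
                out += it->second;   // known variable: replace it
                i = j;
                continue;
            }
        }
        out += value[i++];           // everything else is copied verbatim
    }
    return out;
}

int main()
{
    const std::map<std::string, std::string> vars{{"releasever", "8"}, {"basearch", "x86_64"}};
    // Before this patch only metalink/mirrorlist went through substitution in the
    // context API; now every option value does, including baseurl.
    std::cout << substitute_vars("https://example.com/repo/$releasever/$basearch/os", vars)
              << "\n";   // -> https://example.com/repo/8/x86_64/os
    return 0;
}
```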


@@ -0,0 +1,50 @@
From 074ca4cf643c79b8ec3db89a7fd5580ba387eb4d Mon Sep 17 00:00:00 2001
From: Jaroslav Rohel <jrohel@redhat.com>
Date: Wed, 20 Apr 2022 08:22:30 +0200
Subject: [PATCH 27/34] Use environment variable in unittest instead of ugly
hack in libdnf
Libdnf contains hacks for unit tests. This removes one hack.
---
libdnf/dnf-repo.cpp | 3 ---
tests/libdnf/dnf-self-test.c | 3 +++
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/libdnf/dnf-repo.cpp b/libdnf/dnf-repo.cpp
index 9d42e3e3..c015d7fd 100644
--- a/libdnf/dnf-repo.cpp
+++ b/libdnf/dnf-repo.cpp
@@ -1191,7 +1191,6 @@ dnf_repo_setup(DnfRepo *repo, GError **error) try
DnfRepoEnabled enabled = DNF_REPO_ENABLED_NONE;
g_autofree gchar *basearch = NULL;
g_autofree gchar *release = NULL;
- g_autofree gchar *testdatadir = NULL;
basearch = g_key_file_get_string(priv->keyfile, "general", "arch", NULL);
if (basearch == NULL)
@@ -1230,8 +1229,6 @@ dnf_repo_setup(DnfRepo *repo, GError **error) try
for (const auto & item : libdnf::dnf_context_get_vars(priv->context))
priv->urlvars = lr_urlvars_set(priv->urlvars, item.first.c_str(), item.second.c_str());
- testdatadir = dnf_realpath(TESTDATADIR);
- priv->urlvars = lr_urlvars_set(priv->urlvars, "testdatadir", testdatadir);
if (!lr_handle_setopt(priv->repo_handle, error, LRO_VARSUB, priv->urlvars))
return FALSE;
if (!lr_handle_setopt(priv->repo_handle, error, LRO_GNUPGHOMEDIR, priv->keyring))
diff --git a/tests/libdnf/dnf-self-test.c b/tests/libdnf/dnf-self-test.c
index 52958371..906f0e21 100644
--- a/tests/libdnf/dnf-self-test.c
+++ b/tests/libdnf/dnf-self-test.c
@@ -1225,6 +1225,9 @@ main(int argc, char **argv)
g_log_set_fatal_mask(NULL, G_LOG_LEVEL_ERROR | G_LOG_LEVEL_CRITICAL);
g_log_set_always_fatal (G_LOG_FATAL_MASK);
+ /* Sets a variable to replace in repository configurations. */
+ g_setenv("DNF_VAR_testdatadir", TESTDATADIR, TRUE);
+
/* tests go here */
g_test_add_func("/libdnf/repo_loader{gpg-asc}", dnf_repo_loader_gpg_asc_func);
g_test_add_func("/libdnf/repo_loader{gpg-wrong-asc}", dnf_repo_loader_gpg_wrong_asc_func);
--
2.31.1
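
The test now relies on environment variables prefixed with `DNF_VAR_` being exposed as repository substitution variables, so `DNF_VAR_testdatadir` becomes `$testdatadir`, which the previous patch substitutes into `baseurl`. The rough sketch below only illustrates that mapping; it is not the code libdnf uses to collect its variables.

```
// Illustrative sketch (not libdnf's code): collect DNF_VAR_* environment
// variables into a substitution map, so DNF_VAR_testdatadir=/some/path makes
// "$testdatadir" usable in repository configuration values.
#include <iostream>
#include <map>
#include <string>

extern char **environ;

static std::map<std::string, std::string> vars_from_env()
{
    std::map<std::string, std::string> vars;
    const std::string prefix = "DNF_VAR_";
    for (char **env = environ; *env != nullptr; ++env) {
        const std::string entry{*env};
        const auto eq = entry.find('=');
        if (eq == std::string::npos || entry.compare(0, prefix.size(), prefix) != 0)
            continue;
        // The variable name is whatever follows the DNF_VAR_ prefix.
        vars.emplace(entry.substr(prefix.size(), eq - prefix.size()), entry.substr(eq + 1));
    }
    return vars;
}

int main()
{
    for (const auto & var : vars_from_env())
        std::cout << var.first << " = " << var.second << "\n";
    return 0;
}
```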


@@ -0,0 +1,169 @@
From 983aeea57d75494fd4ea2ff2903f966136278c15 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ale=C5=A1=20Mat=C4=9Bj?= <amatej@redhat.com>
Date: Wed, 9 Feb 2022 13:17:00 +0100
Subject: [PATCH 28/34] Add private API for filling, reading and verifying new
dnf solv userdata
---
libdnf/hy-iutil-private.hpp | 24 +++++++++
libdnf/hy-iutil.cpp | 102 ++++++++++++++++++++++++++++++++++++
2 files changed, 126 insertions(+)
diff --git a/libdnf/hy-iutil-private.hpp b/libdnf/hy-iutil-private.hpp
index e07b1b51..d498c032 100644
--- a/libdnf/hy-iutil-private.hpp
+++ b/libdnf/hy-iutil-private.hpp
@@ -24,6 +24,30 @@
#include "hy-iutil.h"
#include "hy-types.h"
#include "sack/packageset.hpp"
+#include <array>
+#include <utility>
+
+// Use 8 bytes for libsolv version (API: solv_toolversion)
+// to be future proof even though it currently is "1.2"
+static constexpr const size_t solv_userdata_solv_toolversion_size{8};
+static constexpr const std::array<char, 4> solv_userdata_magic{'\0', 'd', 'n', 'f'};
+static constexpr const std::array<char, 4> solv_userdata_dnf_version{'\0', '1', '.', '0'};
+
+static constexpr const int solv_userdata_size = solv_userdata_solv_toolversion_size + \
+ solv_userdata_magic.size() + \
+ solv_userdata_dnf_version.size() + \
+ CHKSUM_BYTES;
+
+struct SolvUserdata {
+ char dnf_magic[solv_userdata_magic.size()];
+ char dnf_version[solv_userdata_dnf_version.size()];
+ char libsolv_version[solv_userdata_solv_toolversion_size];
+ unsigned char checksum[CHKSUM_BYTES];
+}__attribute__((packed)); ;
+
+int solv_userdata_fill(SolvUserdata *solv_userdata, const unsigned char *checksum, GError** error);
+std::unique_ptr<SolvUserdata> solv_userdata_read(FILE *fp);
+int solv_userdata_verify(const SolvUserdata *solv_userdata, const unsigned char *checksum);
/* crypto utils */
int checksum_cmp(const unsigned char *cs1, const unsigned char *cs2);
diff --git a/libdnf/hy-iutil.cpp b/libdnf/hy-iutil.cpp
index 2af13197..f81ca52f 100644
--- a/libdnf/hy-iutil.cpp
+++ b/libdnf/hy-iutil.cpp
@@ -43,6 +43,7 @@ extern "C" {
#include <solv/evr.h>
#include <solv/solver.h>
#include <solv/solverdebug.h>
+#include <solv/repo_solv.h>
#include <solv/util.h>
#include <solv/pool_parserpmrichdep.h>
}
@@ -182,6 +183,107 @@ int checksum_write(const unsigned char *cs, FILE *fp)
return 0;
}
+static std::array<char, solv_userdata_solv_toolversion_size>
+get_padded_solv_toolversion()
+{
+ std::array<char, solv_userdata_solv_toolversion_size> padded_solv_toolversion{};
+ std::string solv_ver_str{solv_toolversion};
+ std::copy(solv_ver_str.rbegin(), solv_ver_str.rend(), padded_solv_toolversion.rbegin());
+
+ return padded_solv_toolversion;
+}
+
+int
+solv_userdata_fill(SolvUserdata *solv_userdata, const unsigned char *checksum, GError** error)
+{
+ if (strlen(solv_toolversion) > solv_userdata_solv_toolversion_size) {
+ g_set_error(error, DNF_ERROR, DNF_ERROR_INTERNAL_ERROR,
+ _("Libsolv's solv_toolversion is: %zu long but we expect max of: %zu"),
+ strlen(solv_toolversion), solv_userdata_solv_toolversion_size);
+ return 1;
+ }
+
+ // copy dnf solv file magic
+ memcpy(solv_userdata->dnf_magic, solv_userdata_magic.data(), solv_userdata_magic.size());
+
+ // copy dnf solv file version
+ memcpy(solv_userdata->dnf_version, solv_userdata_dnf_version.data(), solv_userdata_dnf_version.size());
+
+ // copy libsolv solv file version
+ memcpy(solv_userdata->libsolv_version, get_padded_solv_toolversion().data(), solv_userdata_solv_toolversion_size);
+
+ // copy checksum
+ memcpy(solv_userdata->checksum, checksum, CHKSUM_BYTES);
+
+ return 0;
+}
+
+
+std::unique_ptr<SolvUserdata>
+solv_userdata_read(FILE *fp)
+{
+ unsigned char *dnf_solvfile_userdata_read = NULL;
+ int dnf_solvfile_userdata_len_read;
+ if (!fp) {
+ return nullptr;
+ }
+
+ int ret_code = solv_read_userdata(fp, &dnf_solvfile_userdata_read, &dnf_solvfile_userdata_len_read);
+ // The userdata layout has to match our struct exactly so we can just cast the memory
+ // allocated by libsolv
+ std::unique_ptr<SolvUserdata> uniq_userdata(reinterpret_cast<SolvUserdata *>(dnf_solvfile_userdata_read));
+ if(ret_code) {
+ g_warning("Failed to read solv userdata: solv_read_userdata returned: %i", ret_code);
+ return nullptr;
+ }
+
+ if (dnf_solvfile_userdata_len_read != solv_userdata_size) {
+ g_warning("Solv userdata length mismatch, read: %i vs expected: %i",
+ dnf_solvfile_userdata_len_read, solv_userdata_size);
+ return nullptr;
+ }
+
+ return uniq_userdata;
+}
+
+gboolean
+solv_userdata_verify(const SolvUserdata *solv_userdata, const unsigned char *checksum)
+{
+ // check dnf solvfile magic bytes
+ if (memcmp(solv_userdata->dnf_magic, solv_userdata_magic.data(), solv_userdata_magic.size()) != 0) {
+ // This is not dnf header do not read after it
+ g_warning("magic bytes don't match, read: %s vs. dnf solvfile magic: %s",
+ solv_userdata->dnf_magic, solv_userdata_magic.data());
+ return FALSE;
+ }
+
+ // check dnf solvfile version
+ if (memcmp(solv_userdata->dnf_version, solv_userdata_dnf_version.data(), solv_userdata_dnf_version.size()) != 0) {
+ // Mismatching dnf solvfile version -> we need to regenerate
+ g_warning("dnf solvfile version doesn't match, read: %s vs. dnf solvfile version: %s",
+ solv_userdata->dnf_version, solv_userdata_dnf_version.data());
+ return FALSE;
+ }
+
+ // check libsolv solvfile version
+ if (memcmp(solv_userdata->libsolv_version, get_padded_solv_toolversion().data(), solv_userdata_solv_toolversion_size) != 0) {
+ // Mismatching libsolv solvfile version -> we need to regenerate
+ g_warning("libsolv solvfile version doesn't match, read: %s vs. libsolv version: %s",
+ solv_userdata->libsolv_version, solv_toolversion);
+ return FALSE;
+ }
+
+ // check solvfile checksum
+ if (checksum_cmp(solv_userdata->checksum, checksum)) {
+ // Mismatching solvfile checksum -> we need to regenerate
+ g_debug("solvfile checksum doesn't match, read: %s vs. repomd checksum: %s",
+ solv_userdata->checksum, checksum);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
int
checksum_type2length(int type)
{
--
2.31.1
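
The new header is stored as libsolv "userdata" in dnf's solv cache files: a 4-byte magic, a 4-byte dnf solvfile version, 8 bytes reserved for libsolv's `solv_toolversion`, and the repomd checksum. The sketch below mirrors that packed layout only to make the size arithmetic explicit; it assumes `CHKSUM_BYTES` is 32 (a SHA-256 digest), which is not stated in this hunk.

```
// Mirrors the SolvUserdata layout above to make the size arithmetic explicit.
// CHKSUM_BYTES = 32 (SHA-256 digest) is an assumption here; the real value
// comes from libdnf's headers.
#include <cstddef>

constexpr std::size_t magic_size = 4;              // '\0','d','n','f'
constexpr std::size_t dnf_version_size = 4;        // '\0','1','.','0'
constexpr std::size_t solv_toolversion_size = 8;   // room for libsolv's "1.2", future proof
constexpr std::size_t chksum_bytes = 32;           // assumed SHA-256 digest length

struct __attribute__((packed)) SolvUserdataSketch {
    char dnf_magic[magic_size];
    char dnf_version[dnf_version_size];
    char libsolv_version[solv_toolversion_size];
    unsigned char checksum[chksum_bytes];
};

// Packed, so no padding: 4 + 4 + 8 + 32 = 48 bytes, which is the length later
// handed to libsolv as the userdata blob.
static_assert(sizeof(SolvUserdataSketch) ==
              magic_size + dnf_version_size + solv_toolversion_size + chksum_bytes,
              "userdata header is exactly the sum of its fields");

int main() { return 0; }
```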


@@ -0,0 +1,417 @@
From 465a6a59279bd7fa2680c626ca0f10c059276668 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ale=C5=A1=20Mat=C4=9Bj?= <amatej@redhat.com>
Date: Wed, 9 Feb 2022 13:18:41 +0100
Subject: [PATCH 29/34] Use dnf solv userdata to check versions and checksum
(RhBug:2027445)
Remove unused functions for checksums
= changelog =
msg: Write and check versions and checksums for solvfile cache by using new dnf solvfile userdata (RhBug:2027445)
It is not possible to use old cache files, therefore cache regeneration is triggered automatically.
type: bugfix
resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2027445
---
libdnf/dnf-sack.cpp | 254 ++++++++++++++++++++++--------------
libdnf/hy-iutil-private.hpp | 2 -
libdnf/hy-iutil.cpp | 20 ---
3 files changed, 156 insertions(+), 120 deletions(-)
diff --git a/libdnf/dnf-sack.cpp b/libdnf/dnf-sack.cpp
index b9baeaef..61f4807c 100644
--- a/libdnf/dnf-sack.cpp
+++ b/libdnf/dnf-sack.cpp
@@ -225,17 +225,39 @@ dnf_sack_new(void)
return DNF_SACK(g_object_new(DNF_TYPE_SACK, NULL));
}
-static int
-can_use_repomd_cache(FILE *fp_solv, unsigned char cs_repomd[CHKSUM_BYTES])
-{
- unsigned char cs_cache[CHKSUM_BYTES];
-
- if (fp_solv &&
- !checksum_read(cs_cache, fp_solv) &&
- !checksum_cmp(cs_cache, cs_repomd))
- return 1;
+// Try to load cached solv file into repo otherwise return FALSE
+static gboolean
+try_to_use_cached_solvfile(const char *path, Repo *repo, int flags, const unsigned char *checksum, GError **err){
+ FILE *fp_cache = fopen(path, "r");
+ if (!fp_cache) {
+ // Missing cache files (ENOENT) are not an error and can even be expected in some cases
+ // (such as when repo doesn't have updateinfo/prestodelta metadata).
+ // Use g_debug in order not to pollute the log by default with such entries.
+ if (errno == ENOENT) {
+ g_debug("Failed to open solvfile cache: %s: %s", path, strerror(errno));
+ } else {
+ g_warning("Failed to open solvfile cache: %s: %s", path, strerror(errno));
+ }
+ return FALSE;
+ }
+ std::unique_ptr<SolvUserdata> solv_userdata = solv_userdata_read(fp_cache);
+ gboolean ret = TRUE;
+ if (solv_userdata && solv_userdata_verify(solv_userdata.get(), checksum)) {
+ // after reading the header rewind to the begining
+ fseek(fp_cache, 0, SEEK_SET);
+ if (repo_add_solv(repo, fp_cache, flags)) {
+ g_set_error (err,
+ DNF_ERROR,
+ DNF_ERROR_INTERNAL_ERROR,
+ _("repo_add_solv() has failed."));
+ ret = FALSE;
+ }
+ } else {
+ ret = FALSE;
+ }
- return 0;
+ fclose(fp_cache);
+ return ret;
}
void
@@ -375,33 +397,27 @@ load_ext(DnfSack *sack, HyRepo hrepo, _hy_repo_repodata which_repodata,
gboolean done = FALSE;
char *fn_cache = dnf_sack_give_cache_fn(sack, name, suffix);
- fp = fopen(fn_cache, "r");
assert(libdnf::repoGetImpl(hrepo)->checksum);
- if (can_use_repomd_cache(fp, libdnf::repoGetImpl(hrepo)->checksum)) {
- int flags = 0;
- /* the updateinfo is not a real extension */
- if (which_repodata != _HY_REPODATA_UPDATEINFO)
- flags |= REPO_EXTEND_SOLVABLES;
- /* do not pollute the main pool with directory component ids */
- if (which_repodata == _HY_REPODATA_FILENAMES || which_repodata == _HY_REPODATA_OTHER)
- flags |= REPO_LOCALPOOL;
- done = TRUE;
+
+ int flags = 0;
+ /* the updateinfo is not a real extension */
+ if (which_repodata != _HY_REPODATA_UPDATEINFO)
+ flags |= REPO_EXTEND_SOLVABLES;
+ /* do not pollute the main pool with directory component ids */
+ if (which_repodata == _HY_REPODATA_FILENAMES || which_repodata == _HY_REPODATA_OTHER)
+ flags |= REPO_LOCALPOOL;
+ if (try_to_use_cached_solvfile(fn_cache, repo, flags, libdnf::repoGetImpl(hrepo)->checksum, error)) {
g_debug("%s: using cache file: %s", __func__, fn_cache);
- ret = repo_add_solv(repo, fp, flags);
- if (ret) {
- g_set_error_literal (error,
- DNF_ERROR,
- DNF_ERROR_INTERNAL_ERROR,
- _("failed to add solv"));
- return FALSE;
- } else {
- repo_update_state(hrepo, which_repodata, _HY_LOADED_CACHE);
- repo_set_repodata(hrepo, which_repodata, repo->nrepodata - 1);
- }
+ done = TRUE;
+ repo_update_state(hrepo, which_repodata, _HY_LOADED_CACHE);
+ repo_set_repodata(hrepo, which_repodata, repo->nrepodata - 1);
}
+ if (error && *error) {
+ g_prefix_error(error, _("Loading extension cache %s (%d) failed: "), fn_cache, which_repodata);
+ return FALSE;
+ }
+
g_free(fn_cache);
- if (fp)
- fclose(fp);
if (done)
return TRUE;
@@ -514,35 +530,53 @@ write_main(DnfSack *sack, HyRepo hrepo, int switchtosolv, GError **error)
strerror(errno));
goto done;
}
- rc = repo_write(repo, fp);
- rc |= checksum_write(repoImpl->checksum, fp);
- rc |= fclose(fp);
+
+ SolvUserdata solv_userdata;
+ if (solv_userdata_fill(&solv_userdata, repoImpl->checksum, error)) {
+ ret = FALSE;
+ fclose(fp);
+ goto done;
+ }
+
+ Repowriter *writer = repowriter_create(repo);
+ repowriter_set_userdata(writer, &solv_userdata, solv_userdata_size);
+ rc = repowriter_write(writer, fp);
+ repowriter_free(writer);
if (rc) {
+ ret = FALSE;
+ fclose(fp);
+ g_set_error(error,
+ DNF_ERROR,
+ DNF_ERROR_INTERNAL_ERROR,
+ _("While writing primary cache %s repowriter write failed: %i, error: %s"),
+ tmp_fn_templ, rc, pool_errstr(repo->pool));
+ goto done;
+ }
+
+ if (fclose(fp)) {
ret = FALSE;
g_set_error (error,
DNF_ERROR,
DNF_ERROR_FILE_INVALID,
- _("write_main() failed writing data: %i"), rc);
+ _("Failed closing tmp file %s: %s"),
+ tmp_fn_templ, strerror(errno));
goto done;
}
}
if (switchtosolv && repo_is_one_piece(repo)) {
+ repo_empty(repo, 1);
/* switch over to written solv file activate paging */
- FILE *fp = fopen(tmp_fn_templ, "r");
- if (fp) {
- repo_empty(repo, 1);
- rc = repo_add_solv(repo, fp, 0);
- fclose(fp);
- if (rc) {
- /* this is pretty fatal */
- ret = FALSE;
- g_set_error_literal (error,
- DNF_ERROR,
- DNF_ERROR_FILE_INVALID,
- _("write_main() failed to re-load "
- "written solv file"));
- goto done;
- }
+ gboolean loaded = try_to_use_cached_solvfile(tmp_fn_templ, repo, 0, repoImpl->checksum, error);
+ if (error && *error) {
+ g_prefix_error(error, _("Failed to use newly written primary cache: %s: "), tmp_fn_templ);
+ ret = FALSE;
+ goto done;
+ }
+ if (!loaded) {
+ g_set_error(error, DNF_ERROR, DNF_ERROR_INTERNAL_ERROR,
+ _("Failed to use newly written primary cache: %s"), tmp_fn_templ);
+ ret = FALSE;
+ goto done;
}
}
@@ -569,20 +603,6 @@ write_ext_updateinfo_filter(Repo *repo, Repokey *key, void *kfdata)
return repo_write_stdkeyfilter(repo, key, 0);
}
-static int
-write_ext_updateinfo(HyRepo hrepo, Repodata *data, FILE *fp)
-{
- auto repoImpl = libdnf::repoGetImpl(hrepo);
- Repo *repo = repoImpl->libsolvRepo;
- int oldstart = repo->start;
- repo->start = repoImpl->main_end;
- repo->nsolvables -= repoImpl->main_nsolvables;
- int res = repo_write_filtered(repo, fp, write_ext_updateinfo_filter, data, 0);
- repo->start = oldstart;
- repo->nsolvables += repoImpl->main_nsolvables;
- return res;
-}
-
static gboolean
write_ext(DnfSack *sack, HyRepo hrepo, _hy_repo_repodata which_repodata,
const char *suffix, GError **error)
@@ -611,37 +631,78 @@ write_ext(DnfSack *sack, HyRepo hrepo, _hy_repo_repodata which_repodata,
FILE *fp = fdopen(tmp_fd, "w+");
g_debug("%s: storing %s to: %s", __func__, repo->name, tmp_fn_templ);
- if (which_repodata != _HY_REPODATA_UPDATEINFO)
- ret |= repodata_write(data, fp);
- else
- ret |= write_ext_updateinfo(hrepo, data, fp);
- ret |= checksum_write(repoImpl->checksum, fp);
- ret |= fclose(fp);
+
+ SolvUserdata solv_userdata;
+ if (solv_userdata_fill(&solv_userdata, repoImpl->checksum, error)) {
+ fclose(fp);
+ success = FALSE;
+ goto done;
+ }
+
+ Repowriter *writer = repowriter_create(repo);
+ repowriter_set_userdata(writer, &solv_userdata, solv_userdata_size);
+ if (which_repodata != _HY_REPODATA_UPDATEINFO) {
+ repowriter_set_repodatarange(writer, data->repodataid, data->repodataid + 1);
+ repowriter_set_flags(writer, REPOWRITER_NO_STORAGE_SOLVABLE);
+ ret = repowriter_write(writer, fp);
+ } else {
+ // write only updateinfo repodata
+ int oldstart = repo->start;
+ repo->start = repoImpl->main_end;
+ repo->nsolvables -= repoImpl->main_nsolvables;
+ repowriter_set_flags(writer, REPOWRITER_LEGACY);
+ repowriter_set_keyfilter(writer, write_ext_updateinfo_filter, data);
+ repowriter_set_keyqueue(writer, 0);
+ ret = repowriter_write(writer, fp);
+ repo->start = oldstart;
+ repo->nsolvables += repoImpl->main_nsolvables;
+ }
+ repowriter_free(writer);
if (ret) {
+ success = FALSE;
+ fclose(fp);
+ g_set_error (error,
+ DNF_ERROR,
+ DNF_ERROR_INTERNAL_ERROR,
+ _("While writing extension cache %s (%d): repowriter write failed: %i, error: %s"),
+ tmp_fn_templ, which_repodata, ret, pool_errstr(repo->pool));
+ goto done;
+ }
+
+ if (fclose(fp)) {
success = FALSE;
g_set_error (error,
DNF_ERROR,
- DNF_ERROR_FAILED,
- _("write_ext(%1$d) has failed: %2$d"),
- which_repodata, ret);
+ DNF_ERROR_FILE_INVALID,
+ _("While writing extension cache (%d): cannot close temporary file: %s"),
+ which_repodata, tmp_fn_templ);
goto done;
}
}
if (repo_is_one_piece(repo) && which_repodata != _HY_REPODATA_UPDATEINFO) {
/* switch over to written solv file activate paging */
- FILE *fp = fopen(tmp_fn_templ, "r");
- if (fp) {
- int flags = REPO_USE_LOADING | REPO_EXTEND_SOLVABLES;
- /* do not pollute the main pool with directory component ids */
- if (which_repodata == _HY_REPODATA_FILENAMES || which_repodata == _HY_REPODATA_OTHER)
- flags |= REPO_LOCALPOOL;
- repodata_extend_block(data, repo->start, repo->end - repo->start);
- data->state = REPODATA_LOADING;
- repo_add_solv(repo, fp, flags);
- data->state = REPODATA_AVAILABLE;
- fclose(fp);
+ int flags = REPO_USE_LOADING | REPO_EXTEND_SOLVABLES;
+ /* do not pollute the main pool with directory component ids */
+ if (which_repodata == _HY_REPODATA_FILENAMES || which_repodata == _HY_REPODATA_OTHER)
+ flags |= REPO_LOCALPOOL;
+ repodata_extend_block(data, repo->start, repo->end - repo->start);
+ data->state = REPODATA_LOADING;
+ int loaded = try_to_use_cached_solvfile(tmp_fn_templ, repo, flags, repoImpl->checksum, error);
+ if (error && *error) {
+ g_prefix_error(error, _("Failed to use newly written extension cache: %s (%d): "),
+ tmp_fn_templ, which_repodata);
+ success = FALSE;
+ goto done;
+ }
+ if (!loaded) {
+ g_set_error(error, DNF_ERROR, DNF_ERROR_INTERNAL_ERROR,
+ _("Failed to use newly written extension cache: %s (%d)"), tmp_fn_templ, which_repodata);
+ success = FALSE;
+ goto done;
}
+
+ data->state = REPODATA_AVAILABLE;
}
if (!mv(tmp_fn_templ, fn, error)) {
@@ -672,7 +733,7 @@ load_yum_repo(DnfSack *sack, HyRepo hrepo, GError **error)
FILE *fp_primary = NULL;
FILE *fp_repomd = NULL;
- FILE *fp_cache = fopen(fn_cache, "r");
+
if (!fn_repomd) {
g_set_error (error,
DNF_ERROR,
@@ -693,18 +754,17 @@ load_yum_repo(DnfSack *sack, HyRepo hrepo, GError **error)
}
checksum_fp(repoImpl->checksum, fp_repomd);
- if (can_use_repomd_cache(fp_cache, repoImpl->checksum)) {
+ if (try_to_use_cached_solvfile(fn_cache, repo, 0, repoImpl->checksum, error)) {
const char *chksum = pool_checksum_str(pool, repoImpl->checksum);
g_debug("using cached %s (0x%s)", name, chksum);
- if (repo_add_solv(repo, fp_cache, 0)) {
- g_set_error (error,
- DNF_ERROR,
- DNF_ERROR_INTERNAL_ERROR,
- _("repo_add_solv() has failed."));
- retval = FALSE;
- goto out;
- }
repoImpl->state_main = _HY_LOADED_CACHE;
+ goto out;
+ }
+
+ if (error && *error) {
+ g_prefix_error(error, _("While loading repository failed to use %s: "), fn_cache);
+ retval = FALSE;
+ goto out;
} else {
auto primary = hrepo->getMetadataPath(MD_TYPE_PRIMARY);
if (primary.empty()) {
@@ -733,8 +793,6 @@ load_yum_repo(DnfSack *sack, HyRepo hrepo, GError **error)
repoImpl->state_main = _HY_LOADED_FETCH;
}
out:
- if (fp_cache)
- fclose(fp_cache);
if (fp_repomd)
fclose(fp_repomd);
if (fp_primary)
diff --git a/libdnf/hy-iutil-private.hpp b/libdnf/hy-iutil-private.hpp
index d498c032..efc91c63 100644
--- a/libdnf/hy-iutil-private.hpp
+++ b/libdnf/hy-iutil-private.hpp
@@ -52,9 +52,7 @@ int solv_userdata_verify(const SolvUserdata *solv_userdata, const unsigned char
/* crypto utils */
int checksum_cmp(const unsigned char *cs1, const unsigned char *cs2);
int checksum_fp(unsigned char *out, FILE *fp);
-int checksum_read(unsigned char *csout, FILE *fp);
int checksum_stat(unsigned char *out, FILE *fp);
-int checksum_write(const unsigned char *cs, FILE *fp);
int checksumt_l2h(int type);
const char *pool_checksum_str(Pool *pool, const unsigned char *chksum);
diff --git a/libdnf/hy-iutil.cpp b/libdnf/hy-iutil.cpp
index f81ca52f..c409a10a 100644
--- a/libdnf/hy-iutil.cpp
+++ b/libdnf/hy-iutil.cpp
@@ -142,17 +142,6 @@ checksum_fp(unsigned char *out, FILE *fp)
return 0;
}
-/* calls rewind(fp) before returning */
-int
-checksum_read(unsigned char *csout, FILE *fp)
-{
- if (fseek(fp, -32, SEEK_END) ||
- fread(csout, CHKSUM_BYTES, 1, fp) != 1)
- return 1;
- rewind(fp);
- return 0;
-}
-
/* does not move the fp position */
int
checksum_stat(unsigned char *out, FILE *fp)
@@ -174,15 +163,6 @@ checksum_stat(unsigned char *out, FILE *fp)
return 0;
}
-/* moves fp to the end of file */
-int checksum_write(const unsigned char *cs, FILE *fp)
-{
- if (fseek(fp, 0, SEEK_END) ||
- fwrite(cs, CHKSUM_BYTES, 1, fp) != 1)
- return 1;
- return 0;
-}
-
static std::array<char, solv_userdata_solv_toolversion_size>
get_padded_solv_toolversion()
{
--
2.31.1
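
Taken together with the previous patch, the cache path now looks roughly like the outline below on both the write and read side: fill the userdata, hand it to a libsolv `Repowriter`, and on load read the userdata back and verify it before `repo_add_solv()`. This is a condensed, hedged sketch rather than the actual libdnf code; paths, flags and error handling are simplified, and `"cache.solv"` and `repomd_checksum` are placeholders.

```
// Condensed outline of the new cache round trip (error handling and paths are
// simplified; "cache.solv" and repomd_checksum are placeholders).
#include <cstdio>
#include <memory>
extern "C" {
#include <solv/repo.h>
#include <solv/repo_solv.h>
#include <solv/repo_write.h>
}
#include "hy-iutil-private.hpp"   // private API: SolvUserdata, solv_userdata_*()

static bool write_cache(Repo *repo, const unsigned char *repomd_checksum)
{
    SolvUserdata userdata;
    if (solv_userdata_fill(&userdata, repomd_checksum, NULL))    // magic + versions + checksum
        return false;
    FILE *fp = fopen("cache.solv", "w");
    if (!fp)
        return false;
    Repowriter *writer = repowriter_create(repo);
    repowriter_set_userdata(writer, &userdata, solv_userdata_size);
    int rc = repowriter_write(writer, fp);                       // solv data plus userdata header
    repowriter_free(writer);
    return fclose(fp) == 0 && rc == 0;
}

static bool load_cache(Repo *repo, const unsigned char *repomd_checksum)
{
    FILE *fp = fopen("cache.solv", "r");
    if (!fp)
        return false;
    std::unique_ptr<SolvUserdata> userdata = solv_userdata_read(fp);
    bool usable = userdata && solv_userdata_verify(userdata.get(), repomd_checksum);
    if (usable) {
        rewind(fp);                                 // re-read the whole file into the repo
        usable = repo_add_solv(repo, fp, 0) == 0;   // any mismatch above triggers regeneration
    }
    fclose(fp);
    return usable;
}
```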


@@ -0,0 +1,83 @@
From 1e0f8f66f6ff30e177c41be7d72330d5eccf2ff8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ale=C5=A1=20Mat=C4=9Bj?= <amatej@redhat.com>
Date: Wed, 9 Feb 2022 13:24:06 +0100
Subject: [PATCH 30/34] Update unittest to test the new private dnf solvfile
API
---
tests/hawkey/test_iutil.cpp | 34 ++++++++++++++++++++++------------
1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/tests/hawkey/test_iutil.cpp b/tests/hawkey/test_iutil.cpp
index 8d00cc94..f3c04782 100644
--- a/tests/hawkey/test_iutil.cpp
+++ b/tests/hawkey/test_iutil.cpp
@@ -24,6 +24,8 @@
#include <solv/pool.h>
+#include <solv/repo.h>
+#include <solv/repo_write.h>
#include "libdnf/hy-util.h"
@@ -97,28 +99,36 @@ START_TEST(test_checksum)
}
END_TEST
-START_TEST(test_checksum_write_read)
+START_TEST(test_dnf_solvfile_userdata)
{
char *new_file = solv_dupjoin(test_globals.tmpdir,
- "/test_checksum_write_read", NULL);
+ "/test_dnf_solvfile_userdata", NULL);
build_test_file(new_file);
unsigned char cs_computed[CHKSUM_BYTES];
- unsigned char cs_read[CHKSUM_BYTES];
- FILE *fp = fopen(new_file, "r");
+ FILE *fp = fopen(new_file, "r+");
checksum_fp(cs_computed, fp);
- // fails, file opened read-only:
- fail_unless(checksum_write(cs_computed, fp) == 1);
- fclose(fp);
- fp = fopen(new_file, "r+");
- fail_if(checksum_write(cs_computed, fp));
+
+ SolvUserdata solv_userdata;
+ fail_if(solv_userdata_fill(&solv_userdata, cs_computed, NULL));
+
+ Pool *pool = pool_create();
+ Repo *repo = repo_create(pool, "test_repo");
+ Repowriter *writer = repowriter_create(repo);
+ repowriter_set_userdata(writer, &solv_userdata, solv_userdata_size);
+ fail_if(repowriter_write(writer, fp));
+ repowriter_free(writer);
fclose(fp);
+
fp = fopen(new_file, "r");
- fail_if(checksum_read(cs_read, fp));
- fail_if(checksum_cmp(cs_computed, cs_read));
+ std::unique_ptr<SolvUserdata> dnf_solvfile = solv_userdata_read(fp);
+ fail_unless(dnf_solvfile);
+ fail_unless(solv_userdata_verify(dnf_solvfile.get(), cs_computed));
fclose(fp);
g_free(new_file);
+ repo_free(repo, 0);
+ pool_free(pool);
}
END_TEST
@@ -181,7 +191,7 @@ iutil_suite(void)
TCase *tc = tcase_create("Main");
tcase_add_test(tc, test_abspath);
tcase_add_test(tc, test_checksum);
- tcase_add_test(tc, test_checksum_write_read);
+ tcase_add_test(tc, test_dnf_solvfile_userdata);
tcase_add_test(tc, test_mkcachedir);
tcase_add_test(tc, test_version_split);
suite_add_tcase(s, tc);
--
2.31.1


@@ -0,0 +1,38 @@
From 893eb087e56588d62e81e91e5d283003bd80552a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ale=C5=A1=20Mat=C4=9Bj?= <amatej@redhat.com>
Date: Tue, 8 Mar 2022 11:43:38 +0100
Subject: [PATCH 31/34] Increase required libsolv version for cache versioning
---
CMakeLists.txt | 2 +-
libdnf.spec | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 60cf1b8c..d895b2bf 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -51,7 +51,7 @@ endif()
# build dependencies
find_package(Gpgme REQUIRED)
-find_package(LibSolv 0.6.30 REQUIRED COMPONENTS ext)
+find_package(LibSolv 0.7.20 REQUIRED COMPONENTS ext)
find_package(OpenSSL REQUIRED)
diff --git a/libdnf.spec b/libdnf.spec
index a849cdea..aa51dd28 100644
--- a/libdnf.spec
+++ b/libdnf.spec
@@ -1,5 +1,5 @@
-%global libsolv_version 0.7.17
-%global libmodulemd_version 2.11.2-2
+%global libsolv_version 0.7.21
+%global libmodulemd_version 2.13.0
%global librepo_version 1.13.1
%global dnf_conflict 4.3.0
%global swig_version 3.0.12
--
2.31.1


@@ -0,0 +1,46 @@
From b636af779fcdab326eef7bbb74912254c2fa2b0c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ale=C5=A1=20Mat=C4=9Bj?= <amatej@redhat.com>
Date: Thu, 17 Mar 2022 10:34:24 +0100
Subject: [PATCH 32/34] Add more specific error handling for loading repomd and
primary
---
libdnf/dnf-sack.cpp | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/libdnf/dnf-sack.cpp b/libdnf/dnf-sack.cpp
index 61f4807c..8e11b8f8 100644
--- a/libdnf/dnf-sack.cpp
+++ b/libdnf/dnf-sack.cpp
@@ -780,13 +780,24 @@ load_yum_repo(DnfSack *sack, HyRepo hrepo, GError **error)
fp_primary = solv_xfopen(primary.c_str(), "r");
assert(fp_primary);
- g_debug("fetching %s", name);
- if (repo_add_repomdxml(repo, fp_repomd, 0) || \
- repo_add_rpmmd(repo, fp_primary, 0, 0)) {
+ g_debug("Loading repomd: %s", fn_repomd);
+ if (repo_add_repomdxml(repo, fp_repomd, 0)) {
g_set_error (error,
DNF_ERROR,
DNF_ERROR_INTERNAL_ERROR,
- _("repo_add_repomdxml/rpmmd() has failed."));
+ _("Loading repomd has failed: %s"),
+ pool_errstr(repo->pool));
+ retval = FALSE;
+ goto out;
+ }
+
+ g_debug("Loading primary: %s", primary.c_str());
+ if (repo_add_rpmmd(repo, fp_primary, 0, 0)) {
+ g_set_error (error,
+ DNF_ERROR,
+ DNF_ERROR_INTERNAL_ERROR,
+ _("Loading primary has failed: %s"),
+ pool_errstr(repo->pool));
retval = FALSE;
goto out;
}
--
2.31.1


@@ -0,0 +1,74 @@
From c5919efe898294420ec8e91e4eed5b9081e681c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Hr=C3=A1zk=C3=BD?= <lhrazky@redhat.com>
Date: Thu, 17 Feb 2022 18:18:16 +0100
Subject: [PATCH 33/34] libdnf/transaction/RPMItem: Fix handling transaction id
in resolveTransactionItemReason
The maxTransactionId argument was ignored, the method was always returning the
reason from the last transaction. This is the correct result for
maxTransactionId = -1. In a couple of places the method is called with
maxTransactionId = -2. Fixing this would mean nontrivial changes to the
logic which could potentially break something else, so I'm leaving this
behavior unchanged.
For non-negative values of maxTransactionId (with which it's not being called
anywhere in dnf codebase), the commit adds a condition to SELECT only
transaction ids less than or equal to maxTransactionId.
= changelog =
msg: Fix handling transaction id in resolveTransactionItemReason
type: bugfix
resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2053014
resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2010259
---
libdnf/transaction/RPMItem.cpp | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/libdnf/transaction/RPMItem.cpp b/libdnf/transaction/RPMItem.cpp
index 5f667ab9..ecce789d 100644
--- a/libdnf/transaction/RPMItem.cpp
+++ b/libdnf/transaction/RPMItem.cpp
@@ -255,7 +255,11 @@ RPMItem::resolveTransactionItemReason(SQLite3Ptr conn,
const std::string &arch,
int64_t maxTransactionId)
{
- const char *sql = R"**(
+ // NOTE: All negative maxTransactionId values are treated the same. The
+ // method is called with maxTransactionId = -2 in a couple of places, the
+ // semantics here have been the same as with -1 for a long time. If it
+ // ain't broke...
+ std::string sql = R"**(
SELECT
ti.action as action,
ti.reason as reason
@@ -271,14 +275,25 @@ RPMItem::resolveTransactionItemReason(SQLite3Ptr conn,
AND ti.action not in (3, 5, 7, 10)
AND i.name = ?
AND i.arch = ?
+ )**";
+
+ if (maxTransactionId >= 0) {
+ sql.append(" AND ti.trans_id <= ?");
+ }
+
+ sql.append(R"**(
ORDER BY
ti.trans_id DESC
LIMIT 1
- )**";
+ )**");
if (arch != "") {
SQLite3::Query query(*conn, sql);
- query.bindv(name, arch);
+ if (maxTransactionId >= 0) {
+ query.bindv(name, arch, maxTransactionId);
+ } else {
+ query.bindv(name, arch);
+ }
if (query.step() == SQLite3::Statement::StepResult::ROW) {
auto action = static_cast< TransactionItemAction >(query.get< int64_t >("action"));
--
2.31.1
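
In other words, the `trans_id` filter and its bind parameter now only exist when `maxTransactionId` is non-negative, so the number of `?` placeholders always matches the number of values passed to `bindv()`. A small sketch of that conditional query building (with the SQL abbreviated) is shown here for illustration:

```
// Sketch of the conditional SQL built by resolveTransactionItemReason() after
// the fix; the query text is abbreviated, see the patch for the real statement.
#include <iostream>
#include <string>

static std::string build_sql(long long maxTransactionId)
{
    std::string sql =
        "SELECT ti.action, ti.reason FROM ... "    // abbreviated
        "WHERE i.name = ? AND i.arch = ?";
    if (maxTransactionId >= 0)
        sql.append(" AND ti.trans_id <= ?");       // extra placeholder, extra bindv() argument
    sql.append(" ORDER BY ti.trans_id DESC LIMIT 1");
    return sql;
}

int main()
{
    std::cout << build_sql(-1) << "\n";   // latest transaction wins (same for -2)
    std::cout << build_sql(42) << "\n";   // only transactions with id <= 42 are considered
    return 0;
}
```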


@@ -0,0 +1,33 @@
From c303b7c3723f3e9fbc43963a62237ea17516fc6b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Hr=C3=A1zk=C3=BD?= <lhrazky@redhat.com>
Date: Thu, 17 Feb 2022 18:30:14 +0100
Subject: [PATCH 34/34] libdnf/transaction/TransactionItem: Set short action
for Reason Change
Sets the "short" (one letter) representation of the Reason Change action
to "C".
This was likely never used before, as the only way to create a transaction
with a reason change and something else is rolling back multiple
transactions, which was broken.
---
libdnf/transaction/TransactionItem.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/libdnf/transaction/TransactionItem.cpp b/libdnf/transaction/TransactionItem.cpp
index 3b43d1f1..4358038e 100644
--- a/libdnf/transaction/TransactionItem.cpp
+++ b/libdnf/transaction/TransactionItem.cpp
@@ -51,8 +51,7 @@ static const std::map< TransactionItemAction, std::string > transactionItemActio
{TransactionItemAction::REMOVE, "E"},
{TransactionItemAction::REINSTALL, "R"},
{TransactionItemAction::REINSTALLED, "R"},
- // TODO: replace "?" with something better
- {TransactionItemAction::REASON_CHANGE, "?"},
+ {TransactionItemAction::REASON_CHANGE, "C"},
};
/*
--
2.31.1


@@ -0,0 +1,45 @@
From c4ee580c73375060b6eb5b3414636688e3d601c3 Mon Sep 17 00:00:00 2001
From: Marek Blaha <mblaha@redhat.com>
Date: Fri, 10 Jun 2022 15:29:56 +0200
Subject: [PATCH] Do not print errors on failovermethod repo option
(RhBug:2039906)
= changelog =
msg: Do not print errors if repository config contains failovermethod option
type: bugfix
resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2039906
---
libdnf/conf/ConfigRepo.cpp | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/libdnf/conf/ConfigRepo.cpp b/libdnf/conf/ConfigRepo.cpp
index e98ac0af..0cb52f58 100644
--- a/libdnf/conf/ConfigRepo.cpp
+++ b/libdnf/conf/ConfigRepo.cpp
@@ -22,6 +22,8 @@
#include "Const.hpp"
#include "Config-private.hpp"
+#include "bgettext/bgettext-lib.h"
+
namespace libdnf {
class ConfigRepo::Impl {
@@ -174,6 +176,14 @@ ConfigRepo::Impl::Impl(Config & owner, ConfigMain & mainConfig)
owner.optBinds().add("enabled_metadata", enabled_metadata);
owner.optBinds().add("user_agent", user_agent);
owner.optBinds().add("countme", countme);
+ owner.optBinds().add("failovermethod", failovermethod,
+ [&](Option::Priority priority, const std::string & value){
+ if (value != "priority") {
+ throw Option::InvalidValue(_("only the value 'priority' is supported."));
+ }
+ failovermethod.set(priority, value);
+ }, nullptr, false
+ );
owner.optBinds().add("sslverifystatus", sslverifystatus);
}
--
2.36.1


@@ -0,0 +1,24 @@
From 9dbd5f8f0ac3d6d3fab9147a3208623cba698682 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Hr=C3=A1zk=C3=BD?= <lhrazky@redhat.com>
Date: Tue, 14 Jun 2022 17:26:44 +0200
Subject: [PATCH] sack/query.hpp: Add a missing include
---
libdnf/sack/query.hpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/libdnf/sack/query.hpp b/libdnf/sack/query.hpp
index 9e49761c..306b24e3 100644
--- a/libdnf/sack/query.hpp
+++ b/libdnf/sack/query.hpp
@@ -26,6 +26,7 @@
#include "../hy-types.h"
#include "../hy-query.h"
#include "../hy-subject.h"
+#include "../nevra.hpp"
#include "../repo/solvable/Dependency.hpp"
#include "../repo/solvable/DependencyContainer.hpp"
#include "../transaction/Swdb.hpp"
--
2.36.1


@@ -0,0 +1,128 @@
From 876393d5d0cd5f806415dcdc90168e58e66da916 Mon Sep 17 00:00:00 2001
From: Jaroslav Rohel <jrohel@redhat.com>
Date: Mon, 28 Mar 2022 07:29:48 +0200
Subject: [PATCH] context: dnf_context_remove accepts `<package-spec>` as dnf,
unify code
Prior to this change, the `dnf_context_remove` function only accepted
a package name (without globs). It was not possible to enter a more detailed
specification and thus, for example, select a specific version of the package
to uninstall, such as which kernel to remove.
This patch adds full `<package-spec>` support as in dnf, including support
for globs (wildcards) and searching against 'provides' and 'file provides'.
Better error handling for `hy_goal_upgrade_selector` in `dnf_context_update`.
Unification of the function code `dnf_context_install`, `dnf_context_remove`,
`dnf_context_update`.
= changelog =
msg: context: Support <package-spec> (NEVRA forms, provides, file provides) including globs in the dnf_context_remove func
type: enhancement
resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2084602
---
libdnf/dnf-context.cpp | 46 ++++++++++++++++++++++++------------------
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/libdnf/dnf-context.cpp b/libdnf/dnf-context.cpp
index 6cb0011b..4b055f03 100644
--- a/libdnf/dnf-context.cpp
+++ b/libdnf/dnf-context.cpp
@@ -2391,10 +2391,9 @@ dnf_context_run(DnfContext *context, GCancellable *cancellable, GError **error)
* Since: 0.1.0
**/
gboolean
-dnf_context_install (DnfContext *context, const gchar *name, GError **error) try
+dnf_context_install(DnfContext *context, const gchar *name, GError **error) try
{
DnfContextPrivate *priv = GET_PRIVATE (context);
- g_autoptr(GPtrArray) selector_matches = NULL;
/* create sack and add sources */
if (priv->sack == NULL) {
@@ -2405,7 +2404,7 @@ dnf_context_install (DnfContext *context, const gchar *name, GError **error) try
g_auto(HySubject) subject = hy_subject_create(name);
g_auto(HySelector) selector = hy_subject_get_best_selector(subject, priv->sack, NULL, FALSE, NULL);
- selector_matches = hy_selector_matches(selector);
+ g_autoptr(GPtrArray) selector_matches = hy_selector_matches(selector);
if (selector_matches->len == 0) {
g_set_error(error,
DNF_ERROR,
@@ -2438,31 +2437,33 @@ gboolean
dnf_context_remove(DnfContext *context, const gchar *name, GError **error) try
{
DnfContextPrivate *priv = GET_PRIVATE(context);
- GPtrArray *pkglist;
- hy_autoquery HyQuery query = NULL;
- gboolean ret = TRUE;
- guint i;
/* create sack and add repos */
if (priv->sack == NULL) {
dnf_state_reset(priv->state);
- ret = dnf_context_setup_sack(context, priv->state, error);
- if (!ret)
+ if (!dnf_context_setup_sack(context, priv->state, error))
return FALSE;
}
- /* find installed packages to remove */
- query = hy_query_create(priv->sack);
- query->installed();
- hy_query_filter(query, HY_PKG_NAME, HY_EQ, name);
- pkglist = hy_query_run(query);
+ libdnf::Query query(priv->sack, libdnf::Query::ExcludeFlags::APPLY_EXCLUDES);
+ query.installed();
+ auto ret = query.filterSubject(name, nullptr, false, true, true, true);
+ if (!ret.first) {
+ g_set_error(error,
+ DNF_ERROR,
+ DNF_ERROR_PACKAGE_NOT_FOUND,
+ "No installed package matches '%s'", name);
+ return FALSE;
+ }
+
+ g_autoptr(GPtrArray) packages = query.run();
/* add each package */
- for (i = 0; i < pkglist->len; i++) {
- auto pkg = static_cast<DnfPackage *>(g_ptr_array_index(pkglist, i));
+ for (guint i = 0; i < packages->len; i++) {
+ auto pkg = static_cast<DnfPackage *>(g_ptr_array_index(packages, i));
hy_goal_erase(priv->goal, pkg);
}
- g_ptr_array_unref(pkglist);
+
return TRUE;
} CATCH_TO_GERROR(FALSE)
@@ -2493,8 +2494,7 @@ dnf_context_update(DnfContext *context, const gchar *name, GError **error) try
}
g_auto(HySubject) subject = hy_subject_create(name);
- g_auto(HySelector) selector = hy_subject_get_best_selector(subject, priv->sack, NULL, FALSE,
- NULL);
+ g_auto(HySelector) selector = hy_subject_get_best_selector(subject, priv->sack, NULL, FALSE, NULL);
g_autoptr(GPtrArray) selector_matches = hy_selector_matches(selector);
if (selector_matches->len == 0) {
g_set_error(error,
@@ -2504,8 +2504,14 @@ dnf_context_update(DnfContext *context, const gchar *name, GError **error) try
return FALSE;
}
- if (hy_goal_upgrade_selector(priv->goal, selector))
+ int ret = hy_goal_upgrade_selector(priv->goal, selector);
+ if (ret != 0) {
+ g_set_error(error,
+ DNF_ERROR,
+ ret,
+ "Ill-formed Selector '%s'", name);
return FALSE;
+ }
return TRUE;
} CATCH_TO_GERROR(FALSE)
--
2.36.1
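
With this change, a context-API consumer can pass the same specs to `dnf_context_remove()` that dnf accepts on the command line. The fragment below is a hedged illustration of that flow; the setup is deliberately minimal (real code also configures cache directories, the release version, and so on) and the spec strings are only examples.

```
// Hedged illustration of the context-API flow with the extended remove specs;
// real code needs more setup and error handling than shown here.
#include <libdnf/libdnf.h>

int main(void)
{
    GError *error = NULL;
    DnfContext *ctx = dnf_context_new();

    gboolean ok =
        dnf_context_setup(ctx, NULL, &error) &&                    // load config and repos
        // Any of these forms is accepted now: plain name, NEVRA glob,
        // provide, or file provide.
        dnf_context_remove(ctx, "kernel-4.18.0-348.*", &error) &&
        dnf_context_remove(ctx, "/usr/bin/python3", &error) &&
        dnf_context_run(ctx, NULL, &error);                        // resolve and run

    if (!ok)
        g_printerr("remove failed: %s\n", error ? error->message : "unknown error");
    g_clear_error(&error);
    g_object_unref(ctx);
    return ok ? 0 : 1;
}
```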


@@ -0,0 +1,62 @@
From 44d75a36d7c8a933119e5b63f180a8c23715ec51 Mon Sep 17 00:00:00 2001
From: Jaroslav Rohel <jrohel@redhat.com>
Date: Mon, 28 Mar 2022 07:51:45 +0200
Subject: [PATCH] context: Fix doc dnf_context_install/remove/update/distrosync
Functions do not support groups - only packages are supported.
The `dnf_context_remove` function marks all matching packages for removal
- not just the oldest one.
---
libdnf/dnf-context.cpp | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/libdnf/dnf-context.cpp b/libdnf/dnf-context.cpp
index 4b055f03..fe005430 100644
--- a/libdnf/dnf-context.cpp
+++ b/libdnf/dnf-context.cpp
@@ -2379,7 +2379,7 @@ dnf_context_run(DnfContext *context, GCancellable *cancellable, GError **error)
/**
* dnf_context_install:
* @context: a #DnfContext instance.
- * @name: A package or group name, e.g. "firefox" or "@gnome-desktop"
+ * @name: A package specification (NEVRA forms, provide, file provide, globs supported) e.g. "firefox"
* @error: A #GError or %NULL
*
* Finds a remote package and marks it to be installed.
@@ -2422,12 +2422,12 @@ dnf_context_install(DnfContext *context, const gchar *name, GError **error) try
/**
* dnf_context_remove:
* @context: a #DnfContext instance.
- * @name: A package or group name, e.g. "firefox" or "@gnome-desktop"
+ * @name: A package specification (NEVRA forms, provide, file provide, globs supported) e.g. "firefox"
* @error: A #GError or %NULL
*
* Finds an installed package and marks it to be removed.
*
- * If multiple packages are available then only the oldest package is removed.
+ * If multiple packages are available, all of them will be removed.
*
* Returns: %TRUE for success, %FALSE otherwise
*
@@ -2470,7 +2470,7 @@ dnf_context_remove(DnfContext *context, const gchar *name, GError **error) try
/**
* dnf_context_update:
* @context: a #DnfContext instance.
- * @name: A package or group name, e.g. "firefox" or "@gnome-desktop"
+ * @name: A package specification (NEVRA forms, provide, file provide, globs supported) e.g. "firefox"
* @error: A #GError or %NULL
*
* Finds an installed and remote package and marks it to be updated.
@@ -2548,7 +2548,7 @@ dnf_context_update_all (DnfContext *context,
/**
* dnf_context_distrosync:
* @context: a #DnfContext instance.
- * @name: A package or group name, e.g. "firefox" or "@gnome-desktop"
+ * @name: A package specification (NEVRA forms, provide, file provide, globs supported) e.g. "firefox"
* @error: A #GError or %NULL
*
* Finds an installed and remote package and marks it to be synchronized with remote version.
--
2.36.1


@@ -0,0 +1,100 @@
From cf4893a0128c567ed1fdd1b02c9cf2b43bfb02f7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ale=C5=A1=20Mat=C4=9Bj?= <amatej@redhat.com>
Date: Mon, 30 May 2022 08:59:41 +0200
Subject: [PATCH] advisory upgrade: filter out advPkgs with different arch
This prevents a situation in security upgrades where libsolv cannot
upgrade dependent pkgs because we ask for an upgrade to a different arch:
we can get the following testcase if libdnf has filtered out
json-c-2-2.el8.x86_64@rhel-8-for-x86_64-baseos-rpms
(because there is an advisory for the already installed json-c-1-1.el8.x86_64) but
json-c-2-2.el8.i686@rhel-8-for-x86_64-baseos-rpms is not filtered out because
it has a different architecture. The resulting transaction doesn't work.
```
repo @System -99.-1000 testtags <inline>
#>=Pkg: bind-libs-lite 1 1.el8 x86_64
#>=Pkg: json-c 1 1.el8 x86_64
repo rhel-8-for-x86_64-baseos-rpms -99.-1000 testtags <inline>
#>=Pkg: json-c 2 2.el8 x86_64
#>=Prv: libjson-c.so.4()(64bit)
#>
#>=Pkg: json-c 2 2.el8 i686
#>=Prv: libjson-c.so.4()
#>
#>=Pkg: bind-libs-lite 2 2.el8 x86_64
#>=Req: libjson-c.so.4()(64bit)
system x86_64 rpm @System
job update oneof json-c-1-1.el8.x86_64@@System json-c-2-2.el8.i686@rhel-8-for-x86_64-baseos-rpms bind-libs-lite-2-2.el8.x86_64@rhel-8-for-x86_64-baseos-rpms [forcebest,targeted,setevr,setarch]
result transaction,problems <inline>
#>problem f06d81a4 info package bind-libs-lite-2-2.el8.x86_64 requires libjson-c.so.4()(64bit), but none of the providers can be installed
#>problem f06d81a4 solution 96f9031b allow bind-libs-lite-1-1.el8.x86_64@@System
#>problem f06d81a4 solution c8daf94f allow json-c-2-2.el8.x86_64@rhel-8-for-x86_64-baseos-rpms
#>upgrade bind-libs-lite-1-1.el8.x86_64@@System bind-libs-lite-2-2.el8.x86_64@rhel-8-for-x86_64-baseos-rpms
#>upgrade json-c-1-1.el8.x86_64@@System json-c-2-2.el8.x86_64@rhel-8-for-x86_64-baseos-rpms
```
= changelog =
msg: Filter out advisory pkgs with different arch during advisory upgrade, fixes possible problems in dependency resolution.
type: bugfix
resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2088149
---
libdnf/sack/query.cpp | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/libdnf/sack/query.cpp b/libdnf/sack/query.cpp
index ac2736b5..03d39659 100644
--- a/libdnf/sack/query.cpp
+++ b/libdnf/sack/query.cpp
@@ -1877,12 +1877,6 @@ Query::Impl::filterAdvisory(const Filter & f, Map *m, int keyname)
std::vector<Solvable *> candidates;
std::vector<Solvable *> installed_solvables;
- Id id = -1;
- while ((id = resultPset->next(id)) != -1) {
- candidates.push_back(pool_id2solvable(pool, id));
- }
- NameArchEVRComparator cmp_key(pool);
-
if (cmp_type & HY_UPGRADE) {
Query installed(sack, ExcludeFlags::IGNORE_EXCLUDES);
installed.installed();
@@ -1893,6 +1887,18 @@ Query::Impl::filterAdvisory(const Filter & f, Map *m, int keyname)
installed_solvables.push_back(pool_id2solvable(pool, installed_id));
}
std::sort(installed_solvables.begin(), installed_solvables.end(), NameArchSolvableComparator);
+ Id id = -1;
+ while ((id = resultPset->next(id)) != -1) {
+ Solvable * s = pool_id2solvable(pool, id);
+ // When doing HY_UPGRADE consider only candidate pkgs that have matching Name and Arch
+ // with some already installed pkg (in other words: some other version of the pkg is already installed).
+ // Otherwise a pkg with different Arch than installed can end up in upgrade set which is wrong.
+ // It can result in dependency issues, reported as: RhBug:2088149.
+ auto low = std::lower_bound(installed_solvables.begin(), installed_solvables.end(), s, NameArchSolvableComparator);
+ if (low != installed_solvables.end() && s->name == (*low)->name && s->arch == (*low)->arch) {
+ candidates.push_back(s);
+ }
+ }
// Apply security filters only to packages with lower priority - to unify behaviour upgrade
// and upgrade-minimal
@@ -1915,7 +1921,14 @@ Query::Impl::filterAdvisory(const Filter & f, Map *m, int keyname)
}
}
std::swap(candidates, priority_candidates);
+ } else {
+ Id id = -1;
+ while ((id = resultPset->next(id)) != -1) {
+ candidates.push_back(pool_id2solvable(pool, id));
+ }
}
+
+ NameArchEVRComparator cmp_key(pool);
std::sort(candidates.begin(), candidates.end(), cmp_key);
for (auto & advisoryPkg : pkgs) {
if (cmp_type & HY_UPGRADE) {
--
2.36.1


@@ -0,0 +1,71 @@
From 652977360c4253faff9e95d35c603b2f585671fe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ale=C5=A1=20Mat=C4=9Bj?= <amatej@redhat.com>
Date: Tue, 5 Jul 2022 09:02:22 +0200
Subject: [PATCH] Add obsoletes to filtering for advisory candidates
Patch https://github.com/rpm-software-management/libdnf/pull/1526
introduced a regression where we no longer do a security upgrade if package A
is installed, package B obsoletes A, B is available in two versions, and there
is an advisory for the second version.
Test: https://github.com/rpm-software-management/ci-dnf-stack/pull/1130
---
libdnf/sack/query.cpp | 32 ++++++++++++++++++++++++++++----
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/libdnf/sack/query.cpp b/libdnf/sack/query.cpp
index 03d39659..5355f9f7 100644
--- a/libdnf/sack/query.cpp
+++ b/libdnf/sack/query.cpp
@@ -1878,6 +1878,13 @@ Query::Impl::filterAdvisory(const Filter & f, Map *m, int keyname)
std::vector<Solvable *> installed_solvables;
if (cmp_type & HY_UPGRADE) {
+ // When doing HY_UPGRADE consider only candidate pkgs that have matching Name and Arch with:
+ // * some already installed pkg (in other words: some other version of the pkg is already installed)
+ // or
+ // * with pkg that obsoletes some already installed (or to be installed in this transaction) pkg
+ // Otherwise a pkg with different Arch than installed can end up in upgrade set which is wrong.
+ // It can result in dependency issues, reported as: RhBug:2088149.
+
Query installed(sack, ExcludeFlags::IGNORE_EXCLUDES);
installed.installed();
installed.addFilter(HY_PKG_LATEST_PER_ARCH, HY_EQ, 1);
@@ -1887,13 +1894,30 @@ Query::Impl::filterAdvisory(const Filter & f, Map *m, int keyname)
installed_solvables.push_back(pool_id2solvable(pool, installed_id));
}
std::sort(installed_solvables.begin(), installed_solvables.end(), NameArchSolvableComparator);
+
+ Query obsoletes(sack, ExcludeFlags::IGNORE_EXCLUDES);
+ obsoletes.addFilter(HY_PKG, HY_EQ, resultPset);
+ obsoletes.available();
+
+ Query possibly_obsoleted(sack, ExcludeFlags::IGNORE_EXCLUDES);
+ possibly_obsoleted.addFilter(HY_PKG, HY_EQ, resultPset);
+ possibly_obsoleted.addFilter(HY_PKG_UPGRADES, HY_EQ, 1);
+ possibly_obsoleted.queryUnion(installed);
+ possibly_obsoleted.apply();
+
+ obsoletes.addFilter(HY_PKG_OBSOLETES, HY_EQ, possibly_obsoleted.runSet());
+ obsoletes.apply();
+ Id obsoleted_id = -1;
+ // Add to candidates resultPset pkgs that obsolete some installed (or to be installed in this transaction) pkg
+ while ((obsoleted_id = obsoletes.pImpl->result->next(obsoleted_id)) != -1) {
+ Solvable * s = pool_id2solvable(pool, obsoleted_id);
+ candidates.push_back(s);
+ }
+
Id id = -1;
+ // Add to candidates resultPset pkgs that match name and arch with some already installed pkg
while ((id = resultPset->next(id)) != -1) {
Solvable * s = pool_id2solvable(pool, id);
- // When doing HY_UPGRADE consider only candidate pkgs that have matching Name and Arch
- // with some already installed pkg (in other words: some other version of the pkg is already installed).
- // Otherwise a pkg with different Arch than installed can end up in upgrade set which is wrong.
- // It can result in dependency issues, reported as: RhBug:2088149.
auto low = std::lower_bound(installed_solvables.begin(), installed_solvables.end(), s, NameArchSolvableComparator);
if (low != installed_solvables.end() && s->name == (*low)->name && s->arch == (*low)->arch) {
candidates.push_back(s);
--
2.36.1
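
Combined with the previous patch, the HY_UPGRADE candidate set is now: available packages whose name and arch match an installed package, plus packages that obsolete an installed (or to-be-installed) package. The toy model below replays the json-c example from the earlier commit message; the `Pkg` struct and the data are made up for illustration, while libdnf works on libsolv solvables and real obsoletes (reldep) matching instead.

```
// Toy model of the HY_UPGRADE candidate selection after these two patches:
// keep available pkgs matching an installed (name, arch), plus pkgs that
// obsolete an installed pkg. Types and data are illustrative only.
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct Pkg { std::string name, arch, evr; std::vector<std::string> obsoletes; };

int main()
{
    std::vector<Pkg> installed = {{"json-c", "x86_64", "1-1.el8", {}}};
    std::vector<Pkg> available = {
        {"json-c", "x86_64", "2-2.el8", {}},
        {"json-c", "i686",   "2-2.el8", {}},             // different arch: must not be a candidate
        {"json-c-new", "x86_64", "3-1.el8", {"json-c"}}, // obsoletes an installed pkg: candidate
    };

    std::vector<Pkg> candidates;
    for (const auto & a : available) {
        bool name_arch_match = std::any_of(installed.begin(), installed.end(),
            [&](const Pkg & i) { return i.name == a.name && i.arch == a.arch; });
        bool obsoletes_installed = std::any_of(a.obsoletes.begin(), a.obsoletes.end(),
            [&](const std::string & o) {
                return std::any_of(installed.begin(), installed.end(),
                                   [&](const Pkg & i) { return i.name == o; });
            });
        if (name_arch_match || obsoletes_installed)
            candidates.push_back(a);
    }

    for (const auto & c : candidates)
        std::cout << c.name << "-" << c.evr << "." << c.arch << "\n";
    // prints json-c-2-2.el8.x86_64 and json-c-new-3-1.el8.x86_64, not the i686 build
    return 0;
}
```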


@@ -1,4 +1,4 @@
-%global libsolv_version 0.7.17
+%global libsolv_version 0.7.20-3
%global libmodulemd_version 2.11.2-2
%global librepo_version 1.13.1
%global dnf_conflict 4.3.0
@@ -56,7 +56,7 @@
Name: libdnf
Version: %{libdnf_major_version}.%{libdnf_minor_version}.%{libdnf_micro_version}
-Release: 7%{?dist}
+Release: 11%{?dist}
Summary: Library providing simplified C and Python API to libsolv
License: LGPLv2+
URL: https://github.com/rpm-software-management/libdnf
@@ -86,6 +86,22 @@ Patch22: 0022-hawkey-surrogateescape-error-handler-to-decode-UTF-8-string
Patch23: 0023-Turn-off-strict-validation-of-modulemd-documents-RhBug200485320071662007167.patch
Patch24: 0024-Add-unittest-for-setting-up-repo-with-empty-keyfile-RhBug1994614.patch
Patch25: 0025-Add-getLatestModules.patch
+Patch26: 0026-context-Substitute-all-repository-config-options-RhB.patch
+Patch27: 0027-Use-environment-variable-in-unittest-instead-of-ugly.patch
+Patch28: 0028-Add-private-API-for-filling-reading-and-verifying-ne.patch
+Patch29: 0029-Use-dnf-solv-userdata-to-check-versions-and-checksum.patch
+Patch30: 0030-Update-unittest-to-test-the-new-private-dnf-solvfile.patch
+Patch31: 0031-Increase-required-libsolv-version-for-cache-versioni.patch
+Patch32: 0032-Add-more-specific-error-handling-for-loading-repomd-.patch
+Patch33: 0033-libdnf-transaction-RPMItem-Fix-handling-transaction-.patch
+Patch34: 0034-libdnf-transaction-TransactionItem-Set-short-action-.patch
+Patch35: 0035-Do-not-print-errors-on-failovermethod-repo-option-Rh.patch
+Patch36: 0036-sack-query.hpp-Add-a-missing-include.patch
+Patch37: 0037-context-dnf_context_remove-accepts-package-spec-as-d.patch
+Patch38: 0038-context-Fix-doc-dnf_context_install-remove-update-di.patch
+Patch39: 0039-advisory-upgrade-filter-out-advPkgs-with-different-a.patch
+Patch40: 0040-Add-obsoletes-to-filtering-for-advisory-candidates.patch
BuildRequires: cmake
BuildRequires: gcc
@@ -330,6 +346,20 @@ popd
%endif
%changelog
+* Thu Jul 21 2022 Lukas Hrazky <lhrazky@redhat.com> - 0.63.0-11
+- Add obsoletes to filtering for advisory candidates
+* Tue Jun 14 2022 Lukas Hrazky <lhrazky@redhat.com> - 0.63.0-10
+- Do not print errors on failovermethod repo option
+- the dnf_context_remove() function accepts `<package-spec>`, doc updates
+- advisory upgrade: filter out advPkgs with different arch
+* Wed May 04 2022 Lukas Hrazky <lhrazky@redhat.com> - 0.63.0-8
+- Substitute all repository config options (fixes substitution of baseurl)
+- Use solvfile userdata to store and check checksums and solv versions
+- Fix handling transaction id in resolveTransactionItemReason
+- Set short action for Reason Change
* Fri Jan 14 2022 Pavla Kratochvilova <pkratoch@redhat.com> - 0.63.0-7
- Rebuild with new release number