Always update repo metadata when building an image
When the kickstart is handed off to Anaconda for building, it downloads its own copy of the metadata and re-runs the depsolve. So if the dnf cache isn't current there will be a mismatch, and the build will fail to find some of the versions in final-kickstart.ks.

This adds a new context to DNFLock, .lock_check, that forces a check of the metadata. It also implements its own timeout, and forces a refresh of the metadata when that expires, because the dnf expiration doesn't always work as expected.

Resolves: rhbz#1631561
parent f5679f61b1
commit 13e6a68154
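For context, a minimal sketch of how the two lock contexts divide the work. DNFLock, .lock, .lock_check, .dbo, and projects_depsolve_with_size all appear in the diff below; the wrapper functions, their arguments, and the projects import path are illustrative assumptions:

from pylorax.api.dnfbase import DNFLock
from pylorax.api.projects import projects_depsolve_with_size  # assumed import path

def depsolve_request(dnflock, projects):
    # Ordinary API requests take .lock, which only forces a metadata
    # check once the expiration timeout (6 hours by default) has passed.
    with dnflock.lock:
        return projects_depsolve_with_size(dnflock.dbo, projects, [], with_core=False)

def depsolve_for_build(dnflock, projects):
    # Builds take .lock_check, which refreshes the metadata every time so
    # the depsolve matches the copy Anaconda will download for itself.
    with dnflock.lock_check:
        return projects_depsolve_with_size(dnflock.dbo, projects, [], with_core=False)

The expensive path is deliberately opt-in: .lock_check re-downloads the repodata on every call, so only the build path pays that cost.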
@@ -290,7 +290,8 @@ def start_build(cfg, dnflock, gitlock, branch, recipe_name, compose_type, test_m
     projects = sorted(set(module_nver+package_nver), key=lambda p: p[0].lower())
     deps = []
     try:
-        with dnflock.lock:
+        # This can possibly update repodata and reset the YumBase object.
+        with dnflock.lock_check:
             (installed_size, deps) = projects_depsolve_with_size(dnflock.dbo, projects, recipe.group_names, with_core=False)
     except ProjectsError as e:
         log.error("start_build depsolve: %s", str(e))
@@ -24,8 +24,45 @@ import dnf.logging
 from glob import glob
 import os
 import shutil
+from threading import Lock
+import time
 
 
+class DNFLock(object):
+    """Hold the dnf.Base object and a Lock to control access to it.
+
+    self.dbo is a property that returns the dnf.Base object, but it *may* change
+    from one call to the next if the upstream repositories have changed.
+    """
+    def __init__(self, conf, expire_secs=6*60*60):
+        self._conf = conf
+        self._lock = Lock()
+        self.dbo = get_base_object(self._conf)
+        self._expire_secs = expire_secs
+        self._expire_time = time.time() + self._expire_secs
+
+    @property
+    def lock(self):
+        """Check for repo updates (using expiration time) and return the lock
+
+        If the repository has been updated, tear down the old dnf.Base and
+        create a new one. This is the only way to force dnf to use the new
+        metadata.
+        """
+        if time.time() > self._expire_time:
+            return self.lock_check
+        return self._lock
+
+    @property
+    def lock_check(self):
+        """Force a check for repo updates and return the lock
+
+        Use this method sparingly, it removes the repodata and downloads a new copy every time.
+        """
+        self._expire_time = time.time() + self._expire_secs
+        self.dbo.update_cache()
+        return self._lock
+
 def get_base_object(conf):
     """Get the DNF object with settings from the config file
 
@@ -69,6 +106,10 @@ def get_base_object(conf):
     log.info("releasever = %s", _releasever)
     dbc.releasever = _releasever
 
+    # Make sure metadata is always current
+    dbc.metadata_expire = 0
+    dbc.metadata_expire_filter = "never"
+
     # write the dnf configuration file
     with open(dnfconf, "w") as f:
         f.write(dbc.dump())
@@ -85,6 +126,7 @@ def get_base_object(conf):
     try:
         dbo.fill_sack(load_system_repo=False)
         dbo.read_comps()
+        dbo.update_cache()
     except dnf.exceptions.Error as e:
         log.error("Failed to update metadata: %s", str(e))
         raise RuntimeError("Fetching metadata failed: %s" % str(e))
@@ -195,7 +195,9 @@ def _depsolve(dbo, projects, groups):
     :rtype: None
     :raises: ProjectsError if there was a problem installing something
     """
-    # This resets the transaction
+    # This resets the transaction and updates the cache.
+    # It is important that the cache always be synchronized because Anaconda will grab its own copy
+    # and if that is different the NEVRAs will not match and the build will fail.
     dbo.reset(goal=True)
     install_errors = []
     for name in groups:
@@ -28,7 +28,6 @@ from pylorax.api.v0 import v0_api
 from pylorax.sysutils import joinpaths
 
 GitLock = namedtuple("GitLock", ["repo", "lock", "dir"])
-DNFLock = namedtuple("DNFLock", ["dbo", "lock"])
 
 server = Flask(__name__)
 
@@ -39,10 +39,10 @@ from pylorax import vernum
 from pylorax.api.cmdline import lorax_composer_parser
 from pylorax.api.config import configure, make_dnf_dirs, make_queue_dirs
 from pylorax.api.compose import test_templates
+from pylorax.api.dnfbase import DNFLock
 from pylorax.api.queue import start_queue_monitor
 from pylorax.api.recipes import open_or_create_repo, commit_recipe_directory
-from pylorax.api.server import server, GitLock, DNFLock
-from pylorax.api.dnfbase import get_base_object
+from pylorax.api.server import server, GitLock
 
 VERSION = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum)
 
@@ -246,14 +246,14 @@ if __name__ == '__main__':
 
     # Get a dnf.Base to share with the requests
     try:
-        dbo = get_base_object(server.config["COMPOSER_CFG"])
+        server.config["DNFLOCK"] = DNFLock(server.config["COMPOSER_CFG"])
     except RuntimeError:
         # Error has already been logged. Just exit cleanly.
         sys.exit(1)
-    server.config["DNFLOCK"] = DNFLock(dbo=dbo, lock=Lock())
 
     # Depsolve the templates and make a note of the failures for /api/status to report
-    server.config["TEMPLATE_ERRORS"] = test_templates(dbo, server.config["COMPOSER_CFG"].get("composer", "share_dir"))
+    with server.config["DNFLOCK"].lock:
+        server.config["TEMPLATE_ERRORS"] = test_templates(server.config["DNFLOCK"].dbo, server.config["COMPOSER_CFG"].get("composer", "share_dir"))
 
     # Setup access to the git repo
     server.config["REPO_DIR"] = opts.BLUEPRINTS