Run compressions in multiple threads

This speeds up compression a lot on multicore systems.
Vratislav Podzimek 2013-11-13 09:16:21 +01:00
parent 0b741d714b
commit 1d7eb09a53
2 changed files with 9 additions and 0 deletions


@@ -32,6 +32,7 @@ Requires: python-mako
 Requires: squashfs-tools >= 4.2
 Requires: util-linux
 Requires: xz
+Requires: pigz
 Requires: yum
 Requires: pykickstart
 Requires: dracut >= 030
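
pigz is a parallel implementation of gzip and writes standard gzip streams, so the code change below can substitute it for gzip without affecting the output format; this new Requires only ensures the binary is installed. As a quick sanity check of that compatibility (not part of the commit; a hypothetical snippet assuming pigz is on PATH and Python 3):

import gzip
import subprocess

data = b"hello from pigz" * 1024
# Compress with pigz, then decompress with the stock gzip module;
# if pigz were not a drop-in replacement, this would raise.
out = subprocess.run(["pigz", "-9"], input=data,
                     stdout=subprocess.PIPE, check=True).stdout
assert gzip.decompress(out) == data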


@@ -25,6 +25,7 @@ from os.path import join, dirname
 from subprocess import CalledProcessError
 import sys
 import traceback
+import multiprocessing
 from time import sleep
 from pylorax.sysutils import cpfile
@@ -44,6 +45,13 @@ def mkcpio(rootdir, outfile, compression="xz", compressargs=["-9"]):
     if compression is None:
         compression = "cat" # this is a little silly
         compressargs = []
+    # make compression run with multiple threads if possible
+    if compression in ("xz", "lzma"):
+        compressargs.insert(0, "-T%d" % multiprocessing.cpu_count())
+    elif compression == "gzip":
+        compression = "pigz"
     logger.debug("mkcpio %s | %s %s > %s", rootdir, compression,
                  " ".join(compressargs), outfile)
     find = Popen(["find", ".", "-print0"], stdout=PIPE, cwd=rootdir)
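
Tracing the new branch makes the effect concrete: on a machine with N cores, the compressor that mkcpio pipes the cpio archive into becomes "xz -TN -9" for xz/lzma, or pigz in place of gzip. A standalone sketch of just that decision (hypothetical helper name, mirroring the diff above):

import multiprocessing

def _threaded_compressor(compression, compressargs):
    # Same decision the commit adds to mkcpio: give xz/lzma an explicit
    # thread count, and swap single-threaded gzip for its parallel drop-in.
    if compression in ("xz", "lzma"):
        compressargs.insert(0, "-T%d" % multiprocessing.cpu_count())
    elif compression == "gzip":
        compression = "pigz"
    return compression, compressargs

# On a 4-core machine:
#   _threaded_compressor("xz", ["-9"])   -> ("xz", ["-T4", "-9"])
#   _threaded_compressor("gzip", ["-9"]) -> ("pigz", ["-9"])

One caveat worth knowing: because compressargs defaults to the mutable list ["-9"] and insert() mutates it in place, calling mkcpio repeatedly without an explicit compressargs accumulates -T flags in the shared default list, a classic Python pitfall; defaulting to None and building a fresh list inside the function avoids it.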