From ea625cf31f592b531b0a93c812e6cc4e64219d7d Mon Sep 17 00:00:00 2001
From: eabdullin
Date: Tue, 11 Mar 2025 07:32:37 +0000
Subject: [PATCH] import CS keylime-7.3.0-15.el9

---
 SOURCES/0012-Restore-create-allowlist.patch   |   59 +
 ...rator-and-timestamp-in-create-policy.patch |   44 +
 ...-a-logger.error-with-an-Exception-in.patch |   80 +
 .../0015-Backport-keylime-policy-tool.patch   | 6638 +++++++++++++++++
 ...S-on-revocation-notification-webhook.patch |  167 +
 SPECS/keylime.spec                            |   52 +-
 6 files changed, 7036 insertions(+), 4 deletions(-)
 create mode 100644 SOURCES/0012-Restore-create-allowlist.patch
 create mode 100644 SOURCES/0013-Set-generator-and-timestamp-in-create-policy.patch
 create mode 100644 SOURCES/0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch
 create mode 100644 SOURCES/0015-Backport-keylime-policy-tool.patch
 create mode 100644 SOURCES/0016-Use-TLS-on-revocation-notification-webhook.patch

diff --git a/SOURCES/0012-Restore-create-allowlist.patch b/SOURCES/0012-Restore-create-allowlist.patch
new file mode 100644
index 0000000..1e065ff
--- /dev/null
+++ b/SOURCES/0012-Restore-create-allowlist.patch
@@ -0,0 +1,59 @@
+--- a/scripts/create_runtime_policy.sh 2023-10-09 17:04:26.121194607 +0200
++++ b/scripts/create_runtime_policy.sh 2023-10-09 17:06:02.089855614 +0200
+@@ -42,7 +42,7 @@
+ exit $NOARGS;
+ fi
+
+-ALGO=sha1sum
++ALGO=sha256sum
+
+ ALGO_LIST=("sha1sum" "sha256sum" "sha512sum")
+
+@@ -78,7 +78,7 @@
+
+
+ # Where to look for initramfs image
+-INITRAMFS_LOC="/boot/"
++INITRAMFS_LOC="/boot"
+ if [ -d "/ostree" ]; then
+     # If we are on an ostree system change where we look for initramfs image
+     loc=$(grep -E "/ostree/[^/]([^/]*)" -o /proc/cmdline | head -n 1 | cut -d / -f 3)
+@@ -121,7 +121,7 @@
+         cp -r /tmp/ima/$i-extracted-unmk/. /tmp/ima/$i-extracted
+     fi
+ elif [[ -x "/usr/lib/dracut/skipcpio" ]] ; then
+-    /usr/lib/dracut/skipcpio $i | gunzip -c | cpio -i -d 2> /dev/null
++    /usr/lib/dracut/skipcpio $i | gunzip -c 2> /dev/null | cpio -i -d 2> /dev/null
+ else
+     echo "ERROR: No tools for initramfs image processing found!"
+     break
+@@ -130,9 +130,26 @@
+     find -type f -exec $ALGO "./{}" \; | sed "s| \./\./| /|" >> $OUTPUT
+ done
+
+-# Convert to runtime policy
+-echo "Converting created allowlist to Keylime runtime policy"
+-python3 $WORKING_DIR/../keylime/cmd/convert_runtime_policy.py -a $OUTPUT -o $OUTPUT
++# When ROOTFS_LOC = '/', the paths in the allowlist end up with a double '//'
++#
++# Example:
++#
++# b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c //bar
++#
++# Replace the unwanted '//' with a single '/'
++sed -i 's| /\+| /|g' $ALLOWLIST_DIR/${OUTPUT}
++
++# When the file name contains newlines or backslashes, the output of sha256sum
++# adds a backslash at the beginning of the line.
++# ++# Example: ++# ++# $ echo foo > ba\\r ++# $ sha256sum ba\\r ++# \b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c ba\\r ++# ++# Remove the unwanted backslash prefix ++sed -i 's/^\\//g' $ALLOWLIST_DIR/${OUTPUT} + + # Clean up + rm -rf /tmp/ima diff --git a/SOURCES/0013-Set-generator-and-timestamp-in-create-policy.patch b/SOURCES/0013-Set-generator-and-timestamp-in-create-policy.patch new file mode 100644 index 0000000..2c40991 --- /dev/null +++ b/SOURCES/0013-Set-generator-and-timestamp-in-create-policy.patch @@ -0,0 +1,44 @@ +diff --git a/keylime/cloud_verifier_common.py b/keylime/cloud_verifier_common.py +index a7399d2..c0f416d 100644 +--- a/keylime/cloud_verifier_common.py ++++ b/keylime/cloud_verifier_common.py +@@ -8,7 +8,7 @@ from keylime.agentstates import AgentAttestState, AgentAttestStates, TPMClockInf + from keylime.common import algorithms + from keylime.db.verifier_db import VerfierMain + from keylime.failure import Component, Event, Failure +-from keylime.ima import file_signatures ++from keylime.ima import file_signatures, ima + from keylime.ima.types import RuntimePolicyType + from keylime.tpm import tpm_util + from keylime.tpm.tpm_main import Tpm +@@ -271,7 +271,7 @@ def process_get_status(agent: VerfierMain) -> Dict[str, Any]: + logger.debug('The contents of the agent %s attribute "mb_refstate" are %s', agent.agent_id, agent.mb_refstate) + + has_runtime_policy = 0 +- if agent.ima_policy.generator and agent.ima_policy.generator > 1: ++ if agent.ima_policy.generator and agent.ima_policy.generator > ima.RUNTIME_POLICY_GENERATOR.EmptyAllowList: + has_runtime_policy = 1 + + response = { +diff --git a/keylime/cmd/create_policy.py b/keylime/cmd/create_policy.py +index 0841d64..086b92a 100755 +--- a/keylime/cmd/create_policy.py ++++ b/keylime/cmd/create_policy.py +@@ -6,6 +6,7 @@ import argparse + import binascii + import collections + import copy ++import datetime + import gzip + import json + import multiprocessing +@@ -580,6 +581,9 @@ def main() -> None: + policy["excludes"] = sorted(list(set(policy["excludes"]))) + policy["ima"]["ignored_keyrings"] = sorted(list(set(policy["ima"]["ignored_keyrings"]))) + ++ policy["meta"]["generator"] = ima.RUNTIME_POLICY_GENERATOR.LegacyAllowList ++ policy["meta"]["timestamp"] = str(datetime.datetime.now()) ++ + try: + ima.validate_runtime_policy(policy) + except ima.ImaValidationError as ex: diff --git a/SOURCES/0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch b/SOURCES/0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch new file mode 100644 index 0000000..7b1ee26 --- /dev/null +++ b/SOURCES/0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch @@ -0,0 +1,80 @@ +From add9847988e963fd124863736592fc16cc8c716b Mon Sep 17 00:00:00 2001 +From: Stefan Berger +Date: Tue, 11 Jul 2023 18:03:28 -0400 +Subject: [PATCH 14/14] tpm_util: Replace a logger.error with an Exception in + case of invalid signature + +This fixes a possibly severe issue in 7.2.5 & 7.3.0. 
+ +Signed-off-by: Stefan Berger +--- + keylime/tpm/tpm_util.py | 6 +----- + keylime/tpm/tpm_util_test.py | 21 +++++++++++++++++++++ + 2 files changed, 22 insertions(+), 5 deletions(-) + +diff --git a/keylime/tpm/tpm_util.py b/keylime/tpm/tpm_util.py +index ce2ce0f..58a1a04 100644 +--- a/keylime/tpm/tpm_util.py ++++ b/keylime/tpm/tpm_util.py +@@ -3,7 +3,6 @@ import string + import struct + from typing import Any, Dict, List, Optional, Tuple, Union + +-from cryptography.exceptions import InvalidSignature + from cryptography.hazmat import backends + from cryptography.hazmat.primitives import hashes, hmac, serialization + from cryptography.hazmat.primitives.asymmetric import ec, padding +@@ -155,10 +154,7 @@ def checkquote( + digest.update(quoteblob) + quote_digest = digest.finalize() + +- try: +- verify(pubkey, signature, quote_digest, hashfunc) +- except InvalidSignature: +- logger.error("Invalid quote signature!") ++ verify(pubkey, signature, quote_digest, hashfunc) + + # Check that reported nonce is expected one + retDict = tpm2_objects.unmarshal_tpms_attest(quoteblob) +diff --git a/keylime/tpm/tpm_util_test.py b/keylime/tpm/tpm_util_test.py +index aaf16cd..2c73997 100644 +--- a/keylime/tpm/tpm_util_test.py ++++ b/keylime/tpm/tpm_util_test.py +@@ -2,6 +2,7 @@ import base64 + import unittest + from unittest import mock + ++from cryptography.exceptions import InvalidSignature + from cryptography.hazmat.primitives.asymmetric.ec import ( + SECP256R1, + EllipticCurve, +@@ -60,6 +61,26 @@ class TestTpmUtil(unittest.TestCase): + except Exception as e: + self.fail(f"checkquote failed with {e}") + ++ # test bad input ++ bad_quoteblob = bytearray(quoteblob) ++ bad_quoteblob[5] ^= 0x1 ++ with self.assertRaises(InvalidSignature): ++ checkquote(aikblob, nonce, sigblob, bad_quoteblob, pcrblob, "sha256") ++ ++ l = list(nonce) ++ l[0] = "a" ++ bad_nonce = "".join(l) ++ with self.assertRaises(Exception): ++ checkquote(aikblob, bad_nonce, sigblob, quoteblob, pcrblob, "sha256") ++ ++ bad_pcrblob = bytearray(pcrblob) ++ bad_pcrblob[5] ^= 0x1 ++ with self.assertRaises(Exception): ++ checkquote(aikblob, nonce, sigblob, quoteblob, bad_pcrblob, "sha256") ++ ++ with self.assertRaises(ValueError): ++ checkquote(aikblob, nonce, sigblob, quoteblob, pcrblob, "sha1") ++ + @staticmethod + def not_random(numbytes: int) -> bytes: + return b"\x12" * numbytes +-- +2.41.0 + diff --git a/SOURCES/0015-Backport-keylime-policy-tool.patch b/SOURCES/0015-Backport-keylime-policy-tool.patch new file mode 100644 index 0000000..6705c92 --- /dev/null +++ b/SOURCES/0015-Backport-keylime-policy-tool.patch @@ -0,0 +1,6638 @@ +From 6f97dc334cd21e1a60ac0073f52723abc01a8a50 Mon Sep 17 00:00:00 2001 +From: Sergio Correia +Date: Fri, 10 Jan 2025 11:05:40 +0000 +Subject: [PATCH 15/15] Backport keylime-policy tool + +--- + keylime/cert_utils.py | 29 +- + keylime/cmd/keylime_policy.py | 73 ++ + keylime/common/algorithms.py | 32 +- + keylime/ima/file_signatures.py | 23 + + keylime/ima/ima.py | 13 +- + keylime/policy/__init__.py | 0 + keylime/policy/create_mb_policy.py | 266 ++++ + keylime/policy/create_runtime_policy.py | 1068 +++++++++++++++++ + keylime/policy/initrd.py | 353 ++++++ + keylime/policy/logger.py | 81 ++ + keylime/policy/rpm_repo.py | 344 ++++++ + keylime/policy/sign_runtime_policy.py | 200 +++ + keylime/policy/utils.py | 121 ++ + setup.cfg | 3 +- + .../create-mb-policy/binary_bios_measurements | Bin 0 -> 49088 bytes + .../binary_bios_measurements-bogus | 6 + + .../binary_bios_measurements-empty | 0 + 
.../binary_bios_measurements-secureboot | Bin 0 -> 41371 bytes + .../data/create-runtime-policy/allowlist-sha1 | 1 + + .../create-runtime-policy/allowlist-sha256 | 1 + + .../create-runtime-policy/allowlist-sha384 | 1 + + .../create-runtime-policy/allowlist-sha512 | 1 + + .../create-runtime-policy/allowlist-sm3_256 | 1 + + .../create-runtime-policy/allowlist-unknown | 1 + + test/data/create-runtime-policy/ima-log-sha1 | 2 + + .../data/create-runtime-policy/ima-log-sha256 | 2 + + .../data/create-runtime-policy/ima-log-sha384 | 2 + + .../data/create-runtime-policy/ima-log-sha512 | 2 + + .../create-runtime-policy/ima-log-sm3_256 | 2 + + .../create-runtime-policy/ima-log-unknown | 2 + + test/data/create-runtime-policy/policy-sha1 | 1 + + test/data/create-runtime-policy/policy-sha256 | 1 + + test/data/create-runtime-policy/policy-sha384 | 1 + + test/data/create-runtime-policy/policy-sha512 | 1 + + .../data/create-runtime-policy/policy-sm3_256 | 1 + + .../data/create-runtime-policy/policy-unknown | 1 + + .../create-runtime-policy/rootfs/rootfs_data | 1 + + .../create-runtime-policy/setup-initrd-tests | 161 +++ + .../create-runtime-policy/setup-rpm-tests | 345 ++++++ + .../sign-runtime-policy/ec-p521-private.pem | 7 + + .../sign-runtime-policy/ec-p521-public.pem | 6 + + .../sign-runtime-policy/rsa-4096-private.pem | 52 + + .../runtime-policy-bogus.json | 2 + + .../runtime-policy-empty.json | 1 + + .../sign-runtime-policy/runtime-policy.json | 1 + + test/test_create_mb_policy.py | 602 ++++++++++ + test/test_create_runtime_policy.py | 985 +++++++++++++++ + test/test_sign_runtime_policy.py | 194 +++ + test/utils.py | 64 + + 49 files changed, 5045 insertions(+), 12 deletions(-) + create mode 100644 keylime/cmd/keylime_policy.py + create mode 100644 keylime/policy/__init__.py + create mode 100644 keylime/policy/create_mb_policy.py + create mode 100644 keylime/policy/create_runtime_policy.py + create mode 100644 keylime/policy/initrd.py + create mode 100644 keylime/policy/logger.py + create mode 100644 keylime/policy/rpm_repo.py + create mode 100644 keylime/policy/sign_runtime_policy.py + create mode 100644 keylime/policy/utils.py + create mode 100644 test/data/create-mb-policy/binary_bios_measurements + create mode 100644 test/data/create-mb-policy/binary_bios_measurements-bogus + create mode 100644 test/data/create-mb-policy/binary_bios_measurements-empty + create mode 100644 test/data/create-mb-policy/binary_bios_measurements-secureboot + create mode 100644 test/data/create-runtime-policy/allowlist-sha1 + create mode 100644 test/data/create-runtime-policy/allowlist-sha256 + create mode 100644 test/data/create-runtime-policy/allowlist-sha384 + create mode 100644 test/data/create-runtime-policy/allowlist-sha512 + create mode 100644 test/data/create-runtime-policy/allowlist-sm3_256 + create mode 100644 test/data/create-runtime-policy/allowlist-unknown + create mode 100644 test/data/create-runtime-policy/ima-log-sha1 + create mode 100644 test/data/create-runtime-policy/ima-log-sha256 + create mode 100644 test/data/create-runtime-policy/ima-log-sha384 + create mode 100644 test/data/create-runtime-policy/ima-log-sha512 + create mode 100644 test/data/create-runtime-policy/ima-log-sm3_256 + create mode 100644 test/data/create-runtime-policy/ima-log-unknown + create mode 100644 test/data/create-runtime-policy/policy-sha1 + create mode 100644 test/data/create-runtime-policy/policy-sha256 + create mode 100644 test/data/create-runtime-policy/policy-sha384 + create mode 100644 
test/data/create-runtime-policy/policy-sha512
+ create mode 100644 test/data/create-runtime-policy/policy-sm3_256
+ create mode 100644 test/data/create-runtime-policy/policy-unknown
+ create mode 100644 test/data/create-runtime-policy/rootfs/rootfs_data
+ create mode 100755 test/data/create-runtime-policy/setup-initrd-tests
+ create mode 100755 test/data/create-runtime-policy/setup-rpm-tests
+ create mode 100644 test/data/sign-runtime-policy/ec-p521-private.pem
+ create mode 100644 test/data/sign-runtime-policy/ec-p521-public.pem
+ create mode 100644 test/data/sign-runtime-policy/rsa-4096-private.pem
+ create mode 100644 test/data/sign-runtime-policy/runtime-policy-bogus.json
+ create mode 100644 test/data/sign-runtime-policy/runtime-policy-empty.json
+ create mode 100644 test/data/sign-runtime-policy/runtime-policy.json
+ create mode 100644 test/test_create_mb_policy.py
+ create mode 100644 test/test_create_runtime_policy.py
+ create mode 100644 test/test_sign_runtime_policy.py
+ create mode 100644 test/utils.py
+
+diff --git a/keylime/cert_utils.py b/keylime/cert_utils.py
+index b8d664d..89eeb70 100644
+--- a/keylime/cert_utils.py
++++ b/keylime/cert_utils.py
+@@ -27,7 +27,26 @@ from keylime import keylime_logging, tpm_ek_ca
+ logger = keylime_logging.init_logging("cert_utils")
+
+
+-def x509_der_cert(der_cert_data: bytes) -> Certificate:
++def is_x509_cert(cert_data: bytes, verbose: bool = False) -> bool:
++    """
++    Determine whether the data passed is a valid x509 cert.
++
++    :param cert_data: bytes to check
++    :return: bool, indicating whether the provided input is a valid cert
++    """
++    try:
++        x509_pem_cert(cert_data.decode("UTF-8"), verbose)
++        return True
++    except Exception:
++        try:
++            x509_der_cert(cert_data, verbose)
++            return True
++        except Exception:
++            return False
++    return False
++
++
++def x509_der_cert(der_cert_data: bytes, verbose: bool = True) -> Certificate:
+     """Load an x509 certificate provided in DER format
+     :param der_cert_data: the DER bytes of the certificate
+     :type der_cert_data: bytes
+@@ -36,12 +55,13 @@ def x509_der_cert(der_cert_data: bytes) -> Certificate:
+     try:
+         return x509.load_der_x509_certificate(data=der_cert_data, backend=default_backend())
+     except Exception as e:
+-        logger.warning("Failed to parse DER data with python-cryptography: %s", e)
++        if verbose:
++            logger.warning("Failed to parse DER data with python-cryptography: %s", e)
+         pyasn1_cert = decoder.decode(der_cert_data, asn1Spec=rfc2459.Certificate())[0]
+         return x509.load_der_x509_certificate(data=encoder.encode(pyasn1_cert), backend=default_backend())
+
+
+-def x509_pem_cert(pem_cert_data: str) -> Certificate:
++def x509_pem_cert(pem_cert_data: str, verbose: bool = True) -> Certificate:
+     """Load an x509 certificate provided in PEM format
+     :param pem_cert_data: the base-64 encoded PEM certificate
+     :type pem_cert_data: str
+@@ -50,7 +70,8 @@ def x509_pem_cert(pem_cert_data: str) -> Certificate:
+     try:
+         return x509.load_pem_x509_certificate(data=pem_cert_data.encode("utf-8"), backend=default_backend())
+     except Exception as e:
+-        logger.warning("Failed to parse PEM data with python-cryptography: %s", e)
++        if verbose:
++            logger.warning("Failed to parse PEM data with python-cryptography: %s", e)
+         # Let's read the DER bytes from the base-64 PEM.
+         der_data = pem.readPemFromFile(io.StringIO(pem_cert_data))
+         # Now we can load it as we do in x509_der_cert().
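+
+# Illustrative sketch (not part of the upstream change): is_x509_cert() probes
+# the input as PEM first, after decoding the bytes as UTF-8, and then falls
+# back to DER, so it returns a bool rather than raising on non-certificate
+# input. A minimal usage sketch, assuming a certificate file on disk:
+#
+#     with open("cert.pem", "rb") as f:  # a DER-encoded file works as well
+#         data = f.read()
+#     print(cert_utils.is_x509_cert(data))  # True only for a parseable cert
+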
+diff --git a/keylime/cmd/keylime_policy.py b/keylime/cmd/keylime_policy.py +new file mode 100644 +index 0000000..af94a88 +--- /dev/null ++++ b/keylime/cmd/keylime_policy.py +@@ -0,0 +1,73 @@ ++#!/usr/bin/env python3 ++# PYTHON_ARGCOMPLETE_OK ++# The comment above enables global autocomplete using argcomplete ++ ++""" ++Utility to assist with runtime policies. ++""" ++ ++import argparse ++import os ++import sys ++ ++try: ++ import argcomplete ++except ModuleNotFoundError: ++ argcomplete = None ++ ++ ++from keylime.policy import create_mb_policy, create_runtime_policy, sign_runtime_policy ++from keylime.policy.logger import Logger ++ ++logger = Logger().logger() ++ ++ ++def main() -> None: ++ """keylime-policy entry point.""" ++ if os.geteuid() != 0: ++ logger.critical("Please, run this program as root") ++ sys.exit(1) ++ ++ parser = argparse.ArgumentParser(add_help=False) ++ ++ main_parser = argparse.ArgumentParser() ++ ++ action_subparsers = main_parser.add_subparsers(title="actions") ++ ++ create_parser = action_subparsers.add_parser( ++ "create", help="create runtime or measured boot policy", parents=[parser] ++ ) ++ create_subparser = create_parser.add_subparsers(title="create") ++ create_subparser.required = True ++ ++ sign_parser = action_subparsers.add_parser("sign", help="sign policy", parents=[parser]) ++ sign_subparser = sign_parser.add_subparsers(title="sign") ++ sign_subparser.required = True ++ ++ create_runtime_policy.get_arg_parser(create_subparser, parser) ++ create_mb_policy.get_arg_parser(create_subparser, parser) ++ sign_runtime_policy.get_arg_parser(sign_subparser, parser) ++ ++ if argcomplete: ++ # This should happen before parse_args() ++ argcomplete.autocomplete(main_parser) ++ ++ args = main_parser.parse_args() ++ if "func" not in args: ++ main_parser.print_help() ++ main_parser.exit() ++ ++ try: ++ ret = args.func(args) ++ if ret is None: ++ sys.exit(1) ++ except BrokenPipeError: ++ # Python flushes standard streams on exit; redirect remaining output ++ # to devnull to avoid another BrokenPipeError at shutdown. ++ devnull = os.open(os.devnull, os.O_WRONLY) ++ os.dup2(devnull, sys.stdout.fileno()) ++ sys.exit(1) # Python exits with error code 1 on EPIPE ++ ++ ++if __name__ == "__main__": ++ main() +diff --git a/keylime/common/algorithms.py b/keylime/common/algorithms.py +index f3ca98d..db12c26 100644 +--- a/keylime/common/algorithms.py ++++ b/keylime/common/algorithms.py +@@ -45,28 +45,48 @@ class Hash(str, enum.Enum): + def get_ff_hash(self) -> bytes: + return b"\xff" * (self.get_size() // 8) + ++ def hexdigest_len(self) -> int: ++ return len(self.__hashfn(b"").hexdigest()) ++ ++ def file_digest(self, filepath: str) -> str: ++ """ ++ Calculate the digest of the specified file. 
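++
++        (Note: hashlib exposes SM3 under the name "sm3", so the sm3_256 value
++        is remapped to that name below before calling hashlib.new().)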
++ ++ :param filepath: the path of the file to calculate the digest ++ :return: str, the hex digest of the specified file ++ """ ++ _BUFFER_SIZE = 65535 ++ alg = "sm3" if self.value == "sm3_256" else self.value ++ hasher = hashlib.new(alg) ++ with open(filepath, "rb") as f: ++ while True: ++ data = f.read(_BUFFER_SIZE) ++ if not data: ++ break ++ hasher.update(data) ++ ++ return hasher.hexdigest() ++ + def __str__(self) -> str: + return self.value + + +-class Encrypt: ++class Encrypt(str, enum.Enum): + RSA = "rsa" + ECC = "ecc" +- supported_algorithms = (RSA, ECC) + + @staticmethod + def is_recognized(algorithm: str) -> bool: +- return algorithm in Encrypt.supported_algorithms ++ return algorithm in list(Encrypt) + + +-class Sign: ++class Sign(str, enum.Enum): + RSASSA = "rsassa" + RSAPSS = "rsapss" + ECDSA = "ecdsa" + ECDAA = "ecdaa" + ECSCHNORR = "ecschnorr" +- supported_algorithms = (RSASSA, RSAPSS, ECDSA, ECDAA, ECSCHNORR) + + @staticmethod + def is_recognized(algorithm: str) -> bool: +- return algorithm in Sign.supported_algorithms ++ return algorithm in list(Sign) +diff --git a/keylime/ima/file_signatures.py b/keylime/ima/file_signatures.py +index 5277ed8..bee5ed5 100755 +--- a/keylime/ima/file_signatures.py ++++ b/keylime/ima/file_signatures.py +@@ -4,6 +4,7 @@ import json + import struct + from typing import Any, Dict, List, Optional, Tuple, Union + ++import jsonschema + from cryptography import x509 + from cryptography.exceptions import InvalidSignature, UnsupportedAlgorithm + from cryptography.hazmat import backends +@@ -22,6 +23,22 @@ logger = keylime_logging.init_logging("file_signatures") + + SupportedKeyTypes = Union[RSAPublicKey, EllipticCurvePublicKey] + ++IMA_KEYRING_JSON_SCHEMA = { ++ "type": "object", ++ "required": ["keyids", "pubkeys"], ++ "properties": { ++ "meta": { ++ "type": "object", ++ "required": ["version"], ++ "properties": { ++ "version": {"type": "integer", "minimum": 1}, ++ }, ++ }, ++ "keyids": {"type": "array", "items": {"type": "integer"}}, ++ "pubkeys": {"type": "array", "items": {"type": "string"}}, ++ }, ++} ++ + """ + Tools for IMA file signature verification + """ +@@ -196,6 +213,12 @@ class ImaKeyring: + if not isinstance(obj, dict): + return None + ++ try: ++ jsonschema.validate(obj, IMA_KEYRING_JSON_SCHEMA) ++ except Exception as e: ++ logger.error("JSON from string is not a valid IMA Keyring : %s", e) ++ return None ++ + keyids = obj.get("keyids", []) + + for der_key, keyidv2 in ImaKeyring._base64_to_der_keylist(obj["pubkeys"], keyids): +diff --git a/keylime/ima/ima.py b/keylime/ima/ima.py +index 9fadf2c..1b197c3 100644 +--- a/keylime/ima/ima.py ++++ b/keylime/ima/ima.py +@@ -14,7 +14,7 @@ from keylime.common.algorithms import Hash + from keylime.dsse import dsse + from keylime.failure import Component, Failure + from keylime.ima import ast, file_signatures, ima_dm +-from keylime.ima.file_signatures import ImaKeyrings ++from keylime.ima.file_signatures import IMA_KEYRING_JSON_SCHEMA, ImaKeyrings + from keylime.ima.types import RuntimePolicyType + + logger = keylime_logging.init_logging("ima") +@@ -531,6 +531,17 @@ def validate_runtime_policy(runtime_policy: RuntimePolicyType) -> None: + """ + try: + jsonschema.validate(instance=runtime_policy, schema=RUNTIME_POLICY_SCHEMA) ++ verification_keys = runtime_policy.get("verification-keys", "") ++ if verification_keys: ++ # Verification keys is a string in JSON format. 
Parse it to verify ++ # against the schema ++ j = json.loads(verification_keys) ++ jsonschema.validate(instance=j, schema=IMA_KEYRING_JSON_SCHEMA) + except Exception as error: + msg = str(error).split("\n", 1)[0] + raise ImaValidationError(message=f"{msg}", code=400) from error ++ ++ ++def empty_policy() -> RuntimePolicyType: ++ """Return an empty runtime policy.""" ++ return copy.deepcopy(EMPTY_RUNTIME_POLICY) +diff --git a/keylime/policy/__init__.py b/keylime/policy/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/keylime/policy/create_mb_policy.py b/keylime/policy/create_mb_policy.py +new file mode 100644 +index 0000000..859e652 +--- /dev/null ++++ b/keylime/policy/create_mb_policy.py +@@ -0,0 +1,266 @@ ++""" ++Module to generate a valid measured boot policy. ++ ++Copyright 2021 Thore Sommer ++""" ++ ++import argparse ++import json ++import re ++import sys ++from typing import TYPE_CHECKING, Any, Dict, List, Optional ++ ++from keylime.mba.elparsing.tpm2_tools_elparser import parse_binary_bootlog ++from keylime.policy.logger import Logger ++ ++if TYPE_CHECKING: ++ # FIXME: how to make mypy and pylint happy here? ++ _SubparserType = argparse._SubParsersAction[argparse.ArgumentParser] # pylint: disable=protected-access ++else: ++ _SubparserType = Any ++ ++logger = Logger().logger() ++ ++ ++def event_to_sha256(event: Dict[str, Any]) -> Dict[str, Any]: ++ """Extract the sha256 digest from an event.""" ++ if "Digests" not in event: ++ return {} ++ ++ for digest in event["Digests"]: ++ if "AlgorithmId" in digest and digest["AlgorithmId"] == "sha256": ++ # We don't do any validation here, we just assume ++ # the provided information (digest) is correct. ++ return {"sha256": f"0x{digest['Digest']}"} ++ return {} ++ ++ ++def get_s_crtm(events: List[Dict[Any, str]]) -> Dict[str, Any]: ++ """Find the EV_S_CRTM_VERSION.""" ++ for event in events: ++ if "EventType" in event and event["EventType"] == "EV_S_CRTM_VERSION": ++ return {"scrtm": event_to_sha256(event)} ++ return {} ++ ++ ++def get_platform_firmware(events: List[Dict[str, Any]]) -> Dict[str, Any]: ++ """Get all platform specific files measured with EV_EFI_PLATFORM_FIRMWARE_BLOB/EV_EFI_PLATFORM_FIRMWARE_BLOB2 events.""" ++ out = [] ++ for event in events: ++ if "EventType" not in event: ++ continue ++ if event["EventType"] not in ["EV_EFI_PLATFORM_FIRMWARE_BLOB", "EV_EFI_PLATFORM_FIRMWARE_BLOB2"]: ++ continue ++ out.append(event_to_sha256(event)) ++ return {"platform_firmware": out} ++ ++ ++def variabledata_to_signature(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]: ++ """Convert VariableData entry from EV_EFI_VARIABLE_DRIVER_CONFIG to signature data.""" ++ out = [] ++ ++ for entry in data: ++ if "Keys" not in entry: ++ continue ++ for key in entry["Keys"]: ++ if "SignatureOwner" not in key or "SignatureData" not in key: ++ continue ++ out.append({"SignatureOwner": key["SignatureOwner"], "SignatureData": f"0x{key['SignatureData']}"}) ++ return out ++ ++ ++def get_keys(events: List[Dict[str, Any]]) -> Dict[str, List[Any]]: ++ """Get valid signatures for UEFI Secure Boot PK, KEK, DB and DBX.""" ++ out: Dict[str, List[Any]] = {"pk": [], "kek": [], "db": [], "dbx": []} ++ ++ for event in events: ++ if "EventType" not in event: ++ continue ++ if event["EventType"] != "EV_EFI_VARIABLE_DRIVER_CONFIG": ++ continue ++ if "Event" not in event or "UnicodeName" not in event["Event"]: ++ continue ++ event_name = event["Event"]["UnicodeName"].lower() ++ ++ if event_name in out: ++ data = None ++ if "VariableData" in 
event["Event"]: ++ data = event["Event"]["VariableData"] ++ ++ if data is not None: ++ out[event_name] = variabledata_to_signature(data) ++ ++ return out ++ ++ ++def get_kernel(events: List[Dict[str, Any]], secure_boot: bool) -> Dict[str, List[Dict[str, Any]]]: ++ """Extract digest for Shim, Grub, Linux Kernel and initrd.""" ++ out = [] ++ ++ # Some firmware implement the UEFI boot menu and other system components ++ # as UEFI applications, and those are measured in the boot chain. As we ++ # currently have no reference values for those, we will ignore them for ++ # now. ++ # Workaround from: https://github.com/Lernstick/Lernstick-Bridge/blob/6defec/measured_boot/lernstick_policy.py#L89 ++ ++ uefi_app_pattern = re.compile(r"FvVol\(\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\)/FvFile\(\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\)") ++ for event in events: ++ if event.get("EventType") != "EV_EFI_BOOT_SERVICES_APPLICATION": ++ continue ++ ++ if uefi_app_pattern.match(event["Event"].get("DevicePath")): ++ continue ++ ++ out.append(event_to_sha256(event)) ++ ++ kernel = {} ++ if secure_boot: ++ if len(out) not in [3, 4]: ++ logger.error("Expected 3 different UEFI applications to be booted (Shim, Grub, Linux); got: %s", len(out)) ++ return {} ++ kernel["kernel_authcode_sha256"] = out[2]["sha256"] ++ else: ++ if len(out) != 2: ++ logger.error("Expected 2 different UEFI applications to be booted (Shim, Grub); got: %s", len(out)) ++ return {} ++ ++ kernel["shim_authcode_sha256"] = out[0]["sha256"] ++ kernel["grub_authcode_sha256"] = out[1]["sha256"] ++ ++ for event in events: ++ if event.get("EventType") != "EV_IPL" or event.get("PCRIndex") != 9: ++ continue ++ ++ if "initrd" in event["Event"].get("String") or "initramfs" in event["Event"].get("String"): ++ kernel["initrd_plain_sha256"] = event_to_sha256(event)["sha256"] ++ break ++ ++ if not secure_boot: ++ logger.info("Adding plain sha256 digest of vmlinuz for GRUB to reference state, because SecureBoot is disabled") ++ ++ for event in events: ++ if event.get("EventType") != "EV_IPL" or event.get("PCRIndex") != 9: ++ continue ++ ++ if "vmlinuz" in event["Event"].get("String"): ++ kernel["vmlinuz_plain_sha256"] = event_to_sha256(event)["sha256"] ++ break ++ ++ for event in events: ++ if event["EventType"] != "EV_IPL" or event.get("PCRIndex") != 8: ++ continue ++ ++ if "kernel_cmdline" in event["Event"].get("String"): ++ kernel["kernel_cmdline"] = re.escape(event["Event"]["String"][len("kernel_cmdline: ") :]) ++ break ++ return {"kernels": [kernel]} ++ ++ ++def get_mok(events: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]: ++ """Extract digest for MokList and MokListX.""" ++ out: Dict[str, List[Dict[str, Any]]] = {"mokdig": [], "mokxdig": []} ++ for event in events: ++ if "EventType" not in event or event["EventType"] != "EV_IPL": ++ continue ++ ++ if "Event" not in event or "String" not in event["Event"]: ++ continue ++ if event["Event"]["String"] == "MokList": ++ out["mokdig"].append(event_to_sha256(event)) ++ elif event["Event"]["String"] == "MokListX": ++ out["mokxdig"].append(event_to_sha256(event)) ++ return out ++ ++ ++def secureboot_enabled(events: List[Dict[str, Any]]) -> bool: ++ """Check if Secure Boot is enabled.""" ++ for event in events: ++ if "EventType" not in event or "Event" not in event: ++ continue ++ if event["EventType"] == "EV_EFI_VARIABLE_DRIVER_CONFIG" and event["Event"].get("UnicodeName") == "SecureBoot": ++ if "VariableData" in event["Event"] and "Enabled" in event["Event"]["VariableData"]: ++ ret: bool = 
event["Event"]["VariableData"]["Enabled"] == "Yes" ++ return ret ++ ++ logger.warning("SecureBoot state could not be determined; assuming disabled") ++ return False ++ ++ ++def get_arg_parser(create_parser: _SubparserType, parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser: ++ """Perform the setup of the command-line arguments for this module.""" ++ mbref_p = create_parser.add_parser( ++ "measured-boot", help="create measured boot reference state policy", parents=[parent_parser] ++ ) ++ ++ mbref_p.add_argument( ++ "-e", ++ "--eventlog-file", ++ type=argparse.FileType("rb"), ++ default=sys.stdin, ++ required=True, ++ help="Binary UEFI eventlog (Normally /sys/kernel/security/tpm0/binary_bios_measurements)", ++ ) ++ mbref_p.add_argument( ++ "--without-secureboot", ++ "-i", ++ action="store_true", ++ help="Set if you want to create a measured boot reference state policy without SecureBoot (only MeasuredBoot)", ++ ) ++ mbref_p.add_argument( ++ "-o", ++ "--output", ++ type=argparse.FileType("w"), ++ default=sys.stdout, ++ help="Output path for the generated measured boot policy", ++ ) ++ ++ mbref_p.set_defaults(func=create_mb_refstate) ++ return mbref_p ++ ++ ++def create_mb_refstate(args: argparse.Namespace) -> Optional[Dict[str, object]]: ++ """Create a measured boot reference state.""" ++ try: ++ log_bin = args.eventlog_file.read() ++ ++ failure, log_data = parse_binary_bootlog(log_bin) ++ if failure or not log_data: ++ logger.error( ++ "Parsing of binary boot measurements (%s) failed with: %s", ++ args.eventlog_file.name, ++ list(map(lambda x: x.context, failure.events)), ++ ) ++ return None ++ except Exception as exc: ++ logger.error("Parsing of binary boot measurements (%s) failed with: %s", args.eventlog_file.name, exc) ++ return None ++ ++ events = log_data.get("events") ++ if not events: ++ logger.error("No events found on binary boot measurements log") ++ return None ++ ++ has_secureboot = secureboot_enabled(events) ++ if not has_secureboot and not args.without_secureboot: ++ logger.error("Provided eventlog has SecureBoot disabled, but -i flag was not set") ++ return None ++ ++ if has_secureboot and args.without_secureboot: ++ logger.warning( ++ "-i/--without-secureboot was set to create a reference state without SecureBoot, but the provided eventlog has SecureBoot enabled. Ignoring this flag" ++ ) ++ ++ mb_refstate = { ++ "has_secureboot": has_secureboot, ++ "scrtm_and_bios": [ ++ { ++ **get_s_crtm(events), ++ **get_platform_firmware(events), ++ } ++ ], ++ **get_keys(events), ++ **get_mok(events), ++ **get_kernel(events, has_secureboot), ++ } ++ json.dump(mb_refstate, args.output) ++ return mb_refstate +diff --git a/keylime/policy/create_runtime_policy.py b/keylime/policy/create_runtime_policy.py +new file mode 100644 +index 0000000..535703d +--- /dev/null ++++ b/keylime/policy/create_runtime_policy.py +@@ -0,0 +1,1068 @@ ++""" ++Module to assist with creating runtime policies. 
++""" ++ ++import argparse ++import binascii ++import datetime ++import json ++import os ++import pathlib ++from concurrent.futures import ThreadPoolExecutor, as_completed ++from importlib import util ++from typing import TYPE_CHECKING, Any, Dict, List, Optional, TextIO, Tuple ++ ++import psutil ++ ++from keylime import cert_utils ++from keylime.common import algorithms, validators ++from keylime.ima import file_signatures, ima ++from keylime.ima.types import RuntimePolicyType ++from keylime.policy import initrd ++from keylime.policy.logger import Logger ++from keylime.policy.utils import merge_lists, merge_maplists ++ ++_has_rpm = util.find_spec("rpm") is not None ++ ++rpm_repo: Any ++if _has_rpm: ++ from keylime.policy import rpm_repo ++ ++ ++if TYPE_CHECKING: ++ # FIXME: how to make mypy and pylint happy here? ++ _SubparserType = argparse._SubParsersAction[argparse.ArgumentParser] # pylint: disable=protected-access ++else: ++ _SubparserType = Any ++ ++logger = Logger().logger() ++ ++# We use /dev/null to indicate an empty ima measurement list. ++EMPTY_IMA_MEASUREMENT_LIST = "/dev/null" ++IMA_MEASUREMENT_LIST = "/sys/kernel/security/ima/ascii_runtime_measurements" ++ ++IGNORED_KEYRINGS: List[str] = [] ++DEFAULT_FILE_DIGEST_ALGORITHM = algorithms.Hash.SHA256 ++ ++# This is to follow IMA default ++# See in keylime/ima/ima.py ++DEFAULT_IMA_TEMPLATE_DIGEST_ALGORITHM = algorithms.Hash.SHA1 ++ ++# This is used to indicate that the algorithm guessing identified a valid output ++# length, but it corresponds to more than one supported algorithm ++SHA256_OR_SM3 = "sha256_or_sm3_256" ++ ++# This is used to indicate that the algorithm guessing could not identify the ++# algorithm from the output length ++INVALID_ALGORITHM = "invalid" ++ ++BASE_EXCLUDE_DIRS: List[str] = [ ++ "/sys", ++ "/run", ++ "/proc", ++ "/lost+found", ++ "/dev", ++ "/media", ++ "/snap", ++ "/mnt", ++ "/var", ++ "/tmp", ++] ++ ++ ++def exclude_dirs_based_on_rootfs(dirs_to_exclude: List[str]) -> List[str]: ++ """ ++ Build a list of directories to exclude, as they don't match the root filesystem. ++ ++ :param dirs_to_exclude: list of directories to exclude ++ :return: a list of strings, that contains directories we need to exclude ++ """ ++ rootfs = None ++ to_exclude = [] ++ # First we identify the root filesystem ++ disk_part = psutil.disk_partitions(all=True) ++ for pp in disk_part: ++ if pp.mountpoint == "/": ++ rootfs = pp.fstype ++ break ++ ++ # Now we select mountpoints to exclude ++ for pp in disk_part: ++ if pp.fstype != rootfs: ++ to_exclude.append(pp.mountpoint) ++ logger.debug( ++ "exclude_dirs_based_on_rootfs(): excluding %s (fstype %s); rootfs: %s", ++ pp.mountpoint, ++ pp.fstype, ++ rootfs, ++ ) ++ ++ trimmed_dirs = [] ++ # Finally, let's trim this list down based on the existing ++ # dirs_to_exclude. ++ for dir_e in to_exclude: ++ matched = False ++ for cur in dirs_to_exclude: ++ if dir_e.startswith(cur): ++ matched = True ++ logger.debug("exclude_dirs_based_on_rootfs(): %s already covered by %s; skipping", dir_e, cur) ++ continue ++ if not matched: ++ trimmed_dirs.append(dir_e) ++ logger.debug("exclude_dirs_based_on_rootfs(): excluded dirs: %s", trimmed_dirs) ++ return trimmed_dirs ++ ++ ++def _calculate_digest(prefix: str, fpath: str, alg: str, remove_prefix: bool) -> Tuple[bool, str, str]: ++ """ ++ Filter the specified file to decide if we should calculate its digest. ++ ++ This method should skip non-files (e.g. sockets) and directories, ++ as well as files not owned by root (uid 0). 
++
++    The return is a tuple with 3 values:
++    1) a boolean indicating the success of the operation
++       to calculate its checksum, as well as
++    2) the file path (with the prefix removed, if required), and
++    3) its associated digest.
++
++    :param prefix: str indicating the path prefix, the "root" directory for the file
++    :param fpath: str indicating the path for the file
++    :param alg: str, digest algorithm
++    :param remove_prefix: boolean that indicates whether the displayed file should have its prefix removed
++    :return: Tuple of boolean, str and str, indicating whether this method calculated the digest, the file name and its digest, respectively
++    """
++
++    # Let's take care of removing the prefix, if requested.
++    fkey = fpath
++    if remove_prefix and (str(prefix) != "/"):
++        fkey = fkey[len(str(prefix)) :]
++
++    # IMA replaces spaces with underscores in the log, so we do
++    # that here as well, for them to match.
++    fkey = fkey.replace(" ", "_")
++
++    return True, fkey, algorithms.Hash(alg).file_digest(fpath)
++
++
++def _get_all_files(
++    root_dir: str, prefix: str, dirs_to_exclude: Optional[List[str]] = None, only_owned_by_root: bool = False
++) -> List[str]:
++    """Find all files inside a directory recursively, skipping the directories
++    marked to be excluded and files not owned by root, if requested.
++    It is expected that root_dir and prefix are absolute paths."""
++
++    if not dirs_to_exclude:
++        dirs_to_exclude = []
++
++    paths = set()
++    subdirs = []
++    with os.scandir(root_dir) as it:
++        for entry in it:
++            # Skip symlinks
++            if entry.is_symlink():
++                continue
++
++            # If the entry is a file, add to the set of paths
++            if entry.is_file():
++                # Skipping files not owned by root (uid 0), if requested.
++                if only_owned_by_root:
++                    try:
++                        st = os.stat(entry.path, follow_symlinks=False)
++                        if st.st_uid != 0:
++                            continue
++                    except FileNotFoundError:
++                        logger.debug("Could not find '%s', skipping", entry.path)
++                        continue
++
++                paths.add(entry.path)
++                continue
++
++            # For directories, add the entry to the subdirectories
++            if entry.is_dir():
++                relpath = entry.path
++                if prefix != "/" and entry.path.startswith(prefix):
++                    relpath = entry.path[len(prefix) :]
++
++                # Skip directories marked to be excluded from the search
++                if relpath in dirs_to_exclude:
++                    logger.debug("Skipping '%s' that matches a directory path to exclude", entry.path)
++                    continue
++
++                subdirs.append(entry.path)
++
++    for d in subdirs:
++        paths.update(_get_all_files(d, prefix, dirs_to_exclude, only_owned_by_root))
++
++    return list(paths)
++
++
++def path_digests(
++    *fdirpath: str,
++    alg: str = DEFAULT_FILE_DIGEST_ALGORITHM,
++    dirs_to_exclude: Optional[List[str]] = None,
++    digests: Optional[Dict[str, List[str]]] = None,
++    remove_prefix: bool = True,
++    only_owned_by_root: bool = False,
++    match_rootfs: bool = False,
++) -> Dict[str, List[str]]:
++    """
++    Calculate the digest of every file under the specified directory.
++
++    :param *fdirpath: the directory that contains the files to calculate their digests
++    :param alg: the algorithm to use for the digests. The default is SHA-256
++    :param dirs_to_exclude: a list of directories that should be excluded from the checksum calculation
++    :param digests: the map of files and their set of digests that will be filled by this method
++    :param remove_prefix: a flag to indicate whether the files should have their prefixes removed when added to the resulting map
++    :param only_owned_by_root: a flag to indicate it should calculate the digests only for files owned by root. Default is False
++    :param match_rootfs: a flag to indicate we want files to match the filesystem of the root fs
++    :return: a mapping of a file (str) with a set of checksums (str)
++    """
++    if digests is None:
++        digests = {}
++
++    absfpath = os.path.abspath(str(*fdirpath))
++    if not os.path.isdir(absfpath):
++        logger.error("Invalid path, %s is not a directory", absfpath)
++        return digests
++
++    # Let's first check if the root is not marked to be excluded.
++    if dirs_to_exclude is None:
++        dirs_to_exclude = []
++
++    if match_rootfs:
++        dirs_to_exclude.extend(exclude_dirs_based_on_rootfs(dirs_to_exclude))
++
++    # Get the paths to all files contained in the directory, recursively
++    paths = _get_all_files(absfpath, absfpath, dirs_to_exclude, only_owned_by_root)
++
++    logger.debug("obtained %d paths", len(paths))
++
++    futures = []
++    with ThreadPoolExecutor() as executor:
++        for p in paths:
++            futures.append(
++                executor.submit(
++                    _calculate_digest,
++                    absfpath,
++                    pathlib.Path(p).as_posix(),
++                    alg,
++                    remove_prefix,
++                )
++            )
++
++        for f in as_completed(futures):
++            try:
++                ok, fkey, fdigest = f.result()
++                if ok:
++                    if fkey not in digests:
++                        digests[fkey] = []
++                    digests[fkey].append(fdigest)
++            except Exception:
++                logger.debug("Failed to calculate a digest")
++                continue
++
++    return digests
++
++
++def print_digests_legacy_format(digests: Dict[str, List[str]], outfile: TextIO) -> None:
++    """
++    Print the digest dict using the legacy allowlist format.
++
++    Helper to print the digests dictionary in the format
++    used by the old allowlist, which is basically the output
++    of the sha256sum utility, i.e. <digest> <file path>
++
++    :param digests: a dictionary that maps a file with a set of checksums
++    :return: None
++    """
++    # Print the boot_aggregate first, if available
++    boot_agg_fname = "boot_aggregate"
++    if boot_agg_fname in digests:
++        for digest in digests[boot_agg_fname]:
++            print(f"{digest} {boot_agg_fname}", file=outfile)
++
++    for fname, fdigests in digests.items():
++        if fname == boot_agg_fname:
++            continue
++        for digest in fdigests:
++            print(f"{digest} {fname}", file=outfile)
++
++
++def process_ima_sig_ima_ng_line(line: str) -> Tuple[str, str, str, bool]:
++    """
++    Process a single line of an "ima", "ima-ng" or "ima-sig" IMA log.
++
++    :param line: str that has the line to be processed
++    :return: a tuple containing 3 strings and a bool. The strings are, in
++             order: 1) the hash algorithm used, 2) the checksum, 3) either
++             the file path or signature, depending on the template, and 4)
++             a boolean indicating whether the method succeeded
++    """
++    ret = ("", "", "", False)
++    if not line:
++        return ret
++
++    pieces = line.split(" ")
++    if len(pieces) < 5:
++        errmsg = f"Skipping line that was split into {len(pieces)} pieces, expected at least 5: {line}"
++        logger.debug(errmsg)
++        return ret
++
++    if pieces[2] not in ("ima-sig", "ima-ng", "ima"):
++        errmsg = f"skipping line that uses a template ({pieces[2]}) not in ('ima-sig', 'ima-ng', 'ima'): {line}"
++        logger.debug(errmsg)
++        return ret
++
++    csum_hash = pieces[3].split(":")
++
++    alg = ""
++    csum = ""
++    if len(csum_hash) == 2:
++        alg = csum_hash[0]
++        csum = csum_hash[1]
++
++        # Check if the algorithm is one of the supported algorithms
++        if not algorithms.Hash.is_recognized(alg):
++            return ret
++
++        # Check that the length of the digest matches the expected length
++        if len(csum) != algorithms.Hash(alg).hexdigest_len():
++            return ret
++
++    # Old "ima" template.
++    else:
++        csum = csum_hash[0]
++        # Let's attempt to detect the alg by len.
++        alg = _get_digest_algorithm_from_hex(csum)
++        if alg == INVALID_ALGORITHM:
++            errmsg = f"skipping line using the old 'ima' template because it was not possible to identify the hash alg: {line}"
++            logger.debug(errmsg)
++            return ret
++
++    path = pieces[4].rstrip("\n")
++    return alg, csum, path, True
++
++
++def boot_aggregate_parse(line: str) -> Tuple[str, str]:
++    """
++    Parse the boot aggregate from the provided line.
++
++    :param line: str with the line to be parsed
++    :return: tuple with two values, the algorithm used and the digest of the
++             boot aggregate
++    """
++    alg, digest, fpath, ok = process_ima_sig_ima_ng_line(line)
++    if not ok or fpath != "boot_aggregate":
++        return INVALID_ALGORITHM, ""
++    return alg, digest
++
++
++def boot_aggregate_from_file(
++    ascii_runtime_file: str = IMA_MEASUREMENT_LIST,
++) -> Tuple[str, str]:
++    """
++    Return the boot aggregate indicated in the specified file.
++
++    :param ascii_runtime_file: a string indicating the file where we should read the boot aggregate from. The default is /sys/kernel/security/ima/ascii_runtime_measurements.
++    :return: tuple, the algorithm used and the boot aggregate digest
++    """
++    line = ""
++    with open(ascii_runtime_file, "r", encoding="UTF-8") as f:
++        line = f.readline().strip("\n")
++
++    return boot_aggregate_parse(line)
++
++
++def list_initrds(basedir: str = "/boot") -> List[str]:
++    """
++    Return a list of initrds found in the indicated base dir.
++
++    :param basedir: str, the directory where to find the initrds.
Default is /boot ++ :return: a list of filenames starting with "initr" ++ """ ++ initrds = [] ++ for f in os.scandir(basedir): ++ if f.is_file() and pathlib.Path(f.path).name.startswith("initr"): ++ initrds.append(pathlib.Path(f.path).as_posix()) ++ return initrds ++ ++ ++def process_flat_allowlist(allowlist_file: str, hashes_map: Dict[str, List[str]]) -> Tuple[Dict[str, List[str]], bool]: ++ """Process a flat allowlist file.""" ++ ret = True ++ try: ++ with open(allowlist_file, "r", encoding="UTF-8") as fobj: ++ while True: ++ line = fobj.readline() ++ if not line: ++ break ++ line = line.strip() ++ if len(line) == 0: ++ continue ++ pieces = line.split(None, 1) ++ if not len(pieces) == 2: ++ logmsg = f"Skipping line that was split into {len(pieces)} parts, expected 2: {line}" ++ logger.debug(logmsg) ++ continue ++ ++ (checksum_hash, path) = pieces ++ ++ # IMA replaces spaces with underscores in the log, so we do ++ # that here as well, for them to match. ++ path = path.replace(" ", "_") ++ hashes_map.setdefault(path, []).append(checksum_hash) ++ except (PermissionError, FileNotFoundError) as ex: ++ errmsg = f"An error occurred while accessing the allowlist: {ex}" ++ logger.error(errmsg) ++ ret = False ++ return hashes_map, ret ++ ++ ++def get_arg_parser(create_parser: _SubparserType, parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser: ++ """Perform the setup of the command-line arguments for this module.""" ++ runtime_p = create_parser.add_parser("runtime", help="create runtime policies", parents=[parent_parser]) ++ fs_group = runtime_p.add_argument_group("runtime policy from filesystem") ++ ++ if _has_rpm: ++ repo_group = runtime_p.add_argument_group("runtime policy from repositories") ++ repo_group.add_argument( ++ "--local-rpm-repo", dest="local_rpm_repo", type=pathlib.Path, help="Local RPM repo directory" ++ ) ++ repo_group.add_argument( ++ "--remote-rpm-repo", ++ dest="remote_rpm_repo", ++ help="Remote RPM repo URL", ++ ) ++ ++ fs_group.add_argument( ++ "--algo", ++ dest="algo", ++ choices=list(algorithms.Hash), ++ required=False, ++ help="checksum algorithm to be used. 
If not specified, it will attempt to use the same algorithm the boot aggregate uses, or fall back to sha256 otherwise",
++        default="",
++    )
++    fs_group.add_argument(
++        "--ramdisk-dir",
++        dest="ramdisk_dir",
++        required=False,
++        help="path to where the initrds are located, e.g.: /boot",
++        default="",
++    )
++    fs_group.add_argument(
++        "--rootfs",
++        dest="rootfs",
++        required=False,
++        help="path to the root filesystem, e.g.: /",
++        default="",
++    )
++    fs_group.add_argument(
++        "-s",
++        "--skip-path",
++        dest="skip_path",
++        required=False,
++        help="comma-separated list of directories; files found there will not have their checksums calculated",
++        default="",
++    )
++
++    runtime_p.add_argument(
++        "-o",
++        "--output",
++        dest="output",
++        required=False,
++        help="output file (defaults to stdout)",
++        default="/dev/stdout",
++    )
++    runtime_p.add_argument(
++        "-p",
++        "--base-policy",
++        dest="base_policy",
++        required=False,
++        help="Merge new data into the given JSON runtime policy",
++        default="",
++    )
++    runtime_p.add_argument(
++        "-k",
++        "--keyrings",
++        dest="get_keyrings",
++        required=False,
++        help="Create keyrings policy entries",
++        action="store_true",
++        default=False,
++    )
++    runtime_p.add_argument(
++        "-b",
++        "--ima-buf",
++        dest="get_ima_buf",
++        required=False,
++        help="Process ima-buf entries other than those related to keyrings",
++        action="store_true",
++        default=False,
++    )
++    runtime_p.add_argument(
++        "-a",
++        "--allowlist",
++        dest="allowlist",
++        required=False,
++        help="Read checksums from the given plain-text allowlist",
++        default="",
++    )
++    runtime_p.add_argument(
++        "-e",
++        "--excludelist",
++        dest="exclude_list_file",
++        required=False,
++        help="An IMA exclude list file whose contents will be added to the policy",
++        default="",
++    )
++    runtime_p.add_argument(
++        "-m",
++        "--ima-measurement-list",
++        dest="ima_measurement_list",
++        required=False,
++        nargs="?",
++        help="Use an IMA measurement list for hash, keyring, and critical "
++        f"data extraction. If a list is not specified, it uses {IMA_MEASUREMENT_LIST}.
Use " ++ f"{EMPTY_IMA_MEASUREMENT_LIST} for an empty list.", ++ default=EMPTY_IMA_MEASUREMENT_LIST, ++ ) ++ runtime_p.add_argument( ++ "--ignored-keyrings", ++ dest="ignored_keyrings", ++ action="append", ++ required=False, ++ help="Ignores the given keyring; this option may be passed multiple times", ++ default=IGNORED_KEYRINGS, ++ ) ++ runtime_p.add_argument( ++ "--add-ima-signature-verification-key", ++ action="append", ++ dest="ima_signature_keys", ++ default=[], ++ help="Add the given IMA signature verification key to the Keylime-internal 'tenant_keyring'; " ++ "the key should be an x509 certificate in DER or PEM format but may also be a public or " ++ "private key file; this option may be passed multiple times", ++ ) ++ runtime_p.add_argument( ++ "--show-legacy-allowlist", ++ dest="legacy_allowlist", ++ help="Instead of the actual policy, display only the digests in the legacy allowlist format", ++ action="store_true", ++ default=False, ++ ) ++ ++ runtime_p.add_argument( ++ "-v", ++ "--verbose", ++ help="Set log level to DEBUG; may be helpful when diagnosing issues", ++ action="store_true", ++ required=False, ++ default=False, ++ ) ++ ++ runtime_p.set_defaults(func=create_runtime_policy) ++ return runtime_p ++ ++ ++def update_base_policy(base_policy_file: str) -> Optional[RuntimePolicyType]: ++ """Update the base policy to the latest policy format""" ++ ++ policy = None ++ ++ try: ++ with open(base_policy_file, "r", encoding="UTF-8") as fobj: ++ basepol = fobj.read() ++ ++ # Load as a plain JSON without type. Do not assume it is a valid policy ++ base_policy = json.loads(basepol) ++ ++ # Get an instance of the latest policy format to import the data ++ policy = ima.empty_policy() ++ ++ # Cherry-pick from base policy what is supported and merge into policy ++ policy["digests"] = merge_maplists(policy["digests"], base_policy.get("digests", {})) ++ policy["excludes"] = merge_lists(policy["excludes"], base_policy.get("excludes", [])) ++ policy["keyrings"] = merge_maplists(policy["keyrings"], base_policy.get("keyrings", {})) ++ policy["ima-buf"] = merge_maplists(policy["ima-buf"], base_policy.get("ima-buf", {})) ++ ++ policy["ima"]["log_hash_alg"] = base_policy.get("ima", {}).get( ++ "log_hash_alg", DEFAULT_IMA_TEMPLATE_DIGEST_ALGORITHM ++ ) ++ ignored_keyrings = base_policy.get("ima", {}).get("ignored_keyrings", []) ++ policy["ima"]["ignored_keyrings"] = merge_lists(policy["ima"]["ignored_keyrings"], ignored_keyrings) ++ policy["verification-keys"] = base_policy.get("verification-keys", "") ++ except (PermissionError, FileNotFoundError) as ex: ++ errmsg = f"An error occurred while loading the policy: {ex}" ++ logger.error(errmsg) ++ return None ++ except json.decoder.JSONDecodeError as ex: ++ errmsg = f"An error occurred while parsing a JSON object from file {base_policy_file}: {ex}" ++ logger.error(errmsg) ++ return None ++ ++ # Validate that the resulting policy is a valid policy ++ try: ++ ima.validate_runtime_policy(policy) ++ except ima.ImaValidationError as ex: ++ errmsg = f"Could not convert the provided base policy to a valid runtime policy: {ex}" ++ logger.error(errmsg) ++ return None ++ ++ return policy ++ ++ ++def get_hashes_from_measurement_list( ++ ima_measurement_list_file: str, hashes_map: Dict[str, List[str]] ++) -> Tuple[Dict[str, List[str]], bool]: ++ """Get the hashes from the IMA measurement list file.""" ++ ret = True ++ try: ++ with open(ima_measurement_list_file, "r", encoding="UTF-8") as fobj: ++ while True: ++ line = fobj.readline() ++ if not line: ++ break ++ 
pieces = line.split(" ") ++ if len(pieces) < 5: ++ errmsg = f"Skipping line that was split into {len(pieces)} pieces, expected at least 5: {line}" ++ logger.debug(errmsg) ++ continue ++ if pieces[2] not in ("ima-sig", "ima-ng"): ++ continue ++ checksum_hash = pieces[3].split(":")[1] ++ path = pieces[4].rstrip("\n") ++ hashes_map.setdefault(path, []).append(checksum_hash) ++ except (PermissionError, FileNotFoundError) as ex: ++ errmsg = f"An error occurred: {ex}" ++ logger.error(errmsg) ++ ret = False ++ return hashes_map, ret ++ ++ ++def process_exclude_list_line(line: str) -> Tuple[str, bool]: ++ """Validate an exclude list line.""" ++ if not line: ++ return "", True ++ ++ _, validator_msg = validators.valid_exclude_list([line]) ++ if validator_msg: ++ errmsg = f"Bad IMA exclude list rule '{line}': {validator_msg}" ++ logger.warning(errmsg) ++ return "", False ++ ++ return line, True ++ ++ ++def process_exclude_list_file(exclude_list_file: str, excludes: List[str]) -> Tuple[List[str], bool]: ++ """Add the contents of the IMA exclude list file to the given list.""" ++ ret = True ++ try: ++ with open(exclude_list_file, "r", encoding="UTF-8") as fobj: ++ while True: ++ line = fobj.readline() ++ if not line: ++ break ++ ++ line, ok = process_exclude_list_line(line.strip()) ++ if not ok: ++ return [], False ++ # Skip empty lines. ++ if len(line) == 0: ++ continue ++ ++ excludes.append(line) ++ except (PermissionError, FileNotFoundError) as ex: ++ errmsg = f"An error occurred: {ex}" ++ logger.error(errmsg) ++ ret = False ++ return excludes, ret ++ ++ ++def get_rootfs_digests( ++ rootfs: str, skip_path: Optional[str], hashes_map: Dict[str, List[str]], algo: str ++) -> Dict[str, List[str]]: ++ """Calculate digests for files under a directory.""" ++ ++ abs_rootfs = os.path.abspath(rootfs) ++ ++ dirs_to_exclude = [] ++ ++ # Preprocess the directories to skip, ignoring those outside the rootfs and ++ # removing the prefix from those that are under the rootfs ++ if skip_path: ++ for d in skip_path.split(","): ++ abs_d = os.path.abspath(d) ++ ++ if pathlib.PurePath(abs_rootfs).is_relative_to(abs_d): ++ # Okay, nothing to do here, since the root is marked to be ++ # skipped ++ logger.debug("The rootfs %s is excluded because it matches or is within %s", abs_rootfs, abs_d) ++ return {} ++ ++ if not pathlib.PurePath(d).is_relative_to(rootfs): ++ logger.warning("Ignoring directory '%s' that should be skipped, but it is not under '%s'", d, rootfs) ++ else: ++ # Remove the prefix to make it consistent with other excluded ++ # directories ++ if abs_rootfs != "/" and abs_d.startswith(abs_rootfs): ++ dirs_to_exclude.append(abs_d[len(abs_rootfs) :]) ++ ++ dirs_to_exclude.extend(BASE_EXCLUDE_DIRS) ++ hashes_map = path_digests( ++ rootfs, ++ dirs_to_exclude=dirs_to_exclude, ++ digests=hashes_map, ++ alg=algo, ++ only_owned_by_root=True, ++ match_rootfs=True, ++ ) ++ return hashes_map ++ ++ ++def get_initrds_digests(initrd_dir: str, hashes_map: Dict[str, List[str]], algo: str) -> Dict[str, List[str]]: ++ """Calculate digests for files from initrds from the given directory.""" ++ for initrd_file in list_initrds(initrd_dir): ++ initrd_data = initrd.InitrdReader(initrd_file) ++ hashes_map = path_digests(initrd_data.contents(), remove_prefix=True, digests=hashes_map, alg=algo) ++ return hashes_map ++ ++ ++def process_ima_buf_in_measurement_list( ++ ima_measurement_list_file: str, ++ ignored_keyrings: List[str], ++ get_keyrings: bool, ++ keyrings_map: Dict[str, List[str]], ++ get_ima_buf: bool, ++ ima_buf_map: Dict[str, 
List[str]],
++) -> Tuple[Dict[str, List[str]], Dict[str, List[str]], bool]:
++    """
++    Process ima-buf entries.
++
++    Process ima-buf entries and get the keyrings map from key-related entries
++    and ima_buf map from the rest.
++    """
++    ret = True
++    try:
++        with open(ima_measurement_list_file, "r", encoding="UTF-8") as fobj:
++            while True:
++                line = fobj.readline()
++                if not line:
++                    break
++                pieces = line.split(" ")
++                if len(pieces) != 6:
++                    errmsg = f"Skipping line that was split into {len(pieces)} pieces, expected 6: {line}"
++                    logger.debug(errmsg)
++                    continue
++                if pieces[2] not in ("ima-buf",):
++                    continue
++                checksum_hash = pieces[3].split(":")[1]
++                path = pieces[4]
++
++                bindata = None
++                try:
++                    bindata = binascii.unhexlify(pieces[5].strip())
++                except binascii.Error:
++                    pass
++
++                # check whether buf's bindata contains a key; if so, we will only
++                # append it to 'keyrings', never to 'ima-buf'
++                if bindata and cert_utils.is_x509_cert(bindata):
++                    if path in ignored_keyrings or not get_keyrings:
++                        continue
++
++                    keyrings_map.setdefault(path, []).append(checksum_hash)
++                    continue
++
++                if get_ima_buf:
++                    ima_buf_map.setdefault(path, []).append(checksum_hash)
++    except (PermissionError, FileNotFoundError) as ex:
++        errmsg = f"An error occurred: {ex}"
++        logger.error(errmsg)
++        ret = False
++    return keyrings_map, ima_buf_map, ret
++
++
++def process_signature_verification_keys(verification_keys: List[str], policy: RuntimePolicyType) -> RuntimePolicyType:
++    """Add the given keys (x509 certificates) to keyring."""
++    if not verification_keys:
++        return policy
++
++    verification_key_list = None
++    if policy.get("verification-keys"):
++        keyring = file_signatures.ImaKeyring().from_string(policy["verification-keys"])
++        if not keyring:
++            logger.error("Could not create IMA Keyring from JSON")
++    else:
++        keyring = file_signatures.ImaKeyring()
++
++    if keyring:
++        for key in verification_keys:
++            try:
++                pubkey, keyidv2 = file_signatures.get_pubkey_from_file(key)
++                if not pubkey:
++                    errmsg = f"File '{key}' is not a file with a key"
++                    logger.error(errmsg)
++                else:
++                    keyring.add_pubkey(pubkey, keyidv2)
++            except ValueError as e:
++                errmsg = f"File '{key}' does not have a supported key: {e}"
++                logger.error(errmsg)
++
++        verification_key_list = keyring.to_string()
++
++    if verification_key_list:
++        policy["verification-keys"] = verification_key_list
++
++    return policy
++
++
++def _get_digest_algorithm_from_hex(hexstring: str) -> str:
++    """Try to identify the algorithm used to generate the provided value by length"""
++    for alg in list(algorithms.Hash):
++        if len(hexstring) == algorithms.Hash(alg).hexdigest_len():
++            return str(alg)
++    return INVALID_ALGORITHM
++
++
++def _get_digest_algorithm_from_map_list(maplist: Dict[str, List[str]]) -> str:
++    """Assuming all digests in the policy use the same algorithm, get the first
++    digest and try to obtain the algorithm from its length"""
++
++    algo = INVALID_ALGORITHM
++    if maplist:
++        digest_list = next(iter(maplist.values()))
++        if digest_list:
++            digest = digest_list[0]
++            if digest:
++                algo = _get_digest_algorithm_from_hex(digest)
++    return algo
++
++
++def create_runtime_policy(args: argparse.Namespace) -> Optional[RuntimePolicyType]:
++    """Create a runtime policy from the input arguments."""
++    policy = None
++    algo = None
++    base_policy_algo = None
++    allowlist_algo = None
++    ima_measurement_list_algo = None
++
++    allowlist_digests: Dict[str, List[str]] = {}
++    ima_digests: Dict[str, List[str]] = {}
++    rootfs_digests: Dict[str, List[str]] = {}
List[str]] = {} ++ ramdisk_digests: Dict[str, List[str]] = {} ++ local_rpm_digests: Dict[str, List[str]] = {} ++ remote_rpm_digests: Dict[str, List[str]] = {} ++ ++ # Adjust logging for verbose, if required. ++ if args.verbose: ++ Logger().enableVerbose() ++ ++ # If a base policy was provided, try to parse the file as JSON and import ++ # the values to the current policy format. ++ # Otherwise, use an empty policy as the base policy ++ if args.base_policy: ++ policy = update_base_policy(args.base_policy) ++ if not policy: ++ return None ++ ++ digests = policy.get("digests", {}) ++ ++ # Try to get the digest algorithm from the lenght of the digest ++ base_policy_algo = _get_digest_algorithm_from_map_list(digests) ++ ++ # If the guessed algorithm was SHA-256, it is actually ambiguous as it ++ # could be also SM3_256. Set as SHA256_OR_SM3 ++ if base_policy_algo == algorithms.Hash.SHA256: ++ base_policy_algo = SHA256_OR_SM3 ++ else: ++ policy = ima.empty_policy() ++ ++ if args.algo: ++ if not (args.ramdisk_dir or args.rootfs): ++ logger.warning( ++ "You need to specify at least one of --ramdisk-dir or --rootfs to use a custom digest algorithm" ++ ) ++ ++ if args.allowlist: ++ allowlist_digests, ok = process_flat_allowlist(args.allowlist, {}) ++ if not ok: ++ return None ++ ++ # Try to get the digest algorithm from the lenght of the digest ++ allowlist_algo = _get_digest_algorithm_from_map_list(allowlist_digests) ++ ++ # If the guessed algorithm was SHA-256, it is actually ambiguous as it ++ # could be also SM3_256. Set as SHA256_OR_SM3 ++ if allowlist_algo == algorithms.Hash.SHA256: ++ allowlist_algo = SHA256_OR_SM3 ++ ++ if args.ima_measurement_list != EMPTY_IMA_MEASUREMENT_LIST: ++ ima_list = args.ima_measurement_list ++ if ima_list is None: ++ # Use the default list, when one is not specified. ++ ima_list = IMA_MEASUREMENT_LIST ++ ++ logger.debug("Measurement list is %s", ima_list) ++ if not os.path.isfile(ima_list): ++ logger.warning("The IMA measurement list file '%s' does not seem to exist", ima_list) ++ return None ++ ++ try: ++ # If not set, try to get the digest algorithm from the boot_aggregate. ++ ima_measurement_list_algo, _ = boot_aggregate_from_file(ima_list) ++ except Exception: ++ ima_measurement_list_algo = INVALID_ALGORITHM ++ ++ ima_digests = {} ++ ima_digests, ok = get_hashes_from_measurement_list(ima_list, ima_digests) ++ if not ok: ++ return None ++ ++ if _has_rpm and rpm_repo: ++ if args.local_rpm_repo: ++ # FIXME: pass the IMA sigs as well. ++ local_rpm_digests = {} ++ local_rpm_digests, _imasigs, ok = rpm_repo.analyze_local_repo( ++ args.local_rpm_repo, digests=local_rpm_digests ++ ) ++ if not ok: ++ return None ++ if args.remote_rpm_repo: ++ # FIXME: pass the IMA sigs as well. 
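As a quick illustration of the length-based detection performed by _get_digest_algorithm_from_hex above: a hex digest's length identifies the usual algorithms, except that SHA-256 and SM3_256 are the same length. A minimal stand-alone sketch (the table and helper below are simplified stand-ins, not keylime API):

# Editor's sketch, not part of the patch: hex digest length -> algorithm.
HEX_LEN_TO_ALGO = {40: "sha1", 64: "sha256-or-sm3", 96: "sha384", 128: "sha512"}

def guess_algo(hexdigest: str) -> str:
    return HEX_LEN_TO_ALGO.get(len(hexdigest), "invalid")

assert guess_algo("a" * 40) == "sha1"
assert guess_algo("a" * 64) == "sha256-or-sm3"  # ambiguous, hence SHA256_OR_SM3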
++            remote_rpm_digests = {}
++            remote_rpm_digests, _imasigs, ok = rpm_repo.analyze_remote_repo(
++                args.remote_rpm_repo, digests=remote_rpm_digests
++            )
++            if not ok:
++                return None
++
++    # Flag to indicate whether the operation should be aborted
++    abort = False
++
++    # Use the same digest algorithm used by the provided inputs, following the
++    # priority order: --algo > base policy > allowlist > IMA measurement list
++    for a, source in [
++        (args.algo, "--algo option"),
++        (base_policy_algo, "base policy"),
++        (allowlist_algo, "allowlist"),
++        (ima_measurement_list_algo, "IMA measurement list"),
++    ]:
++        if a == INVALID_ALGORITHM:
++            logger.warning("Invalid digest algorithm found in the %s", source)
++            abort = True
++            continue
++
++        # Skip unset options
++        if not a:
++            continue
++
++        # If the algorithm was previously set, check it against the algorithm
++        # from the current source
++        if algo:
++            if a != algo:
++                if algo == SHA256_OR_SM3:
++                    if a in [algorithms.Hash.SHA256, algorithms.Hash.SM3_256]:
++                        algo = a
++                        logger.debug("Using digest algorithm '%s' obtained from the %s", a, source)
++                        continue
++
++                logger.warning(
++                    "The digest algorithm in the %s does not match the previously set '%s' algorithm", source, algo
++                )
++                abort = True
++        else:
++            if a not in (h for h in algorithms.Hash):
++                if a == SHA256_OR_SM3:
++                    algo = a
++                else:
++                    logger.warning("Invalid digest algorithm %s in the %s", a, source)
++                    abort = True
++                continue
++
++            algo = a
++
++        if abort:
++            continue
++
++        logger.debug("Using digest algorithm '%s' obtained from the %s", a, source)
++
++    if abort:
++        logger.warning("Aborting operation")
++        return None
++
++    if not algo:
++        logger.debug("Using default digest algorithm %s", DEFAULT_FILE_DIGEST_ALGORITHM)
++        algo = DEFAULT_FILE_DIGEST_ALGORITHM
++
++    if args.ramdisk_dir:
++        ramdisk_digests = {}
++        ramdisk_digests = get_initrds_digests(args.ramdisk_dir, ramdisk_digests, algo)
++
++    if args.rootfs:
++        rootfs_digests = {}
++        rootfs_digests = get_rootfs_digests(args.rootfs, args.skip_path, rootfs_digests, algo)
++
++    # Combine all obtained digests
++    for digests in [
++        allowlist_digests,
++        ima_digests,
++        rootfs_digests,
++        ramdisk_digests,
++        local_rpm_digests,
++        remote_rpm_digests,
++    ]:
++        if not digests:
++            continue
++        policy["digests"] = merge_maplists(policy["digests"], digests)
++
++    if args.exclude_list_file:
++        policy["excludes"], ok = process_exclude_list_file(args.exclude_list_file, policy["excludes"])
++        if not ok:
++            return None
++
++    policy["ima"]["ignored_keyrings"].extend(args.ignored_keyrings)
++    if args.get_keyrings or args.get_ima_buf:
++        policy["keyrings"], policy["ima-buf"], ok = process_ima_buf_in_measurement_list(
++            args.ima_measurement_list,
++            policy["ima"]["ignored_keyrings"],
++            args.get_keyrings,
++            policy["keyrings"],
++            args.get_ima_buf,
++            policy["ima-buf"],
++        )
++        if not ok:
++            return None
++
++    policy = process_signature_verification_keys(args.ima_signature_keys, policy)
++
++    # Ensure we only have unique values in lists
++    for key in ["digests", "ima-buf", "keyrings"]:
++        policy[key] = {k: sorted(list(set(v))) for k, v in policy[key].items()}  # type: ignore
++
++    policy["excludes"] = sorted(list(set(policy["excludes"])))
++    policy["ima"]["ignored_keyrings"] = sorted(list(set(policy["ima"]["ignored_keyrings"])))
++
++    policy["meta"]["generator"] = ima.RUNTIME_POLICY_GENERATOR.LegacyAllowList
++    policy["meta"]["timestamp"] = str(datetime.datetime.now())
++
++    try:
++        ima.validate_runtime_policy(policy)
++    except ima.ImaValidationError as ex:
++        errmsg = f"The generated policy is not a valid runtime policy: {ex}"
++        logger.error(errmsg)
++        return None
++
++    try:
++        with open(args.output, "w", encoding="UTF-8") as fobj:
++            if args.legacy_allowlist:
++                print_digests_legacy_format(policy["digests"], fobj)
++            else:
++                jsonpolicy = json.dumps(policy)
++                fobj.write(jsonpolicy)
++    except (PermissionError, FileNotFoundError) as ex:
++        errmsg = f"An error occurred while writing the policy: {ex}"
++        logger.error(errmsg)
++        return None
++
++    return policy
+diff --git a/keylime/policy/initrd.py b/keylime/policy/initrd.py
+new file mode 100644
+index 0000000..0e103df
+--- /dev/null
++++ b/keylime/policy/initrd.py
+@@ -0,0 +1,353 @@
++#!/usr/bin/env python3
++
++"""
++Module to help with extracting initrds.
++"""
++
++import os
++import shutil
++import subprocess
++import tempfile
++from importlib import util
++from typing import IO, Dict
++
++from keylime.policy.logger import Logger
++from keylime.policy.utils import Compression, Magic, read_bytes_from_open_file
++
++_HAS_LIBARCHIVE = util.find_spec("libarchive") is not None
++if _HAS_LIBARCHIVE:
++    import libarchive  # pylint: disable=import-error
++else:
++    libarchive = None
++
++logger = Logger().logger()
++
++
++class InitrdReader:
++    """A helper class for reading the contents of an initrd. This is based on dracut's skipcpio."""
++
++    _initrd_file: str = ""
++    _contents_dir: str = ""
++    _flist: Dict[str, str] = {}
++
++    # New ASCII format. CRC format is identical, except that
++    # the magic field is 070702 instead of 070701.
++    # struct cpio_newc_header {
++    #     char c_magic[6];
++    #     char c_ino[8];
++    #     char c_mode[8];
++    #     char c_uid[8];
++    #     char c_gid[8];
++    #     char c_nlink[8];
++    #     char c_mtime[8];
++    #     char c_filesize[8];
++    #     char c_devmajor[8];
++    #     char c_devminor[8];
++    #     char c_rdevmajor[8];
++    #     char c_rdevminor[8];
++    #     char c_namesize[8];
++    #     char c_check[8];
++    # }__attribute__((packed));
++
++    # CPIO fields are 8 bytes long, except for the magic, which is 6.
++    CPIO_MAGIC_LEN: int = 6
++    CPIO_FIELD_LEN: int = 8
++    CPIO_ALIGNMENT: int = 4
++    CPIO_END: bytes = b"TRAILER!!!"
++    CPIO_END_LEN: int = 10
++    CPIO_NAMESIZE_OFFSET: int = 94  # 6 (magic) + 11 fields (x8)
++    CPIO_FILESIZE_OFFSET: int = 54  # 6 (magic) + 6 fields (x8)
++    CPIO_HEADER_LEN: int = 110  # 6 (magic) + 13 fields (x8)
++    CPIO_HEADER_AND_TRAILING_LEN: int = CPIO_HEADER_LEN + CPIO_END_LEN
++
++    @staticmethod
++    def align_up(pos: int, alignment: int) -> int:
++        """Align pos to the specified byte alignment."""
++        return (pos + alignment - 1) & (~(alignment - 1))
++
++    @staticmethod
++    def extract_at_offset_fallback(infile: IO[bytes], offset: int) -> None:
++        """
++        Fallback method for extracting an initrd at a given offset.
++
++        This method will extract an initrd by calling system programs
++        to do the decompression and extraction, and will be used if
++        libarchive is not available. Note that the data will be extracted
++        at the current directory.
++
++        :param infile: the (open) file we will be using to extract the data from
++        :param offset: the offset in the provided file where the data to be extracted starts
++        :return: None
++        """
++        logger.debug("extract_at_offset_fallback(): file %s, offset %s", infile.name, offset)
++
++        decompression: Dict[str, str] = {
++            Compression.LZO: "lzop -d -c",
++            Compression.BZIP2: "bzcat --",
++            Compression.CPIO: "cat --",
++            Compression.GZIP: "zcat --",
++            Compression.ZSTD: "zstd -d -c",
++            Compression.LZ4: "lz4 -d -c",
++            Compression.XZ: "xzcat --",
++        }
++
++        # cat will be used for the decompression, and may be one of
++        # the following programs: lzop, bzcat, zcat, zstd, lz4, xzcat
++        # or even cat itself, if no compression is used.
++        cat: str = decompression[Compression.XZ]
++        comp_type = Compression.detect_from_open_file(infile, offset)
++        if comp_type and comp_type in decompression:
++            cat = decompression[comp_type]
++
++        logger.debug("extract_at_offset_fallback(): identified format %s", cat)
++
++        # We need 2 programs to do this: the one identified in the previous
++        # step, to do the decompression, stored in the cat variable, plus
++        # cpio itself. Let's check if we have them available before moving
++        # ahead.
++
++        cat_args = cat.split(" ")
++        orig_cat_bin = cat_args[0]
++        cat_bin = shutil.which(orig_cat_bin)
++        if cat_bin is None:
++            errmsg = f"Unable to move forward; '{orig_cat_bin}' not available in the path"
++            logger.error(errmsg)
++            raise Exception(errmsg)
++        cat_args[0] = cat_bin
++
++        cpio_bin = shutil.which("cpio")
++        if cpio_bin is None:
++            errmsg = "Unable to move forward; 'cpio' not available in the path"
++            logger.error(errmsg)
++            raise Exception(errmsg)
++        cpio_args = f"{cpio_bin} --quiet -i".split(" ")
++
++        # Ok, we have the required programs, so now we need to run cat,
++        # to possibly decompress the data, then feed its output to cpio.
++        infile.seek(offset)
++        data = infile.read()
++
++        with subprocess.Popen(
++            cat_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
++        ) as cat_proc:
++            decompressed, stderr = cat_proc.communicate(input=data)
++            if cat_proc.returncode != 0:
++                errmsg = f"Unable to process file '{infile.name}' at offset {offset} with '{orig_cat_bin}': {stderr.decode('UTF-8')}"
++                logger.error(errmsg)
++                raise Exception(errmsg)
++            with subprocess.Popen(cpio_args, stdin=subprocess.PIPE) as cpio_proc:
++                _, stderr = cpio_proc.communicate(input=decompressed)
++                if cpio_proc.returncode != 0:
++                    errmsg = f"Unable to process cpio archive from file '{infile.name}' at offset {offset}: {stderr.decode('UTF-8')}"
++                    logger.error(errmsg)
++                    raise Exception(errmsg)
++
++    @staticmethod
++    def extract_at_offset_libarchive(infile: IO[bytes], offset: int) -> None:
++        """
++        libarchive-based initrd extractor.
++
++        This method will extract an initrd using the libarchive module.
++        Note that the data will be extracted at the current directory.
++
++        :param infile: the (open) file we will be using to extract the data from
++        :param offset: the offset in the provided file where the data to be extracted starts
++        :return: None
++        """
++        logger.debug("extract_at_offset_libarchive(): file %s, offset %s", infile.name, offset)
++
++        if not _HAS_LIBARCHIVE or not libarchive:
++            raise Exception("libarchive is not available")
++
++        infile.seek(offset)
++        data = infile.read()
++
++        try:
++            libarchive.extract_memory(data)
++        except Exception as exc:
++            errmsg = f"Unable to extract data from '{infile.name}' at offset {offset} with libarchive: {exc}"
++            logger.error(errmsg)
++            raise Exception(errmsg) from None
++
++    @staticmethod
++    def extract_at_offset(infile: IO[bytes], offset: int, dstdir: str) -> None:
++        """
++        Extract an initrd file from a given offset to a given directory.
++
++        This method extracts the contents of an initrd indicated by the
++        file and offset provided. It will either use libarchive, if available,
++        or fall back to doing the extraction by using system commands.
++
++        :param infile: the (open) file to use for getting the data
++        :param offset: the offset in the provided file where the data starts
++        :param dstdir: the directory to extract the data at
++        :return: None
++        """
++        prevdir = os.getcwd()
++
++        extract_method = InitrdReader.extract_at_offset_fallback
++        if _HAS_LIBARCHIVE:
++            extract_method = InitrdReader.extract_at_offset_libarchive
++
++        try:
++            os.chdir(dstdir)
++            extract_method(infile, offset)
++        finally:
++            os.chdir(prevdir)
++
++    @staticmethod
++    def is_eof(f: IO[bytes]) -> bool:
++        """Check for EOF (end of file)."""
++        s = f.read(1)
++        if s != b"":  # Restore position.
++            f.seek(-1, os.SEEK_CUR)
++
++        return s == b""
++
++    @staticmethod
++    def skip_cpio(infile: IO[bytes]) -> int:
++        """
++        Find the offset where the "main" initrd starts.
++
++        :param infile: an open file handle for the initrd
++        :return: int, the offset where the data of interest starts
++        """
++        pos = 0
++        previous = 0
++        parsing = False
++        buffer_size: int = 2048  # Arbitrarily long buffer.
++        cpio_formats = (Magic.CPIO_NEW_ASCII, Magic.CPIO_CRC)
++
++        buffer = read_bytes_from_open_file(infile, pos, buffer_size)
++        # Reset file offset.
++        infile.seek(0)
++
++        # Now let's check if it's a cpio archive.
++        magic = buffer[: InitrdReader.CPIO_MAGIC_LEN]
++        if magic not in cpio_formats:
++            return pos
++
++        while True:
++            filename_len = int(
++                "0x"
++                + buffer[
++                    InitrdReader.CPIO_NAMESIZE_OFFSET : InitrdReader.CPIO_NAMESIZE_OFFSET + InitrdReader.CPIO_FIELD_LEN
++                ].decode("UTF-8"),
++                0,
++            )
++            filesize = int(
++                "0x"
++                + buffer[
++                    InitrdReader.CPIO_FILESIZE_OFFSET : InitrdReader.CPIO_FILESIZE_OFFSET + InitrdReader.CPIO_FIELD_LEN
++                ].decode("UTF-8"),
++                0,
++            )
++
++            filename = buffer[InitrdReader.CPIO_HEADER_LEN : pos + InitrdReader.CPIO_HEADER_LEN + filename_len]
++            if not parsing:
++                # Mark as the beginning of the archive.
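The fixed offsets used above can be checked with a minimal sketch; the header bytes below are hand-built for illustration only and are not taken from a real initrd:

# Editor's sketch, not part of the patch: a cpio "newc" header is 110 ASCII
# bytes; after the 6-byte magic, every field is 8 hex characters, so
# c_namesize sits at offset 94 (6 + 11 fields * 8). For the trailer entry
# "TRAILER!!!" plus its trailing NUL, c_namesize encodes 11.
hdr = b"070701" + b"0" * (8 * 11) + b"0000000B" + b"0" * 8  # magic + 11 fields + namesize + check
assert len(hdr) == 110
assert int(hdr[94:94 + 8], 16) == 11  # len("TRAILER!!!") + 1, cf. CPIO_END_LEN + 1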
++ previous = pos ++ parsing = True ++ ++ pos = InitrdReader.align_up(pos + InitrdReader.CPIO_HEADER_LEN + filename_len, InitrdReader.CPIO_ALIGNMENT) ++ pos = InitrdReader.align_up(pos + filesize, InitrdReader.CPIO_ALIGNMENT) ++ ++ if filename_len == (InitrdReader.CPIO_END_LEN + 1) and filename == InitrdReader.CPIO_END: ++ infile.seek(pos) ++ parsing = False ++ break ++ ++ infile.seek(pos) ++ buffer = read_bytes_from_open_file(infile, pos, InitrdReader.CPIO_HEADER_AND_TRAILING_LEN) ++ ++ magic = buffer[: InitrdReader.CPIO_MAGIC_LEN] ++ if magic not in cpio_formats: ++ logger.warning("Corrupt CPIO archive (magic: %s)", magic.decode("UTF-8")) ++ return pos ++ ++ if InitrdReader.is_eof(infile): ++ break ++ ++ if InitrdReader.is_eof(infile): ++ # CPIO_END not found. ++ return pos ++ ++ # Skip zeros. ++ while True: ++ i = 0 ++ buffer = read_bytes_from_open_file(infile, pos, buffer_size) ++ for i, value in enumerate(buffer): ++ if value != 0: ++ break ++ ++ if buffer[i] != 0: ++ pos += i ++ infile.seek(pos) ++ break ++ ++ pos += len(buffer) ++ ++ if InitrdReader.is_eof(infile): ++ # Rewinding, as we got to the end of the archive. ++ pos = previous ++ break ++ ++ return pos ++ ++ def _extract(self) -> None: ++ """Extract an initrd.""" ++ with open(self._initrd_file, "rb") as infile: ++ InitrdReader.extract_at_offset(infile, self.skip_cpio(infile), self._contents_dir) ++ ++ def set_initrd(self, initrdfile: str) -> None: ++ """ ++ Define the initrd to be used. ++ ++ Specify an initrd file, that will be extracted. Its contents ++ can be found at the path indicated by the method contents(). ++ ++ :param initrdfile: a string with the path of an initrd ++ :return: None ++ """ ++ if not os.path.isfile(initrdfile): ++ errmsg = f"Specified initrd file '{initrdfile}' does not seem to exist; please double check" ++ logger.error(errmsg) ++ raise Exception(errmsg) ++ ++ self._initrd_file = os.path.realpath(initrdfile) ++ if self._contents_dir and os.path.isdir(self._contents_dir): ++ shutil.rmtree(self._contents_dir) ++ self._contents_dir = tempfile.mkdtemp(prefix="keylime-initrd-") ++ self._extract() ++ ++ def contents(self) -> str: ++ """ ++ Return the path where the extracted initrd is available. ++ ++ :return: str ++ """ ++ return self._contents_dir ++ ++ def __init__(self, initrdfile: str) -> None: ++ """ ++ Initialize the class with the specified initrd. ++ ++ :param initrdfile: the path of the initrd we want to extract ++ :return: None ++ """ ++ self.set_initrd(initrdfile) ++ ++ def __del__(self) -> None: ++ """ ++ Destructor. ++ ++ Takes care of removing the temp directory created to contain ++ the initrd contents after it has been extracted from the cpio ++ archive. ++ ++ :return: None ++ """ ++ if self._contents_dir and os.path.isdir(self._contents_dir): ++ logger.debug("Removing temporary directory %s", self._contents_dir) ++ shutil.rmtree(self._contents_dir) +diff --git a/keylime/policy/logger.py b/keylime/policy/logger.py +new file mode 100644 +index 0000000..5b8812a +--- /dev/null ++++ b/keylime/policy/logger.py +@@ -0,0 +1,81 @@ ++""" ++Module to assist with logging in the policy tool. ++ ++SPDX-License-Identifier: Apache-2.0 ++Copyright 2024 Red Hat, Inc. 
++""" ++ ++import logging ++import sys ++from typing import Optional, TextIO ++ ++_policy_logger: Optional[logging.Logger] = None ++_log_stream: TextIO = sys.stderr ++_log_handler: logging.Handler = logging.Handler() ++_log_handler_verbose: logging.Handler = logging.Handler() ++ ++ ++class Logger: ++ """A helper class to handle logging.""" ++ ++ POLICY_LOGGER_FORMAT = r"%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s" ++ POLICY_LOGGER_DATEFMT = r"%Y-%m-%d %H:%M:%S" ++ ++ _logger: logging.Logger ++ _formatter: logging.Formatter = logging.Formatter(fmt=POLICY_LOGGER_FORMAT, datefmt=POLICY_LOGGER_DATEFMT) ++ _verbose: bool = False ++ ++ def __init__(self, verbose: bool = False): ++ """Initialize the class with the specified verbosity and stream.""" ++ global _policy_logger ++ ++ if _policy_logger is None: ++ _policy_logger = logging.getLogger("keylime-policy") ++ # We stop log propagation to prevent both duplication and ++ # to avoid the possibility of other loggers writing to ++ # stdout, which would mix logs with the relevant data the ++ # tool might output. ++ _policy_logger.propagate = False ++ ++ self._logger = _policy_logger ++ ++ self._verbose = verbose ++ self.setStream(_log_stream) ++ ++ def setStream(self, stream: TextIO) -> None: ++ """Define the stream for the logger.""" ++ # As some functionality may output data to stdout, let us log ++ # everything to stderr by default (default stream), so that it ++ # won't interfere with the relevant data. ++ global _log_stream ++ global _log_handler ++ global _log_handler_verbose ++ ++ _log_stream = stream ++ _log_handler = logging.StreamHandler(_log_stream) ++ _log_handler.setLevel(logging.INFO) ++ _log_handler_verbose = logging.StreamHandler(_log_stream) ++ _log_handler_verbose.setLevel(logging.DEBUG) ++ ++ # For the DEBUG level, we also have a formatter, with extra ++ # info, such as the timestamp. ++ _log_handler_verbose.setFormatter(self._formatter) ++ ++ if self._verbose: ++ self.enableVerbose() ++ else: ++ self.disableVerbose() ++ ++ def enableVerbose(self) -> None: ++ """Use a verbose logger.""" ++ self._logger.handlers = [_log_handler_verbose] ++ self._logger.setLevel(logging.DEBUG) ++ ++ def disableVerbose(self) -> None: ++ """Do not use a verbose logger.""" ++ self._logger.handlers = [_log_handler] ++ self._logger.setLevel(logging.INFO) ++ ++ def logger(self) -> logging.Logger: ++ """Return the logger.""" ++ return self._logger +diff --git a/keylime/policy/rpm_repo.py b/keylime/policy/rpm_repo.py +new file mode 100644 +index 0000000..d6a8bda +--- /dev/null ++++ b/keylime/policy/rpm_repo.py +@@ -0,0 +1,344 @@ ++#!/usr/bin/env python3 ++ ++"""Analyze local and remote RPM repositories.""" ++ ++import gzip ++import multiprocessing ++import os ++import pathlib ++import shutil ++import tempfile ++import urllib.error ++import urllib.parse ++import urllib.request ++import xml.etree.ElementTree as ET ++from contextlib import contextmanager ++from typing import Dict, Generator, List, Optional, Tuple ++ ++import rpm # pylint: disable=import-error ++ ++from keylime.common import algorithms ++from keylime.policy.logger import Logger ++from keylime.policy.utils import Compression, merge_maplists ++from keylime.signing import verify_signature_from_file ++from keylime.types import PathLike_str ++ ++logger = Logger().logger() ++ ++ ++def _parse_rpm_header(hdr: rpm.hdr) -> Tuple[Dict[str, List[str]], Dict[str, List[bytes]]]: ++ # First, the file digests. ++ _MD5_DIGEST_LEN = 32 # In the past, rpm used MD5 for the digests. 
++    _SHA256_DIGEST_LEN = algorithms.Hash("sha256").hexdigest_len()
++    empty_hashes = ("0" * _MD5_DIGEST_LEN, "0" * _SHA256_DIGEST_LEN)
++    digests = {f.name: [f.digest] for f in rpm.files(hdr) if f.digest not in empty_hashes}
++
++    # Now, the IMA signatures, if any.
++    ima_sig = {f.name: [f.imasig] for f in rpm.files(hdr) if f.imasig}
++    return digests, ima_sig
++
++
++def analyze_rpm_pkg(pkg: PathLike_str) -> Tuple[Dict[str, List[str]], Dict[str, List[bytes]]]:
++    """
++    Analyze a single RPM package.
++
++    :param pkg: the path to a single package
++    :return: two dicts; the first one contains the digests of the files and the
++             second one contains the ima signatures, if any
++    """
++    ts = rpm.TransactionSet()
++    ts.setVSFlags(rpm.RPMVSF_MASK_NOSIGNATURES | rpm.RPMVSF_MASK_NODIGESTS)
++
++    with open(pkg, "rb") as f:
++        hdr = ts.hdrFromFdno(f)
++
++    # Symbolic links in IMA are resolved before being measured,
++    # registering the final linked name in the logs
++    return _parse_rpm_header(hdr)
++
++
++def analyze_rpm_pkg_url(url: str) -> Tuple[Dict[str, List[str]], Dict[str, List[bytes]]]:
++    """Analyze a single RPM package from its URL."""
++    # To fetch the header we can emulate rpmReadPackageFile, but this
++    # seems to require multiple reads. This simplified algorithm first
++    # reads a sizeable blob, adjusted from the median of some repo
++    # analysis, and if hdrFromFdno fails, tries to expand it
++    # iteratively.
++
++    # Estimate of an RPM header size.
++    _RPM_HEADER_SIZE = 24 * 1024
++
++    # Hide errors while fetching partial headers.
++    with open(os.devnull, "wb") as devnull:
++        rpm.setLogFile(devnull)
++
++        logmsg = f"Fetching header for {url}"
++        logger.debug(logmsg)
++
++        blob = b""
++        chunk_size = _RPM_HEADER_SIZE
++        while True:
++            with tempfile.TemporaryFile() as f:
++                range_ = f"{len(blob)}-{len(blob) + chunk_size - 1}"
++                req = urllib.request.Request(url, headers={"Range": f"bytes={range_}"})
++                try:
++                    with urllib.request.urlopen(req) as resp:
++                        blob += resp.read()
++                except urllib.error.HTTPError as exc:
++                    errmsg = f"Error trying to open {url}: {exc}"
++                    logger.warning(errmsg)
++                    return {}, {}
++
++                f.write(blob)
++                f.seek(0)
++
++                ts = rpm.TransactionSet()
++                ts.setVSFlags(rpm.RPMVSF_MASK_NOSIGNATURES | rpm.RPMVSF_MASK_NODIGESTS)
++                try:
++                    hdr = ts.hdrFromFdno(f)
++                    break
++                except Exception:
++                    chunk_size = max(1024, int(chunk_size / 2))
++
++    # Symbolic links in IMA are resolved before being measured,
++    # registering the final linked name in the logs
++    return _parse_rpm_header(hdr)
++
++
++def analyze_local_repo(
++    *repodir: str,
++    digests: Optional[Dict[str, List[str]]] = None,
++    imasigs: Optional[Dict[str, List[bytes]]] = None,
++    jobs: Optional[int] = None,
++) -> Tuple[Dict[str, List[str]], Dict[str, List[bytes]], bool]:
++    """
++    Analyze a local repository.
++
++    :param *repodir: str, the directory of the repository, where "repodata" is
++                     located
++    :param digests: dict of str and a list of strings, to store the files and
++                    their associated digests
++    :param imasigs: dict of str and a list of bytes, to store the files and
++                    their associated IMA signatures
++    :param jobs: integer, the number of jobs to use when processing the rpms
++    :return: tuple with the dict of digests, the dict of IMA signatures and a
++             boolean indicating the success of this method
++    """
++    # Validate repodir.
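The partial-header fetch in analyze_rpm_pkg_url above relies on standard HTTP range requests. A minimal sketch, assuming a hypothetical URL and with the actual network call left commented out:

# Editor's sketch, not part of the patch: request only the first 24 KiB of a
# package, the same way analyze_rpm_pkg_url grabs just enough bytes for the header.
import urllib.request

url = "https://repo.example.com/packages/foo.rpm"  # hypothetical URL
req = urllib.request.Request(url, headers={"Range": "bytes=0-24575"})
# with urllib.request.urlopen(req) as resp:
#     blob = resp.read()  # partial content (HTTP 206) if the server honors ranges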
++ if not str(*repodir): ++ logger.error("Please specify a repository") ++ return {}, {}, False ++ ++ repo = pathlib.Path(*repodir) ++ if not repo.exists(): ++ errmsg = f"{repo.absolute()} does not seem to exist" ++ logger.error(errmsg) ++ return {}, {}, False ++ ++ repodata_dir = repo.joinpath("repodata") ++ if not repodata_dir.exists(): ++ errmsg = f"{repodata_dir.absolute()} does not seem to exist" ++ logger.error(errmsg) ++ return {}, {}, False ++ ++ repomd_xml = repodata_dir.joinpath("repomd.xml") ++ if not repomd_xml.exists(): ++ errmsg = f"{repomd_xml} cannot be found" ++ logger.error(errmsg) ++ return {}, {}, False ++ ++ repomd_asc = repodata_dir.joinpath("repomd.xml.asc") ++ if repomd_asc.exists(): ++ repomd_key = repodata_dir.joinpath("repomd.xml.key") ++ if not repomd_key.exists(): ++ errmsg = f"Error. Key file {repomd_key} missing" ++ logger.error(errmsg) ++ return {}, {}, False ++ ++ try: ++ verify_signature_from_file(repomd_key, repomd_xml, repomd_asc, "Repository metadata") ++ except Exception: ++ logger.error("Error. Invalid signature. Untrusted repository") ++ return {}, {}, False ++ else: ++ logger.warning("Warning. Unsigned repository. Continuing the RPM scanning") ++ ++ jobs = jobs if jobs else multiprocessing.cpu_count() ++ ++ if not digests: ++ digests = {} ++ if not imasigs: ++ imasigs = {} ++ ++ # Analyze all the RPMs in parallel ++ with multiprocessing.Pool(jobs) as pool: ++ for rpm_digests, rpm_imasigs in pool.map(analyze_rpm_pkg, repo.glob("**/*.rpm")): ++ digests = merge_maplists(digests, rpm_digests) ++ imasigs = merge_maplists(imasigs, rpm_imasigs) ++ ++ return digests, imasigs, True ++ ++ ++@contextmanager ++def get_from_url(url: str) -> Generator[str, None, None]: ++ """Download the contents of an URL.""" ++ try: ++ with urllib.request.urlopen(url) as resp: ++ tfile = None ++ try: ++ tfile = tempfile.NamedTemporaryFile(prefix="keylime-policy-rpm-repo", delete=False) ++ fname = tfile.name ++ shutil.copyfileobj(resp, tfile) ++ tfile.close() ++ yield fname ++ finally: ++ if tfile: ++ os.remove(tfile.name) ++ except (urllib.error.HTTPError, ValueError) as exc: ++ logger.debug("HTTP error with URL '%s': %s", url, exc) ++ yield "" ++ ++ ++def get_filelists_ext_from_repomd(repo: str, repomd_xml: str) -> Optional[str]: ++ """Parse the filelist_ext file from a given repomd.xml file.""" ++ root = _parse_xml_file(repomd_xml).getroot() ++ location = root.find( ++ "./{http://linux.duke.edu/metadata/repo}data[@type='filelists-ext']/{http://linux.duke.edu/metadata/repo}location" ++ ) ++ return urllib.parse.urljoin(repo, location.attrib["href"]) if location is not None else None ++ ++ ++def get_rpm_urls_from_repomd(repo: str, repomd_xml: str) -> List[str]: ++ """Parse the RPM URLs from a given repomd.xml file.""" ++ root = _parse_xml_file(repomd_xml).getroot() ++ location = root.find( ++ "./{http://linux.duke.edu/metadata/repo}data[@type='primary']/{http://linux.duke.edu/metadata/repo}location" ++ ) ++ if location is None: ++ logger.error("Error. Primary location tag not found") ++ return [] ++ ++ logger.debug("Generating package list from repo ...") ++ primary_xml_url = urllib.parse.urljoin(repo, location.attrib["href"]) ++ with get_from_url(primary_xml_url) as primary_xml: ++ if not primary_xml: ++ logger.error("Error. 
Primary XML file cannot be downloaded")
++            return []
++
++        root = _parse_xml_file(primary_xml)
++
++        locations = root.findall(
++            "./{http://linux.duke.edu/metadata/common}package[@type='rpm']"
++            "/{http://linux.duke.edu/metadata/common}location"
++        )
++
++        return [urllib.parse.urljoin(repo, ll.attrib["href"]) for ll in locations]
++
++
++def _parse_xml_file(filepath: str) -> ET.ElementTree:
++    # We support only gzip compression, currently.
++    ctype = Compression.detect_from_file(filepath)
++    if ctype:
++        if ctype != Compression.GZIP:
++            errmsg = (
++                f"Compression type '{ctype}' NOT supported yet; the only compression format currently supported is gzip"
++            )
++            logger.debug(errmsg)
++            raise Exception(errmsg)
++        # Gzip.
++        with gzip.open(filepath) as to_parse:
++            return ET.parse(to_parse)
++
++    # Let us assume no compression here.
++    with open(filepath, encoding="UTF-8") as to_parse:
++        return ET.parse(to_parse)
++
++
++def _analyze_remote_repo(
++    repo: str, digests: Optional[Dict[str, List[str]]], imasigs: Optional[Dict[str, List[bytes]]], jobs: Optional[int]
++) -> Tuple[Dict[str, List[str]], Dict[str, List[bytes]], bool]:
++    # Make sure the repo URL ends with "/", so it can be used as a base URL
++    repo = repo if repo.endswith("/") else f"{repo}/"
++
++    if not digests:
++        digests = {}
++    if not imasigs:
++        imasigs = {}
++
++    repomd_xml_url = urllib.parse.urljoin(repo, "repodata/repomd.xml")
++    with get_from_url(repomd_xml_url) as repomd_xml:
++        if not repomd_xml:
++            errmsg = f"{repomd_xml_url} cannot be found"
++            logger.error(errmsg)
++            return {}, {}, False
++
++        repomd_asc_url = urllib.parse.urljoin(repo, "repodata/repomd.xml.asc")
++        with get_from_url(repomd_asc_url) as repomd_asc:
++            if repomd_asc:
++                repomd_key_url = urllib.parse.urljoin(repo, "repodata/repomd.xml.key")
++                with get_from_url(repomd_key_url) as repomd_key:
++                    if not repomd_key:
++                        errmsg = f"Error. Key file {repomd_key_url} missing"
++                        logger.error(errmsg)
++                        return {}, {}, False
++                    try:
++                        verify_signature_from_file(repomd_key, repomd_xml, repomd_asc, "Repository metadata")
++                    except Exception:
++                        logger.error("Error. Invalid signature. Untrusted repository")
++                        return {}, {}, False
++            else:
++                logger.warning("Warning. Unsigned repository. Continuing the RPM scanning")
++
++        # Check if this repo contains the filelists-ext.xml metadata
++        filelists_ext_xml_url = get_filelists_ext_from_repomd(repo, repomd_xml)
++        if filelists_ext_xml_url:
++            with get_from_url(filelists_ext_xml_url) as filelists_ext_xml:
++                if not filelists_ext_xml:
++                    errmsg = f"{filelists_ext_xml_url} cannot be found"
++                    logger.error(errmsg)
++                    return {}, {}, False
++
++                root = _parse_xml_file(filelists_ext_xml)
++                files = root.findall(".//{http://linux.duke.edu/metadata/filelists-ext}file[@hash]")
++                for f in files:
++                    if not f.text:
++                        continue
++                    v = digests.get(f.text, [])
++                    v.append(f.attrib["hash"])
++                    digests[f.text] = v
++
++                return digests, imasigs, True
++
++        # If not, use the slow method
++        logger.warning("Warning. filelists-ext.xml not present in the repo")
++        rpms = get_rpm_urls_from_repomd(repo, repomd_xml)
++
++        # librpm does not seem to be always thread safe, so we can either use
++        # a single thread (asyncio) or multiple processes. To avoid changing
++        # the whole stack, we go for synchronous functions spread over many
++        # processes. In the future we can move all of this to asyncio.
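The pool-based fan-out used below can be sketched in isolation; `fake_analyze` is a hypothetical stand-in for `analyze_rpm_pkg_url`, and the merge is simplified:

# Editor's sketch, not part of the patch: process-based fan-out and merge.
import multiprocessing

def fake_analyze(url):
    # Hypothetical worker: returns (digests, ima_signatures) for one package.
    return {url: ["digest"]}, {}

if __name__ == "__main__":
    merged = {}
    with multiprocessing.Pool(2) as pool:
        for digests_part, _sigs in pool.map(fake_analyze, ["a.rpm", "b.rpm"]):
            merged.update(digests_part)  # the real code merges with merge_maplists()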
++ jobs = jobs if jobs else (multiprocessing.cpu_count() * 8) ++ ++ # Analyze all the RPMs in parallel ++ with multiprocessing.Pool(jobs) as pool: ++ for rpm_digests, rpm_imasigs in pool.map(analyze_rpm_pkg_url, rpms): ++ digests = merge_maplists(digests, rpm_digests) ++ imasigs = merge_maplists(imasigs, rpm_imasigs) ++ ++ return digests, imasigs, True ++ ++ ++def analyze_remote_repo( ++ *repourl: str, ++ digests: Optional[Dict[str, List[str]]] = None, ++ imasigs: Optional[Dict[str, List[bytes]]] = None, ++ jobs: Optional[int] = None, ++) -> Tuple[Dict[str, List[str]], Dict[str, List[bytes]], bool]: ++ """Analyze a remote repository.""" ++ try: ++ return _analyze_remote_repo(str(*repourl), digests, imasigs, jobs) ++ except Exception as exc: ++ logger.error(exc) ++ return {}, {}, False +diff --git a/keylime/policy/sign_runtime_policy.py b/keylime/policy/sign_runtime_policy.py +new file mode 100644 +index 0000000..8752906 +--- /dev/null ++++ b/keylime/policy/sign_runtime_policy.py +@@ -0,0 +1,200 @@ ++"""Module to assist with signing Keylime runtime policies using DSSE.""" ++ ++import argparse ++import json ++from json.decoder import JSONDecodeError ++from typing import TYPE_CHECKING, Any, Optional ++ ++from cryptography.hazmat.backends import default_backend ++from cryptography.hazmat.primitives.asymmetric import ec ++from cryptography.hazmat.primitives.serialization import load_pem_private_key ++ ++from keylime.dsse import dsse, ecdsa, x509 ++from keylime.ima import ima ++from keylime.policy.logger import Logger ++ ++if TYPE_CHECKING: ++ # FIXME: how to make mypy and pylint happy here? ++ _SubparserType = argparse._SubParsersAction[argparse.ArgumentParser] # pylint: disable=protected-access ++else: ++ _SubparserType = Any ++ ++ ++logger = Logger().logger() ++ ++KEYLIME_PAYLOAD_TYPE = "application/vnd.keylime+json" ++KEYLIME_DEFAULT_EC_KEY_FILE = "keylime-ecdsa-key.pem" ++ ++VALID_BACKENDS = ["ecdsa", "x509"] ++ ++ ++def get_arg_parser(create_parser: _SubparserType, parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser: ++ """Perform the setup of the command-line arguments for this module.""" ++ sign_p = create_parser.add_parser("runtime", help="sign runtime policies", parents=[parent_parser]) ++ ++ sign_p.add_argument( ++ "-o", ++ "--output", ++ dest="output_file", ++ required=False, ++ help="The output file path for the DSSE-signed policy", ++ default="/dev/stdout", ++ ) ++ sign_p.add_argument( ++ "-r", ++ "--runtime-policy", ++ dest="policy", ++ required=True, ++ help="The location of the runtime policy file", ++ default="", ++ ) ++ sign_p.add_argument( ++ "-k", ++ "--keyfile", ++ dest="keyfile", ++ required=False, ++ help="The EC private key to sign the policy with", ++ default="", ++ ) ++ sign_p.add_argument( ++ "-p", ++ "--keypath", ++ dest="keypath", ++ required=False, ++ help="The filename to write the created private key, if one is not provided via the --keyfile argument", ++ default="", ++ ) ++ sign_p.add_argument( ++ "-b", ++ "--backend", ++ dest="backend", ++ required=False, ++ help="DSSE backend to use; either ecdsa or x509", ++ choices=VALID_BACKENDS, ++ type=str.lower, ++ default="ecdsa", ++ ) ++ sign_p.add_argument( ++ "-c", ++ "--cert-outfile", ++ dest="cert_outfile", ++ required=False, ++ help="The output file path for the x509 certificate, if using x509 DSSE backend", ++ default="", ++ ) ++ ++ sign_p.set_defaults(func=sign_runtime_policy) ++ ++ return sign_p ++ ++ ++def _get_signer( ++ backend: str, ++ in_ec_keyfile_path: Optional[str] = None, ++ 
out_keyfile_path: Optional[str] = None,
++    out_certfile: Optional[str] = None,
++) -> Optional[dsse.Signer]:
++    if backend not in VALID_BACKENDS:
++        logger.debug("Invalid backend '%s'; the valid alternatives are: %s", backend, VALID_BACKENDS)
++        return None
++
++    if in_ec_keyfile_path and out_keyfile_path:
++        logger.debug("The EC private key and the output key path cannot both be specified at once")
++        return None
++
++    if not out_keyfile_path:
++        out_keyfile_path = KEYLIME_DEFAULT_EC_KEY_FILE
++
++    ec_privkey: Optional[ec.EllipticCurvePrivateKey] = None
++    if in_ec_keyfile_path:
++        try:
++            with open(in_ec_keyfile_path, "rb") as pem_in:
++                pemlines = pem_in.read()
++        except FileNotFoundError:
++            logger.error("The specified key '%s' does not exist", in_ec_keyfile_path)
++            return None
++        privkey = load_pem_private_key(pemlines, None, default_backend())
++
++        if not isinstance(privkey, ec.EllipticCurvePrivateKey):
++            logger.error("Only elliptic curve keys are supported")
++            return None
++        ec_privkey = privkey
++
++    signer: Optional[dsse.Signer] = None
++
++    if backend == "ecdsa":
++        if ec_privkey:
++            signer = ecdsa.Signer(ec_privkey)
++        else:
++            signer = ecdsa.Signer.create(out_keyfile_path)
++    elif backend == "x509":
++        if out_certfile is None or out_certfile == "":
++            logger.error("x509 backend selected, but no certificate output file was specified")
++            return None
++
++        if ec_privkey:
++            signer = x509.Signer(ec_privkey, certificate_path=out_certfile)
++        else:
++            signer = x509.Signer.create(out_keyfile_path, certificate_path=out_certfile)
++
++    return signer
++
++
++def _sign_policy(signer: dsse.Signer, policy_fpath: str) -> Optional[str]:
++    try:
++        # Let us validate the policy first.
++        with open(policy_fpath, "rb") as f:
++            policy = json.load(f)
++        ima.validate_runtime_policy(policy)
++
++        # Now we can sign it.
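For background, DSSE does not sign the raw payload: it signs a pre-authentication encoding (PAE) of the payload type and payload. A minimal sketch of PAE as defined by the DSSE v1 specification (keylime's dsse.Sign below handles this internally):

# Editor's sketch, not part of the patch: DSSE v1 PAE,
# "DSSEv1 <len(type)> <type> <len(payload)> <payload>".
def pae(payload_type: bytes, payload: bytes) -> bytes:
    return b"DSSEv1 %d %s %d %s" % (len(payload_type), payload_type, len(payload), payload)

assert pae(b"application/vnd.keylime+json", b"{}").startswith(b"DSSEv1 28 ")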
++        unsigned_policy = json.dumps(policy)
++        signed_policy = dsse.Sign(
++            payloadType=KEYLIME_PAYLOAD_TYPE, payload=unsigned_policy.encode("UTF-8"), signer=signer
++        )
++    except FileNotFoundError:
++        logger.error("The runtime policy file specified (%s) does not seem to exist", policy_fpath)
++        return None
++    except (ima.ImaValidationError, JSONDecodeError):
++        logger.error(
++            "Unable to validate the runtime policy '%s'; please make sure to provide a valid runtime policy",
++            policy_fpath,
++        )
++        return None
++    except Exception as exc:
++        logger.error("Error while attempting to sign the runtime policy '%s': %s", policy_fpath, exc)
++        return None
++
++    return signed_policy
++
++
++def sign_runtime_policy(args: argparse.Namespace) -> Optional[str]:
++    """Sign a runtime policy."""
++    if args.keyfile and args.keypath:
++        logger.error("Only one of --keyfile or --keypath may be specified")
++        return None
++
++    signer = _get_signer(
++        backend=args.backend,
++        in_ec_keyfile_path=args.keyfile,
++        out_keyfile_path=args.keypath,
++        out_certfile=args.cert_outfile,
++    )
++
++    if not signer:
++        logger.error("Unable to obtain a valid signer from the input data")
++        return None
++
++    signed_policy = _sign_policy(signer, args.policy)
++    if signed_policy is None:
++        logger.debug("_sign_policy() failed; policy: %s", args.policy)
++        return None
++
++    try:
++        with open(args.output_file, "wb") as f:
++            f.write(signed_policy.encode("UTF-8"))
++    except Exception as exc:
++        logger.error("Unable to write signed policy to destination file '%s': %s", args.output_file, exc)
++        return None
++
++    return signed_policy
+diff --git a/keylime/policy/utils.py b/keylime/policy/utils.py
+new file mode 100644
+index 0000000..168b125
+--- /dev/null
++++ b/keylime/policy/utils.py
+@@ -0,0 +1,121 @@
++"""
++Module to assist with creating runtime policies.
++"""
++
++import enum
++from typing import IO, Any, Dict, List, Optional
++
++
++def merge_lists(list1: List[Any], list2: List[Any]) -> List[Any]:
++    """Merge two lists, removing repeated entries."""
++    list1.extend(list2)
++    return sorted(list(set(list1)))
++
++
++def merge_maplists(map1: Dict[Any, List[Any]], map2: Dict[Any, List[Any]]) -> Dict[Any, List[Any]]:
++    """Merge two maps of lists, removing repeated entries in the lists."""
++    for key, value in map2.items():
++        if key not in map1:
++            map1[key] = value
++            continue
++        map1[key] = merge_lists(map1[key], map2[key])
++    return map1
++
++
++def read_bytes_from_open_file(infile: IO[bytes], offset: int, count: int) -> bytes:
++    """
++    Read a specified number of bytes from the input file, from a given offset.
++
++    :param infile: the (open) file to read the bytes from
++    :param offset: the offset to use with the provided file to read the bytes from
++    :param count: the number of bytes to read
++    :return: the requested bytes
++    """
++    infile.seek(offset)
++    return infile.read(count)
++
++
++def read_bytes_from_file(fpath: str, offset: int, count: int) -> bytes:
++    """
++    Read a specified number of bytes from the input file, from a given offset.
++ ++ :param fpath: the path for the file to read the bytes from ++ :param offset: the offset to use with the provided file to read the bytes from ++ :param count: the amount of bytes to read from ++ :return: the requested bytes ++ """ ++ with open(fpath, "rb") as infile: ++ return read_bytes_from_open_file(infile, offset, count) ++ ++ ++class Magic(bytes, enum.Enum): ++ """Magic bytes for identifying file types.""" ++ ++ CPIO_NEW_ASCII = b"070701" ++ CPIO_CRC = b"070702" ++ LZO = b"\x89\x4c\x5a\x4f\x00\x0d" ++ BZIP2 = b"BZh" ++ GZIP = b"\x1f\x8b" ++ ZSTD = b"\x28\xB5\x2F\xFD" ++ LZ4 = b"\x04\x22\x4d\x18" ++ XZ = b"\xFD\x37\x7A\x58\x5A\x00" ++ ++ ++class Compression(str, enum.Enum): ++ """Compression formats.""" ++ ++ BZIP2 = "bzip2" ++ GZIP = "gzip" ++ ZSTD = "zstd" ++ XZ = "xz" ++ LZO = "lzo" ++ LZ4 = "lz4" ++ ZCK = "zchunk" ++ CPIO = "cpio" ++ ++ @staticmethod ++ def detect(magic: bytes) -> Optional[str]: ++ """Detect compression format from given magic bytes.""" ++ # Magic bytes for identifying file types. ++ MAGIC_CPIO_NEW_ASCII: bytes = b"070701" ++ MAGIC_CPIO_CRC: bytes = b"070702" ++ MAGIC_LZO: bytes = b"\x89\x4c\x5a\x4f\x00\x0d" ++ MAGIC_BZIP2: bytes = b"BZh" ++ MAGIC_GZIP: bytes = b"\x1f\x8b" ++ MAGIC_ZSTD: bytes = b"\x28\xB5\x2F\xFD" ++ MAGIC_LZ4: bytes = b"\x04\x22\x4d\x18" ++ MAGIC_XZ: bytes = b"\xFD\x37\x7A\x58\x5A\x00" ++ MAGIC_ZCK_V1: bytes = b"\x00ZCK1" ++ MAGIC_ZCK_DET_V1: bytes = b"\x00ZHR1" ++ ++ formats = { ++ MAGIC_CPIO_NEW_ASCII: Compression.CPIO, ++ MAGIC_CPIO_CRC: Compression.CPIO, ++ MAGIC_LZO: Compression.LZO, ++ MAGIC_BZIP2: Compression.BZIP2, ++ MAGIC_GZIP: Compression.GZIP, ++ MAGIC_ZSTD: Compression.ZSTD, ++ MAGIC_LZ4: Compression.LZ4, ++ MAGIC_XZ: Compression.XZ, ++ MAGIC_ZCK_V1: Compression.ZCK, ++ MAGIC_ZCK_DET_V1: Compression.ZCK, ++ } ++ ++ for m, ctype in formats.items(): ++ if magic.startswith(m): ++ return ctype ++ ++ return None ++ ++ @staticmethod ++ def detect_from_open_file(infile: IO[bytes], offset: int = 0) -> Optional[str]: ++ """Detect compression format from given file and offset.""" ++ _MAGIC_LEN = 6 ++ magic = read_bytes_from_open_file(infile, offset, _MAGIC_LEN) ++ return Compression.detect(magic) ++ ++ @staticmethod ++ def detect_from_file(fpath: str, offset: int = 0) -> Optional[str]: ++ """Detect compression format from given file path and offset.""" ++ with open(fpath, "rb") as infile: ++ return Compression.detect_from_open_file(infile, offset) +diff --git a/setup.cfg b/setup.cfg +index b8b5bde..47d5075 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -55,4 +55,5 @@ console_scripts = + keylime_convert_runtime_policy = keylime.cmd.convert_runtime_policy:main + keylime_sign_runtime_policy = keylime.cmd.sign_runtime_policy:main + keylime_upgrade_config = keylime.cmd.convert_config:main +- keylime_create_policy = keylime.cmd.create_policy:main ++ keylime_create_policy = keylime.cmd.create_policy:main ++ keylime-policy = keylime.cmd.keylime_policy:main +diff --git a/test/data/create-mb-policy/binary_bios_measurements b/test/data/create-mb-policy/binary_bios_measurements +new file mode 100644 +index 0000000000000000000000000000000000000000..0d993ef9a3c2fe511dd393bb96c9aef732520303 +GIT binary patch +literal 49088 +zcmeFYWmsI!QI_81a}LrL4pN$m*DOaBq4!d!5xD0Riw}E +zuIh95?dLoDKKI96-OcK`)?1@St(rsUEC2uifC2sYcLxpVqO!e-5ryP43Ncp`TW280 +zFAN$04GsD~SO6{n1wirZ+7BVWF8&(-^)Jd_hkrA|0$o#fHgI%yv6rzkGO)IA_V}e3 +z0_ess!MQKx$_B_ITjZu=wV$PRVy_Eqw4ne{d$EcbgNVoMd}y4G(l4T*RthyT+?paS +zkYge;YGmMuL9Z9FvvZ~pcQJ4@rjRu;aB^`pF{V&v7FAM_{izA!f4#+nn*C0a5BuWk 
+z!W;{tD?Q0bg3KN$03l%nWR5Kq>4%;=dF=X+oZvkyQ>KC>?j!|Lq@#GS?Qub`mjiGD +zfB-Ro3h2xRQ2XPt06?;Rf2p79bo(|H^Ol(VG+GavNbgL_yrv5U@N$xQBH1@JZ&S>* +zv;p&*R<>m|Iy~aYJRz`m=qqZN!^?$jLF%=e +zpk{xOUvfEr${oPw`$O!Xo`ZY{4HbDvc4U6X>>fszp=3nGz6uH;qI^=U3Wv)zZ0+Ih +z@GezTbDPU*ynDEF$%NFYl^FN+FS&U?`1~M&zh|u4sBC9~h`44U +zt*C@NA4zfOSCK{(02XNcY2^CZ6nCdn)s8~O*_efm97y`( +zDhw+ApRQ6UNK=RkQ?LWs*nwmOXrRB`?7VD14o(hkO#(EIA4AUH!~dhY_(M_s`B6Z@ +z0bu;0FaSV*C}e1VC@4Vv8y!QV=}9*w(^S2AD(qkfVzP4FCxp`Xs4BPD$L}>IXWqV3 +zBJ;79?$<{i>uP;ff>Uq0h28Dta`L6)g#AII?(v&p^%T5UJr +zmSEBFmoM)0OwueH0Xlf0UNiR*bHqeiu?fk?l4j@HI*4 +zI7N;fm!*1hj}OXoJ{}qd3ILU>0F?XrrIMn<;=|(JyDdo*$T&neUlL^OatcH}Ku?eW +zl7qf;QfgR2AinS41E@a$99TFY3>4J;pPxBs`hlJ~6u{o3q9TT-Om#Lke`6^&JM7>! +zyL8=t--qZHoM=%U0Y|wf@{bD4D(PN~Tas)8_Yn=4Ux`FseVN_=z?XnZXRstMe|Sgv +zsbJJS90P$)@5AT1xp>EvL0gm14TR`|?ICO-$e0VgoF#x)m=-Gx06HZN_k_LxGW1<=~8sR{H+X2prm +z85L{m{T00ib*J=cH26JCILM#G_)-sbrm)!JM#sAT)rE`+AfM!)Vu1oa_EEMo(z7cJ +zzBm*>uV^c-t8S4lCvhV&@|geTlkZ4g=KDd^7LSz?cw}O<2|&&Y+=cx19Dt+Wfkzq? +z=^>rS!f&h%bgu6&_3^=eBlkuJ5hEp4R1D-qia<^z_75kLs3htiu4%#o_ALUf{^y(s +z1(58Q6CwDs6Ok7CdoRMp!TDz|@?-daR+)e2MmF-FP;ETrREU%&+L;qOuvB}ALH4xk +z;Z<{$y{N~hGN?=z{IAQV*O7I87;_YX3H^M8UDm#oFECY8%H?7?^?6c+Jo?hwW^sCtkdPVrb9cNlZ<*oI6U8WKm>SOG5v&qrkoJ)oKHGxM!)6vPB+P +z_wV%A^az&)B)o>2LzX*mu7anO595il9MX!r;@9pTfJge=3q7Xirf9uoa~~Y>OaLM2 +z!!$dAVwt>GKz-R)%9?!p@$XX&WlMc|D1&3$fjdK4Uo|i1FU=RKmgyBoWAcK(Wq(6L +zo$JZ|93j(s5cV|E)%cDM&p-hoqW$>}vO%-S-?WV8Vw*!~zJO;B8!bu(mYAuERX@Hv4AuH1{1!%5D&x|TDT*kgR5=XM +z-~RL>{zZd%vmT4!dSjq;RAXN7+d|Kh8xtjma%fY@nV^=vL^`;rC)zDOLO4?*^iZL3 +z?{iR^EvGpv_^&Nf?^#&i!+R1(aNiKWL@Ymt_GeiOc>MB3Gquu{-Jx+lGS9J278R0K +zyNQQSYa%JFU`{7068x7OG(fS{_CC#DA$_|G%3#k_Y> +z*wjV@;q|^qJL)KUdslN +zmw%p`S42B_c0dUKD}%U!0|466uG}Y+c_FITYz4{2ju-}cWM=KL& +zduszD6AC3zP4qL3VEd6bumd^S*m*$?fSvo-kn6_~=;!+%(U}2A_e*EWzt{O+K8g*L +zMf|U`i=NH8@b)sbN0pDdx4&Jhc|L>IK|uQ`weHNYQHil@v$N~dw5n;u?uA7lkG_iX +z?lE3`bl=%>U{zW=)3>vR*jc2nD;=0yG}gLdT|3bZ0C#vb_WxR!D0O}0Ehpc28q?&_4$3X{QJc+2L|=x +zYxF4(fwgoKz7cwctQ$v-oSpKxgL5t%-#+NwHNBAx*Up+JuipH-#8&5V}_ +z7O(MsxdneHdC-%R24cb^F~UQ^LizLHLBWFZRluKe4#OMK(QLuIVKeM0tp(N6mm~Gz +zdcX(&^#WXISX44(P-^Z1Fa(GKgn=la?}!uw_J^3@pSdh44k+#TBm4MsvLa@xB>L4` +z9jkp-E|Xdnk?#qWWg$@e$IHoKRe{Pt#ccU(*H{QV+%zR<|K@W2#F!T&Jfe?C#f&ZEzjs$fs +zd-7;g$+U1kr_RMQ22jkL=c>8w(a^+WsrVAp`24H!F|^tH?}fzfZOvn$>c*(EY6A~) +z_26bd^-WleNl3Fd?C4AZ&pOT;o81v|8Sm0RalQ!L^&cu1rm=rxwRIPRF_%QX{w;yL +zngKOR`ksy+^X2zBulz~ZCb$8QJ&c&RH(hs2cHY()FK0T%K65H2@nWp&zL+P3OFBg; +z5UI&_yG49(?C@eFE0{VjWVql~-FwEu3@_H(@7M=t9W5f9KO}Hxb*xQvy^J|9KH8E| +z)9cxlQnB9vGfB&`(h>mcB2A!Ezw#?bU2f>4$pd7fZ>3&&o&F(mkE>YGdT~SSZlhRQ +zdF#JzbT6-s(gq$Rh#b&I#5@<~^CLE!zZBIl8AfZp1;ojpy70X#dU&HCEab7wERSqS +z>uRgCmt^s>0^@rK+=N(HnKG^)`85!>n1|coqm%_P=e+seEcCc_0_ya*HI+wM)nO}4 +z&U5{u9)vDR|D)UEO%Hbc_W9C$DqQKI&UxvbS5MWrsyF0aYAf|&sFcfNJxFKMg~{=P +zRqU(3pmAY%RdPBuC+qVadhFy>lj!8&H-_|X5uev3)7U-ET8+I5_eklsKySz;p@2)i +z7{Vk3nC4CzoA+pInxMlFOuDF$fh&%*b=Ti(<)OTj?3tGzMD# +z$)W%MEK(_dW*VR(mGIB`<&U`XcV#Lk4+queXhP)AV^$u4H?mkdGhp!`ZGyIWclQRHAj|@B4GQ#PL23H#dkdALrC);Z(d63`aPWh&5mGVB(5DW5EfZHO!~lVplPRwu#a^@gJ9{ +zq|_T43Y7}hJUVAa>3vAYAQ=ObJ+R38T2^@a^)O>o?|7u`B_|UO`C?z^r;Cd^qoMo1 +z%P<2X$W=O3VU14pQtaV{jxa4AX-NJh!#wbdJs%~&7e+Q$uvPK4AB}@Eumb+u46|fS +zZtSXy_w4Hqe>_)V;hRKm!k*}q_u{v7?ILs$RFdhBsYP=w;M5~-#J~IXhVavzw|qi; +zSf4f8ZbLXng)0O_=%h#P@odETcHpu|T8l5&w});+1n<#Kw$tu#T0kcqV2ieSV&hRj +zxG*ac^QlR3#KZRaj%e&gC^v(PbORv3Jw}V#`cVyT&BF@$f}CR4spnYA0HI +zS{W-L_1SL^E!i@a(Kgi;MD&*$!afeN0hUiNLi;A)oR}RANOKc>gzdZNfeWj7-HuAD +zx^(k#ri3#Mo!N+JMWL)$TXFw=>73Ln*+s>#G_uhn&$S(&AdVk}!>QoV 
+zDMSq%?W`?q4cIV1FZ_{LqQd{OUN#aS(T@u-s5rmPSyk-0_1CI(2$My_Yn9WdfR_zvaNgpjrNrh@lpc}6$FD-spMiNIJEqzMoS2{he(2SUhCV; +z3Yk;kR138mY%1?xn(P$=iHB+VJ(a)8Mv}L27q@3QcNgbw>h+0Q@TLSFp~!^|={?lt +zdmxtSomFNOQmIdk>vAz<$FkBh&NrBreNfq&`l6d%Jo57-8T#BTwvoe~0(ObCveK~yA?(`+pP63Y-&jF=cR$N~^ZG9)EA7{>P)e3hKCE{| +zNbJl$%xrEL{qIc{PFZV?#bP=8N*wp;dNPBHpfKT3x1zoo2_n<|NpR@{wQp_+W}hB) +zWLZvUDu?tShL~-ZX%OxRRL2bvKHc*3@uBFMiB3o(GBBnzJ3V<2&`z=?M6%^^G%{b=H;K$4&xVymT@jfdPW43<#ZvoV7<=*;k>H!|f +zt5$dOR>W|~eX4$6Gx1Azmom8@Jv{!Lmo8Nvr?oA`vhfW6_E6jqlcKAsDv)-$8+fhA +zIBPYZ&W>WahEPHm0oAKqNp6@RU@m=%WccdrdMtZ4Y=?~DX+=Bda9d?n1JO8~8UDq< +z@QKv3d!``Gj74yh75ZHiJ_Ma1i$ZuzP*HgCM^T74-SacB!-8Au|II}q4?7z>J15s4 +zMImU&@nZ<|^ZTod`5!7|8G-aa{SOY+-xq~{a!3EZD$GZ;mgyHmvXn;6>j?3IcR>|} +zQZZh}_d(c;dxcW%)Q*bWCnaW(-HXqhG=0XHeJv}QF6{L@I?-DFWK+s-kj@uyp_ +z81oe$>zrLTe{zTZbia#Cob(}}@d3_OeaB0omtV>0Xa}zjUg(l+cqe#w96qb|;9dV% +zZ5Scj{@8xS{2}fjM;;>22XzbGX|KIbSY^a-a+M4rMu-M>MtBvmXLqIbI=~jD6(N=?w()x`p@E}%vZc;hfREx| +ztW)Oy(KkRC{&g+sF-+45kq+^ZLd<22YM=-m2}N6Vxgu_FM`>~*B}vRDj2pMiM5ox$ +zp|&=qCO8GOCAG|KgFUo$8=3_n3<9Bnv0SMx*@`r5Qbd*4ZT+oHi~^F3H{9?95w9tPI%?8rTxdrJVRFxPCckm3cs422O=C|Yw6{1fEt0+V{jf)P%8H9` +z#rnlGvXXkeKX86;yOYDe?1eNT^O1QX!sz&+<6(Kr=is0eCQ2C&#zPFLL-UZjI8}C+ +zx0e2v{JAt(d3$AZu;s(^LK@kDlZi5hu~bCPq>%#R=+6q)D$_Z+s;=sl3z__O^WYo? +zG8&sViJzISiL0PTzwoKa!cVCVo-h}Tp{`@qeXS4_)feTjC;ax51#LNX%GzbE-lC2& +z_zGe2d_{KhV;Pg1G8qj!PrtR2V`6aS5to7GgR^QrKZO?s?96vLVV5Gow1lc}{f9Ku +zFMug3xCP=#G^_)U)`Sy?w4tA_C#?o)@e4j}5yy8_U7?|>T6p;l2A(1JJ6X*0ZUEZs +zgRoU}E?Pqv2fTuV3EJ#z4IcfA#|e$g?)DeWGQlAU=gZ~LV*4gt^|{^__$~->>1PG4 +z8UgCy@dM#^{Mh;Xrs5nZezgA)KM-7B{EQ!n5b-1QKjMqofxutBnEqeB_*Y>EYMxk_ +zS{NBPn@|Y5IGfu!f;MXWSds+(>Ls!PK`G&5PEd;YGi>}Ca{U+r{R00Zsn;Qoc<^PL2XhBgr_o)uF +zrR=T+ISrjQ_x-IX9&*C+gWB<>F6@;qE!urrcVL7T1CkY0n6f@TQ-?XJa`Ud_NrpjW +zkU5gmGo4pCDvX~!zT=H^=~$C6YLB(Pp^mV#t$Vb2HWCl=8|^G4(MB)gBsTxQCref3hU +zQ2w6@2_Md148PvY(Ka-n6iA5Cnj1?a>;b+2ea)bLqC8L*D4i{tEgmfPkNSzA=__jD +z2%59MI*CATW_FIBokYz)I)|W6q8v~LDD}q})JYWl-|Hm)H?sdW4B*~)IVk3PblFxP +z@Cvr_D`D6&KE=dwx+^rFkSO4k4Nro5w9ocD#=v9t{xOS~)oI5%UE@d6*WsU#=7{=b +zkhx7QBX^1CU}Fji3G&jLR)pBOU3)q;``XuFJGSwsOS{bv3`;+&SStY43wFHj65h#0 +zpYD1EJE?UkQ95yIp5%tV7=ZnVfV6iJu{w6@9bJBSjuMV|{dADj(MP#)=e^PLssNiw +zrQiB-=_r0)mOx=udY8yVOrV|$>3!ICGpTrtU8R$paF$S(-uhdHYOGkNa7wzvu`f=% +zdV(d|`nyXon1$0wTc_tqLG?b0p{C*wqH-#+1^gJy;oMqcWei`|cOvvRdxmhrpi3Ki +zrfv=0j7jkfGJ}>~NbUp{8!rfN9j~TKPs}^8 +z&-`_CP0UGaVrtB@1pk5mbM*<`48Iz)3Tv+8*7c_kSGaPJUEt3NU!=~sGflz~?@WY3 +zWfWr`!7Vj=GZ{6&r&Ck*T)JDYs1v|*Gsa@WjN~mGGdEu*WC-mMiXDxGYg-AHz)af{ +zG9X(n>xoZ(FhXWOu9)K{XRy~?7tug;kXCE<4)8mq}`sPM-LT?58kSxNPV9& +z+?$xEV>p{_gh-Xe&DV~j2zJ~p&yYmJ`>#DjqOR`|U+AjXM)8%+=9ItbD~x6x$R6VF1_B@f +zbnnOUqlf4Y(M42%{dJcUbcQTiU@1UB2k}?Xfr5gFHGg*C;v#=+T|q=ek#w>)p|CRr +ztuOs+1q&77nVpN7wSm(gEeK>(bWsCaJ6llju>Q5ng%imB({-aV{`Ce(j+3*ABL&D_ +zA%o5DtuLVEDmFF_Uf^R;ocXzW#rAs$^!Zbm`ERxS-4=n=>Gysd6jT&t6C=<@L5d%H +z2ig9pO;D-8H2o7qp=@Dh3tGkl`OW|Jiq)WAy5Vb-N@M@h_k$~jtI|_hWZ@qSh!}Mq +zpEis&r8SB+iYB|2$ort$KS#OovFWqf_SJ=&YA_`W`GRIMA)H*8Ux8{Q|J|!gX(XGi +zs3lA%mF&5ls#BAz#qM0^4drYf!Z-OgsxNDWTFjpkIUgj?(n(Xw47HGY6cG4mUYc!a +zyNd~6+Efx~Ji_W#TlJCiZ$4*|8{m<>55fx+U*6;M@@cKuf`WU8YS+CLl{@ynV0Dzg +zPMs;U3x-6Wiih`J7`l@_&qVQ_gR@u!?=sQ-hWYGcc#xNW#x)O-?C>uwk+RrSlrDyUtB+N70;`Mqu;*=LVbC;cwx| +z=^K!xKJ?!DmC$T=yjr=jOv?6%H`|9pr@^LK(>yOGI>J7Iiwe;_($pS~qINPN#cTRP +zWi=~lHkjKtXDFv#cL@_9*;>v;fz;u*(b2YqRir5$W_)5e +zJe1*QwSopBR6PvZa=oOvE6YmNJEswhi;sDAu0!9xFwtX_e{>J;j)}b!sl&e7Cssj| +zo_!yEAdZx~6~Pl1XW{D2$rlG_CRa(b*Q%@cx^{L*=zeX5jT$wGrW{ojYIj{+cS)-i +zmt(c>jIe&;`{$b4w`|jz+B&ZU+mYASHfuFr@zqA=64z1WHVFpWMN$H~dBKxdrr&M{ 
+z6Lg03C(t(_V_5s;Pv8LWo<|*kJyRgXAM4vNIC#%Y3@r?7DgJCyqQXDTog4TV)MtFm +z&dvq=)s6Vq5crUd3P|xQ1AOo|5`Gw^iQ~Vu$?3^}7>ti5Ijjhp`iVQ=`-8nGf?QubzgQEV;u|FQ)UCE$ZcFg<%EV7P$PPw +zHK;tnPu$b>(2bG18!i6ge{-JXASWr=G`&?>;xoq}-k=Pm^;$GdL9(0lKJyU}oXN2a +z)qrMY6&UEF`yz6;pd|3H>qaOA>)f?HvgP;Y5?&w=Xj8&}nHbcMwO?4wpB-dU8d!9Y +zwf@T(q5iZ!jNl9Qz5cWnHR3#d8Tzf`S615bpGK^+2%lO=q`7=CJ_(RQDtbh@zx6`r +ztKfpm@Q6>l?Am6SfH#&%h*0;3We1N%fb~}g!~T@6VOkW{tdZTTi0TuR%#-8p!K_a- +z@iBR$El}&xr*o=>wJySJw{ki#gxOuCyeY3P-e60hle5mQg_2*0OOMj=k9v`F}pJ6@EIiTzjX1%giCw&Gj6V8Pao27Mb_ +zK=xo#o-|Ks!=_p&vQS@Em^{uT3mKe0V-3i?dUHJ+hv7)~@Y5QM80VB$5W+(%zQk9h +zw!~we;C%1G{M+D_0cNC%Vg@@O7N-z2LF4aCa)<|J9+rU^7;WG@H%H*4EIA8`JD#C& +zV2?(bS}x&%XwoBni3#=Ix9RP!;5^Z8V579PNj^43(R&BKBqbmvn&Ip;zeMUAD|$^v +zWO#6XW5|3<__<-);ZuvQ3tOuL!%JX|mtE_CdqdP^Bn;;|INx2A_8cwGI9sh--6mk{ +z?6e%Q0h8BbFO1!p`EXAe)fAjpzfK@JNX*d?t-02t!OUKIx+bGOsEgbP8$$15UXttp +z&MU`u=Uih^LKhU1Ez*>`ea+IbLmEZeuU1H+(4;k;C#&WH*||x4xuPP0#tMMPt{XM +z%d|AKOZ5VhQ;CiRr!wF?g6=jvT$oBkwtn^99_B}WivGKFf>CIzgw2A0Ml7Yr5d218 +zp}MhM)j^*J%pF}7J`_2%*COB#} +z%g!;*T}dO|&Re)Wn6Y>E%@k>?I0X#rkFaPE59uGdPb*7|-j< +ze8OEKFae)uwB(@4J6?Gj_gq^a-P&rdEi4k8M?-r*WJkZ7ZO1|M%5g1J8XL){Mk3`2 +z18(-*Q+Z5W3js?1v@6{n-@TOs +z@;RUSX8WeBO!zX!+eP;~k5QiB2b;eE=LzJVplFHq6(F=s3(3QAk_n9S8k&tIT-2en +zP#1lSLj~svIXLO>;SBe(l3@g~Bo_DAncuTW+NfaGjn57kPfbGT&zL&?NQ5Qsj$5x? +zx*Lra(K_?1w5Q$uvujqX7$}QWUU2!0?M&|)j~2*j_m%3e8FVHj8#NizmEP!>8p8X* +zR;pfr^Gr?)95l@~!|P!)d42p^@v0gJao2|mdkQ@(h)VEBufTa0`&hH!g6~d>(u(#S&FEu3#XeH6)PZkpefyziW^Xku375&t8WvJJyp&urd_kfbX|7CrlRoT +z!R2!!?lqs4VF>5pzIF3#XRXjRgW)vO?M~IT;PTHzEuI4B9}7=Wa!26>%xNP-Z&2#< +z%38}Io9Wk|BQ(JnwgzcIgY!?v(zBKX4>MGhDlc`{{JI(rZJeJtc{r#>+v8Y{2|9uE +zqRAhJ0y2G!5WMP8$ +zHEpauaZ{Ri6Em$cXfA>CilJ~9l}Bekp${?hgc=h7peG-Bq|e*7vo~jBM#*9-E!O?u7UIZpZLaV&k;q( +zGLpL|Lq^jzghvH7G11lS7(c$?QPVR6=fgCUgU$(tNWEv|JKu_&U%L0zJ4BWOyEe_t0C3|&PSYb)@A}mzT-jX +zjK(0E3`6bm31stgy;sm%dilZVj1ruWip~ie|AtJ0P~v1VOt-JNH_d;nV6z!|T{er} +z?khn8&ZnkK8}-D#$8U3P#%z*}iv{3gIoT~1h~}Caw_>nqih%Q(QWgYk2IefPx0`oo +z`a7Y!du=Ylkkp +z3i`q)J1l^{QtsCer$5QBMgh*3@07DC(qcR9B^I-oVe#&<6$P0ApKws6T<{?`iOIR#qk7sRo+aoVi&6t^V8@v@+BD|=o)aoPM&Oo+lai(94?9vi3f(L +zvk7&W0G@%DWXQbY(;Av8INx~az7UWj)x$w)6d%fkI~vc#Q9=0avwf(HKi4U(8aFuK +z1IfX+&beDE=J?wxJ2``veNevI$x +zy~eE9W@*%|6#n-f!OO0PicecjG}Pv}uISH%#=!ac;hkizGc!hRZ~3Iren|@h9RdWY +zEo(UTLwRAdVyhAgZ^+tsH9E?>1N9c^8p^-e;wsGn#2X+8ddKY2Xt +zbC=INBMFOTn&iU;;;j3eSPl!kn}OgwA9RSaAZqu-E>R)%@L`c~@w_9R$7D~cFY{z~ +z0e52&I3J(3e|mPMxWXaSeY_SUF&65mWO@5tsr%hY?s03Dz9cwrtfsYq?ViShTlV6d +zlU%97>H_uh8Ngk`W$RFm*?3M2oEPgKD0e{~Z;`m+QF>*J{n=Qyd_IYwHt +z|F^~8|Fl2!o4=OHWbyQ;F|m^NcWM~Us}wxm+M!s^rpJG{`?fh4+r+hUo +z*$fdMrGw4$XG~f@gMQ=^b#pHQD?dC@KE{4yk5wobtG}6r1g?MB$>jN3P&el&Tn19& +z>ga6;oI_P$ib|m8vqe>jmBnjtesGry7XH{)HSN9@25n{MiwYN<3|CQ#zSiq;*ktA? +zhtVfBo(aOLZY*gfHv_}ymOVr~%_20~QzPh3sAV}qHbtm< +zR-pBkyk0HiQaZZ&uxC^sq8|LLQSk!t8T@Lb(e-i=1~7XR+wD06Wt;{^rB;{51~x>! +z|EVGq&PG7%aea_7P^vZ5-@6rXf!fsnHF>mEoobZL8vOH`oOEOp2SQI9HaW7SeAm@Y +zC9@{9jhBDP$YQneWdN`X&TBo4jumhS5)jBQ{~VPj-Rl|a@JQ`3!L{Bnb8ybl0L1)! 
+zV<*xpr(xF(19_O5P81p?)wFys{G>@ +zX?t;r20Z2J5-djz;JoeK9f}r|c*M&D8O-Uz(wvbziqOy%xU6vaVfBY|&6VJM2B2JK +z##6XIU{T^kq{FqV(~cA8bZSXq(r<{bt}F<`9_ME^KA*2KI~U}0ZyVlX6Rgpr;A&@T +zb0_8!iyYLQ<$=pTcB_z2UyKdk%*xg7tdZkPk%28wHeqMtuas1iVFqeMMpCY5*otZ+2J+X^nfN2w%G-)Tv6 +zjAo=j*2)uU0Uj7mWU`|qwA!;XR5Sw7pF9c{9B4Rwnox#&jS?YcA?X=uvfIj_y4)gy +z?#39o7z-}{<>0W2(HI64c^#r|W3$_2=~O_^QzUCiXSqTHe+$l&)GiJk4`7eKtBsGwakyhlip5w%ptM_1p)+H) +z_IAPt=LN@Thii79Jsw%7Zldiu)LYcnc*0})DMdzC)br{2TPtwheE&In|9cYC0l~1( +z@!R{@33zGkVzpiU%&+<-({i#!!TA+wHceH02MK|Q7^fQMuf(!ncUVd&iEf$Suw|X; +zB2Iww-?~tq)cWm)f>vqs>3RI-r8SAVDcOJ9tu3a3=+AqX`WE30$jnsi8im0w +z$W@sas(L8qec-Kh0Me;4T17(0Hz&tqqueGl`!3yDIoi7pZO=p;*{fHtW6ykDBvE$? +z&M%Jef9*hDtE_?znRW#I1mLX8PAymC{YzS3GF-XR<{oe!TJ7Eeg8_dntj0OM!xi@3 +zXpsT^eBrJ6RDFdJFD#o5IR7N}@N%8bCnr_rRXhq4k?vQS&94JMskOwi-pC#?eu()d +zsSNNyw@yD+L@UY9sQKWX!5B0*XAH}^>(ZPXMB)swe&knJJ?KKTLtgUk(-++>3(}75 +zeFnDf2;A60T(cJi4ff#rL@OUG;$0|>tru+3v$cwB;PX#ooJ)?p+jLaw&eKRj0q0Yh +zxzgU{e-E27Z<4`klF+mw*$_3X+^c;?GD7IZYy1+N&&vA%{d7if=w1ZS>>VYlOq0Rr +zt?O8aWP9&RJ8OdpF@L5VJ0?zV2hUhC$*FH%>^lle5u7MYj1_cQausw_dg6e~*H(vn +zU>UT<6DDR)Fmt(MPS@lmJe*!Iy+}TSrKjWx5l?;PxMYmXTWE_g%A+O2)!-QEISFaf +zpx($U=Kvg4Bq7!pV9urJ6T(B^-+zM9ML~MUgF247FlIF#ZH!8Z!1lT%1Y94ud2c)i +zP89ehR2uo9k>wwAAU!xY|2)?+~hKkTK&s`7vVZnhhB?M +zznSYs25|}lF1UQe7OJqVEn-=U`=@w@ik6k$$>~hi2P-A^eM_AWvZC_Ac_g|+p@{D^ +z8>D9Q4v!1>Ta}WS>BNt0xkB*y@X>|q%OLp6@FI+$2a#w_7wPZ3Lw4;ntR-<2u3twq +z`xdaa#7cwn$S&I`r1sl+(_A$04!YgRfzr=Ms7~pgd%Y6PV00^!^CKGk2FLmsd$7C(;}XM +zMN%;hTpwIFU@a@wo#fLk=z1Eme%Yx>L6ql5D6hm8)DaUOC=&&7+ +z?#CG%4AqRrbuW-&(y8pV7Y{C<7B#RsBlyS&yVA0FZKrYR;>#;UG=Hs7-g^R5TDg01 +zaGri7Pem>lIExqCA84VC`^E|uKvnuuD{Od<4ls!*1hM|hNR_gkV+H!5h-&y2%gf5O +z3+)r>3yIrv?v76K_dO}e;PRQrqoe%nsV6tTiV+Ez&+Y;j-Y>|sHu^mH+&Kb;UrGed +zvk@`$YAJN218YX&n-bnWz!TgrRXGS_?GbNjOTr|A=nu1(u_yVb_t=b=>V6*1XlEkZ +z)5f~sqrx12W2@hvG6OMxIMH$$)j3F_8D9`r08xvUW}8T+XwjEfawXNP`e-tP!1du~ +znI@&ET84?E!)7b8Lq;C|0&NhcypcF~z*5=V?Vo6E@H?$rP%uRdZP)dm!%WmsgeuA|L|>o+u@Aj +z>Wq8+px+|UEHo8gc=CK4Nm1r4v_-tUlLq+b736(%*!WiS!&_mq7GuvqegRR+Rh`yn +zi<<*H!amG&5c#SQa-WSxLuEqIDhgCV8`P6~QlJ_}z&8z65z_a1r_Hay`3G&tcR{Lw2zr(vE*8D<1A45YXI{0tdlQ?pL{Wu*(sAHc>kXTxb7+dPW45_RI?E +zphmHkCc_P!7YnhHoi~`#dXnIa;0*1lQ>!Ua{f=H339(+7H9TYsBECO2Yo|e->&&(a +zbne3$ty(e;_fdDUYge1lv2)QWF@wlA#P5lXs)XuqVD~(5aM(5!KWeeC5@6_*pW+F0 +zUH7tAf$Q^6`G30KLv17W$<>GwRh#Q*1>415XqbvIjk5CrlBfUNS=w}B61YCIg_dznT9-cL+8_Os6q-{w3e?-9dE?`wrv&($GI8*Y($1ubo^v;?hTzcMy!cTTkr +z^=4wvXI^-u45HpQ;3Pw^@NlO>FCYtAoD#tiJ??Dh8qtbEo;swmisq2}TB^Uex+HU*F_Y$gZwLgyP_|kdlooii^j~fy=kqo@7^B>K^IK +ztFgy#j&;%7THYlWRsbfjXts(UdWC}X_HV_IkIQ&fEy|GZsuntF*yXm6cX_+iL!mfl +z!k86Hz1u-ow36UB%f3+PjxawX&x7dev5lRKwdvbOUP8aqM1OyVB8X3$t%T0hZnTvZss;o +z)DZQZd+pv($-n}VAjJxSFH?E#VJ3AhuZ+})ZOG<{av~cr$L*ITvC@XM&DimKnt$_KV4t&FKw3iKy5w#-6ZV>S3`C`hH(~=R3ln1rtHKq +zlAvZsU{9VmKaNq4HK9M~r8t1d|D)n(Q7s;5nBcmuFFu$4$`Ev=_G@JbKA8kJxjkB|FOaviC(3B2@v7w3Rd#rjm9NNcXkgqD +zoKLWKzw|OlN{W24^(IY6-PAicl<9n6@I#ncUQ&zaDL*)$w5E`tFzx%X+weY`UNsCm +zoSbA`J!lFotm|b?(F792d`k|f+kY0fwHe!|dUWc`m6%Z`#*c3Jn6 +z~6q!n6(p+vuJ-@C^(bN078c_Q|%%9}K-e#;>@`Dx~OSq46c+KL> +zJ$<8+jyFUwI;)hNtZU%%Q|Tx+b|Zy9mP^l +zXG|?Nut|ixQ9WcQdqCZ^p +zaC&I=qeug9dQx^`wjJSlRY`uMQ{{(~?0rh7+6ZfK`DJ|TVIAUf$zQap+c}gaB^s<6 +zVO*CM)vNC2%fb%@h8$dii;=U@|&K* +zr)s>8ptwRS?PR*SA*V6-cxvmG3G1P2<3**6;R4RLKz)SaXfb-@P2y`)`GRyXTp9nE +z^#C*GItpdYrd$%Do@#Zu#Jp$6(5_3(G;NuIk(XNM$zxqQ;KC9l8{KlH0Tzdg^X#?gjOE*k3v*XO;c&i7HeVg|3M+8aM7aV@v?l9Raa +zR$i`GPc0%Fh#>TN|J{^teOtDD-rV+~JsFIh49vC9s98a;>0ZJZQqMC8|JL<9S)((- +z8~3zc&#~yNrULXULC2ToEoiZiwcAAqEW^R|=~mD>*~QE!EnOq%w~ClV;>|XhJdty0 
+z%!SuoCFx6t$bWj6vHDTRbt6x%V{ea1Lxm{I0;~(dmzY{S5<5}OLTA9`|I>bBUxsmh +zBkvmVTQdsA#~X@si_Gc;4A1c}${ +z(&X*O_HrT+`No22cpYsJqFHb!XWd%cr|%Dy{P--KsM4Ak-sa0v5TAj||H!3(i^^Un +z6EJgp9~=WSg&1#9dOVPXcv10A)avRvME_?Ay0|tVr0DBRpAsG7oBEQkNdu=fN&Rar +z-HBgtEZok)j7gHt*#F +zpZB9%p2NLPq+)_KP;2bzZ0Z{C(|f)SF8`nMt2Q?d8I-tPt7+Szfthq-CG2)~KHucf +zmD9ad=E=Ds{PSu`>}f=6Z+zok@RBSP~;Vftx*dpo#38$otBJ|3ye +zgYr42?8&?AQw#6xUTzrR_PrIn;Tt_00Oz;wBZImSn`O1Ee4n4qA`*ztc{G1?GvAP$ +z5Wi0+H-+eL><8qVN*W-&leg?!!(0Ni(C5S~L*wC>^MzB>MSHVA#IOCi<%8or)l3GD +zeQxqp!wNpK)L6Dvl!jXb6}xg|M>BAJj%v>p`Os9P%%gD1x?&Fsd$UAP^B1cHQ8eMN +zywMN^!1?31He7Wo7Q!L#(1eD}=`~JIw3(K-8*h!jmohY+bxDEqC*(5*y<3TxLQe<1 +zNOR_khF9!0Ua>bXy`Hsllhlh!0OwDmZRQ=K6Z!OWBck={$r!)l +zs|ASlodGy~e);UjZnh%FMt!ig8A*XKRA1wmU%(etw$!!X*@4R^$47`917w`De9CmN +z->@#;uy6fF{5o#h-?!H?*Aa#koaf2&<&mv0gdX!iWeaC)EapjO!#2Q2js~hmyxMl} +zF97F-A|eFNN-9N_h$OjapxInXunnrdP{PYv9gC0QQYtrr^D2l?pE-3py9H0#YP3!} +zHX^1U)VVd4d@UM38t%?gc@6$skzMNl{BMrWZGvRc=>o_8#)(evSIpH-#0$ +zpdr6*2I0$uW`~yPo!n!5$7PiGto*JilmYzz0?X%T%xLwao~A`e*SDwRDa>r57_ +zKSB?C0?wz-m1WO*D{){(wYl1}O5^*6jQ8xCNXl9lmK3VrHGc%>3lIx^6x+TlGfMkj +zC2lV*5noge*M-00-H+_%8#GM%Po;@EueV!*^s{5)4v +znZ62K{&1?a5L`^Xb-U_~DVOTd;mpz4BWL3YF(zt*W!2RKGL+}S7@5AzNOkQm|;-;uFT`r$=ekbl@zA|b$)@n7~G=}gU +zXPa@KZF{=DTuDi4BT=Rs;&{}{=1x|xN-at2VL`X?fzDP%eL72I=Dit4Wlbk}fU!h>hx>G=`Z7hc0`xIvo0diMa4%)-gff~a# +zIC&BNS8t-?$v?cV&j6R7s<9^rw=PkaMH!Enr)snv)Iudgv|zJ~p49P;{ASb-oENVl +zB5^k}mS)*R;$zl(Pq{+w)@k6$gy_erR=OO7o(0Zdwen<~ByOBNbgY%nM`X=HN6Yg) +zR7~kJ$8Grk+ItVEsFvklc$33`AQB}wfMkZ8Gm?=cSwUccAxO?y1OW*O5=F8E6-k1U +zBu4=Wk_1$u2r42-lqBCC^c)W}p7X!=y?ej)-db<7_UxJ2wX1(!Roz`x-Av<{U~MR) +z(!UE{-#>8(ZE9(@$Zs-?G-Eh0c=)kj%3Rf6QKR^<1cFMh@%mi?;RWst9L^Z^XSY*Q +z(g`u{RFPvbXjm7Fr@kOS>nly7zCJoDoFh#=QGqoW{B$q>+a{jl*PWVI6E&<%Ou}eu +zHGp7^WHO^u#KyUI1ery0(bl%kSiB}MJUv4^Q0|@S92;h5 +zckg4Gez|(VOS^1#x&F>+RQ#$N#+tJopAkeX3!ThXrn49gq43~#V1$B%@C&oS(;ig% +zGQV;;YYW=u)F2g=GKP3{rkgmgnEGF1Rp)QHl7C-B8=E^!=5y~C8>we1f1T~nDCQW- +z>%6tqG2wHGJqjPEm}%?ZkEnRA +zoCsCcuU3iI#ksjHmgl>`p(^HC_2$)8;+ukO75l~3sPqh_uejCgD|D3=X5D44l=SjH +zP~RA0zbttwlBivSX$mctV_XZ0T3ZbI5;!90EFm!ku23q?u2`AFONuX`ULQTPjLM&d +zaOPPAv6FI?V*D$<(HOcM{H^3}?m2vQ4*iJjGpT56qs6l`?+;wMaXm$}(%iC+yR%TX +zpe%T?#QSL($rETddTvFMuWg}_`9<{rqEZ~woUFWybWHKunrYLEw15LESzI(cvLC+< +zGQ7>(r@l*y9X6%W+R0k{6fk*a}AX~UjEZ*Er+igW8CcbUJjluWFLJn>_EPM^{U>ABD2LITK}K8$t-i% +z^}!nkjkMgUv)ER)?2Da_SZj5Q=cy|Q6Q7~Q#-;Q<(M5shCmA~)(d032-f5uLQM~MJ +zZ(0#g)LZ%03T>{msVARYX$_)ZdSB3;5es0d8lT`rGgvn&D-blQ1Q2MTorCB +zTxoB<;CiLVXDE$z0hT5<#o~0hgskFqp!1b8pF1m6v@gQS)3!sCVu)dS~7stn*x65U53q<=#9!`yzVS +zY$05kfK{p8F4KaoFnD>rqb!Uo+5IbII|w9Pg9v(FZ3yH(jO?rmkhmsY@=Z1PUNP>5jtAId{6%V*-+&^=8&zc367}rr>v`M)hh~R +z{D$rOea46|NuG#`&jp9tkKOKOGjfbP_maHkB819cWxHTKlq{mK +z(`U=^J7?HvPtpsQmZcA~sm}4sqc(L6Z6Eick+4P&K`!MjlcewNzd?dOW#>X?-Q5t9Ozy#@fOPJ#{EXq +zEZIuM;w^^CKb*w~h$7(cs6Op6y_!`(W;%xX39(@!r{AJ115nAnMWw%`C-6ka$iKQg +zH8}C$9(Dx*$rZ~vOU857oBCChGS|@j^Vso6STvc7qw>9Vd<41Wheo*g^32;{B4l+1ujI2;9HWOQsS3kSNy0a97N}smFBEKM0TyGRs +zr})NZrwWgiN?WVNx3gd1)0WDY`dn1{{3qYvogFgO+cSL{!T0vs;q^rsqlQ)T?%YIvjuRh()`RbK(bu<2IVQ@^K{Fs|4(y{eC>uJ)u|7wn;nneFpIb$))>G~ETRQ?~I(*};_Pebx= +zap%W6D#s$+jkaZNFoNdIsK0nRkD=i|;IkBtJwe~zb|3573hner#~DSB8Vdr^3C3v^ +z-llW}Du0CKC30z|6ZgX2jP~u(W@SiDAkHOy%B_uHuV<5~r>;b$zlP1+Up?=Y!g!zC +zJ-w*w47m6!dE45EN&OXuK34$i2r4}m`}QPRrS8gu!kOzHSb{a?vRc=N6L0M6QCmG_ +z+BQYYJz-5N0c=tV)Cno8nI39BU7!P4rCz +zD_Sm|Pzvh4qTSGhZAR_=!JsaBbP}In>&l=allTKLlYcuFl|P{+9$xX9gqGu5VSnWzD<%OqV)^MPod$wPH~D(`-D)A{>(w-mJ$fo(pgA +z5?kXRGP>b)*&UBz>PCjWFDm_6v-A)l9;Ml58{fwH24f%WempqDQFFij$@*OT3dyb! 
+zDm}m6lXo~q`r`4*I1}Cj&Lx+0PUyd^D(ZW*-#=KdGNX-3FH+rCWvF5NLO;sHiQD7b +zEe`zp27Ac^7ICEp{&!zO>QU*%iprTcby>dWA2}bn3Bv49 +z@wn*qnQRD^r)!b-kOf|})y2z9&F)RD3kqDPFr@h`gw{X1RbhM)RetT}vR4Dm+d0dq#vWL +zEe>NkA6^6#SKBOI%vf1MrN5njUDx59Scgf+)KJt|%dj_TdZxKdqHfk}ulA7(x@h^m +zcpZYlflRyF_k7kF^>bX$q^Y{)HOmY3qAzod?gnW3mZ5T^lGXv>bb8@S%AkrOJsPyG( +z6rG*5{||%V*I1`4;`a +z9ZVxkyh8i)4za?r^W-#du83hW(NB}^#YH6fVx!{k-@Z)$@MC;&%CJi`xgon6G&pmV +zq?3>R*>$!cQ?{3A`Jd7HlY8*={blR3>4`+XW+mo5;%uw^Qf3FT^l7W-MVnChzrL%$ +z=H{6KAr6uyM!>z--DQSWC2n7cS(0*uaz*x%qtbuMaBu04FtboBIy7<_BO95Py+F=? +zLvLkZW!d$Mv0WG{{nwu1V!*`c)f`VjKvpBe$M-l~llgZy*IGmrsHSc_NJ6FGR#Ul1 +zcq0b~Qzq+M#x#-G_-JU4yvH{v-BP>4ik~f-f4&E)XSwppbVs@{W6e6G_uR(fGY5e; +zRyAyv_;`z1*Gy6Q6A}4d4}M^~gl!UXcXp2OmCX(T`|B??o9|hqV|{)0pQ6&AP-NsP +z*#Q5Y=N&(T6;w$SCvv7j(qHvNh{2~%Q0Y&-$qiFEXq1XR`~BTgq}9#Y@twet +z1RcZXk`wVg7nsrdEBXRi3GP7oZoa!7?lzV1>@CWC!9B?vm_n8?|FWVqe^ma=`>&e# +z!ul5qcOuPn#hD?5lQUhe##!@l1A4lL8z0c(cUFyhBZONtr1$Z52Fc_piKtMlJr`}L +z?h9O%y)EDbTK~Z6NY2sqRxByxNyz!OvK!2A*uV5YuETVwTh8f~A9iR%#luCL)w>y9 +zPIrL`K_8H$cfOqeQQ!?NCQGcC@Ha9aWHeFf1?+8~AGXvu)jZ+Cas3b-)k`5XFu(^% +zZT6Y>*eQ>@he|JaRws%iBku5Oi>Y1$?V3AVQC5z_3AzxcknC8-fl6~!dP(n;X-oep +zzB-DAj(%AwwzEWc1ob``Fa%&Z-hN4Tts9kIANR~AaXg0$uj2RSHos$Rm)c2msF&_NB;CZFFi&cJ#Y=5gR&OZLHO5b>kQj!_|B}k%a(dp0 +zKJiYr!G$LE?-LfM?_IufIc$$~{f@!VFf%H>nQhZq2tCf$7WM5&yZodMLcSsv6I^(9 +zZrI5sRuWrvRC>6c=_7I#_`YgyrvLTJyQ|Pj`{n)M`|LZ{N24WL%{fu&?P>GmwR|mE +zLh3K1mqA`o?yc*|IWlr>Pa_l7GqS8CguBEyVDcecr1aUYKhAGAlcz0%&U;ob0 +z=DZvi8;7cCDxk$aI{I^2$l2&Yk$?i2ZR-;Sc`LVB%cgDL-PkbuIyc2%j)h5 +z7ii3|Jn@YHqltNu7bX;0ZB+L4s)ssSeB}8g1M1Xuk8Ut!^zJQ=l&TVyAoow-oN4mj +zl7Bu?M8bfIKhVSdKC#=3s$Wea5v7{qgp;V=Om2cYKQleOd982g8Y+D-2Uq{C?f?sh +zub--Q7+a6;Cvusd;0d;m$io +zP5(D>R95mEm?X^FQsnQMg;V@tB53xXKyAu;F5L6XgRiMQT +zE{{^7@-LyBV&KYkOA)_t)=u);N=w9-s~1Luz8%_5cx*VwTAG`PQ2AE{)r?9dm@k!-^(z=c9mBU}n3SALwO)Pk*(#hp +zjdv53zTK7bH15JvT^0#TVUYwizrI0gv6)-3nq4m5q5=!&(Z=6ikCeeS+>-tIEZ4Eu +z>o5X|INqhXVjS;h;i9gyv@&S+)_;0uzyHc$1~AU-!2DS1o76=qcvw!T&HK2IrjCy^ +zPod%&p=!p~H4UirV!m$a+dm6Eup#%m{>&q~7=z(#qt^o3+Q+$pNV-+GWF;jW>>$sO +z6Ju#}#0Ml4iDh-Gp|1H6&1m_V#fgUc&KQ&Bi$omNupX`ypY#~Ja!!@0m!36WpYX;t +zqT=~_=bl?mT9w?R==Q^)>8|TO53}Dq`94ElR7(()U}Se>$j-5e5(FzG0e$^3YJU*ZZuUn}cb?0}j)jNO>kg +zRC)(}wcwrjOZGnJVy5Gi$|=V^+rK>&schI6mB!gyznA9e`scg)5P%Q#qKrcRsmbRPd62Z-t9UnMp&l5yHSXW +z=WSru;M&+kXHCNuVhrS48xzTYM`=)hL6`AqN}exK-hdoDobosWu# +zBLO#78}0&g*ZVjcLRlEhlpw3wQXOsFsCc(F^z)i4D*d=G)wUHu`8-tL7ypHk#3!QI +zpjmllKZ$c<;i1&$BGA^d^g?;!*z`Vob$@x(X__hi<^Jxn5w7(|zIP!Pz54`VXlp|Q +zl2&!hMa$C_IG^D+^2h*uV$Ix3dJL4A%EVPf!jx*Lcm{?Zu1VG}p3_!$W}8~K`x=1n +zvde^EzfF>p;Nk3Qwu4H)`sC$Ehm!BLxru7`*sAkGk(DF4&Ry5no0I6H*D{mP)>3~6 +ziLj}jA+MSbCER_ec`0o7+>6VsR$Z#Y#uf1bp3fLj`7;`5o{`+ygh>tE84B98q-Z|C +z&uOw1vs)kUY_@)Dh}NHfTfeEj^l^ON%q{<7N7Gj~FH@mg=J0GO=XFBaxR+dmsQhE6 +zd<57vi`4VNEF5zJ=Ca%mmI;*Zu%70%_E1xg?m@Hvf#9ql0#fFC_u4jJY)=+%m&ak{ +zzx0D|Q{aBj+z^ZXg36zZ5i>hSP}hmRFY880Wa3U`PSlMyHrnV2H#@|X-h?qK{i%EF +z0n2;(R8`fYb9o_-aicfJG_Tu}S5z!_A2{ZnFh>3YJo0DQk-I)(1Gnvad>54n*)Mn~ +z(8}m`wCdiS_kaM?vhY;wLf$uWMl`T?=`qz$KGAP=>(y?4z&YDYUUQP>=g+wROASp&pDtrHXtUI4k>KR6>l0RZ=p3xJJB4wPPKiF4`hK7Uh?)Gy(=hY2<=IUsRP +zRhXMAQ6ZJJ0y8<2*)sSL8wHaFzo@p{#VVh^_SOv2TKwd<5(YsKptQ%BHOzOmwLCRM +zI7+YoV*V{W(ptV89dM0W{?-DP!4an0SB@}2&;A7Q^YTAoI(4~_4P4ft9zXXytce&p +zjE<>ZgIr`&lonVc;<$3fWAdJ>#<~CmkeN#pG{&b9-U+)+;yr~u@L@G)PlTg_ +zp0ARUH^SMIj~~hRh&%Y3+{oV=Ug?tK@hic2k63Gj=TwR2>JdqEI{OVio=)_^;32>y +ziZGw}uE&%;Ks431vMyJYgb6-J2|h;xo}54(J_dM2?p4DMo)`coxLwU3cLBhd7W@)8 +z{CR+PVL$F7pFvV1ufI+V5G@js)G*oIFkxdSY8}ss@qDYL1h!ev_wJXv6DHX@otE0k +zN3u!U=0dk6WUe#C(3RZ5#RQ*2mYD*|iV)Zb@%=h504%Vw$TE<)k1Fl~UNJz#$f9(k 
+zX^OL&=-fCa+d=6O@ +zHE;%eJ$``I0oyK;D^vM~U{hQ0`M#8FeB)9|amMXaJY9D+UWWo&H$C8ucc(`}(n{`_ +z#NY8Dmg%I}5y1qX`yn(3;0BQPbDP&6+(EARKe#ee*G;xX0h{6YsT(yT5fGfu?FS8g +z*5P*v?!r}AS7uCBSE%SzUGIn2Kit*ku=mFVpZlc>1IS;|f?@&1@k0&Z_rL5+*|DH!JGGv@35y)FWmW6UW^C-b2o +zk3a29&DqA~$ku)o5OHlr;g^p8=!;&Z$gYaMJ76UC8fo99%XvFub3q9LY;Jgu-*0_w +zp0M_?>%hg#a}J07tc`ZD$HA3>GUj^nMi|Jbx>J*LQ+`tEsS9jasAT{_^s`=@Kd%}9 +z0&oUg0B`VA08eMY2CxTgk(I^3I0AxvIXXbu{;)wL(jWK0@Bh60ljaBQ&l^YdKPcXV +zn@j(B@Bh#@NPysZ?MEjvQ-0==j>eM@mp*mx?U263aj0d|ga8-$zlf~Six1-5b0RFo +zYQm`DU+y5lt-5y=VH98gs*&FY$?D&`U?6*Ju*LpVWJsVhyH$OF^VU0E +z`Ws_hB`+D0o9KiuNHLO2nDPVSWnkgp2Do4Faq55zg5Q!KLv>BNQpy(Flcq +z`cEK7Z~muIleOn`=qHXuwd>gzgPR#$Zy`r0tCYus; +z|2LSxzVztu{(1ca6SytO(H~M4mVh#d&Kkt(4#2_B*chBgG6Md1_~)CCjaxOO_3j!^ +z)SYS$;(WLB-d|nN_+Wdu!Dc}Lv9W^CiKm-s1c))we{tN)nH9$RvaV75> +zHkD{xfB2_+WVZ3nBCA?ok3ox +z8u$aS40sQD{I$0HAWCFy*#QGU54-~fQ~^cs542Chv%rv;Z+$3%P)DU2@!Q%sa6k+$WNIE=g +zR%VZZWLa>SSgx-Tb1~yvW`(jNSl-{EmICh~kK?H2QBW)VE#?SUvXn=cd{wI)M>2<> +zT`@q%tf}Ac*@h6XSu3V)Cig;1FD5BB$%)!3C7V2>k{smD +zeexaQ-|@LJ?FnXTwhf%WxXKg>^&n%jH%?mYHysFndg#7}&;<5K +zf_FkmN~u*(Sh+O>kh&dTKaKs(e@*bxTVm~?tHp2Ka6QR&BJ5H&=euju44(W0?(4tB +z&3}a33eW&iYk`Oj!4nGl3mV|n;BPyV60zR8?d4U?AA1}hjp4SrGV~huUgRZie1`XdD +zx%=h#mTN?N=NJjaZ(SAx9Xd0~3y^MIjhDC>VU^#(W$X0vy4VZJ$yj%ve%rEZHJ|3#?@z2Wo5%I +zXiwBZ0Gf+NxeX;5iXjk3E*!h=YVo`rSSS(*JH-5dZ{_YQ+KK_XI~% +zqz8di+>bE}yq+0U{l;IjnAB3vi&mF!4MTwE +zR8GwTm8s{?8&=iP*5^$QP3-E>kL!&wU5e88vx{5w_y@r&{8sQnzXAShxJ8GK+RT3g +z-JL4ER4wD|i7!G;CJ2f|J&ik84j@2NYGHwG60f$t@)N9wQBGz7tyC42EWtw4D%~4Z +zUpKe9{sDUGZ=mx5Kbng**gWjPaKH=PZ4d@}4)&nzod6%uKXM2400SG2E9f!7LB0I& +z>;4^|N#t*RCL6E>uOrUC^O}&pG#GLJ^jrH#-YD_k|196gruPh+RrJ;S?KMLPAZ_dZ +z^qba*SMnGqw}$xv#b%xRbDiKh7tYeT&P+qWXY3#k1-PRlxT7CbNm&gl@9OG^-1io_ +znXHYwi__7@fKY8$d7! +zd*fER3ymhki92_PLOK~_xSvgwfcuF;^Rh)Pe544HauPo+Wq<3F&dRh)Bq$*F>TJ29 +z$9I|td|nv=_XPL5)Ynv!=0nO9l*Lc%@|v+U@EqD@*7p=;2D?x5M1$U6f&kbD&*@!+ +zM(8ON$!|`HGBb(MRI)B~3AtFMPwZxgNs-Khz>w`mM_v~k`7ywj4Ce15an6I6KkHOB +zBcCX +z>^zXuEz>QD8hiZ_n;qi)5%c_b?nf*J1xGkPFaksNPEG(69Qly_7BaPgJdwGj|LG`` +z?j*S$=B^U!Cv88j5;UK_rt9gI$gN#Day?mq;Bgdy|D)VPJYed6yT$Rt +zr>S`tnzi-KRs{%<6y-=YlHJucP1Jhb=A^On>vFj1;tS(}Q~O4#Q-)7$AAvlmANxlk +zu{{nvfxP^A{7AQh_KiUVyp+d6nmU+d0j(4F=oP@CBK=`cEMgUmgQ7bB&iL}b7Lz0K +z5rgNC7B{Cv-k;v;61j%gAi%9?iBg@>A!D&_ +zQ!Mq8{s8@(X~{g*31%YBuk`{iwmn8>RDSX^Mh<`=EeQ#9mrHPg4=(p;L6(LU{&ID5{bk +zC!LI8MizoR$bMYFrek?Dup@JW-r(s7cv!+cx%s&T_yqX*1i=?E!sC@|ic#VlA5xz5 +zEY`ax#*{AOfLk;PqXlcuTl1l$A8|i9Zj4z9C25P +zN%p76y0(5R=CxOcG02vBCgcjTHgd#&WI~Xi06^@=1^B1DjV;(ycpdGsjGnhait3k* +zv|Q#A>ik|UBt;_y#C(+L1%Qth5Fj+c3`#nax8cxtJ(pu?O35kiO0i;7`;Fp0@&*zU +z=6>V^1iPKJAh)fnCqD--*qQR$Ago>7;k>r)UY0yoHnzwC=KqgkkkO8W+}+9!3WGVjz-$~{TwVRZ_wHaA1hYnX!(5ylk)LfmU|wGK*5Er2gr_uk +z1Ij0k;1?7T6NcGXiipC5#QCjZmX@NzFl!r2O93G(J~6nZ_)&-d3$4N9nETgeH5F&; +zPoL=(w|nFHNxu>Td`}{u96s$ljIhw1nBE|-e1?<7TkY@YHjtC>wJ;kd&WeK86YA;? 
+z257$0e<(zN<46kvD5c457&%2gmmB_k_3R?%il*GsncDXdz~wMSDNy?6`w|CRsk*BM +zqw<>rf`MEruo$vho@Vj;R><-1muwKet}gDL(Ep*NmRai3DRm6dHswI=jk8Q@w2GI& +z$WXhe2&WoXNnY!u(sgR1Jk<-X9YQQXcEnBsapNhAuUNdtO0sfswz0Rhu?KhWMlJ2{ +zY)tD*!}%-HOz%{2 +z@nD?ak%`{FFX6a(snBXZaj~mO*7i)u8-Bq#PqZ4e2Lbj^H_PpF$V`|iB~=9~_&yYP +z6&Q6PuV4|2CqAn-!#ol>5B5uhpuIp{;kF2OX?_$1^@_}{%Nka?)~dc7>tw@+Vd+1z +z1#=(-_&$$s6wGcUw!_8lpH{xK)!?hYYjya^-|sciokMocW86)kFq_{C4B_kzy^K;~ +zdw!k0_SBOx#oYI-p;Or1by{$32(XwqYb`voWLPrgH`a9B0>7bWN{hFwyviY*%gSN6 +zDfzgfkU0c%B=dhL7^O4lNGFdipb9iTe+=rWKjdsZ +zElE95WuJ=R&79|>n~F|;iOz}=Q2Lbega-`>WMBSEPbN?_afNP0*iF1N>-cuqL!mNF +z!r3cDrw9R7Cp+H1>KRiquWcHI^}k3oz;`mkVt%ym(C)p*^cs-TI>r{Mertpc+{@8Z +z+Da6`XCrRIFK7i97ZMT>f{O@)JH^BK#e~7_=LN*XU?M#H{5--iK|Ven8!I7x9$zsL +za}gnw%5FZkeV;)oYg?Xv-KFv4hx{A&GA1E_lAXwzYGYdde3hDn-V+fZNcJ~d2RUOtuw7NS#tZ=nz6NRToxk(d +zQrU`|vAmd(rQ4J|^6r=SxY>2ILLvsIn2*)j3E}JoPXD|6ncG{NyBvk3(lD64HA+P+ +zRWGKO68rWx+zVgk?Z*gRmY=%=0n}IPM2WH_{X#K=FlRIp%>+idbg$v5IPB{TJJCjj +zn6Vry>SqV_n-Wn9`!AUYg&CAiWmBjedh+%whD@qxT?09A3j6z5H(%myGAeauwlmrH +zeDYG``)JD?`As;;6$$BE(o$Q_Aj^-|2o~~Y=P_QfXaB)PSRN%j>_6{-v +zF6bCDfqnSDoz^&j(#p$*P +zUb)>m&r2z4ZTE)Hsnxn7&UQo2UTaNld`$g!1xX8|@HhPWbxSUT**06VYcK2YZp~WH +z*$glvUb)dF9GW8cJinPsYSQLbEag@h#Px#SR(@1Db+9!x*D?OT?^L}!5bj7%2d&B6 +zj(u&Ol-v(rzWd<7)Zdc#nGYSME9Uy>fCndJd +zV0O0CP$ebv%v1oez&D*7$6fe9@Gg0%*vk*civVlo428nrP=2uG^P$xuf3|ZI$M0!i +ziu}pJPmHb$JIvkk5Wup}dzdb7kl2$)x$}^URJosK^7cCXA=~#(BZiBuDoM!E6s;Dn +zaCc90&{IUIM42pJQE`^3VGS=qQK~=`&ZD}m6$nrxmOm6h6oM0ivsZrG|4j831aXm; +zAYBq5PUjyV+JheNxPHnB;p>U&?!YfxDy{sKRUs5vn=wYv9R8v2+%5zl^0#rsu$7=O +z5w~4=RP)}5!aJnWA~LPRq~*Mlk1xs{g95~|BCPT-xn`=c-FI?)Wf%T +zOEkuUJr@cgO@1m6pli2IeZeZYun@~rv5U~4Ja_0dH&=S(x#|!szZ-rATNK9x@Kd}t +za8%t9KW!Z;H(Bj@Ww?8Aitcg_WjVhSxHRwrxp4|&*orn)Plt}Mf+OO_^)p1hVJ4~* +z`YXv7Ud|&YM}JwzzaU-Y>;qImK}Z26A}%U|Vn4$S<`fQYPQJnw@F+^fI`d$}TsQ>K +z@@Lv{C{+$xU9&H*e8|j1Yfe02_fn`anOTZR_-K8CtI& +z89@M*H#8ZopMijWp7*l22FIq8cb($7K+kl7LgIv}DZa*r9Ij58C(?c3*C(;H6YKqdL)`B*c)_nXhVPSDGn2;C(4ujiR^1(zztq`JCf?~pg +zHdat~7pObJ!^;UF%?kz>yiRaeSA;vSqnCpR41AmqZfzw5vl8U9gbBgn)-X6+j1Oih +zBqDAlAZ}^JFTw|P2j_LXoxoH2XIyq7 +zEQR>_VEiK1f^a@jYY{$CespFYK5*BgZ&8^2OUUeOEun5+_MmuB^jR%&eRVar_rX2K +zMXZs+(iIQ4FN|O)%46jp(V}7J7_eM;%@#i%YoOLI3`^3x@^y=DVm{HfgU23S=PwPJvNiRmHjL%CZgM;5L&|>fk&5#Nd){ +zl4Ok{>F41z$mMCjwEC0r@!)o%`P|CX)VG>bgUOvM)DdsHNf}@epjWZ#Os`TGLK +zJZBpee;Y4Ri!Ne)wd~!y2(P)YAeoCfk3oDoWC{JBZK&(IDBcaQs%B6e3 +zcHC-6yPtkh|KG&N2LmnG$_w#3L}h`nw5W@bz8&K_%=4tMy_&viaBy+YjwHTr=v_Ik +zs#A3e7=K68d9MZQjP}yy)zR)tXLO(ZMQ4U-A6{nyMc?DYNrCVQX#dIDAk4k) +zEC>+Aeu>gd!=Fg`^*+2DNXBb=U>XAK1yCeJxDh>W8+HzebYWcDlaYt^V7)7gdqY(~Vi;;pf6SCk +zjs|{yD9i;M{iVSVsIGrztg`8t%U6|AU5b}-=bZFU4(Nk^jAP@>Ueo|x#^s>~mi|{5 +z)5PIqF)^h}+)vJ)_)wT;%X_>KWG{&H`poU@tr6xf4roE|BO+S)m)Hp1soea4GqD%h +zC{G2egR{uOZVsEL+=3nK`PaYA6Ifc<=?t_>vmA2gdujHbdb>pa+nS(Opz3xb&L{&O +z>0t_C6@xEjrHgsxAOsjYyB;iz*#=mRomT%&dgg8U8L|={TwOHff(Pe#(y*LzmT2Q664aRtOIdv_g2)Zhy%kAnY^xaAU~+c9WQh +zMHRS)=41)qVW-Y~Te*acg8a#Xda^yhK3=TZ`P!fW*?IUw2ju+ouMW%u?u|GK*Z*45 +zZ)%8AzyEHu@m~tY`spkb9cV}IuGbncQbF=yrxXsaK>#Wo?9qFUjKsV{L*;9S__u_< +zleCsFPjE$+7wfwra6TU&P{1_6PZt@zf&5=76dv&y)+qw1Br?}?_6(QfD#&c)eIdYQ +z)vN4AxKEr^S5{>V#blHdrq0&bK8brFWt?)r#Cg#ORVZM>5$rRNuc(azYSvww>Uv1@}XW;#K7A#;@jjCGLHe&8yEeVNjLe +zv)l0>Ilk)6f~hNS)}rx7&vUKsy+qVyl3z$j>PZ=w)#YOjH55&MyUg+Ycrb93>GKBr +zaTgTn{&(BK#r|Q1#}?Jo`nv(S7glcg$yqSWgT44Ey-ktWiWYX9+X*4;chV=EAHHF@ +z8KNfZ>nkSdT_kxt*8S-gf@>zse^&t>I$Nj7bjsB+XrHOgxRJnAfKL?!peVqI!SET# +zP;0LBJ2`E;xYm+54408@?_NsHt$gXy%WzCn$brJv#@7|@hm@1F0|McSBBp0L8F`_k +zt9DW(7G$v6}B8@bJ#sO(+YBzeK6M=En+)KnrOc8#65mQi$_+I>BO}T 
+ze9oA~j+2^N$6^+6MTwX1U(?6lPROso|D679A`*;J{$Eh@Bi#Bks6lbP&waFb;?y|T +zwge~#T#pxaI-Ov{4dyiCsIw|2r+ciEH$Ps;AxI6KRg%fJ;V98eIdk`1ELj&ar~j8m +z|Ib=Lu`=?j%Ci%Mbx-48c$kSf3>9w>oX!M;k-~UiV3S88bk!$aH9&AkkE-Z%t!3W( +z;`XiPw)BCINIUouts*DEK&2xj{+}HjkCgwt;1)$Iq0Aq+o=K(O5+h_C?lfSrNuPBA +z=XUml49oRKapyJ(POsL!-JrtXDj?LFPC|EiXwZ{SW~ +zM(Xzsl*ir)bs{!`yiC~UzPMEl9!@~xnK|dGONWhMYGV|qqf&o)TWhq8QQyM77rI{J +bq$tH5(mWm3lpC&!hg{kF7lZtl%KiTUi^kiH + +literal 0 +HcmV?d00001 + +diff --git a/test/data/create-mb-policy/binary_bios_measurements-bogus b/test/data/create-mb-policy/binary_bios_measurements-bogus +new file mode 100644 +index 0000000..e8f7a0a +--- /dev/null ++++ b/test/data/create-mb-policy/binary_bios_measurements-bogus +@@ -0,0 +1,6 @@ ++bogus ++invalid ++event ++log ++file ++here +diff --git a/test/data/create-mb-policy/binary_bios_measurements-empty b/test/data/create-mb-policy/binary_bios_measurements-empty +new file mode 100644 +index 0000000..e69de29 +diff --git a/test/data/create-mb-policy/binary_bios_measurements-secureboot b/test/data/create-mb-policy/binary_bios_measurements-secureboot +new file mode 100644 +index 0000000000000000000000000000000000000000..488ba045d2f081037df77d918d2ceac68de706c0 +GIT binary patch +literal 41371 +zcmeFZ1yogQyZ23ZgLEU(uxJqJ?w0QEZlt6`=|&n+x?8#%6i|`wZj^jc_dd__%(dVB +zoHNEbW4zxt&T_-t_nNRZ?6-yB{cRZH!E(TF%i^>SHC+VmsLyxA +zBMo~?tHd9Xlk(EqQ(=7*mG=^kJ;C^OM9?3{q5ZLi_$Le9JfVl;S3!Zah151NB3qtX +zQtceiu?oWjCAc!*+Rmf@wh;Trkz2pE@DINq&i{uGBR8@&wBh;Ut&uxln$;PMU!-wF +zySoRLunM17k_r`|aX!#Si)&Fs2~(KwQ6ZL6p&6o){;>D+Q~Z3XK72kC2sa4lhph;N +z!^0m}2*`guO|VHm5!AXME)PgBSPY0yO^06*_t`K3OSpZS{k`B +zn>g5kNbyivIaopLtPd|vc5ZDvRCaDw5F6(o!~cm>20>E(^9O^3g@6u%M27$cK_Wo~ +zK|(^rk_G$3wC<)n39<_>i5!f!bo{X>3t!Fs5*cd1!V>gm_2?)~rcq0yPxyi->dstF +zdYhX&o;ekgR0l_Cqd0~d^*nT=1T-v!yAu113t6^T=}@DbkS+)mh&YY@-gk&Ee!8?Wnof?e{i+* +zJ`IH_ulN|`vsAJm4QqLGQEo|S8!N9t@XXX4`l4SY<1%qP!8+G+@cnq2P3wDogiT)J +z%kSvTaDKKy_vI- +z>%(LaeYmn#=2j*T0}#p21JPh`VQ`*p8?N3HF$!(R2Ay+d~^dl*l_shDXd2On+X6Hl6KT)ZtlD<@lkSQNXHb +zgOr8~CckHf)xWAlohZu+im?>tFjO)x(d`eZ-3b>8WP=aXfN9f=x@u$)c +zJ?#7Wl7$feU!8c6w*Q+n{jW~^uTK1bPbYwJA0O`b-F^cL0nwIz<)H=5Cf{aAd$#%7 +zQx+5iyY&$JCw|9$BqOBB@Lw?WKgY+Pfj=;g75P^*fPuq?Qdecg24Vh80}!y0G>lv< +zt?Vsa9qd^#KxjYTgvW+|$PMfq>`hroLBv1bLBf6{Z)M`_;NoEJN-pZ)?C9_iF{~br +z-~;jg+z*Y-_Luv~)hw-?P05vvoL#-h<&B(e%v>F9jZDnQRUVR!Uzr0dh>MMb4aC99 +z#`};tuyOq!a{e5G0t5a>e5MD{{`Q&TuYLZR|A-2cvw>JyA2N!+&op|r>LT0AG@evG +z>E7AA*6qXR6l5Pmx8NR;X|9 +zV1IneC?rer}F +zaEJ_WkT8%z+>an(9x_pge@Y{aZ$w9P1PX>Nu%>iAtC#wW)JGbC2>#;$94HtRQly9U +z+6}@OLKH$6g#2)gNYG(^nh6D={~TaLLf*r`f}kHZ|B`=*o2!T}`)Ojf&nUd1Qb*u> +zN@-mT(*1b^8H_qe6{MV_m?Ix5Yw7Ch_>_gk#MzeF?$5dWGl6Asw6Xg6E{mhHgQ=T| +zs|(A+w0M}~4^#JH-abs)hq-F-Fd0Bh5C4HUnIBM)AsoWP2Ze=&hlGSt1F3+N{v3m# +zLdE_#k%x!JU!CY7$NKkYa0T4H&_NGh=EvahbDTtcs3H1i*YxqkD6=_}$6G4q#RqwH +zZr-sF@0kkRwRgN4nz*f%eK1TfmQ7EfEIwQo6M42ZkA-WRqReOv5ab)c&V22guo{z+ +zWoy{gn*yD8oHsUm!sj#GWqjp$9=sPcR4z>Im}#?p7mGfdOt!w6NLEdc5+i$0ONZfe +zIqO?E>D~l8;I)q)8=u*Ax9H$!i|+HWQ~Vo;axyRay8iPyeAwhO_#%;-9FJQBf)l6b +zBiW%;1!2QQx0-$*tt=kJ`30W%W3Qt|MLr7)-d!DQ6J0N33Ql-s&7kcocBS$@aDa)p +zWl3cb0`q%1f2U#PGJ9Qq_@o&D65(d4Z$W2Jm;z4~3u-S;n8R%hb1QHC=0^9@+9*x% +zVWP+(T~w@uFkc{%#oP~3Ewf?N)?0{p#WOd)x9=X`COeAfftMG5mzT?(}WPxfluRisyHw-s7LJ~26L%J(6ha){ZU(+(UA +zHye%|qPjH0Q{Bp54F#?#w&3gjf2~0WET`9 +z`|BWVzzhRv|Et#iJK_IF8Tk(pRZ$ft7c+CWGBI=cCGfvhl#TVL(6h0F*gXe`xr_dH$Gi7l0d{YgE;hs3n(y)Rv_U^(Kk +z(M{T2^PG$6{Ic07=n%AeJbLKk<-+Qrxp&i7x(92eD7uXjckq;}HgQ%Sn=pSm=_@?R +z(7Ov|6+hs@()Bl`2*pVRS| +zo;&}k-w%3j4l)H9|2clp^XLEejDJe{@BHZJIsW}Q|5D*Ypy05sEXoym-!*c~>(P4> 
+z`0yNL2E8U)dB)F%q~ERyx9RXb2EUjn_&j5%EDAvk?MV#doPj+RHT;#1+!JPZR#9gA +ztpV0fnW+<~hN5G)wd8%cm7ye0v0`@jj|}p&t@B}gCGvD-$q@~ox?Yz^bnDgJ?iao} +zcW+<_xlhf_wjs_CxI93^%fIDfSstZLB^FA1G6Wq{#JMwt^fn5&oB|Splm@*4ty+Zj +zIrGN%S34*iM+%FMAS;;3@##!c35OAi(cwc_wibkv_=s(nHE#q?QpKXK +zW*dP9GBpnAt6J~jFF9mdfeiE);ZG{BY)^JO=gNhb9d6pOx9o8?H~XQZgwim0;OrdR +zAO{v=o|EvPD^e9x3oW$JGKp3cw35H9gZlwXkI+gnE?dB$IUL`r3Dh=+dN7#4D=D~er +zt9xf>2I+mQqJa+!(ru(Q$87LEa#9F2GW524-milwkazeI?M?hNS=vQb6^W;(&HWZV +zOogX~z!5$2+a*27?XWaXQ0)G!xJ~BBM&8;6-rH~g+943dUgO^+Cy+raIhzsI6RFF2 +z%5mG{Q*{$B*Rx@^5y4M@4*@Y;O)&eEKUqA#^~(gBg_*;jeZ+ITUn>95sx=1dBf|et +zPwf76+?aic8|{DA6Y%cOe;GmmhKs=Pf7R^&rh0-61o|!QbpLS={}D?5(VHR{c5}6K +zaJG6VDSoD9zsm?#5GyY$4+kp;+pjX>_mK1F5EK~vKjJF$Lr3jb=|lYwS2e8cO&vU3 +z$d!Jlk`EOUxzc~Ol=z0ylDEc;xVqVUnQYYxLD6Q#&VE=1GprVH0Qa(`mpu6M(0GEPbm$RnC?ix2aTz--NmQHnq))SqqC+eZUBvWWLoh@k3H?jS5nY_h9S|1FeLc5rNlcrDBf4NJt>}}u|M7@rZ4-dSE&B?loFpVo)5p? +z%F{Kroa9f8)tMbj$L|3>{~d!ALGmEk9O)d%Q1Sm#De(|+9vZ6;bM|);0pem}WB(O& +zwEqk;4?#x(BnOiDbNmo=1pY4Qu$cTAasDs1|1!AX-1s^v7kYKsS0C~Uv_4Zow`UN< +zz;?MSww#bE;*gI_hJA9tDiLesHFM9yEN*kwu}<6gh2(YQSHxMuemNvAbL;3mqFI>O +zVtl-UjHVSKHZJ#`PVK(-HJFYa-09M8%R}SRZ)&zmAkCs(-@C-O3a`%gd_!F{x>P7! +zIJ8gmBcBhze1S*Y{~onEcINl0{OAHX65(2Kki^+vwQ=`@$`bp_1Zb3GG +zaaBf_$V6m>up$xkLo +zY$p}7Tx9f)+Up`(2u`vZ&3-Qfk0_p<5qI`B#U`MIizkF`SCOZENFDA?D$p~Y$uU77 +zZ*#-&c)@=v-jtn^J**Mn!2S+q+@rvlSj!JsK79G3eE9I!Vc`8k7>NI;@_{fsDiz{6 +zWpL-OctH67c072<`ng!yS$Q~lezld@Ie!g#K|hBexql4@|E|&@0TKPyd7MAhU0qy4 +z`j6V?-&Yr{)tr-RUbU}B0_p8*ux1icK)yV>?OyQ&Na##RckN5Xb8LcBB@PaOo$`2A +z;^QF!H2JdYB`y`oCW@Wi;7xSWerF?X>E +zyBUSL<*}Is3pb@3detolZn<$5E> +z7#nh4gU;0)kwMRyO3-~f6cb))xe14sB4Qw(D4QwOom132;f;CU6IU}|YDxt+%4vWq +zBAuy-Iu`IUNb7DMY +zzI;wzwE0ukB_3p5x*{KP!9oVEw)Q +zawebp#a$HdHzOE_i87N>Ja?I5(TJGueJ9cSvCC_gg^%_crwvhrG{~;?Ih~CbQdn)h +za?WwfWr_D8Q4TLP&_zJ_R^M~;nN#8$KM>t9gzAfkV7`9kC?EUw7SiOzv^`_R2O)4= +z`h**A@%V@HbT>(NthoF}09@Pcy0z6Z2nwlSr)=qXyQ1AqL$z2_> +zqE36=dt()neEwFNhwc<~nQ*V0YgBG_f)lv!rJWwOW9Tznqv*yl)SP>mcaaw$o*P9I +zIQq$hM_@?d8|{u}b8imaLTGgOO%jdrx02}z9aN8g+t=!|AM?<3);J_^a7k8)XYA6- +zU@ZyS$HP=Gu4k|fPlZj-5HtdZvoH%^ovqWH +z=Re(Bl1JICHi$7iB)^?u79aEj*@TZaa^%{j5EkyfWZAp(@KBZ +zcfS)x6=74Ml0M7?!N|enk=9N%gV!vIkwv7=t$taUe{-mXqLU^TB@89ySk$4K6U3Mj +zr>1)n!hv;F9^|J#P|Kz=8Xo<<&s>_vedOJ!}k|>B{zoOrGyh(D23mbIY|3vVN9Njajf?HVace +zJSU`;6FiwDXBgJFNr2rv{sqH!C7@xuUgC)xK{w{G?3NUyhS8tz9y-L_UgHR +zO*U?7b?AhpKrB@qi~eh+keI%hAOqptGiKDKv?*J+wR)>MiqI?g$%_^F$uDJ$9;&3& +zY~1~}D$YrvmB*Y$)&%F(e1S^Oi`ba%@*;kSgwo)v=LQXFXM6{xs^Jt#CR4KvJXsS? 
+zB-DixTu)vN(Rn5y*dmGRtiD1`S@qdx6B>>_|B^ITq8kErh9F`Ujg!V0+UZfz;RH=i +zjuyA!cb>$?B~Qoi&2ph(i5E-dP~rz>UG@2XR=95P@fqhuty(WNf$`(xAC*DlUsnbN +z5Ah@VpDF`fu6C}7>_=`RzXAy^Fp&JuFD(2_^?BJ?IetBR#`8PXXa6|_Dg0|Z_;;y3 +z*)LgnSk1=$M@#1);{KmlN@$~`FFzz?4Z-$Qew%T7<2h;4NSI-Ajs^EsR+!V;q2$~e +zW+Y|2;oXQ0J +z^_;L}x9K4|KYVI{$ksQjW+c3K-QRgR#T&uS>GC2c?@DORTuJl&P1u$$`Ad$yk`?4$ +z1A%)f2F$o8hxqmyz1jFKX@|?!t_&?K{z|V%zVN%(!>o@mWAwiydYQjgh>h)J&+eID +z{A^F^iGF{dK5AI{`w?}Hz_P5^^NnPaqAhrGynzJLsq%vP@)ug|;rhlXfLC)5t8RN*)R2mUdHt83Cr{aH`?Emjl +z0tCDvHoa;X7CPanHck@W`rK@iT1FJwx46RnemzO01o4KXa05I7VH5YdpvvOjth7g5 +z5z-&9y3DU%qErTstrwJZ<)rVP!#}&D_?|vbK!Nh+G2^6e9xjsX?w(X;m5%FINYk#> +z0P7OMQ4b<(-t>lMN3na0#NI%-qv1f91R9F8wy=X~bKr*+>Z$tlwR*d(>AxUqdb5XzrDFnK6XI52r}%jCPd +zCEU3~(|P@hI#fHN2;;-p>gz(E9>Sb0x7b^X%Fg^=Tq^x%V=Q;ksqF~57frp@&+118 +z_WY+jT!;5hlLw<>i0LEH^Ix}((T7N+Q{}xD=}4Q|2_Z8g_<+mV;FgmVUXHkyWF8J(7s++|ao5?R2armNos10ZqhlZ9X1k +z|3;`7ll#OhiUP6XS%NYDY3s{xg6gr4rdTU5H@^?%1T8m>*Jvd4t8fKomy* +zx|Gn`4N5m0xT@TbUGCI7=+BMWzR+Whyv#%w^l5M;?k<}W2VY9~^I^l%hkZX^KbI0b +zA)es&bjm5k3zfbT{=t1tzP2C{T@Ork_crW!lhb +zGa$hM`R;e=5~u~HIU3!Xb}z@y&&m-RFnGQ8BiLM-j`mei%z?b-bt2(mQl6G*&9wnF +zM$V$(nw;jKK2jr07@eDCNs1GYSB>k=yT+t|Dtb>kPhIY@oUP}8IEr{st&~RDKB^VJ +z1LS9(Jn|#4c8cnJToRM#+hPXKnDf-fhWbU@aGT+U{^=x;pD@{@z3^*RJvL=(e{s*= +zmrwsKG}{pS4wspc?Ra?I4#?B>?W)v1dzu@?IJa+KZ0LLV>}t9B4%5s{_hbVnSVj!U +z$6jxuSf-4p2IYzuKwfYxBYRQcC`(PH@`wIfU{}MDo$FH&7k@4eb~Heb0YEW3#qSLwAEfxOd6tiu$gw4YkOgmKB^l$!wzau4)# +zY|HJ{&+WsvCT>7J=quk??FEA9SXN5+WY}nimhdS576zK81H+f++!_WJKt6ItO6xeO +z?mVXAeIl~8kwxi@cewp)i3C$TXNq1e@g^W2^C~Z5d=rTnzQo0BnD#()%kT2gYXH}-ba@kLM&uoFoyU+SA!~*n`oig=1H{!cb+POf!2eGYG(yeUn +zYy5l4x(58~=)_zJMzZY!+$Oz9_VBJ7Am4}ASzL8MA4p7FS4<@La&|CE2Ze@~#0Uz0 +zcO$+l*&oOcqO+UhHE#ZRnWBb0m~xElvirn;E*GU~wXMC+qU387kT=!P`Hbb6&Wuy` +z{DOl_rNZVr$`3IJPc66YBLybYS#cmQ-ak<8hBV$Hb;GUl!W8S9seJid+_TZh$Aj`A +zRu+q|fPCQGwcHzK?;vV38`&V2hT(!rfs^fB@}(R)+{b&l&7nZPiT)&Y9swr9v)?s- +zMxDqBWtP4yf7WMR0&$Ja^Uz)%$S-G=zwBUkpvBhwxI0PWK~;OcgU0Cv|4EF0ulL6@ +z(kvj)`?ZVq)tRh85<6pq$0{cDmeALt027&Fxbwj(2snHwAkTK5h?18zhn3eUzHt*_ +zYEFAU;_EFkw2sC8B8IL98BA|6J{kplC-8pB7bt@bV2cAT)LSB +zXkRwevhbr>>$itz*y|1vJruN#OdV4T1Yp?C~OvD((34K +z2dq<7aH?9cx7fV8)XMxdkRRORgn>J;S5LpMg+^W3T~OnMmE(MuYN+#i943V+Mjgo4 +ztiy4jx7%ohrm!c#XFsmDDKLXy)sG{od}z;&ZaYc?`D}cXeGRv?GTo7;4(M$(cFzQeQuf)E3`9 +zLD7eS*=A#50Adfw>pXrH$L|!v&tF*nEhb&I*E`PXi3ShewZSk`Xx{Mv*!+CrWrn^g +zd@@d2{89p$^ZEM1+sitj{T7l*J5>k-69+0P=(oiV{P)S(!QqcO +zG-R>5X2b3V1%W)u5(UJCuitkAg?+BqQ0d|c#KcD`Uyy^I9 +z(}Lm3UT%*v1jkBjOO5RXl3E58SiF9z8F*5t!Q`k_H(;WKuTC4kEpF_9X)rhhUwYZL +zr2*zo^Wyp1(cN22N8;jZ%C#OIid9kylyfE$Mb~di#}mA*K>vGGN`eht7PZHyM~dWa +zyb(Xcfg%abc2$H{dv=H3jey1X<4}QthBLv$GMsDVC>bkh?`X5VR(kcN77;WLhUob? +zpnadgVKtL6bV#x~1pUTlkIB-hmpy_q8ivn0u}>Ixd%b}C76h-}2Q(Osz%2-6&)Vkm +zJdGp!M{yKIzFG#>E7WkeK%TgEe&}QXYy53(!Ygd2JBH*q^fh=2htF!X7EHE&F1SEm +zV2ozCW>1V~WSy#srsv3DURUcWxB1spIek%Y!HZlQAa8jf!PftQ*nB`B;#UKPuLxhH05detDO{1MWVx!`?xmWw1ng__wk33uMKwGPKmfKn$0LiQ+mGwsVh(80H57Md-UTC%%C(5WLg;_P9{IVEAW@k-d +zIQHC{^FxST*Mar}i>n9S2zSX!-hN%s-?k#@*g2qQ?T*5UE5@;SUew?SC&OiFD +zGPYi{O~=|QvVr?-8vR0g +zMm&P=%WLWb+6y*pncdmzei3SYDNw^ +zXtTlTb)8Q1jfzkliJp7(JU<*eaKQ8yZda#l%3iVD>_-8W=E9*b{yJlaL5qLCh5JSp +zQ7SzS&^|&7WyJP2kv#dm;3H#Y>q@_r3`X0-l@iCk#ZH3km_i_rNP8p{bxFNJVln5$ +zQ+&{>lFURad2G)ahRcVGCR|?z#{YxVo`aUHG`7<9>!{{{B9@jo +zSs;((wu4OKxKl8F>iXEOPO-mMz+cOnlO3_C+eY!To3lQUM^T=Qb?8aJly=uE4h<5q +zVZ3`|U2x*8x>E~*iNm(~7RaOL=cP|5gJ$n^&Ze}?Oh)%f!i19FkKkiiCD1cVE2jf_ +z91l<}3+A1);5Jk}HEGHyg9FF3^?3s5m;|bTB@q=KjDNlS+DSWme7Qhu4E^3eB=k)! 
+zB|B<|EUO$kou1v>H$a{!HttdJN0)VM+G0QCC6v9Q@m9{y(*1o3_0P&VD~Q>UR>zmqW;MOWRQ3>T6rKxXeeYe +zD(5|as#&M1?|uT%J`GB6byn!H309@``?cN1#qSF*5Kx13!g=rU%xM(vC4oHMNP(I{ +zK4|7qTz{~YE>5Nm3_Pf;W +z1y@HW*@vFg6rg>k@mDc{j#QId%i@ImmNR>x&mTU^wKn<_eCr&6#4RNR@~njPy*f%A +z8K9bxgr>w?f=2=erD}%}EIpDfZOIsfVD%AO8C!BtMvvWiss6X&tae7yeO=7&e3TgD +znf8YLsUN}S4+m;KgC;xiD~9Jp6(E#%i!)8cQ#5ExEBVrzRejWNLV*5oF;A0_S1m!u +z(_*o{b3j5GUw|@-P~AuxJY=qH?siiG}hs=A2 +zm1pRu0(ow;S*s46A5kJcoS3nE6x>?{84e*?NXJ_H)}fx(MI1n$$IeQ3kxATriD+o# +zeC>_r7v+jH^^V3XcQ|f72(QE%Apca+^+QEDL&D3lphr{eMdSRi{x$t8*jp(d4n@?j +z4#4uOr<=CoLyp_{D37#K$wZ8qrj%QAuQ!??dzm|NT`D=j^oMV2bC)BAvoroRLBCb7 +zMR*#n@Z`lfqOx2rlvRSFix$uy0p2G^jk(&Na)m8gOud7j@rzQd>a~i^Zw+t@`!msk +z<%dE@eRf(6m5J|Gks*uPAfMiofHcrwZfdcJkbE#WYkm#1FB}p?W>=z9%;Dpi$6%0- +zQr3Dw`g?0aEjvu-GV(P3u6L)R-md&J*$-OE-5m>J^@w9<^tFQZTl7fNhg)P`K*?IFEJA5Hu8hq%Tu>fBW=`z;&k2u|fz`i8V?FsE +z?OISBcjc>&9PKHeTJ7N;tvn@OY~ENszkUXmADWOMXw>41b1}yDr6S8>^wiU?@PBw* +zzio>ZxjJ9@sTAnHS@q@!{%7cJ)6ZhUG=YBEt@xoZp#DeUy_c)eb%aJBg$`G+7N?}=xMK@nbDTjlbx +z_WaA_Dt|Cqt +zRB!ATJIZU&WhIl=$B#91RDz%Z@n=aM#lrW_V`yO98ptO) +zdj9Y=N=}Y`x}BM>r)lmN8qRnzF!(9Lq9D1&`|KHzPhL|>RGJR>(rtYIicUQOE0TS^>`pE6@P6dhfFyAf3&*nCJS?rp}LAvK#?ji?|fXr5?*v3KN)bd*>1S^ows8DpDwK<OP2Z +zPe2NKea{0{k7rV64}(Yy`g#36xuuyb9olfMk!#$#z}VYNMB!w+5d-aKP0cs3=)2<5 +zoNO;WtqOt87kYN_4yM4X;4AEBKT4%{K>p3NAbKl4atGV@*v!115)ShQA +zl5>)B9PlryN(viYDnFg(98kE_M%e=Gm+`GfbVw?sEa+6Xv#Ux=HP|#lyD!dbR^827 +z)aB~j0Qtt5y3G(D@t0_jzCNpS&&zgG`svUr&9(U@pUUnk&z}PMCPBC~t=CcHSE!|( +zj5jxA)Rta?_8xCwy!7pSDOJ(I)(cu7zd*CMm}L492iR3UCmD=X#XVs;#E89)L0+>f +zmjwndSPj4fVCoyh0mAA$D!vP=sbdDnYx#ytT*tB0uac!)FuP9U^%q#SK4_JU$8r`ZJE5C)zp6{49Fc&k+MUY;5@Y +zB`kHzkBJ5pud>rEVOk(RmyS7UD;4wcx>b!t7XIqH1*$n>qSWhkS+e#MM+FhE`2N{E +zvW_MM!6LMiqi(J3>*ZsWKt3xM%Je4s+(LPBqI01AFPw(AC~S3dFF&5#hsHurAtYFp +zo(v=-e6M&bYI7w4)~{HEdSCl8?A`LmJ{4Mo%=(h$mm?xY25D~}7H{l&P_ +zkavosDA$4NZN49_N^u%{4_s327zeb1#osXJg5@(y$_?Rr&HIHQiGDQe3)t65l#DP& +z8jU@jOS?0PFWNJ+utjVX;3lmj4fr4ZR+z|2cHci +zoW5Lv8@|!=0U*C~A05(#&@8WG6CiOugMcSF>(%_l!*WAwt?a&A5V(G%b#G6}jOS +zUd^E#$=L$v&vEUAG9Rj%jAaaVSy$X)ac{N=O5uF90J1jRl^-esKafAkwd1T)vl0$_ +ziz+l^NvCyos>`^v(|BuoSxVn@-X#O%Psu(S^=>C&2ni1QlH|?38(wkLdcoGb_Q9n3?l1-`Rf}e#|_)}8;-4;M6ctgg93W3 +z^PQndfIN3W0JnUFG1QnB3Tq@o<9qHDRxBf2q*oyIs24k){Y5}tC@PBoyrfc8g;1K4 +z8j97e1k0#ufdWq6=0tJ~heEXp$g3ehe&f*V>=rmCyqg& +ziR=-OXD%4FCL^typ5J0pyIrx{f6SM^9JARsg&9k)rMPYZrawY6LrZinp0NSra!=DI +ziL{aAqngEdN|IOHM?A&U7B+$Q6`^o$)~>=+CI*lsX1>&7Nj{n6x_KObI6-u`m>=$) +z3FLL@?>O2by?0wgGpR|gtwj_e);UY%FGxF6#OsgIBAx>IwAr$p89xieM;2M! 
+zfUxnNJu_)}+v1X9&Aa9=K)wi}*k8HrQk6kA;3{cnagpeI<#1i(3tryAV`8jLVz7Qr +z+eZXj$ZS2HVAa0Q9B$L;8fMPT_ijdvY(x_I+NyL_K>NdKvO=)2^|tNmH|Ct`Lq{Kv +z$DX*FPKYy787=8AkAn4oW)5N+f>v!SuI00X;A3IJ77~z3Mo0)83e>M}9k^-0?%!`Y +zSR~`PFRE6InJ~`O9PznJtxsWc1UhU$G_lg*yZ4cc`PpFJtvV +zXWKL{t6Kzf5R5Cp*7rB4>^cwN<=}n2i8FZ35?w*TKFidX4oy+T&9w|He%?#S(T8w1g{ngX>4kK%NSR8An91 +z=3e2(Z35XO)Pp9NuvrNfC)V-cG2UWiLmH5eb0&OiY+l#G5`cJ&AA*r@uU7HNKG`K~ +z=LIsRyeBzWzkNRP-v50TAw>Qv^I>~L21skT2D>0C=qfWNf$Y=k`YfRRG_8FF*mbG8 +zY>EVg0(Fy}kQPcg!q0YlXvrO$WH+OMKwh$jkl540RF-)Qk&nsX1H}rNN2ifDBSIjH +zM(I)rS~ifsYUR#4P1-nr>|Co@h`^GKhFTDCq@3DkiPHe91AQg|yzH97jKU}Gmx?plPkS1Lk9A4UsmIhlo>N&>7!(tW0R9JkRVg)aiB@%Obh2y +zhY`W_P$%N}{bSA?e*DkX(3AeP*9G6t;cbt9G|qf(d_qad3ASDj;V+kjrGZ_Ag(yH_ +zF2F>!+F72tR=0qvnU}&&#g#V)^k>QN3*vLaCDidM_bmHY!IqY}+v#dmGKCYC*|b7q +zeTzU|>P%C9xp&VDg?gip%0zb=vMtESzX#&uBXpz#D)rhSAYb8CC1h$y)RFo^QnG?H +zPKGiXc7}573v@%lg(J@0CRqIK)mh6cD%Ft5k~m)Ol`Ey2${O{F72Jdl$kMMD?g&!a5Rnmj7NZV2SfCjtbhl|FCA2Bk08eSai5 +zl5xF}p&aOFxmWk4#XAD5-k}v%-JFog#`n|ZZ(sI#;#|hkHc4>ev*($J*ASPgx(Bqs +zKQ*lGY>!d0zn8T&q_d=R3HxP(||%Z&TP8EdO6Zl-LtY8&W}HCm8_T7zVq3T*TxPrl~{ofMC2 +z{nr^k)M$2^>YGwiAWvF8!zj~Qts)`1>?~+qHp2Q==4^^qj}JQ>rALl(0j!>)IC&9q +zviah`_anRg)29m$PpOn=S0ApxC&m@xpME0S2im7bT51kOwUdYti<<#`iXzTIxJVjg +zTtSebQwzN$OVt7LyxCbrxTTsIMtQUSIRF40*OV$QN^m(ngecMa{Pzf#oyW_zJFY8Y}oS +zoVY=uN0-QT7R(A?J)PW?`r$&m*ml4^uRcRVYZUHM)3bVe&~EnGLZ7s3%Hmfe +zB{YN8r{yF=ktM#e-za+D6Xa8!r?lZKiRrmp=~l;~jMN>QfUV!P4;A2upCGH17b#8f +z`|>vU&e4101j+Ym7l!q8T#Doa{qLsp;48?6IlKAwf6CFyj@gB=8Lz;K~`=`AX!3;^x$23-8 +z{q1F4CK$|c4oS%p<#I@(OP(q4(D0axf45O^u>Ru>#}Q6f1U>N +z=R_TiH_OeNMDD{+7ib%>%(D~{UQhdGYdT9m9ihHj2J*M!ab;8MmFA)*&L~dObU~l0 +zsd9yj9tTN`QUzZ4#oGdTY}1A&nd)}}UQ;Q%7h_1E%QQbNx;2DDR6aemJ_VmxApZy> +zHHLp=nf&BQ^1Ce`sLiVxF2A$#s9UwsI@-dXS77s#$FQOQ-a&R$(=3>eIkbAO@LsXf +zDJ`3V?qk0jUw{J#(7xnl;b{<7=(|3T3)?&T;7>z|!w#LhTg$2Tar>X5Kd=LNmHZ+3 +zCaMn8$PU>p+a;z}>!>2Yu19=TAmaazBfONh!#9A@8Z~M;F{SMy@ +zj@~HDPJ;@=*b9NYCzpiI;?<_*VflP>ah<16g>tnQJrr8Ns&M}I$M->C@z)zFu^B$E +zeAP7a2DO6uR>fM!o#V|Q+;@TwzAij!cOIbq5NZvG2t?N2211v`qc??Ey0cI_W@kD= +zYMl}S5O{*~Kt4v5twu@1yP+!8KjAhPrWz65+IYp7Ld5i3tsYk(04zU$HTNExAZzoJ +z@JI^?F|X=gLxsScDT?a)?d?6(98wz{(0&5v;5SFeygdar`ue;{Qd_x?hqz)Ls3tfK +z&Du|X?7jf<=|8B2Hv~#sHG*5jzMB82hkt^nsL<)zH^#PeA$ic62jmNC?p7a9>8f7q +z)`o)S1HSofMiZ7s#~i$);k8OxSXtW*S4CW=U(xGj=IQ35%AZhJ+W}6_m +zLY|2X7T;^hm9kK+suN8FXUiG6s_UdbYFmd299xR!Vp?Nj`9BBRABz@S4!L>{os@aP +zMrY`$cId}yKjFb|KKnr*NeYexOi#8D^11h|Fb|&-(+o8awJQ|Ote`ibt6^7Blu{G6 +zcA^07Z}$`Vekxc*E{I_)cx5Z`%FJ2gQqUao#hO0;fvf#2nE$sRW6oD-Bt6|l&;f@; +ziywQ*#9SH;5xGB8EK0JpXP5!)n;C24@Kd7Y27moDdQJ2ulkc;cNa9XjQz&gKjX*1Y +z9gq)zp&Dygb4#WuVsy?Z86bOjz$od`RD)7x22zc|=gCJP4^4ad4XaM&@a?;$moCuk +zjRt}WFQ*g2ZdCD2YAG*u!TL+ki{cP8&qVR#laI1oq&)_Lau9A{9Ynhko2EF)SiQjN +z9XP$jrX2`gZ~AWylFLgeujDt{@l<&|UN=>`qwn$a83Fx4ytGPY?To4H=7`&seP-5p +z;zbp0(7RfKTu8(Hwe1O5KOE^9x$~i7TRV(CzWbJXOXR0-2#6QflVX&-Zy&npv9Eyk +zk&NNtOHZCE*v@l$B>lP-j1T6qHb$NCw89L(a&&1c`|K$EZDUTxpr +z!%92H?wE@79HL)o0C`r`npId0HQqQ0*w60c_GQ{iXlfJnC8O_e#wJ@OmlT0KSHo!i +zb2-gnwFn(MMwjm~bO^0&R(!YAyy9)Fs|SItK%Tc`pN=zv^e}m)fxV?KiKL>8P|ijP +zl_`U^Ta#1U8Z6(Ck#fl=$j@Kk++ovjTD)mt-NR8wQh>@lx@#J>w26)f+E)t06c$gX +zxN7Eax!^B+KM*RIKld(z44-+H`KfEYBA9-v^?&XPbTH3nT6724tg7q2#1)nq8v$y$}voOsT8mymS7BoK`1=7Dm +zZmF2>pGqn!ZYMe7^&^bz=h{Y$q&EedKNg@t%#F_&h$5)uEcK^}&m-a9r)-}Jg8H}V +z-#f80SpxlWP#x7d8ZL;iLcYWdeB(bkd +zrVZI(`MEFRTobZ$L)XZ00ro0q*JPfaTT6)Wk5=5;o)w!fmg+$J{>hS{@rK5+yx^5B +z3<{PXl?V8g{-z?{h>Nv>l`V9`@zWGtP#+1^!Z}5@ +zmWl!Ghv8X2+B9#2Q+f57&`64y@7aRcK@z4K^pE?#={!5p2_PR`ds`lUb{1c18|GvV +z#btL~8cZo0Qo_V5eO|tqdAJMYV+&rY*og4->hvy5Ma*_iyJKWz8T_C2zC0evul;|l 
+z*(s82jis!^j4gZFmr}MuNW;u9Sw^$iWtSuhl_behBncrzq-2W-5k(?JN|q4vyKj7+ +znwdVI=lgk{=ljR+_j``l%suzab>8PX*LCi5-sjx6b7?18roSRR?y|6?THiZw&fL*{ +z)vaO_j(QW`RnBVLImMHq%FwzOCGkS6Z&{fVKZ7ga@PVY1G#mPZ9kIFg6I*P#`0M+= +zy=DC>D~KjMkfD^%g0H-HU<`0#B{j0Mz&E+p(@c{HdP@D<<{gD{Kc2sR@dYlH=dh#+G~{YGX9HZa +zGD0HQLE<^p`cL@+A5lWrfxBDR_{;SJ+5+{fk +zS=od4u%nSaGkERJDfM#}XC^c~pfWM7Je2a|Y52Of=%dH<^H*(fFSm3J?y=xOoU;1T +z{$y(cAXs)Xii{`FRr~JC;)! +z47A$m3u%LFs=eLe%{qhysNlq7y)XW*RQ!C2^}){L;|_~jd1=$AuvXpKu}XJ^ewX_a +z&Pv(t}xKmVEX3{@mzJt$07*<$IL!9Qq>I_P?(<@K#4E5-186~+4eqf +zG3KbHniwm~>!C((+w>9SAtAxs*$-6n_g(uR*q~3uvbNp@{?qj}xBBUDx+UB}sAUVI +z(b6{v2UY*D%Y#R%aX>XWtR!q-ec>swe#y^mcdKaKt0psAbzZpFQ|colkls2MRU~L3 +ziWUk?w%S*OxD|BjfT%OwnW%p4587sw@(Nhjr>k}MJ@1#uFnE86J>9xhsl6S}no{dK +zLRc(HxK1grC~kh5Gc95DWSxUmqQEp>B0oLDokK9pGwfQtNPDRhrM!mE#X;wQH}I;h +z)lb^?XiA8)T~f6Aa7Z|i&g0zkEul@6^45$y=Gf0m;pFvuOH}VnF4aU2n)Ew<$hJ6t +zD-SXEfa?B^ty@Y1+p5(SpH8iZvWX*HPRB_mZ}(VrCV8Ln*UV#wQQ{vlx?3b9-}g+d +zL89aRL(?C9$M~;19(Rmf;+na1sPlywrTkIXhvKY44Bx)-or`hHPJY4y&);d!h`g2= +zxp`t2r>iNYJkrYH7LO5f*|_y;K!oG>DQGEnayjIN|PWg +zm}kg2qPN;iyCX+78lR_sY?g{Yr&s44GVQKP*K!zx%r|--v}gB|`Px6javZNjOm*zu +zMu}g*`$kwM^Tb6p3voA%&@Xk-pHEtJDJ`tJ_7P^i;*U|aH{jonI#(eAgfEEEs+t-h +z&#K^*n7pOYo&1(LlQbC1c1rxR;QQU0iB1y*g>8C=p&n85+M)(tcMkM?_WhRkmVZ+m +zrTk-WUVg^+_bhj+J1eUsn)pBK;8Pts8*kQ#^VzNNejnBN+e*0DQO{VgJd*DH^3@EC +zSv^61V)!}?uh@fy;5s^BKf`x#=yfN)5O8LtkLWeFmE~%tMUm1}0=5TNI5qCS`?ofSG%3(1^N0fz9 +z-rd?HWbynFtgr5w!32XM-d^J4ggC;9470UgsPH|vnbo=O30lGl-*h}dNL?8& +zj~wP1;eS7O3;s +zw-Ka6zSYLG9y|MYMN_Tkx1k?h5P24v^1aV_be{dt6M8+&C+4Tf8*EP>XF|65WK-%R +zmB<)>5Q&4~tv+^#@#ckyChjq-E049U(Z5^~{%P8qQoh%3+q?^N(FoMqkEz8*eVi>m +z_^pnZzq+n!R5+h*G}ZGetx&lH39C<}rq8z;9ImQ;zVUt1h7t7<{w~bU=b0jm>iNvT +z-YFCGlp)?Q0+t +z?z<>j@*HPIBEif1=pv>3RLS$MCkB3@!+quW__BSSF{NFZUX2kmwaG%U(^r$Jo=^S| +z7L6$%;wc*mXZfCMb|mt?Qn +z%6RCi>NeBcP>UKkOx2$+%)}j>_}Dvg^hCDZlZRhVkQ|iGIw7xVdd;xxNq8>PL5Uy# +z##ceoEZ;OM@|Z_P;BY#AWs=$8(k^~^6v4zawwbE^w}+$$GjoYuzg|DrGXMJad{F{j +z_H%#a{8q*vS7%k@KU3n%h|pfkP_*RJeD5TtNaffA^*Yh2pq^@`Ji$NcC +zjf)5SU)bT1c?&zUn;DSQ(K9!J>g!nRkZ1j|if3-$QT)vtpV%>w6a4I4V`XuSi0gvT +zVYZa`UB$zHk@b!I&SR*ILVsVQM-?Q70oH3;Q*RC$r{5+Birq=eYeoD;)-IRZhon@+ +zFFGwF+y>r@=xjewne<}rur<$ESh#Ro^Io#7;CA^yy*J{6o>b*i^U-MP4W%Daw=Js3 +zwzzP6_fgKzY;OpKg)7=QTdV|@>{YhI|7v!-!=o%lqCTXUDs0T#_^9Bs6c>8;197@f-}St +zC=&&NLX06uAU-M-2vg_<96>$2ffp7+06s$@`+-khK)@$I0E?bRdMz&)@O3TKZdp56 +zcE#sfram?}-==lbJLpzEEiFC062t*OG=!J{NBmkzZfoT@0Jv@drYC@?0(b +zArNXScX?-}*_RzZpx^RptRk@?6KAYwW_p&H=YF$B{s*0Ohe#mTpVb{~2Vf$vyTY3O +zf{;TSxQ>$5M4uGNB%p#7pT+U%LQH=e?J%41N{A5 +zV2;0^i{W3&1-7ccUlurS3_52n@CJ!>CD*H0mDHqPxhdUEsh+)VC1(wrv^Z7K#C0GL8IUYBDIV*gH+mkY3J-0{>nyjrGGd*;~?q;b96)u#&o4C-I{ +z-Ty_;`%QC?^fNWZ5~Q^jC2y}Dy4L)l-K5w~RNi(l{{p0Oz1-FLbzA>Sh5SV(kih`H +z*k~0JMdB`_mrHnjl&A6U@KVP{^xk~q{K6Dpd5rNA4M}PmoSE>Mi>}Sm+;TX`a052Z +z{)~bD@6SR&eb|8)*eF>cO;%hk8BCLyeB--C1DzGOGo5CG-YCwrFzg?~qUV!Mz{!vq +z&V4_YK!lLzJR7Ti$Gp4KR<+52&a|K{&^j*lf?RpxQ5COORzY_4cflFqMowFhp#MVX +zK76!Eu(SBWTU2+8?+b16z6jDMgUNl4as6=Jv-h&Nc#i?n=J}4t42K|k`DI$YT(8fF +z{V~Cm1AYUO6+q~~sonmyi47X!0U!7t_*R#aJ!r;0$RZk)pZ*A@6@AywNd$91*|hFy%V0z+@;7@CKF +z6I_-MeHWH>L*<59nB)1Iu2th2_TjNs;!;9a&~E?%lNd| +znYe*?1MZgrx!t3YbljG-jE}m{`KXt^yni*|ZqxG=>{2{C&0N%akP9I6^e=HLT5&mJ +zl5UkI2a>K-yRn|v_es5$Ko-aPw*plAoU{L5TJhRf9GVKV +znyuv~QTeqM+iE28gl&uLG|e^>J1+5CGDI@O_Dw}K&V5Qq`#8qrS*70c9jB@I-p`Cw +zJy?4Ie~r$(lO!E7-?D|*RBOY{$}W|13wRcK +z@M)YdWxjrT+s!e)IZunC7hH`TJtGR+Y4`CLcMeEp?YbaD!#HAz^<#Vi*Vn&G@@=&) +z81WJnU3FYdVq{xPQBPAVXA65iv#{wV^BG)P?AwB6ofPvMPOOzVA%e>hXBq9}(!Cze +zYS;41YUDq|BQcS!d1xUaCUZV +zzSbz=!_{S`4lwv9sEd<}CrTYkKqK)kZcrG^3kSn^;Jm&4fzKEMj6}ksfKLQ8Q459k 
+zfhwV#UDQ-jN-!4}1QMo%QQi$hVic8NiWtS+N*EE+?Q*wUGEJwevy%_Y;uX+%n-gq!fZFQ= +zXF7*xWH0Eks1KC-UOv83_DFOKmkv!2FUSgi;-dY$ad;y1pJ00I-Ysx_Z?~f5R3>(^ +zZb_d +zrEBpAu`lU#7Q{AXhzqVb+sFQkVP*2|kj1bXAO0)P&N +zfb&38AUAGJx_hG$&issx6yU~v`Q?BKkJXTFd5K#oeA593wF;@ +zwLXzkQp{f;Ex-4B74gVLllto~bWJL}(c(?4;-QaHYVPv=8Kjmn1=73w$DeAHoh{jI +zup)R{_IgiM&63(}!RE*_O%Fn8c*aI$$dG>D-;n?hfrB{JRU=h?N4?eT#(urfA9pe| +zO??DofJjy)H&Yv84@inut;!3Kfn&W0o;VZ~>*az+19c`+XzJa-OK;wB?V4w2DX-Yj +zn?QDj1@jD#@5uI$?>ynE&p(iZ0h)S2p)e#A0bI~R>0z88)nL<1%d9 +z{`%YuGFRe9gN?A=Bco(+==s?oH#GA`;)zZGg`rSPt=@Uplfv_hpG~lSD3#XOUc6bnZE9N{H@l16AoPB^ +z+ipV!^FN>{cpQ%SuW*$Aq-`3%SDC(d3CYW&BkDJ@94gA}d?hP$B}TVAjL!d*|DkWc +zwG9SIY2qlSdb*phJV_AVv@c3a`VK~hEj9b4O_yHG@#Qn_Nng_ZyS8x#ZKJ5DssmFq +zR92+$#DM_OMfW>~!Bf*%($GU6o}LP+B)A~D@_nuY8a{IDA-D+jvvB||Er5FXJMQL2 +zRMpw>j*<53j|9sbBF(NxB82v+a+S>Z*l(^#yYiY01H9I};EVKz;!$!QKAyl!>t}&#^6TaO_NTR68y&M2k?3y)mtF7w +zp%f+!mvae-uG{bC5jdF_>dJJUuD!xPl1{_(Sn#!p@TpEE@(yd`dUN(5fKvle9d=g?#|q#R>IE-tkSxQ8W={LH?i2xq;h;dbilFLNJ!*!QPPYrDId)d> +zZ0n&JWRKc%=FFW5*%EP%4|%Ds@2V5%_f<(at}=Kkiq?-93^*qDLxaTOj{3edC713gKNh?Q9oxn*Tj?U +zEfqVrCWX07k#jdtDX@0{$Oj6&!9<8?ElNhyWf-4+y)2xxt>fFSh5CIedpBhv1*Nww +z%?Ugm8_zzkN?p@6Py)%t1x+AO!EG*puQr&E)EB91>*;LHd9Y;(f<3QV>lNCeuw@@I +z*IkjUrUax9dTna(E9P&srnq$eQ-tQTiR9!*nNOQTa@(?9oS$@#Cwu8jrCv<< +z{?v@jZvWuU(6wNPhFH9(FA`Xkv_cb!z>^vTfW!h5d=gL)4D-itfh^3-7+;`Oki1!# +zgDEU3fq$mXh#Sq(9=%}LAqGEaIXOHf>bUgrty!>u;=Do*jI8+IvnLl|ofqf%TmEz= +zRkhD5@}BYO{+4H@-??2cZ0@@I?VMk?^;n+$&9l!O^eN#3s3xNz1^V3t@-IsD*_Gd0 +zL)3jVN>VY^zR;qsVWnwXSd~rNS)D();3)N!e9T^(ruqOEFpk1D|EbpYlG|!dCBC>$ +zlHpupnB=7^eO|f!!f|0Hd;I)VHGJ|l$a&w!&;dMp{annRfX6bWWt1y{G!M?y1FthMpi{0T=v2FyOzQ +zSOL#;0si{Ax~4_HQl&N#CPPT$`&xE9W_L=kGm2@KUZqxVW=*g1>!)DszF!9R|B2Ng +zdKzj}2%;)={!V$jLzqc@NaduQeKZ}d-O +ZNd6wq5*P-7Sqcj3izT{2t=7rR{{hJe1j+ya + +literal 0 +HcmV?d00001 + +diff --git a/test/data/create-runtime-policy/allowlist-sha1 b/test/data/create-runtime-policy/allowlist-sha1 +new file mode 100644 +index 0000000..ab7640c +--- /dev/null ++++ b/test/data/create-runtime-policy/allowlist-sha1 +@@ -0,0 +1 @@ ++009b0d8ee8fb8d890fa70f9c8e02b3f1eded1509 data +diff --git a/test/data/create-runtime-policy/allowlist-sha256 b/test/data/create-runtime-policy/allowlist-sha256 +new file mode 100644 +index 0000000..c757b48 +--- /dev/null ++++ b/test/data/create-runtime-policy/allowlist-sha256 +@@ -0,0 +1 @@ ++96d7fae8adb7286a419a88f78c13d35fb782d63df654b7db56f154765698b754 data +diff --git a/test/data/create-runtime-policy/allowlist-sha384 b/test/data/create-runtime-policy/allowlist-sha384 +new file mode 100644 +index 0000000..ea64b3c +--- /dev/null ++++ b/test/data/create-runtime-policy/allowlist-sha384 +@@ -0,0 +1 @@ ++20393b3e109c55b3128ba761bab1c555d8f39afe118eb1a29929ba8e017e8b2cdfc805c304e10d280604a829145ba6f0 data +diff --git a/test/data/create-runtime-policy/allowlist-sha512 b/test/data/create-runtime-policy/allowlist-sha512 +new file mode 100644 +index 0000000..3be6c5c +--- /dev/null ++++ b/test/data/create-runtime-policy/allowlist-sha512 +@@ -0,0 +1 @@ ++4fb914b8ed30ff69c65551573ce096aaf0c3507896cce44868d7a7e553891043c68a3889f3fc8056ef8c6bc3a54a9db83cd8112928ad51c5cd9e1a4ef332de53 data +diff --git a/test/data/create-runtime-policy/allowlist-sm3_256 b/test/data/create-runtime-policy/allowlist-sm3_256 +new file mode 100644 +index 0000000..b80db1e +--- /dev/null ++++ b/test/data/create-runtime-policy/allowlist-sm3_256 +@@ -0,0 +1 @@ ++dca3d0b348447a494da13278a2451683e43314a2d2add09f9ee8e028676018dd data +diff --git a/test/data/create-runtime-policy/allowlist-unknown 
b/test/data/create-runtime-policy/allowlist-unknown +new file mode 100644 +index 0000000..3822185 +--- /dev/null ++++ b/test/data/create-runtime-policy/allowlist-unknown +@@ -0,0 +1 @@ ++38debce1fb87347226e0c14d0084ef5c data +diff --git a/test/data/create-runtime-policy/ima-log-sha1 b/test/data/create-runtime-policy/ima-log-sha1 +new file mode 100644 +index 0000000..3c9acc0 +--- /dev/null ++++ b/test/data/create-runtime-policy/ima-log-sha1 +@@ -0,0 +1,2 @@ ++10 0000000000000000000000000000000000000000 ima-ng sha1:0000000000000000000000000000000000000000 boot_aggregate ++10 edcfbc3299860219161af60b266f8e2fa1fbd0c0 ima-ng sha1:009b0d8ee8fb8d890fa70f9c8e02b3f1eded1509 /data +diff --git a/test/data/create-runtime-policy/ima-log-sha256 b/test/data/create-runtime-policy/ima-log-sha256 +new file mode 100644 +index 0000000..1a313c3 +--- /dev/null ++++ b/test/data/create-runtime-policy/ima-log-sha256 +@@ -0,0 +1,2 @@ ++10 6309e2c83b7814367bb3912a55e5473454623535 ima-ng sha256:f4845392eca429a4c941a6a07fc32faf843a88c5c3dfa3b9329ab8f4171d9ce3 boot_aggregate ++10 80255d9c7dad91ef5f21b18560a47642d6f4d653 ima-ng sha256:96d7fae8adb7286a419a88f78c13d35fb782d63df654b7db56f154765698b754 /data +diff --git a/test/data/create-runtime-policy/ima-log-sha384 b/test/data/create-runtime-policy/ima-log-sha384 +new file mode 100644 +index 0000000..bbdd48a +--- /dev/null ++++ b/test/data/create-runtime-policy/ima-log-sha384 +@@ -0,0 +1,2 @@ ++10 0000000000000000000000000000000000000000 ima-ng sha384:000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 boot_aggregate ++10 75c0136ea25f6ef9ad3e029fc5ffde6a4d293a07 ima-ng sha384:20393b3e109c55b3128ba761bab1c555d8f39afe118eb1a29929ba8e017e8b2cdfc805c304e10d280604a829145ba6f0 /data +diff --git a/test/data/create-runtime-policy/ima-log-sha512 b/test/data/create-runtime-policy/ima-log-sha512 +new file mode 100644 +index 0000000..7f1df7b +--- /dev/null ++++ b/test/data/create-runtime-policy/ima-log-sha512 +@@ -0,0 +1,2 @@ ++10 0000000000000000000000000000000000000000 ima-ng sha512:00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 boot_aggregate ++10 edc4bec94ffdeb86c4062d9841b41da5c94c7a5d ima-ng sha512:4fb914b8ed30ff69c65551573ce096aaf0c3507896cce44868d7a7e553891043c68a3889f3fc8056ef8c6bc3a54a9db83cd8112928ad51c5cd9e1a4ef332de53 /data +diff --git a/test/data/create-runtime-policy/ima-log-sm3_256 b/test/data/create-runtime-policy/ima-log-sm3_256 +new file mode 100644 +index 0000000..d0c696d +--- /dev/null ++++ b/test/data/create-runtime-policy/ima-log-sm3_256 +@@ -0,0 +1,2 @@ ++10 0000000000000000000000000000000000000000000000000000000000000000 ima-ng sm3_256:0000000000000000000000000000000000000000000000000000000000000000 boot_aggregate ++10 7dd89e5149a560cd78cd8d6a5e2227fb7b1a48a01a66f431bfa71da36f3cb169 ima-ng sm3_256:96d7fae8adb7286a419a88f78c13d35fb782d63df654b7db56f154765698b754 /data +diff --git a/test/data/create-runtime-policy/ima-log-unknown b/test/data/create-runtime-policy/ima-log-unknown +new file mode 100644 +index 0000000..4be8e6e +--- /dev/null ++++ b/test/data/create-runtime-policy/ima-log-unknown +@@ -0,0 +1,2 @@ ++10 0000000000000000000000000000000000000000000000000000000000000000 ima-ng unknown:0000000000000000000000000000000000000000000000000000000000 boot_aggregate ++10 7dd89e5149a560cd78cd8d6a5e2227fb7b1a48a01a66f431bfa71da36f3cb169 ima-ng unknown:0011223344556677889900112233445577889900112233445566778899 /data +diff --git 
a/test/data/create-runtime-policy/policy-sha1 b/test/data/create-runtime-policy/policy-sha1 +new file mode 100644 +index 0000000..b40d714 +--- /dev/null ++++ b/test/data/create-runtime-policy/policy-sha1 +@@ -0,0 +1 @@ ++{"meta": {"version": 1, "generator": 3, "timestamp": "2024-09-12 13:44:44.298873"}, "release": 0, "digests": {"/base_policy_data": ["b261df1756a43b2d35c8ca13389c026840961d36"]}, "excludes": [], "keyrings": {}, "ima": {"ignored_keyrings": [], "log_hash_alg": "sha1", "dm_policy": null}, "ima-buf": {}, "verification-keys": ""} +diff --git a/test/data/create-runtime-policy/policy-sha256 b/test/data/create-runtime-policy/policy-sha256 +new file mode 100644 +index 0000000..5637c6a +--- /dev/null ++++ b/test/data/create-runtime-policy/policy-sha256 +@@ -0,0 +1 @@ ++{"meta": {"version": 1, "generator": 3, "timestamp": "2024-09-12 13:44:44.797331"}, "release": 0, "digests": {"/base_policy_data": ["35f6036dcbb4d819a90cc3282659754ab1a225e60f593a209c27b80174ba3180"]}, "excludes": [], "keyrings": {}, "ima": {"ignored_keyrings": [], "log_hash_alg": "sha1", "dm_policy": null}, "ima-buf": {}, "verification-keys": ""} +diff --git a/test/data/create-runtime-policy/policy-sha384 b/test/data/create-runtime-policy/policy-sha384 +new file mode 100644 +index 0000000..3ea8718 +--- /dev/null ++++ b/test/data/create-runtime-policy/policy-sha384 +@@ -0,0 +1 @@ ++{"meta": {"version": 1, "generator": 3, "timestamp": "2024-09-12 13:44:45.296199"}, "release": 0, "digests": {"/base_policy_data": ["407272428a6bda6a8a0e450b4ccfe3ba52a2faf5f45853692b05212aa3103d43524e03050b4ee119a0ec0a069cb5794b"]}, "excludes": [], "keyrings": {}, "ima": {"ignored_keyrings": [], "log_hash_alg": "sha1", "dm_policy": null}, "ima-buf": {}, "verification-keys": ""} +diff --git a/test/data/create-runtime-policy/policy-sha512 b/test/data/create-runtime-policy/policy-sha512 +new file mode 100644 +index 0000000..4861573 +--- /dev/null ++++ b/test/data/create-runtime-policy/policy-sha512 +@@ -0,0 +1 @@ ++{"meta": {"version": 1, "generator": 3, "timestamp": "2024-09-12 13:44:45.797865"}, "release": 0, "digests": {"/base_policy_data": ["5afaedd4458b68515747262bc32e84a3b8c70aaf299f4eeeae027b594d4e9d35850ff2838a9d075ad1d15ee3663c36a9349486b421a3b630401c817c179c6404"]}, "excludes": [], "keyrings": {}, "ima": {"ignored_keyrings": [], "log_hash_alg": "sha1", "dm_policy": null}, "ima-buf": {}, "verification-keys": ""} +diff --git a/test/data/create-runtime-policy/policy-sm3_256 b/test/data/create-runtime-policy/policy-sm3_256 +new file mode 100644 +index 0000000..1e1b252 +--- /dev/null ++++ b/test/data/create-runtime-policy/policy-sm3_256 +@@ -0,0 +1 @@ ++{"meta": {"version": 1, "generator": 3, "timestamp": "2024-09-19 16:52:59.083787"}, "release": 0, "digests": {"/rootfs_data": ["dca3d0b348447a494da13278a2451683e43314a2d2add09f9ee8e028676018dd"]}, "excludes": [], "keyrings": {}, "ima": {"ignored_keyrings": [], "log_hash_alg": "sha1", "dm_policy": null}, "ima-buf": {}, "verification-keys": ""} +\ No newline at end of file +diff --git a/test/data/create-runtime-policy/policy-unknown b/test/data/create-runtime-policy/policy-unknown +new file mode 100644 +index 0000000..6713df1 +--- /dev/null ++++ b/test/data/create-runtime-policy/policy-unknown +@@ -0,0 +1 @@ ++{"meta": {"version": 1, "generator": 3, "timestamp": "2024-09-19 16:52:59.083787"}, "release": 0, "digests": {"/rootfs_data": ["00112233445566778899001122334455667788990011223344556677889900"]}, "excludes": [], "keyrings": {}, "ima": {"ignored_keyrings": [], "log_hash_alg": "sha1", 
"dm_policy": null}, "ima-buf": {}, "verification-keys": ""} +diff --git a/test/data/create-runtime-policy/rootfs/rootfs_data b/test/data/create-runtime-policy/rootfs/rootfs_data +new file mode 100644 +index 0000000..6764395 +--- /dev/null ++++ b/test/data/create-runtime-policy/rootfs/rootfs_data +@@ -0,0 +1 @@ ++some data owned by root in rootfs +diff --git a/test/data/create-runtime-policy/setup-initrd-tests b/test/data/create-runtime-policy/setup-initrd-tests +new file mode 100755 +index 0000000..8b7af46 +--- /dev/null ++++ b/test/data/create-runtime-policy/setup-initrd-tests +@@ -0,0 +1,161 @@ ++#!/bin/bash ++set -euo pipefail ++ ++# SPDX-License-Identifier: Apache-2.0 ++# Copyright 2024 Red Hat, Inc. ++ ++BASEDIR=${1:-} ++if [ -z "${BASEDIR}" ] || [ ! -d "${BASEDIR}" ]; then ++ echo "Please specify a valid directory to use for setting up the dummy initrds" >&2 ++ exit 1 ++fi ++ ++TREE="${BASEDIR}"/tree ++CPIO="${BASEDIR}"/main.cpio ++EARLY_CPIO="${BASEDIR}"/early ++OUTDIR="${BASEDIR}"/initrd ++INITRD_PREFIX="${OUTDIR}"/initramfs-keylime ++DUMMY_ROOTFS="${BASEDIR}"/dummy-rootfs ++ ++build_fedora_like_early_tree() { ++ [ -n "${TREE}" ] \ ++ || die "Please indicate the dummy initrd tree in the TREE variable" ++ ++ # Let's first create a dummy tree to serve as our "initrd". ++ [ -d "${TREE}" ] && rm -rf "${TREE}" ++ mkdir -p "${TREE}" ++ ++ printf '1\n' > "${TREE}"/early_cpio ++} ++ ++build_debian_like_early_tree() { ++ [ -n "${TREE}" ] \ ++ || die "Please indicate the dummy initrd tree in the TREE variable" ++ ++ # Let's first create a dummy tree to serve as our "initrd". ++ [ -d "${TREE}" ] && rm -rf "${TREE}" ++ mkdir -p "${TREE}" ++ ++ mkdir -p "${TREE}"/kernel/x86/microcode ++ printf 'foobar\n' > "${TREE}"/kernel/x86/microcode/GenuineFooBar.bin ++} ++ ++ ++build_dummy_tree() { ++ [ -n "${TREE}" ] \ ++ || die "Please indicate the dummy initrd tree in the TREE variable" ++ ++ # Let's first create a dummy tree to serve as our "initrd". ++ [ -d "${TREE}" ] && rm -rf "${TREE}" ++ mkdir -p "${TREE}" ++ ++ # Now let's populate it. ++ mkdir -p "${TREE}"/{dev,var/tmp,usr/{bin,sbin,lib,lib64}} ++ ++ ln -s usr/bin "${TREE}"/bin ++ ln -s usr/sbin "${TREE}"/sbin ++ ln -s usr/lib "${TREE}"/lib ++ ln -s usr/lib64 "${TREE}"/lib64 ++ ++ # Add also a couple of dummy scripts. ++ # foo: sha256:18eb0ba043d6fc5b06b6f785b4a411fa0d6d695c4a08d2497e8b07c4043048f7 ++ printf '#!/bin/sh\necho foo\n' > "${TREE}"/usr/bin/foo ++ # bar: sha256:dd2ccf6ebfabbca501864a3ec5aebecfadd69d717ea9d9ddd509b49471d039db ++ printf '#!/bin/sh\necho bar\n' > "${TREE}"/usr/sbin/bar ++ # foobar.so: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 ++ printf '' > "${TREE}"/usr/lib/foobar.so ++ # foobar.so: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 ++ printf '' > "${TREE}"/usr/lib64/foobar64.so ++ ++ printf '' > "${TREE}/dev/foo bar" ++ # Add a named pipe/FIFO as well. ++ mknod "${TREE}"/usr/fifo p ++} ++ ++make_early_cpio() { ++ for distro in fedora debian; do ++ cpio_f="build_${distro}_like_early_tree" ++ ++ "${cpio_f}" ++ ( ++ cd "${TREE}" && find . -print0 | sort -z \ ++ | cpio --null --quiet -o -H newc \ ++ > "${EARLY_CPIO}-${distro}.cpio" ++ ) ++ done ++} ++ ++make_cpio() { ++ build_dummy_tree ++ # Let's build the CPIO file here too. ++ ( ++ cd "${TREE}" && find . 
-print0 | sort -z \ ++ | cpio --null --quiet -o -H newc > "${CPIO}" ++ ) ++ build_debian_like_early_tree ++} ++ ++build_dummy_rootfs() { ++ mkdir -p "${DUMMY_ROOTFS}"/{dev,var/tmp,usr/{bin,sbin,lib,lib64},tmp,root,home/foobar} ++ # All dummy files with sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 ++ printf '' > "${DUMMY_ROOTFS}"/dev/foobar-sdaX ++ printf '' > "${DUMMY_ROOTFS}"/var/tmp/foobar ++ printf '' > "${DUMMY_ROOTFS}"/usr/bin/foobar-bin ++ printf '' > "${DUMMY_ROOTFS}"/usr/sbin/foobar-sbin ++ printf '' > "${DUMMY_ROOTFS}"/usr/lib/foobar.so ++ printf '' > "${DUMMY_ROOTFS}"/usr/lib64/foobar64.so ++ printf '' > "${DUMMY_ROOTFS}"/usr/lib64/foobar-temp ++ printf '' > "${DUMMY_ROOTFS}"/root/foobar-root ++ printf '' > "${DUMMY_ROOTFS}"/home/foobar/non-root ++ ++ if [ "${EUID}" -eq 0 ]; then ++ # Running as root, let's make sure at least home ++ # is not owned by root. ++ chown 0:0 -R "${DUMMY_ROOTFS}"/ ++ chown 1000:1000 -R "${DUMMY_ROOTFS}"/home/ ++ fi ++} ++ ++build_dummy_rootfs ++make_early_cpio ++make_cpio ++ ++# Now let's compress our CPIO. ++[ -d "${OUTDIR}" ] && rm -rf "${OUTDIR}" ++mkdir -p "${OUTDIR}" ++ ++# Let's get info on the compression available. ++c_missing= ++compression= ++for c in cat gzip zstd bzip2 xz lzma lz4 lzop; do ++ if ! command -v "${c}" >/dev/null 2>/dev/null; then ++ c_missing="${c_missing} ${c}" ++ continue ++ fi ++ compression="${compression} ${c}" ++done ++ ++if [ -n "${c_missing}" ]; then ++ echo "WARN: not testing with the following compression because it was not found in the path:${c_missing}" >&2 ++fi ++ ++for distro in debian fedora; do ++ for compress in ${compression}; do ++ if ! command -v "${compress}" >/dev/null 2>/dev/null; then ++ echo "WARN: not compressing with '${compress}' because it was not found in the PATH" >&2 ++ continue ++ fi ++ ++ cmd="${compress} -c" ++ [ "${compress}" = "cat" ] && cmd="${compress}" ++ ++ # Version concatenated with the early_cpio. ++ dst="${INITRD_PREFIX}-early-${distro}-${compress}".img ++ cp "${EARLY_CPIO}-${distro}".cpio "${dst}" ++ ${cmd} < "${CPIO}" >> "${dst}" ++ ++ # Without the early_cpio. ++ dst="${INITRD_PREFIX}-${distro}-${compress}".img ++ ${cmd} < "${CPIO}" >> "${dst}" ++ done ++done +diff --git a/test/data/create-runtime-policy/setup-rpm-tests b/test/data/create-runtime-policy/setup-rpm-tests +new file mode 100755 +index 0000000..708438c +--- /dev/null ++++ b/test/data/create-runtime-policy/setup-rpm-tests +@@ -0,0 +1,345 @@ ++#!/bin/bash ++set -euo pipefail ++ ++# SPDX-License-Identifier: Apache-2.0 ++# Copyright 2024 Red Hat, Inc. ++ ++die() { ++ echo "${0} ERROR: ${1}" >&2 ++ exit "${2:-1}" ++} ++ ++BASEDIR=${1:-} ++if [ -z "${BASEDIR}" ] || [ ! -d "${BASEDIR}" ]; then ++ die "Please specify a valid directory to use for setting up the dummy rpms" ++fi ++ ++BASEDIR="$(realpath "${BASEDIR}")" ++ ++# rpmbuild ++RPMBUILD="${BASEDIR}"/rpmbuild ++SPECDIR="${RPMBUILD}"/SPECS ++SRCDIR="${RPMBUILD}"/SOURCES ++BUILDDIR="${RPMBUILD}"/BUILD ++BUILDROOTDIR="${RPMBUILD}"/BUILDROOT ++RPMSDIR="${RPMBUILD}"/RPMS ++SRPMSDIR="${RPMBUILD}"/SRPMS ++SPECFILE="${BASEDIR}"/dummy-template.spec ++EMPTY_SPECFILE="${BASEDIR}"/dummy-empty-template.spec ++ ++MACROS_RC="${BASEDIR}"/rpmbuild-macros ++MACROS_RC_SIG="${BASEDIR}"/rpmbuild-macros-sig ++# gpg ++GPGDIR_RSA="${BASEDIR}"/gnupg/rsa ++GPGDIR_ECC="${BASEDIR}"/gnupg/ecc ++GPGRSA="gpg --homedir ${GPGDIR_RSA} --batch --yes" ++GPGECC="gpg --homedir ${GPGDIR_ECC} --batch --yes" ++ ++# IMA signing keys. 
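++# The private key below is used to sign file digests when rpmsign is
++# invoked with --signfiles (wired up via %_file_signing_key further
++# down), while the DER-encoded certificate is what a verifier would
++# load to validate the resulting IMA file signatures.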
++IMA_KEYSDIR="${BASEDIR}"/ima-keys
++IMA_KEYS_CFG="${IMA_KEYSDIR}"/config
++IMA_PRIV_KEY="${IMA_KEYSDIR}"/privkey.pem
++IMA_PUB_KEY="${IMA_KEYSDIR}"/pubkey.pem
++IMA_KEYS_CERT_DER="${IMA_KEYSDIR}"/x509.der
++
++# test repositories
++RPM_REPO_UNSIGNED="${BASEDIR}"/repo/unsigned
++RPM_REPO_SIGNED_ECC="${BASEDIR}"/repo/signed-ecc
++RPM_REPO_SIGNED_RSA="${BASEDIR}"/repo/signed-rsa
++RPM_REPO_SIGNED_MISMATCH="${BASEDIR}"/repo/signed-mismatch
++RPM_REPO_SIGNED_NO_REPOMD="${BASEDIR}"/repo/no-repomd
++RPM_REPO_SIGNED_NO_KEY="${BASEDIR}"/repo/signed-no-key
++RPM_REPO_FILELIST_EXT_MISMATCH="${BASEDIR}"/repo/filelist-ext-mismatch
++RPM_REPO_UNSUPPORTED_COMPRESSION="${BASEDIR}"/repo/unsupported-compression
++
++sanity_check() {
++ # We need the following programs available for this to work.
++ _progs="gpg rpmbuild rpmsign createrepo_c openssl"
++ for _p in ${_progs}; do
++ command -v "${_p}" >/dev/null || die "'${_p}' NOT available" 77
++ done
++}
++
++create_ima_keys() {
++ mkdir -p "${IMA_KEYSDIR}"
++
++ cat << EOF > "${IMA_KEYS_CFG}"
++[ req ]
++default_bits = 3072
++default_md = sha256
++distinguished_name = req_distinguished_name
++prompt = no
++string_mask = utf8only
++x509_extensions = myexts
++
++[ req_distinguished_name ]
++O = Keylime Test Suite
++CN = Executable Signing Key
++emailAddress = keylime@example.com
++
++[ myexts ]
++basicConstraints=critical,CA:FALSE
++keyUsage=digitalSignature
++subjectKeyIdentifier=hash
++authorityKeyIdentifier=keyid
++EOF
++
++ openssl req -x509 -new -nodes -utf8 -days 90 -batch \
++ -config "${IMA_KEYS_CFG}" -outform DER \
++ -out "${IMA_KEYS_CERT_DER}" -keyout "${IMA_PRIV_KEY}"
++ openssl rsa -pubout -in "${IMA_PRIV_KEY}" -out "${IMA_PUB_KEY}"
++}
++
++create_gpg_rsa_key() {
++ mkdir -p "${GPGDIR_RSA}"
++ chmod 700 "${GPGDIR_RSA}"
++
++ ${GPGRSA} --gen-key < "${_dst}"
++%global source_date_epoch_from_changelog 0
++Name: DUMMY-%{dummy_name}
++Version: %{dummy_version}
++Release: %{dummy_release}
++Summary: Dummy package for testing purposes
++Provides: %{dummy_name}
++BuildArch: noarch
++License: CC0
++%description
++Dummy package for testing purposes, not intended to be installed.
++%install
++mkdir -p %{buildroot}%{_bindir}
++printf 'foo' > %{buildroot}%{_bindir}/dummy-foobar
++mkdir -p %{buildroot}%{_sysconfdir}
++printf 'bar' > %{buildroot}%{_sysconfdir}/dummy-foobar.conf
++%files
++%{_bindir}/dummy-foobar
++%{_sysconfdir}/dummy-foobar.conf
++EOF
++}
++
++save_empty_spec_template() {
++ _dst="${1}"
++cat << EOF > "${_dst}"
++%global source_date_epoch_from_changelog 0
++Name: DUMMY-%{dummy_name}
++Version: %{dummy_version}
++Release: %{dummy_release}
++Summary: Dummy package for testing purposes
++Provides: %{dummy_name}
++BuildArch: noarch
++License: CC0
++%description
++Dummy package for testing purposes, not intended to be installed. 
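++# Note: unlike the template above, this one defines no install
++# section and ships an empty files list, so the resulting package
++# intentionally carries no payload at all.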
++%files
++EOF
++}
++
++create_rpmbuild_macros() {
++ _dst="${1}"
++cat << EOF > "${_dst}"
++%_sourcedir ${SRCDIR}
++%_rpmdir ${RPMSDIR}
++%_srcrpmdir ${SRPMSDIR}
++%_specdir ${SPECDIR}
++%_builddir ${BUILDDIR}
++EOF
++}
++
++create_rpmbuild_macros_sig() {
++ _dst="${1}"
++ create_rpmbuild_macros "${_dst}"
++
++ cat << EOF >> "${_dst}"
++%_signature ${GPGRSA}
++%_gpg_path ${GPGDIR_RSA}
++%_gpg_name keylime@example.com
++%_gpgbin /usr/bin/gpg2
++%__gpg_sign_cmd %{__gpg} ${GPGRSA} --force-v3-sigs --verbose --no-armor --no-secmem-warning -u "%{_gpg_name}" -sbo %{__signature_filename} --digest-algo sha256 %{__plaintext_filename}
++%_file_signing_key ${IMA_PRIV_KEY}
++EOF
++}
++
++create_rpm() {
++ _name="${1}"
++ _version="${2}"
++ _rel="${3}"
++ _spec="${4}"
++ _signed=${5:-}
++
++ _macros="${MACROS_RC}"
++ [ -n "${_signed}" ] && _macros="${MACROS_RC_SIG}"
++
++ rpmbuild --define "dummy_name ${_name}" \
++ --define "dummy_version ${_version}" \
++ --define "dummy_release ${_rel}" \
++ --load="${_macros}" \
++ -bb "${_spec}"
++
++ # Make sure the rpm was created in the right place.
++ # From the following commit, it seems rpmbuild will not honor
++ # the custom settings defined via the macros and will build
++ # into ~/rpmbuild regardless.
++ # https://github.com/rpm-software-management/rpm/commit/96467dce18f264b278e17ffe1859c88d9b5aa4b6
++ _pkgname="DUMMY-${_name}-${_version}-${_rel}.noarch.rpm"
++
++ _expected_pkg="${RPMSDIR}/noarch/${_pkgname}"
++ [ -e "${_expected_pkg}" ] && return 0
++
++ # OK, the package was not built where it should. Let us see if
++ # it was built in ~/rpmbuild instead, and if that is the case,
++ # copy it to the expected location.
++ _bad_location_pkg="${HOME}/rpmbuild/RPMS/noarch/${_pkgname}"
++ if [ -e "${_bad_location_pkg}" ]; then
++ echo "WARNING: the package ${_pkgname} was built into ~/rpmbuild despite rpmbuild being instructed to build it at a different location. Probably a fallout from https://github.com/rpm-software-management/rpm/commit/96467dce" >&2
++ install -D -m644 "${_bad_location_pkg}" "${_expected_pkg}"
++ return 0
++ fi
++
++ # Should not be here.
++ return 1
++}
++
++prepare_rpms() {
++ save_spec_template "${SPECFILE}"
++ save_empty_spec_template "${EMPTY_SPECFILE}"
++
++ # Create the required rpmbuild directories.
++ mkdir -p "${SPECDIR}" "${SRCDIR}" "${BUILDDIR}" \
++ "${BUILDROOTDIR}" "${RPMSDIR}" "${SRPMSDIR}"
++
++ # And the directories for the repositories.
++ for _repodir in "${RPM_REPO_UNSIGNED}" \
++ "${RPM_REPO_SIGNED_RSA}" \
++ "${RPM_REPO_SIGNED_ECC}" \
++ "${RPM_REPO_SIGNED_MISMATCH}"; do
++ [ -d "${_repodir}" ] && rm -rf "${_repodir}"
++ mkdir -p "${_repodir}"
++ done
++
++ # Now let us build the RPMs.
++ create_rpmbuild_macros "${MACROS_RC}"
++ create_rpmbuild_macros_sig "${MACROS_RC_SIG}"
++ _version=42.0.0
++ _rel=el42
++ for _pn in foo bar; do
++ create_rpm "${_pn}" "${_version}" "${_rel}" "${SPECFILE}"
++ done
++
++ # Create an empty rpm as well.
++ create_rpm "empty" "${_version}" "${_rel}" "${EMPTY_SPECFILE}"
++
++ # And copy them to the "unsigned" repo.
++ find "${RPMSDIR}" -type f -name '*.rpm' -exec cp {} "${RPM_REPO_UNSIGNED}"/ \;
++ pushd "${RPM_REPO_UNSIGNED}" >/dev/null
++ createrepo_c --general-compress-type=gz .
++ popd >/dev/null
++
++ # Now we can copy the content over to the signed versions.
++ for _repodir in "${RPM_REPO_SIGNED_RSA}" \
++ "${RPM_REPO_SIGNED_ECC}"; do
++ cp -a "${RPM_REPO_UNSIGNED}"/* "${_repodir}"/
++ done
++
++ # --filelists-ext was introduced in createrepo_c 0.21; some distros
++ # - e.g. 
CentOS Stream 9 at the time of writing - have an older
++ # version of it, so that option is not available.
++ fext=
++ crepo_maj="$(createrepo_c --version | cut -f2 -d' ' | cut -f1 -d'.')"
++ crepo_min="$(createrepo_c --version | cut -f2 -d' ' | cut -f2 -d'.')"
++ if [ "${crepo_maj}" -gt 0 ] || [ "${crepo_min}" -ge 21 ]; then
++ fext=--filelists-ext
++ fi
++
++ # For ${RPM_REPO_SIGNED_RSA}, let us also pass --filelists-ext
++ # to createrepo_c, if it is supported.
++ pushd "${RPM_REPO_SIGNED_RSA}" >/dev/null
++ createrepo_c --general-compress-type=gz ${fext} .
++ popd >/dev/null
++
++ # Sign the repo metadata for the signed repos with both an RSA
++ # and an ECC gpg key.
++ ${GPGRSA} --detach-sign \
++ --armor "${RPM_REPO_SIGNED_RSA}"/repodata/repomd.xml
++ ${GPGRSA} --output "${RPM_REPO_SIGNED_RSA}"/repodata/repomd.xml.key \
++ --armor --export keylime@example.com
++
++ ${GPGECC} --detach-sign \
++ --armor "${RPM_REPO_SIGNED_ECC}"/repodata/repomd.xml
++ ${GPGECC} --output "${RPM_REPO_SIGNED_ECC}"/repodata/repomd.xml.key \
++ --armor --export keylime@example.com
++
++ # For the mismatched one, let's use the asc file from the RSA repo
++ # and the key from the ECC one.
++ cp "${RPM_REPO_SIGNED_RSA}"/* -a "${RPM_REPO_SIGNED_MISMATCH}"/
++ cp -f "${RPM_REPO_SIGNED_ECC}"/repodata/repomd.xml.key \
++ "${RPM_REPO_SIGNED_MISMATCH}"/repodata/repomd.xml.key
++
++ # A repo without the repomd.xml file.
++ mkdir -p "${RPM_REPO_SIGNED_NO_REPOMD}"/repodata/
++
++ # Now a signed repo without the key.
++ mkdir -p "${RPM_REPO_SIGNED_NO_KEY}"
++ cp "${RPM_REPO_SIGNED_RSA}"/* -a "${RPM_REPO_SIGNED_NO_KEY}"/
++ rm -f "${RPM_REPO_SIGNED_NO_KEY}"/repodata/repomd.xml.key
++
++ # If createrepo_c does not support --filelists-ext, let us not
++ # test for mismatch.
++ if [ -n "${fext}" ]; then
++ # And a repo without the filelists-ext file, although it indicates
++ # it has one.
++ mkdir -p "${RPM_REPO_FILELIST_EXT_MISMATCH}"
++ cp "${RPM_REPO_SIGNED_RSA}"/* -a "${RPM_REPO_FILELIST_EXT_MISMATCH}"/
++ rm -f "${RPM_REPO_FILELIST_EXT_MISMATCH}"/repodata/*-filelists-ext.xml*
++ fi
++
++ # Add a repo using unsupported compression for the files.
++ # We currently support only gzip.
++ mkdir -p "${RPM_REPO_UNSUPPORTED_COMPRESSION}"
++ find "${RPMSDIR}" -type f -name '*.rpm' -exec cp {} "${RPM_REPO_UNSUPPORTED_COMPRESSION}"/ \;
++ pushd "${RPM_REPO_UNSUPPORTED_COMPRESSION}" >/dev/null
++ createrepo_c --general-compress-type=xz .
++ popd >/dev/null
++
++ # Now let us add IMA signatures to the rpms in RPM_REPO_SIGNED_RSA. 
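++ # The per-file signatures land in each package's header; assuming
++ # a reasonably recent rpm, something like
++ # rpm -qp --qf '[%{filenames} %{filesignatures}\n]' <pkg>.rpm
++ # can be used to eyeball them afterwards.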
++    # Now let us add IMA signatures to the rpms in RPM_REPO_SIGNED_RSA.
++    find "${RPM_REPO_SIGNED_RSA}" -type f -name '*.rpm' -exec \
++        rpmsign --load="${MACROS_RC_SIG}" --addsign --signfiles {} \;
++}
++
++sanity_check
++create_keys
++prepare_rpms
+diff --git a/test/data/sign-runtime-policy/ec-p521-private.pem b/test/data/sign-runtime-policy/ec-p521-private.pem
+new file mode 100644
+index 0000000..bee7175
+--- /dev/null
++++ b/test/data/sign-runtime-policy/ec-p521-private.pem
+@@ -0,0 +1,7 @@
++-----BEGIN EC PRIVATE KEY-----
++MIHcAgEBBEIACRQTZMczcGmLjWwXOOFagOxN0hshxs4Dpx0M0/ntpMjwtR7bUXVN
++A4s4wW/wS5DZMw1Dc+ZYN6Bi44zVTLRTgBCgBwYFK4EEACOhgYkDgYYABAArfSAS
++ZUMydrOGPIcF4MSDwPKXHXvJWYpezrN2JFZ26YuuCoweb7TXR6uhmLa6RDKn3WlM
++5PlyJZoSdIo5roL4cgEkklwtTfaO7AFIvRee2OXO0VVN/22SG3Ur3jqFMO/46p2b
++nhawwfdsYf+afoDTPxfiaMegziXSo6zuPaD45eCCSg==
++-----END EC PRIVATE KEY-----
+diff --git a/test/data/sign-runtime-policy/ec-p521-public.pem b/test/data/sign-runtime-policy/ec-p521-public.pem
+new file mode 100644
+index 0000000..1724ecd
+--- /dev/null
++++ b/test/data/sign-runtime-policy/ec-p521-public.pem
+@@ -0,0 +1,6 @@
++-----BEGIN PUBLIC KEY-----
++MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAK30gEmVDMnazhjyHBeDEg8Dylx17
++yVmKXs6zdiRWdumLrgqMHm+010eroZi2ukQyp91pTOT5ciWaEnSKOa6C+HIBJJJc
++LU32juwBSL0XntjlztFVTf9tkht1K946hTDv+Oqdm54WsMH3bGH/mn6A0z8X4mjH
++oM4l0qOs7j2g+OXggko=
++-----END PUBLIC KEY-----
+diff --git a/test/data/sign-runtime-policy/rsa-4096-private.pem b/test/data/sign-runtime-policy/rsa-4096-private.pem
+new file mode 100644
+index 0000000..ae90865
+--- /dev/null
++++ b/test/data/sign-runtime-policy/rsa-4096-private.pem
+@@ -0,0 +1,52 @@
++-----BEGIN PRIVATE KEY-----
++MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCr40rhK6H5FVqk
++40OODGqL6L4Sz6Y7gOCR6kMwI9KhNdryPXO5Q5DQtTuturBSGoFpF0QLXoOePAlw
++ctJsxmauThPXI8GaiMJoDfN4YCxwab7h3TI92m5e2bPfrgB56GBtaPcnkLXFmyFW
++mRYL8OTbzNM8Lnrl+oO3TU/cFW7WYaVZVPX4m8o9tAaaBtv8BhpE71WhAU6LABIu
++DG6ASb65HoBUGW9llsjol4l1GTi9SqGnDq1zZQ2884N9am/dvP5rD0IzcGNtkBgw
++gl9czut9Z9494kOI+NE00LnMauiSox/XL+TR7mCEN4riWS2lOJUs3IgLRgg1+vkM
++xQEm2fsZ44DmWlxexOaFRsJjoDw0Pat3RzfGUccA7XL/spOKz9SGh8g1lsnnqMfi
++C92pp34+p42/H5LnjlXSVIAPeVF918vXDw27oLGHRDHgYs8sb5ehU6tcsFLMWEaJ
++5AyCwmUclKtyCzea0pktRuhKP0igOp+Ccyaxz2rYJF8PSJ8zRBt6BP1gonwkmqf5
++Jq6W1kxMgm0DkcKllFBEvfR2TZFYrhze63yWZtoJUSIsI7mUBR7x3q2/QkEbs//p
++DiIio2v95ENc9sK3C6IFva2xjr+O5ev0ulZ8+ckFSIsRb3hnGelSDtHdFEENhND9
++oAdcNzgBLx2QmSHBYmJSUvBz2/xRJwIDAQABAoICAFOxj+e0raDRN3oa2G1leUmX
++cVlKd0f42B/kP4PEpY3uYaXaqyrVjcE5xY2xXaIUmtd3Rx/8DL0ywWvZ1MC3GA8e
++IEX8eIvMtFAjHt4JggnYS2PBpvF8FoqjmRQ8LqotLiqH1fQTX5aQkvqN/rCgwmaN
++xHrErP2m7ZSGlfB1FStYj0DAgCXlgWNq8V31Ig8ET60qFfkX4E0JI1D9p55aif2s
++LtMEMXv3H+s0we7c8YYlvTFREb6iwT40Gd2qwj5nBEBPxS2ztw4Ff+m6yUEw1XQR
++66IZNkYO1RrQ2O8CTSz9NYc7IEDhv2gRfVP6FSIiO2yFEs7ecigKhFKl6AopUDw9
++6v+HlX/pMgHyRLJKSxdHFuExCerrRHSsVHF0oGLTtIve4Pl6qLitXwXrEdPFrJll
++k9nudwRXFa2pDnrtEcnn53/F180/MT3NZsTidZYwsDnltxDixSF9WDg4+42hOaJu
++qSSI7ZQCF3BmEe8Gijths1czTMWJ35ZT1idJYs7+VRmX6QMcGNuakxStyT+X0IPh
++6buBjmBF2ycsr/6/VBUGH5WiDGnlSFoKYT13td/i9K8+CjwoA5tulB4fgr34LReS
++1eIhLCOraV6v/zIFPga34mPLQEYDHnMzfNIXFIw40JwyJnVKMQg66c0x6iUKp/rP
++EiVlFB3ropOsZbHpFgM9AoIBAQDYewaBswmEleP7FEp3qmaEn3Q6Idd7YEg3pxnh
++RLf7+7ML6rUXL5r3y2Tz+cuLxVx0pZw3xVKIZWBHnbec5DMHsyDWaIPH4UE7N3wo
++BkqT6SE++lmv7eFtVsqri2/C84rGLS10deGGW2928T7wRwmvHNvDGFYpQ8r03MAc
++Hg+qTtxIAnCctz79Xq9RKG9wZqGtVNTCebi21j4ddW6qR0LBvmpnsKQikLyIPdEl
++5oAX+NTH9TWwn3+xlXynX4r2uVt9Un0mVByLDOynpwYxBaZQGCrq39i1XVEkbsNc
++ABkKRK6D6i9z9vhrg3kHASSwUT+ub0xPpcEJVhidVHr/lkjtAoIBAQDLREcAj5Gp
++oll38VZ+faWLhr60YXXl1FQcSUQd5j5Qv9Pci31dV1+q7YAa4Wj16+fdg5M0fW1w
++jqqDreWLf7U7pF986m1QAMX0qSNSf88F89l+0QpeEW0R52EHFGrbeheWH3szeWHK
++reIYAPuzgE+lr4/AHPv0y6VwpbksizZS8siT89+o7wmE9O4x4QFFWWfLITJbeVFm
++hULVJWdXyqaAhcBA69cKgLNHDPVNsXN1e0tWGCKbXlBJxVkfWXd0ALLfbR44ZIVF
++Pn++AkHFcqtrPUQSO9Cl9CzAOh6n/gMxW9fYyBUUCd4QuwcCPm0ssmnr2lOFFT3x
++LWEzN7aW3mPjAoIBAQCKRkLIs5hfxPJDXRmXyBMFoWg2/YjK+JwIWEbymdc/L+2U
++rg2PysucfaF1KVg3ZhABPgtDoFlwX/iYpggzQZvKzkMqUMXj9WCaN32pfyNByQaA
++WJ7S/gogsfBwlHcFulQo85B2NBminWLcQYonJejsnm1M942JfBD2SM645wawjQCt
++SkmuAGpOop0vJLnRvOyGb4oa0m8G8l/opyrRvwTIBh2nL7AXmAWkrH+cStgE997J
++/jmWILsKrmZZ7nAv2gOzT+46fj1hIBdFmy0cQPSTcmxvbB3a8skykUFd0X4tDeO+
++xOYcl2LkIa07qSJLMQqRG+xNb52WnhTI4b5S5SadAoIBAB6mSvaAr3weqNL3Jw2n
++sICbbsIQOBDRtWUWfxjrENZoNm/48IxUkm5ZpYloF8WNLscuAdJWX1wrhvaYEBa6
++yP8XR/az+ounsjG0F0lnUDYbkN4YczG/+uxKMU05/OrfPBM85/zY/q/rDD6GV223
++mUrocAqN1EgTNP0JYCShUS2swiXdDgqREKRlskz/Gov0cTwl40v7vRRyX6N4Il8Q
++hY63nEetyB0jTuUVxR9T9I7wammu9fjrfbvB8DBT8zkgqdgawgXK0nWgeO6TtIsr
++QUHIWS1Oq6V6bg0GNn0qRhz5NlDPyq3c1wk6ylHCTAADXwlbQGC+1To6l397lSKm
++ofUCggEAI1gDK/e/P3rwtVB2L63HMcJhHi0MwR2w7Dx3eDXeBHo9DsVhJL9Eq3cH
++agHL66OjuXUa5M0twYDdUJYn9hh7S+U8F3eo4Anev0iPTQiHIeqDM2QH8reAADsJ
++DLsuX1adhgvceuf64TJFNHaSLV7MSDB5ZnHmbhD7dTCV8F6czl0cmtc1w0PU6+3Y
++/US2q1R+KbgpNH5fofhEuuMz2zTcW9Bn0uwNZ0By0gOdJWj7+3yOaVIEOfTOcR+2
++xRqhzDawX9wSSicWrYyZLSSWbP4/yG0B2dLpxYPYlE2ndIK1u2vAhXiFDX4a/uT/
++CpW4760EcKIseYHRaET2cunG61NqHg==
++-----END PRIVATE KEY-----
+diff --git a/test/data/sign-runtime-policy/runtime-policy-bogus.json b/test/data/sign-runtime-policy/runtime-policy-bogus.json
+new file mode 100644
+index 0000000..c2d8c7f
+--- /dev/null
++++ b/test/data/sign-runtime-policy/runtime-policy-bogus.json
+@@ -0,0 +1,2 @@
++{
++];
+diff --git a/test/data/sign-runtime-policy/runtime-policy-empty.json b/test/data/sign-runtime-policy/runtime-policy-empty.json
+new file mode 100644
+index 0000000..0e93775
+--- /dev/null
++++ b/test/data/sign-runtime-policy/runtime-policy-empty.json
+@@ -0,0 +1 @@
++{"meta": {"version": 1, "generator": 3, "timestamp": "2024-10-09 18:45:19.878611"}, "release": 0, "digests": {}, "excludes": [], "keyrings": {}, "ima": {"ignored_keyrings": [], "log_hash_alg": "sha1", "dm_policy": null}, "ima-buf": {}, "verification-keys": ""}
+diff --git a/test/data/sign-runtime-policy/runtime-policy.json b/test/data/sign-runtime-policy/runtime-policy.json
+new file mode 100644
+index 0000000..4cf7903
+--- /dev/null
++++ b/test/data/sign-runtime-policy/runtime-policy.json
+@@ -0,0 +1 @@
++{"meta": {"version": 1, "generator": 3, "timestamp": "2024-10-09 18:47:35.306609"}, "release": 0, "digests": {"/usr/sbin/foobar-sbin": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"], "/usr/lib64/foobar-temp": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"], "/usr/lib/foobar.so": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"], "/root/foobar-root": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"], "/usr/bin/foobar-bin": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"], "/usr/lib64/foobar64.so": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"], "/home/foobar/non-root": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"]}, "excludes": [], "keyrings": {}, "ima": {"ignored_keyrings": [], "log_hash_alg": "sha1", "dm_policy": null}, "ima-buf": {}, "verification-keys": ""}
+diff --git a/test/test_create_mb_policy.py b/test/test_create_mb_policy.py
+new file mode 100644
+index 0000000..eaed0e3
+--- /dev/null
++++ b/test/test_create_mb_policy.py
+@@ -0,0 +1,602 @@
++"""
++SPDX-License-Identifier: Apache-2.0
++Copyright 2024 Red Hat, Inc.
++"""
++
++import argparse
++import os
++import unittest
++
++from keylime.policy import create_mb_policy
++
++DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "data", "create-mb-policy"))
++
++
++class CreateMeasuredBootPolicy_Test(unittest.TestCase):
++    def test_event_to_sha256(self):
++        test_cases = [
++            {"event": {"Digests": [{"AlgorithmId": "sha256", "Digest": "foobar"}]}, "expected": {"sha256": "0xfoobar"}},
++            {
++                "event": {
++                    "Digests": [
++                        {
++                            "AlgorithmId": "sha256",
++                            "Digest": "5a16d6659724873d146bdb8cf1c9f9fdb5473040107f16493fd2051ca71a1227",
++                        }
++                    ]
++                },
++                "expected": {"sha256": "0x5a16d6659724873d146bdb8cf1c9f9fdb5473040107f16493fd2051ca71a1227"},
++            },
++            {"event": "bogus", "expected": {}},
++            {"event": {}, "expected": {}},
++            {"event": {"Digests": [{"Digest": "foobar"}]}, "expected": {}},
++        ]
++
++        for c in test_cases:
++            self.assertDictEqual(create_mb_policy.event_to_sha256(c["event"]), c["expected"])
++
++    def test_get_s_crtm(self):
++        field = "scrtm"
++        test_cases = [
++            {"events": [], "expected": {}},
++            {"events": "foobar", "expected": {}},
++            {"events": [{}, {"EventType": "not this one"}], "expected": {}},
++            {"events": [{}, {"EventType": "EV_S_CRTM_VERSION"}], "expected": {field: {}}},
++            {
++                "events": [
++                    {},
++                    {"EventType": "EV_S_CRTM_VERSION", "Digests": [{"AlgorithmId": "sha1", "Digest": "foobar"}]},
++                ],
++                "expected": {field: {}},
++            },
++            {
++                "events": [
++                    {},
++                    {
++                        "EventType": "EV_S_CRTM_VERSION",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "5a16d6659724873d146bdb8cf1c9f9fdb5473040107f16493fd2051ca71a1227",
++                            }
++                        ],
++                    },
++                ],
++                "expected": {field: {"sha256": "0x5a16d6659724873d146bdb8cf1c9f9fdb5473040107f16493fd2051ca71a1227"}},
++            },
++        ]
++
++        for c in test_cases:
++            self.assertDictEqual(create_mb_policy.get_s_crtm(c["events"]), c["expected"])
++
++    def test_get_platform_firmware(self):
++        field = "platform_firmware"
++        test_cases = [
++            {"events": [], "expected": {field: []}},
++            {"events": "foobar", "expected": {field: []}},
++            {"events": [{}, {"EventType": "not this one"}], "expected": {field: []}},
++            {"events": [{}, {"EventType": "EV_S_CRTM_VERSION"}], "expected": {field: []}},
++            {"events": [{}, {"EventType": "EV_EFI_PLATFORM_FIRMWARE_BLOB"}], "expected": {field: [{}]}},
++            {
++                "events": [
++                    {},
++                    {
++                        "EventType": "EV_EFI_PLATFORM_FIRMWARE_BLOB",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "5a16d6659724873d146bdb8cf1c9f9fdb5473040107f16493fd2051ca71a1227",
++                            }
++                        ],
++                    },
++                ],
++                "expected": {field: [{"sha256": "0x5a16d6659724873d146bdb8cf1c9f9fdb5473040107f16493fd2051ca71a1227"}]},
++            },
++            {"events": [{}, {"EventType": "EV_EFI_PLATFORM_FIRMWARE_BLOB2"}], "expected": {field: [{}]}},
++            {
++                "events": [
++                    {},
++                    {
++                        "EventType": "EV_EFI_PLATFORM_FIRMWARE_BLOB2",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "5a16d6659724873d146bdb8cf1c9f9fdb5473040107f16493fd2051ca71a1227",
++                            }
++                        ],
++                    },
++                ],
++                "expected": {field: [{"sha256": "0x5a16d6659724873d146bdb8cf1c9f9fdb5473040107f16493fd2051ca71a1227"}]},
++            },
++        ]
++
++        for c in test_cases:
++            self.assertDictEqual(create_mb_policy.get_platform_firmware(c["events"]), c["expected"])
++
++    def test_variabledata_to_signature(self):
++        test_cases = [
++            {"data": [], "expected": []},
++            {"data": ["foobar"], "expected": []},
++            {"data": [{"Keys": []}], "expected": []},
++            {"data": [{"Keys": [{}]}], "expected": []},
++            {"data": [{"Keys": "foobar"}], "expected": []},
++            {"data": [{"Keys": [{"SignatureOwner": "sig-owner"}]}], "expected": []},
++            {"data": [{"Keys": [{"SignatureData": "sig-data"}]}], "expected": []},
++            {
++                "data": [{"Keys": [{"SignatureOwner": "sig-owner", "SignatureData": "sig-data"}]}],
++                "expected": [{"SignatureData": "0xsig-data", "SignatureOwner": "sig-owner"}],
++            },
++        ]
++
++        for c in test_cases:
++            self.assertListEqual(create_mb_policy.variabledata_to_signature(c["data"]), c["expected"])
++
++    def test_get_keys(self):
++        test_cases = [
++            {"events": [], "expected": {"db": [], "dbx": [], "kek": [], "pk": []}},
++            {
++                "events": [
++                    {
++                        "EventNum": 12,
++                        "PCRIndex": 7,
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "115aa827dbccfb44d216ad9ecfda56bdea620b860a94bed5b7a27bba1c4d02d8",
++                            }
++                        ],
++                        "Event": {"UnicodeName": "SecureBoot", "VariableData": {"Enabled": "No"}},
++                    }
++                ],
++                "expected": {"db": [], "dbx": [], "kek": [], "pk": []},
++            },
++            # Good event!
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "ddd2fe434fee03440d49850277556d148b75d7cafdc4dc59e8a67cccecad1a3e",
++                            }
++                        ],
++                        "Event": {
++                            "UnicodeName": "PK",
++                            "VariableData": [
++                                {
++                                    "SignatureType": "sig-type",
++                                    "Keys": [{"SignatureOwner": "sig-owner", "SignatureData": "sig-data"}],
++                                }
++                            ],
++                        },
++                    }
++                ],
++                "expected": {
++                    "pk": [{"SignatureOwner": "sig-owner", "SignatureData": "0xsig-data"}],
++                    "kek": [],
++                    "db": [],
++                    "dbx": [],
++                },
++            },
++            # Missing event["EventType"].
++            {
++                "events": [
++                    {
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "ddd2fe434fee03440d49850277556d148b75d7cafdc4dc59e8a67cccecad1a3e",
++                            }
++                        ],
++                        "Event": {
++                            "UnicodeName": "PK",
++                            "VariableData": [
++                                {
++                                    "SignatureType": "sig-type",
++                                    "Keys": [{"SignatureOwner": "sig-owner", "SignatureData": "sig-data"}],
++                                }
++                            ],
++                        },
++                    }
++                ],
++                "expected": {"db": [], "dbx": [], "kek": [], "pk": []},
++            },
++            # Bad event name; expected is EV_EFI_VARIABLE_DRIVER_CONFIG.
++            {
++                "events": [
++                    {
++                        "EventType": "WRONG_EVENT",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "ddd2fe434fee03440d49850277556d148b75d7cafdc4dc59e8a67cccecad1a3e",
++                            }
++                        ],
++                        "Event": {
++                            "UnicodeName": "PK",
++                            "VariableData": [
++                                {
++                                    "SignatureType": "sig-type",
++                                    "Keys": [{"SignatureOwner": "sig-owner", "SignatureData": "sig-data"}],
++                                }
++                            ],
++                        },
++                    }
++                ],
++                "expected": {"db": [], "dbx": [], "kek": [], "pk": []},
++            },
++            # Missing event["Event"].
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "ddd2fe434fee03440d49850277556d148b75d7cafdc4dc59e8a67cccecad1a3e",
++                            }
++                        ],
++                    }
++                ],
++                "expected": {"db": [], "dbx": [], "kek": [], "pk": []},
++            },
++            # Missing event["Event"]["UnicodeName"].
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "ddd2fe434fee03440d49850277556d148b75d7cafdc4dc59e8a67cccecad1a3e",
++                            }
++                        ],
++                        "Event": {
++                            "VariableData": [
++                                {
++                                    "SignatureType": "sig-type",
++                                    "Keys": [{"SignatureOwner": "sig-owner", "SignatureData": "sig-data"}],
++                                }
++                            ]
++                        },
++                    }
++                ],
++                "expected": {"db": [], "dbx": [], "kek": [], "pk": []},
++            },
++        ]
++        for c in test_cases:
++            self.assertDictEqual(create_mb_policy.get_keys(c["events"]), c["expected"])
++
++    def test_secureboot_enabled(self):
++        test_cases = [
++            {"events": [], "expected": False},
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Event": {"UnicodeName": "SecureBoot", "VariableData": {"Enabled": "Yes"}},
++                    }
++                ],
++                "expected": True,
++            },
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Event": {"UnicodeName": "SecureBoot", "VariableData": {"Enabled": "No"}},
++                    }
++                ],
++                "expected": False,
++            },
++            # No variable data.
++            {
++                "events": [{"EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG", "Event": {"UnicodeName": "SecureBoot"}}],
++                "expected": False,
++            },
++            # Bad event.
++            {
++                "events": [
++                    {
++                        "EventType": "WRONG_EVENT",
++                        "Event": {"UnicodeName": "SecureBoot", "VariableData": {"Enabled": "Yes"}},
++                    }
++                ],
++                "expected": False,
++            },
++            # No EventType.
++            {
++                "events": [{"Event": {"UnicodeName": "SecureBoot", "VariableData": {"Enabled": "No"}}}],
++                "expected": False,
++            },
++        ]
++
++        for c in test_cases:
++            self.assertEqual(create_mb_policy.secureboot_enabled(c["events"]), c["expected"])
++
++    def test_get_mok(self):
++        test_cases = [
++            {"events": [], "expected": {"mokdig": [], "mokxdig": []}},
++            {
++                "events": [
++                    {
++                        "EventType": "EV_IPL",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest"}],
++                        "Event": {"String": "MokListX"},
++                    }
++                ],
++                "expected": {"mokdig": [], "mokxdig": [{"sha256": "0xdigest"}]},
++            },
++            {
++                "events": [
++                    {
++                        "EventType": "EV_IPL",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest"}],
++                        "Event": {"String": "MokList"},
++                    }
++                ],
++                "expected": {"mokdig": [{"sha256": "0xdigest"}], "mokxdig": []},
++            },
++            # No EventType.
++            {
++                "events": [
++                    {"Digests": [{"AlgorithmId": "sha256", "Digest": "digest"}], "Event": {"String": "MokList"}}
++                ],
++                "expected": {"mokdig": [], "mokxdig": []},
++            },
++            # No event.
++            {
++                "events": [{"EventType": "EV_IPL", "Digests": [{"AlgorithmId": "sha256", "Digest": "digest"}]}],
++                "expected": {"mokdig": [], "mokxdig": []},
++            },
++            # No event["Event"]["String"].
++            {
++                "events": [
++                    {"EventType": "EV_IPL", "Digests": [{"AlgorithmId": "sha256", "Digest": "digest"}], "Event": {}}
++                ],
++                "expected": {"mokdig": [], "mokxdig": []},
++            },
++        ]
++
++        for c in test_cases:
++            self.assertDictEqual(create_mb_policy.get_mok(c["events"]), c["expected"])
++
++    def test_get_kernel(self):
++        test_cases = [
++            {"events": [], "secureboot": False, "expected": {}},
++            # No secure boot.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-2"}],
++                        "Event": {
++                            "DevicePath": "PciRoot(0x0)/Pci(0x6,0x0)/Pci(0x0,0x0)/NVMe(0x1,00-25-38-B2-21-D1-37-50)/HD(1,GPT,b8f6bee9-bc10-4c72-b34a-6db8fd8f772c,0x800,0x80000)/\\EFI\fedora\\shimx64.efi"
++                        },
++                    },
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-3"}],
++                        "Event": {"DevicePath": "\\EFI\fedora\\grubx64.efi"},
++                    },
++                ],
++                "secureboot": False,
++                "expected": {"kernels": [{"shim_authcode_sha256": "0xdigest-2", "grub_authcode_sha256": "0xdigest-3"}]},
++            },
++            # Similar to the previous one, but now it also has an
++            # application matching a path that should be ignored,
++            # as we have no reference value for it.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest1"}],
++                        "Event": {
++                            "DevicePath": "FvVol(8fc151ae-c96f-4bc9-8c33-107992c7735b)/FvFile(821aca26-29ea-4993-839f-597fc021708d)"
++                        },
++                    },
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-2"}],
++                        "Event": {
++                            "DevicePath": "PciRoot(0x0)/Pci(0x6,0x0)/Pci(0x0,0x0)/NVMe(0x1,00-25-38-B2-21-D1-37-50)/HD(1,GPT,b8f6bee9-bc10-4c72-b34a-6db8fd8f772c,0x800,0x80000)/\\EFI\fedora\\shimx64.efi"
++                        },
++                    },
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-3"}],
++                        "Event": {"DevicePath": "\\EFI\fedora\\grubx64.efi"},
++                    },
++                    {
++                        "PCRIndex": 9,
++                        "EventType": "EV_IPL",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-4"}],
++                        "Event": {"String": "(hd0,gpt2)/vmlinuz-5.14.0-347.el9.x86_64"},
++                    },
++                    {
++                        "PCRIndex": 8,
++                        "EventType": "EV_IPL",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "digest-5",
++                            }
++                        ],
++                        "Event": {
++                            "String": "kernel_cmdline: (hd0,gpt2)/vmlinuz-5.14.0-347.el9.x86_64 root=UUID=f0e4ae11-6b98-42f9-ab3b-3b962e8b4253 ro resume=UUID=ba40b3f3-e38d-42f7-8f81-4394e84f41a6 console=ttyS0,115200 ima_appraise=fix ima_canonical_fmt ima_policy=tcb ima_template=ima-ng"
++                        },
++                    },
++                ],
++                "secureboot": False,
++                "expected": {
++                    "kernels": [
++                        {
++                            "shim_authcode_sha256": "0xdigest-2",
++                            "grub_authcode_sha256": "0xdigest-3",
++                            "vmlinuz_plain_sha256": "0xdigest-4",
++                            "kernel_cmdline": "\\(hd0,gpt2\\)/vmlinuz\\-5\\.14\\.0\\-347\\.el9\\.x86_64\\ root=UUID=f0e4ae11\\-6b98\\-42f9\\-ab3b\\-3b962e8b4253\\ ro\\ resume=UUID=ba40b3f3\\-e38d\\-42f7\\-8f81\\-4394e84f41a6\\ console=ttyS0,115200\\ ima_appraise=fix\\ ima_canonical_fmt\\ ima_policy=tcb\\ ima_template=ima\\-ng",
++                        }
++                    ]
++                },
++            },
++            # Only one UEFI application; 2 are expected.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-2"}],
++                        "Event": {
++                            "DevicePath": "PciRoot(0x0)/Pci(0x6,0x0)/Pci(0x0,0x0)/NVMe(0x1,00-25-38-B2-21-D1-37-50)/HD(1,GPT,b8f6bee9-bc10-4c72-b34a-6db8fd8f772c,0x800,0x80000)/\\EFI\fedora\\shimx64.efi"
++                        },
++                    }
++                ],
++                "secureboot": False,
++                "expected": {},
++            },
++            # Now with Secure Boot.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-1"}],
++                        "Event": {
++                            "DevicePath": "PciRoot(0x0)/Pci(0x2,0x3)/Pci(0x0,0x0)/HD(1,GPT,a88ed452-9a52-45c4-91ce-3da7707caaab,0x800,0x12c000)/\\EFI\redhat\\shimx64.efi"
++                        },
++                    },
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-2"}],
++                        "Event": {"DevicePath": "\\EFI\redhat\\grubx64.efi"},
++                    },
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-3"}],
++                        "Event": {"DevicePath": ""},
++                    },
++                    {
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "digest-4",
++                            }
++                        ],
++                        "Event": {
++                            "UnicodeName": "SecureBoot",
++                            "VariableData": {"Enabled": "Yes"},
++                        },
++                    },
++                    {
++                        "PCRIndex": 9,
++                        "EventType": "EV_IPL",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-5"}],
++                        "Event": {"String": "(hd0,gpt2)/initramfs-5.14.0-347.el9.x86_64.img"},
++                    },
++                ],
++                "secureboot": True,
++                "expected": {
++                    "kernels": [
++                        {
++                            "kernel_authcode_sha256": "0xdigest-3",
++                            "shim_authcode_sha256": "0xdigest-1",
++                            "grub_authcode_sha256": "0xdigest-2",
++                            "initrd_plain_sha256": "0xdigest-5",
++                        }
++                    ]
++                },
++            },
++            # Secure Boot with only 2 applications (shim, kernel), without
++            # grub. 3 are expected.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-1"}],
++                        "Event": {
++                            "DevicePath": "PciRoot(0x0)/Pci(0x2,0x3)/Pci(0x0,0x0)/HD(1,GPT,a88ed452-9a52-45c4-91ce-3da7707caaab,0x800,0x12c000)/\\EFI\redhat\\shimx64.efi"
++                        },
++                    },
++                    {
++                        "EventType": "EV_EFI_BOOT_SERVICES_APPLICATION",
++                        "Digests": [{"AlgorithmId": "sha256", "Digest": "digest-3"}],
++                        "Event": {"DevicePath": ""},
++                    },
++                    {
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Digests": [
++                            {
++                                "AlgorithmId": "sha256",
++                                "Digest": "digest-4",
++                            }
++                        ],
++                        "Event": {
++                            "UnicodeName": "SecureBoot",
++                            "VariableData": {"Enabled": "Yes"},
++                        },
++                    },
++                ],
++                "secureboot": True,
++                "expected": {},
++            },
++        ]
++
++        for c in test_cases:
++            self.assertEqual(create_mb_policy.secureboot_enabled(c["events"]), c["secureboot"])
++            self.assertDictEqual(create_mb_policy.get_kernel(c["events"], c["secureboot"]), c["expected"])
++
++    def test_create_mb_refstate(self):
++        # Create an argument parser.
++        parent_parser = argparse.ArgumentParser(add_help=False)
++        main_parser = argparse.ArgumentParser()
++        subparser = main_parser.add_subparsers(title="actions")
++        parser = create_mb_policy.get_arg_parser(subparser, parent_parser)
++
++        event_log_secureboot_enabled = os.path.join(DATA_DIR, "binary_bios_measurements-secureboot")
++        event_log_secureboot_disabled = os.path.join(DATA_DIR, "binary_bios_measurements")
++        event_log_bogus = os.path.join(DATA_DIR, "binary_bios_measurements-bogus")
++        event_log_empty = os.path.join(DATA_DIR, "binary_bios_measurements-empty")
++
++        test_cases = [
++            {"valid": False, "missing_params": True},
++            {"valid": True, "missing_params": False, "-e": event_log_secureboot_enabled},
++            {"valid": False, "missing_params": False, "-e": event_log_bogus},
++            {"valid": False, "missing_params": False, "-e": event_log_empty},
++            # The next one has secure boot disabled but we will not
++            # indicate it (hence, it will not provide the -i flag),
++            # so it should fail.
++            {"valid": False, "missing_params": False, "-e": event_log_secureboot_disabled},
++            # Now let's indicate secure boot is disabled.
++            {"valid": True, "missing_params": False, "-e": event_log_secureboot_disabled, "secureboot_disabled": True},
++            # And now we have a log with secure boot enabled, but let's
++            # indicate it has it disabled, and it would be valid, but we
++            # would get a warning.
++            {"valid": True, "missing_params": False, "-e": event_log_secureboot_enabled, "secureboot_disabled": True},
++        ]
++
++        for case in test_cases:
++            expected = case["valid"]
++            del case["valid"]
++            missing_params = case["missing_params"]
++            del case["missing_params"]
++            secureboot_disabled = case.get("secureboot_disabled")
++            if secureboot_disabled:
++                del case["secureboot_disabled"]
++
++            # pylint: disable=consider-using-dict-items
++            cli_args = " ".join(f"{arg} {case[arg]}" for arg in case).split()
++
++            if secureboot_disabled:
++                cli_args.append("-i")
++
++            args = None
++            if missing_params:
++                # When required params are missing, it exits with SystemExit.
++                with self.assertRaises(SystemExit):
++                    args = parser.parse_args(cli_args)
++            else:
++                args = parser.parse_args(cli_args)
++                self.assertTrue(args is not None)
++
++                mb_policy = create_mb_policy.create_mb_refstate(args)
++                self.assertEqual(mb_policy is not None, expected, msg=f"args = {args}")
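Between the two test files, a brief editorial sketch of how the create_mb_policy helpers exercised above fit together; every function name and signature is taken from the tests themselves, and the single-event log is a toy example, not real data:

    from keylime.policy import create_mb_policy

    events = [
        {
            "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
            "Event": {"UnicodeName": "SecureBoot", "VariableData": {"Enabled": "Yes"}},
        }
    ]

    # Each helper extracts one slice of the measured-boot reference state.
    refstate = {}
    refstate.update(create_mb_policy.get_s_crtm(events))
    refstate.update(create_mb_policy.get_platform_firmware(events))
    refstate.update(create_mb_policy.get_keys(events))
    refstate.update(create_mb_policy.get_mok(events))
    sb = create_mb_policy.secureboot_enabled(events)  # True for this toy log
    refstate.update(create_mb_policy.get_kernel(events, sb))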
+diff --git a/test/test_create_runtime_policy.py b/test/test_create_runtime_policy.py
+new file mode 100644
+index 0000000..eb9c19a
+--- /dev/null
++++ b/test/test_create_runtime_policy.py
+@@ -0,0 +1,985 @@
++import argparse
++import copy
++import os
++import pathlib
++import shutil
++import subprocess
++import sys
++import tempfile
++import unittest
++from importlib import util
++from test.utils import assertDigestsEqual, keylimePolicyAssertLogs
++
++from keylime.common import algorithms
++from keylime.ima import ima
++from keylime.policy import create_runtime_policy, initrd
++from keylime.policy.logger import Logger
++
++_HAS_LIBARCHIVE = util.find_spec("libarchive") is not None
++
++HELPER_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "data", "create-runtime-policy"))
++
++# The test initrds have the following content.
++INITRD_LEGACY_ALLOWLIST = """18eb0ba043d6fc5b06b6f785b4a411fa0d6d695c4a08d2497e8b07c4043048f7 /usr/bin/foo
++e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 /usr/lib/foobar.so
++e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 /usr/lib64/foobar64.so
++dd2ccf6ebfabbca501864a3ec5aebecfadd69d717ea9d9ddd509b49471d039db /usr/sbin/bar
++e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 /dev/foo bar
++"""
++
++INITRD_DIGESTS_SHA256 = {
++    "/dev/foo_bar": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"],
++    "/usr/bin/foo": ["18eb0ba043d6fc5b06b6f785b4a411fa0d6d695c4a08d2497e8b07c4043048f7"],
++    "/usr/lib/foobar.so": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"],
++    "/usr/lib64/foobar64.so": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"],
++    "/usr/sbin/bar": ["dd2ccf6ebfabbca501864a3ec5aebecfadd69d717ea9d9ddd509b49471d039db"],
++}
++
++INITRD_DIGESTS_SHA1 = {
++    "/dev/foo_bar": ["da39a3ee5e6b4b0d3255bfef95601890afd80709"],
++    "/usr/bin/foo": ["a26ce416a048883cd6ca8e890f6b0a62a8031e8a"],
++    "/usr/lib/foobar.so": ["da39a3ee5e6b4b0d3255bfef95601890afd80709"],
++    "/usr/lib64/foobar64.so": ["da39a3ee5e6b4b0d3255bfef95601890afd80709"],
++    "/usr/sbin/bar": ["ec6705ccdaafdf57261e35e40379cc339d36a204"],
++}
++
++
++EXCLUDE_LIST = """
++boot_aggregate
++/usr/sbin/bar
++/dev/foo bar
++"""
++
++
++class CreateRuntimePolicy_Test(unittest.TestCase):
++    dirpath = ""
++    logger = Logger()
++
++    @classmethod
++    def setUpClass(cls):
++        cls.dirpath = tempfile.mkdtemp(prefix="keylime-create-runtime-policy-test")
++
++        setup_script = os.path.abspath(os.path.join(HELPER_DIR, "setup-initrd-tests"))
++
++        result = subprocess.run(
++            [setup_script, cls.dirpath],
++            stdout=subprocess.PIPE,
++            stderr=subprocess.PIPE,
++            check=False,
++        )
++        print("STDOUT:", result.stdout.decode("UTF-8"), file=sys.stderr)
++        print("STDERR:", result.stderr.decode("UTF-8"), file=sys.stderr)
++        CreateRuntimePolicy_Test().assertEqual(result.returncode, 0)
++
++    @classmethod
++    def tearDownClass(cls):
++        if cls.dirpath is not None:
++            shutil.rmtree(cls.dirpath)
++
++    def test_InitrdReader(self):
++        initrd_dir = os.path.join(self.dirpath, "initrd")
++        for initrd_file in create_runtime_policy.list_initrds(basedir=initrd_dir):
++            ii = initrd.InitrdReader(initrd_file)
++            digests = create_runtime_policy.path_digests(ii.contents(), remove_prefix=True)
++
++            # Now let's validate the digests.
++            assertDigestsEqual(digests, INITRD_DIGESTS_SHA256)
++
++    @unittest.skipUnless(_HAS_LIBARCHIVE, "libarchive not available")
++    def test_InitrdReader_extract_at_offset_methods(self):
++        initrd_dir = os.path.join(self.dirpath, "initrd")
++
++        libarchive_digests = None
++        fallback_digests = None
++        cwd = os.getcwd()
++
++        for initrd_file in create_runtime_policy.list_initrds(basedir=initrd_dir):
++            with open(initrd_file, "rb") as infile:
++                offset = initrd.InitrdReader.skip_cpio(infile)
++
++                with tempfile.TemporaryDirectory() as libarchive_dir:
++                    os.chdir(libarchive_dir)
++                    try:
++                        initrd.InitrdReader.extract_at_offset_libarchive(infile, offset)
++                        digests = create_runtime_policy.path_digests(libarchive_dir, remove_prefix=True)
++                        if libarchive_digests is None:
++                            libarchive_digests = digests
++                        assertDigestsEqual(digests, libarchive_digests)
++                    except Exception as e:
++                        self.fail(f"No exception expected while testing libarchive extraction: {e}")
++                    finally:
++                        os.chdir(cwd)
++
++                with tempfile.TemporaryDirectory() as fallback_dir:
++                    os.chdir(fallback_dir)
++                    try:
++                        initrd.InitrdReader.extract_at_offset_fallback(infile, offset)
++                        digests = create_runtime_policy.path_digests(fallback_dir, remove_prefix=True)
++                        if fallback_digests is None:
++                            fallback_digests = digests
++                        assertDigestsEqual(digests, fallback_digests)
++                    except Exception as e:
++                        self.fail(f"No exception expected while testing fallback extraction: {e}")
++                    finally:
++                        os.chdir(cwd)
++
++        # Finally, let's make sure the result of libarchive and the fallback
++        # method are the same.
++        assertDigestsEqual(libarchive_digests, fallback_digests)
++
++        # Now let's check a "bad" file.
++        bad_file = os.path.abspath(os.path.join(HELPER_DIR, "setup-initrd-tests"))
++        with open(bad_file, "rb") as infile:
++            self.assertRaises(
++                Exception,
++                initrd.InitrdReader.extract_at_offset_libarchive,
++                infile,
++                0,
++            )
++            self.assertRaises(
++                Exception,
++                initrd.InitrdReader.extract_at_offset_fallback,
++                infile,
++                0,
++            )
++
++    def test_boot_aggregate(self):
++        test_cases = [
++            {"input": "", "boot_aggregate": "", "alg": "invalid"},
++            {
++                "input": "10 0000000000000000000000000000000000000000 ima 0000000000000000000000000000000000000000 boot_aggregate",
++                "boot_aggregate": "0000000000000000000000000000000000000000",
++                "alg": "sha1",
++            },
++            {
++                "input": "10 0000000000000000000000000000000000000000 ima a00000000000000000000000000000000000000b boot_aggregate",
++                "boot_aggregate": "a00000000000000000000000000000000000000b",
++                "alg": "sha1",
++            },
++            {
++                "input": "10 0000000000000000000000000000000000000000 ima a00000000000000000000000000000000000000bcc boot_aggregate",
++                "boot_aggregate": "",
++                "alg": "invalid",
++            },
++            {
++                "input": "FOO BAR",
++                "boot_aggregate": "",
++                "alg": "invalid",
++            },
++            {
++                "input": "10 8d814e778e1fca7c551276523ac44455da1dc420 ima-ng sha256:0bc72531a41dbecb38557df75af4bc194e441e71dc677c659a1b179ac9b3e6ba boot_aggregate",
++                "boot_aggregate": "0bc72531a41dbecb38557df75af4bc194e441e71dc677c659a1b179ac9b3e6ba",
++                "alg": "sha256",
++            },
++            {
++                "input": "10 8d814e778e1fca7c551276523ac44455da1dc420 ima-ng sha1:0bc72531a41dbecb38557df75af4bc194e441e71dc677c659a1b179ac9b3e6ba boot_aggregate",
++                "boot_aggregate": "",
++                "alg": "invalid",
++            },
++            {
++                "input": "10 8d814e778e1fca7c551276523ac44455da1dc420 ima-ng unknown:0bc72531a41dbecb38557df75af4bc194e441e71dc677c659a1b179ac9b3e6ba boot_aggregate",
++                "boot_aggregate": "",
++                "alg": "invalid",
++            },
++        ]
++
++        with tempfile.TemporaryDirectory() as tmpdir:
++            agg_file = os.path.join(tmpdir, "measurements")
++            for c in test_cases:
++                alg, aggregate = create_runtime_policy.boot_aggregate_parse(c["input"])
++                self.assertEqual(alg, c["alg"], msg=f"alg={alg}, digest={aggregate}")
++                self.assertEqual(aggregate, c["boot_aggregate"])
++
++                # Now parsing it from a file.
++                with open(agg_file, "w", encoding="UTF-8") as mfile:
++                    mfile.write(c["input"])
++
++                alg, aggregate = create_runtime_policy.boot_aggregate_from_file(agg_file)
++                self.assertEqual(alg, c["alg"], msg=f"{c['input']}")
++                self.assertEqual(aggregate, c["boot_aggregate"])
++
++        # Now let's parse some bogus entries.
++        # These should throw an exception.
++        bad_entries = [
++            "pcr pcr-value img-ng sha999:fff boot_aggregate",
++            "pcr pcr-value img-ng sha1:fff boot_aggregate",
++            "pcr pcr-value img-ng sha256:fff boot_aggregate",
++            "pcr pcr-value ima fff boot_aggregate",
++        ]
++        for line in bad_entries:
++            alg, aggregate = create_runtime_policy.boot_aggregate_parse(line)
++            self.assertEqual(alg, "invalid", msg=f"line = {line}")
++            self.assertEqual(aggregate, "", msg=f"line = {line}")
++
++    def test_file_digest(self):
++        initrd_file = os.path.join(self.dirpath, "initrd", "initramfs-keylime-fedora-cat.img")
++        r = initrd.InitrdReader(initrd_file)
++
++        file_path = os.path.join(r.contents(), "usr/bin/foo")
++        test_cases = [
++            {
++                "file": file_path,
++                "alg": "sha1",
++                "digest": "a26ce416a048883cd6ca8e890f6b0a62a8031e8a",
++            },
++            {
++                "file": file_path,
++                "alg": "sha384",
++                "digest": "d2fcda9b029aa42f511b2d954e4bebaff2f4f6431374c111ec8efa59204c74164491e14e43e144a3b18e98bf6043cf75",
++            },
++            {
++                "file": file_path,
++                "alg": "sha512",
++                "digest": "2f979b08be70d85814d56ff5e21628ab79de93e1e88facdb975c71237ea46c47afc61d39d2eb089a4f7e5faafc05d5c11ee38db9c65167ac22b8cc4ad89f080c",
++            },
++        ]
++
++        for c in test_cases:
++            self.assertTrue(algorithms.Hash.is_recognized(c["alg"]))
++            self.assertEqual(
++                algorithms.Hash(c["alg"]).file_digest(c["file"]),
++                c["digest"],
++            )
++
++    def test_get_initrds_digests(self):
++        initrd_dir = os.path.join(self.dirpath, "initrd")
++        test_cases = [
++            {
++                "algo": "sha1",
++                "expected": INITRD_DIGESTS_SHA1,
++            },
++            {
++                "algo": "sha256",
++                "expected": INITRD_DIGESTS_SHA256,
++            },
++        ]
++
++        for c in test_cases:
++            digests = create_runtime_policy.get_initrds_digests(initrd_dir, {}, c["algo"])
++            assertDigestsEqual(digests, c["expected"])
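A simplified sketch, not the keylime implementation, of how one line of the flat allowlist format used above maps into the {path: [digest, ...]} structure the assertions compare against; process_flat_allowlist, exercised in the test that follows, additionally handles comments, blank lines, and paths containing spaces:

    from typing import Optional, Tuple

    def parse_flat_allowlist_line(line: str) -> Optional[Tuple[str, str]]:
        # "digest path" pairs; anything without both fields is skipped,
        # like the malformed entries in the test below.
        parts = line.strip().split(maxsplit=1)
        if len(parts) != 2:
            return None
        digest, path = parts
        return path, digest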
++    def test_process_flat_allowlist(self):
++        with tempfile.TemporaryDirectory() as tmpdir:
++            allowlist = os.path.join(tmpdir, "allowlist")
++            with open(allowlist, "w", encoding="UTF-8") as mfile:
++                mfile.write(INITRD_LEGACY_ALLOWLIST)
++
++            digests, ok = create_runtime_policy.process_flat_allowlist(allowlist, {})
++            self.assertTrue(ok)
++            assertDigestsEqual(digests, INITRD_DIGESTS_SHA256)
++
++            malformed_allowlist = """checksum file oops
++#
++checksum-2
++checksum-3 foo bar file 01
++checksum-4 \
++    bar foo file 02
++
++
++
++
++"""
++            with open(allowlist, "w", encoding="UTF-8") as mfile:
++                mfile.write(malformed_allowlist)
++            digests, ok = create_runtime_policy.process_flat_allowlist(allowlist, {})
++            self.assertTrue(ok)
++            # 3 valid entries there, with some lines skipped:
++            # file oops -> with checksum: checksum
++            # foo bar file 01 -> with checksum: checksum-3
++            # bar foo file 02 -> with checksum: checksum-4
++            self.assertEqual(len(digests), 3)
++
++        # Now let's test some invalid file.
++        digests, ok = create_runtime_policy.process_flat_allowlist("/some/invalid/non/existing/file/here", {})
++        self.assertFalse(ok)
++        self.assertEqual(len(digests), 0)
++
++    def test_path_digest_owned_by_root(self):
++        homedir = os.path.join(self.dirpath, "dummy-rootfs", "home")
++        fpath = os.path.join("/", "foobar", "non-root")  # homedir becomes the rootfs "/"
++
++        test_cases = [
++            {
++                "path": [fpath],
++                "checksum": {fpath: ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"]},
++                "algo": "sha256",
++                "owned_by_root": False,
++            },
++            {"path": [], "checksum": {}, "algo": "sha256", "owned_by_root": True},
++        ]
++
++        for c in test_cases:
++            digests = create_runtime_policy.path_digests(homedir, alg=c["algo"], only_owned_by_root=c["owned_by_root"])
++            self.assertEqual(len(digests), len(c["path"]))
++            for ff in digests:
++                self.assertTrue(ff in c["path"])
++            assertDigestsEqual(digests, c["checksum"])
++
++    def test_rootfs_absolute_path(self):
++        homedir = os.path.join(self.dirpath, "dummy-rootfs", "home")
++        digests = create_runtime_policy.path_digests(homedir)
++        for ff in digests:
++            self.assertFalse(pathlib.PurePath(ff).is_relative_to(homedir))
++
++    def test_path_digest_dirs_to_exclude(self):
++        rootfsdir = os.path.join(self.dirpath, "dummy-rootfs")
++        homedir = os.path.join(rootfsdir, "home")
++
++        digests = create_runtime_policy.path_digests(homedir)
++        self.assertEqual(len(digests), 1, msg=f"digests = {digests}")
++
++        digests = create_runtime_policy.path_digests(homedir, dirs_to_exclude=None)
++        self.assertEqual(len(digests), 1, msg=f"digests={digests}, dirs_to_exclude=None")
++
++        digests = create_runtime_policy.path_digests(homedir, dirs_to_exclude=[])
++        self.assertEqual(len(digests), 1, msg=f"digests = {digests}, dirs_to_exclude=[]")
++
++        digests = create_runtime_policy.path_digests(homedir, dirs_to_exclude=["/foobar"])
++        self.assertEqual(len(digests), 0, msg=f"digests = {digests}, dirs_to_exclude=['/foobar']")
++
++        digests = create_runtime_policy.path_digests(homedir, dirs_to_exclude=["/non-existing"])
++        self.assertEqual(len(digests), 1, msg=f"digests = {digests}, dirs_to_exclude=['/non-existing']")
++
++    def test_process_exclude_list(self):
++        test_cases = [
++            {
++                "line": "boot_aggregate",
++                "valid": True,
++            },
++            {
++                "line": "boot.aggreg*$",
++                "valid": True,
++            },
++            {
++                "line": "*",
++                "valid": False,
++            },
++            {
++                "line": "foobar.so(.*)?",
++                "valid": True,
++            },
++            {
++                "line": "",
++                "valid": True,
++            },
++        ]
++
++        for c in test_cases:
++            _, ok = create_runtime_policy.process_exclude_list_line(c["line"])
++            self.assertEqual(ok, c["valid"])
++
++        test_cases = [
++            {
++                "lines": """boot_aggregate
++boot.aggreg*$
++*
++foobar.so(.*)?
++
++""",
++                "expected": [],
++                "valid": False,
++            },
++            {
++                "lines": """boot_aggregate
++boot.aggreg*$
++foobar.so(.*)?
++""",
++                "expected": ["boot_aggregate", "boot.aggreg*$", "foobar.so(.*)?"],
++                "valid": True,
++            },
++            {
++                "lines": """
++
++
++""",
++                "expected": [],
++                "valid": True,
++            },
++        ]
++        with tempfile.TemporaryDirectory() as tmpdir:
++            excludelist = os.path.join(tmpdir, "excludelist")
++            for c in test_cases:
++                with open(excludelist, "w", encoding="UTF-8") as mfile:
++                    mfile.write(c["lines"])
++
++                exclude, ok = create_runtime_policy.process_exclude_list_file(excludelist, [])
++                self.assertEqual(ok, c["valid"], msg=f"lines = {c['lines']}")
++                self.assertEqual(sorted(c["expected"]), sorted(exclude), msg=f"lines = {c['lines']}")
++
++        # Now let's test some invalid file.
++        exclude, ok = create_runtime_policy.process_exclude_list_file("/some/invalid/non/existing/file/here", [])
++        self.assertFalse(ok)
++        self.assertEqual(len(exclude), 0)
++
++    def test_merge_lists(self):
++        test_cases = [
++            {
++                "a": [],
++                "b": [],
++                "expected": [],
++            },
++            {
++                "a": ["a"],
++                "b": [],
++                "expected": ["a"],
++            },
++            {
++                "a": [],
++                "b": ["b"],
++                "expected": ["b"],
++            },
++            {
++                "a": ["a"],
++                "b": ["a"],
++                "expected": ["a"],
++            },
++            {
++                "a": ["a", "b"],
++                "b": ["b"],
++                "expected": ["a", "b"],
++            },
++            {
++                "a": ["a", "b", "c"],
++                "b": ["b", "e"],
++                "expected": ["a", "b", "c", "e"],
++            },
++        ]
++
++        for c in test_cases:
++            self.assertEqual(create_runtime_policy.merge_lists(c["a"], c["b"]), c["expected"])
++
++    def test_merge_maplists(self):
++        test_cases = [
++            {
++                "a": {},
++                "b": {},
++                "expected": {},
++            },
++            {"a": {}, "b": {"file": ["checksum"]}, "expected": {"file": ["checksum"]}},
++            {"a": {"file": ["checksum"]}, "b": {}, "expected": {"file": ["checksum"]}},
++            {
++                "a": {"file": ["checksum"]},
++                "b": {"file": ["checksum"]},
++                "expected": {"file": ["checksum"]},
++            },
++            {
++                "a": {"file": ["checksum-1"]},
++                "b": {"file": ["checksum-2"]},
++                "expected": {"file": ["checksum-1", "checksum-2"]},
++            },
++            {
++                "a": {"file": ["checksum-1", "checksum-2", "checksum-3"]},
++                "b": {"file": ["checksum-2"], "file-2": ["checksum-4"]},
++                "expected": {
++                    "file": ["checksum-1", "checksum-2", "checksum-3"],
++                    "file-2": ["checksum-4"],
++                },
++            },
++        ]
++        for c in test_cases:
++            self.assertEqual(create_runtime_policy.merge_maplists(c["a"], c["b"]), c["expected"])
++
++    def test_get_hashes_from_measurement_list(self):
++        test_cases = [
++            {
++                "ima-list": """
++
++""",
++                "expected": {},
++                "valid": True,
++            },
++            {
++                "ima-list": """10 0adefe762c149c7cec19da62f0da1297fcfbffff ima-ng sha256:0000000000000000000000000000000000000000000000000000000000000000 boot_aggregate
++10 cff3da2ff339a1f07bb0dbcbc0381e794ed09555 ima-ng sha256:3e5e8ad9d8b4dd191413aba6166c7a975c3eab903d1fad77ecfa2d5810d6585c /usr/bin/kmod
++10 13d5b414e08a45698ce9e3c66545b25ba694046c ima-ng sha256:f51de8688a2903b94016c06f186cf1f053ececd2a88a5f349f29b35a06e94c43 /usr/lib64/ld-linux-x86-64.so.2
++""",
++                "expected": {
++                    "boot_aggregate": ["0000000000000000000000000000000000000000000000000000000000000000"],
++                    "/usr/bin/kmod": ["3e5e8ad9d8b4dd191413aba6166c7a975c3eab903d1fad77ecfa2d5810d6585c"],
++                    "/usr/lib64/ld-linux-x86-64.so.2": [
++                        "f51de8688a2903b94016c06f186cf1f053ececd2a88a5f349f29b35a06e94c43"
++                    ],
++                },
++                "valid": True,
++            },
++            {
++                "ima-list": "",
++                "expected": {},
++                "valid": True,
++            },
++            {
++                "ima-list": "10 cff3da2ff339a1f07bb0dbcbc0381e794ed09555 ima-ng sha256:3e5e8ad9d8b4dd191413aba6166c7a975c3eab903d1fad77ecfa2d5810d6585c",
++                "expected": {},
++                "valid": True,
++            },
++            {
++                "ima-list": "10 6f3474e730fb7da4bb26cad2d8f5d9d5482735f6 ima-buf sha256:571016c9f57363c80e08dd4346391c4e70227e41b0247b8a3aa2240a178d3d14 dm_table_load 646d5f76657273696f6e3d342e34362e303b6e616d653d7268656c2d726f6f742c757569643d4c564d2d79543538654c3268616470746a55396c565131573078315035544679454e35627450416b375963386779586633446667647a6a7554466b4a39503661746868582c6d616a6f723d3235332c6d696e6f723d302c6d696e6f725f636f756e743d312c6e756d5f746172676574733d313b7461726765745f696e6465783d302c7461726765745f626567696e3d302c7461726765745f6c656e3d3133333133363338342c7461726765745f6e616d653d6c696e6561722c7461726765745f76657273696f6e3d312e342e302c6465766963655f6e616d653d3235323a332c73746172743d37333234363732303b",
++                "expected": {},
++                "valid": True,
++            },
++        ]
++
++        with tempfile.TemporaryDirectory() as tmpdir:
++            ima_list = os.path.join(tmpdir, "ascii_runtime_measurements")
++            for c in test_cases:
++                with open(ima_list, "w", encoding="UTF-8") as mfile:
++                    mfile.write(c["ima-list"])
++
++                hashes, ok = create_runtime_policy.get_hashes_from_measurement_list(ima_list, {})
++                self.assertEqual(ok, c["valid"], msg=f"ima-list: ({c['ima-list']})")
++                print("HASHES", hashes)
++                self.assertEqual(hashes, c["expected"], msg=f"ima-list: ({c['ima-list']})")
++
++        # Try non-existing file.
++        hashes, ok = create_runtime_policy.get_hashes_from_measurement_list(
++            "/some/invalid/non/existing/ima/list/here", {}
++        )
++        self.assertFalse(ok)
++        self.assertEqual(len(hashes), 0)
++    def test_update_base_policy(self):
++        # TODO: now add some actual good cases, to test the more
++        # important flow.
++        # XXX: Need to clarify whether "verification-keys" is correct
++        # being a single string instead of an array of strings.
++        test_cases = [
++            # Base policy is an invalid JSON
++            {
++                "base-policy": "not-valid-json",
++                "expected": None,
++            },
++            # Base policy is a valid JSON with a field matching the current
++            # format, but with an invalid content according to current schema
++            {
++                "base-policy": '{"valid": "json", "verification-keys": "invalid"}',
++                "expected": None,
++            },
++            # Base policy is a valid JSON without any matching field against the
++            # current schema
++            {
++                "base-policy": '{"valid": "json", "invalid": "policy"}',
++                "expected": ima.empty_policy(),
++            },
++        ]
++
++        with tempfile.TemporaryDirectory() as tmpdir:
++            base_policy = os.path.join(tmpdir, "base-policy")
++            for c in test_cases:
++                with open(base_policy, "w", encoding="UTF-8") as mfile:
++                    mfile.write(c["base-policy"])
++
++                policy = create_runtime_policy.update_base_policy(base_policy)
++                self.assertEqual(policy, c["expected"])
++
++        # Try non-existing file.
++        policy = create_runtime_policy.update_base_policy("/some/invalid/non/existing/policy/here")
++        self.assertEqual(policy, None)
++
++    def test_get_digest_algorithm_from_hex(self):
++        """Test that the algorithm guessing works as expected"""
++
++        test_cases = [
++            {
++                "digest": "0001020304050607080900010203040506070809",
++                "expected_algorithm": "sha1",
++            },
++            {
++                "digest": "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
++                "expected_algorithm": "sha256",
++            },
++            {
++                "digest": "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f",
++                "expected_algorithm": "sha384",
++            },
++            {
++                "digest": "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f",
++                "expected_algorithm": "sha512",
++            },
++            {
++                "digest": "0001020304050607080900",
++                "expected_algorithm": "invalid",
++            },
++        ]
++
++        for case in test_cases:
++            algorithm = create_runtime_policy._get_digest_algorithm_from_hex(  # pylint: disable=protected-access
++                case["digest"]
++            )
++            self.assertEqual(algorithm, case["expected_algorithm"])
++
++    def test_get_digest_algorithm_from_map_list(self):
++        """Test that the algorithm guessing works as expected"""
++
++        test_cases = [
++            {
++                "digests": {"key": ["0001020304050607080900010203040506070809"]},
++                "expected_algorithm": "sha1",
++            },
++            {
++                "digests": {"key": ["000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"]},
++                "expected_algorithm": "sha256",
++            },
++            {
++                "digests": {
++                    "key": [
++                        "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f"
++                    ]
++                },
++                "expected_algorithm": "sha384",
++            },
++            {
++                "digests": {
++                    "key": [
++                        "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
++                    ]
++                },
++                "expected_algorithm": "sha512",
++            },
++            {
++                "digests": {"key": ["0001020304050607080900"]},
++                "expected_algorithm": "invalid",
++            },
++        ]
++
++        for case in test_cases:
++            algorithm = create_runtime_policy._get_digest_algorithm_from_map_list(  # pylint: disable=protected-access
++                case["digests"]
++            )
++            self.assertEqual(algorithm, case["expected_algorithm"])
++
++    def test_rootfs_with_symbolic_links(self):
++        test_cases = [
++            # Test that symlinks and files in the excluded directory are ignored
++            {
++                "dirs": ["root", "root/excluded", "root/included", "root/included/nested_excluded"],
++                "files": ["root/a", "root/included/b", "root/excluded/c", "root/included/nested_excluded/d", "outside"],
++                "symlinks": [
++                    ("root/sa", "root/a"),
++                    ("root/sb", "root/excluded/c"),
++                    ("root/sc", "outside"),
++                    ("root/sd", "root/included/nested_excluded/d"),
++                ],
++                "root": "root",
++                "dirs_to_exclude": ["/excluded", "/included/nested_excluded"],
++                "algorithm": "sha256",
++                "expected_out": {
++                    "/a": ["f86309c6fecb020efe59a73666162b69e43035da434c7c92df293553810e9907"],
++                    "/included/b": ["5b5b4bcb3b77ca3017d9f3ff9424f777389116c70e4b57c88a3ee857182a3d43"],
++                },
++            },
++        ]
++
++        for case in test_cases:
++            with tempfile.TemporaryDirectory() as tmpdir:
++                for d in case["dirs"]:
++                    os.makedirs(os.path.join(tmpdir, d))
++
++                for f in case["files"]:
++                    with open(os.path.join(tmpdir, f), "w", encoding="UTF-8") as fd:
++                        fd.write(f"some content in {f}")
++
++                for symlink, target in case["symlinks"]:
++                    os.symlink(os.path.join(tmpdir, target), os.path.join(tmpdir, symlink))
++
++                digests = create_runtime_policy.path_digests(
++                    os.path.join(tmpdir, case["root"]),
++                    alg=case["algorithm"],
++                    dirs_to_exclude=case["dirs_to_exclude"],
++                )
++
++                self.assertEqual(digests, case["expected_out"])
++
++    def test_digest_algorithm_priority(self):
++        """Test that the priority for the algorithm selection follows the
++        expected source order: --algo option > base policy > allowlist >
++        ima log"""
++
++        test_cases = []
++
++        rootfs = os.path.join(HELPER_DIR, "rootfs")
++        # Prepare test cases
++        for algo in ["sha1", "sha256", "sha384", "sha512", "sm3_256"]:
++            base_policy = os.path.join(HELPER_DIR, f"policy-{algo}")
++            allowlist = os.path.join(HELPER_DIR, f"allowlist-{algo}")
++            ima_log = os.path.join(HELPER_DIR, f"ima-log-{algo}")
++
++            # Case where the algorithm from the IMA measurement list should be
++            # kept
++            test_cases.append(
++                {
++                    "algo_opt": [],
++                    "base_policy": [],
++                    "allowlist": [],
++                    "ima_log": ["--ima-measurement-list", ima_log],
++                    "rootfs": [],
++                    "expected_algo": f"{algo}",
++                    "expected_source": "IMA measurement list",
++                }
++            )
++
++            # Cases where the algorithm from the allowlist should be kept
++            for il in [[], ["--ima-measurement-list", ima_log]]:
++                for rfs in [[], ["--rootfs", rootfs]]:
++                    # Skip the exceptional cases when the algorithm from the
++                    # allowlist is ambiguous
++                    if algo not in [algorithms.Hash.SHA256, algorithms.Hash.SM3_256]:
++                        test_cases.append(
++                            {
++                                "algo_opt": [],
++                                "base_policy": [],
++                                "allowlist": ["--allowlist", allowlist],
++                                "ima_log": il,
++                                "rootfs": rfs,
++                                "expected_algo": f"{algo}",
++                                "expected_source": "allowlist",
++                            }
++                        )
++
++                    # Cases where the algorithm from the base policy should be kept
++                    for al in [[], ["--allowlist", allowlist]]:
++                        # Skip the exceptional cases when the algorithm from the
++                        # base policy is ambiguous
++                        if algo not in [algorithms.Hash.SHA256, algorithms.Hash.SM3_256]:
++                            test_cases.append(
++                                {
++                                    "algo_opt": [],
++                                    "base_policy": ["--base-policy", base_policy],
++                                    "allowlist": al,
++                                    "ima_log": il,
++                                    "rootfs": rfs,
++                                    "expected_algo": f"{algo}",
++                                    "expected_source": "base policy",
++                                }
++                            )
++
++                        # Cases where the algorithm from the --algo option should be kept
++                        for bp in [[], ["--base-policy", base_policy]]:
++                            test_cases.append(
++                                {
++                                    "algo_opt": ["--algo", algo],
++                                    "base_policy": bp,
++                                    "allowlist": al,
++                                    "ima_log": il,
++                                    "rootfs": ["--rootfs", rootfs],
++                                    "expected_algo": f"{algo}",
++                                    "expected_source": "--algo option",
++                                }
++                            )
++
++        # Create an argument parser
++        parent_parser = argparse.ArgumentParser(add_help=False)
++        main_parser = argparse.ArgumentParser()
++        subparser = main_parser.add_subparsers(title="actions")
++        parser = create_runtime_policy.get_arg_parser(subparser, parent_parser)
++
++        for case in test_cases:
++            cli_args = ["--verbose"]
++            # Prepare argument input
++            for k in ["algo_opt", "base_policy", "allowlist", "ima_log", "rootfs"]:
++                cli_args.extend(case.get(k, []))
++
++            args = parser.parse_args(cli_args)
++            expected_algo = case["expected_algo"]
++            expected_source = case["expected_source"]
++
++            with keylimePolicyAssertLogs() as logs:
++                _policy = create_runtime_policy.create_runtime_policy(args)
++                self.assertIn(
++                    f"Using digest algorithm '{expected_algo}' obtained from the {expected_source}",
++                    logs.getvalue(),
++                    msg=f"ARGS: {' '.join(cli_args)}",
++                )
++
++    def test_digest_algorithm_priority_exceptions(self):
++        """Test priority algorithms exceptions"""
++
++        test_cases = []
++
++        bp_sha256 = os.path.join(HELPER_DIR, "policy-sha256")
++        bp_sm3 = os.path.join(HELPER_DIR, "policy-sm3_256")
++        al_sha256 = os.path.join(HELPER_DIR, "allowlist-sha256")
++        al_sm3 = os.path.join(HELPER_DIR, "allowlist-sm3_256")
++
++        # Prepare test cases
++        for algo in ["sha256", "sm3_256"]:
++            ima_log = os.path.join(HELPER_DIR, f"ima-log-{algo}")
++
++            for bp in [[], ["--base-policy", bp_sha256], ["--base-policy", bp_sm3]]:
++                for al in [[], ["--allowlist", al_sha256], ["--allowlist", al_sm3]]:
++                    test_cases.append(
++                        {
++                            "base_policy": bp,
++                            "allowlist": al,
++                            "ima_log": ["--ima-measurement-list", ima_log],
++                            "expected_algo": f"{algo}",
++                            "expected_source": "IMA measurement list",
++                            "expected_mismatch": False,
++                        }
++                    )
++
++        # Prepare test cases
++        for algo in ["sha1", "sha384", "sha512"]:
++            ima_log = os.path.join(HELPER_DIR, f"ima-log-{algo}")
++
++            for bp in [["--base-policy", bp_sha256], ["--base-policy", bp_sm3]]:
++                for al in [["--allowlist", al_sha256], ["--allowlist", al_sm3]]:
++                    test_cases.append(
++                        {
++                            "base_policy": bp,
++                            "allowlist": al,
++                            "ima_log": ["--ima-measurement-list", ima_log],
++                            "expected_algo": "sha256_or_sm3_256",
++                            "expected_source": "",
++                            "expected_mismatch": True,
++                        }
++                    )
++
++        # Create an argument parser
++        parent_parser = argparse.ArgumentParser(add_help=False)
++        main_parser = argparse.ArgumentParser()
++        subparser = main_parser.add_subparsers(title="actions")
++        parser = create_runtime_policy.get_arg_parser(subparser, parent_parser)
++
++        for case in test_cases:
++            cli_args = ["--verbose"]
++            # Prepare argument input
++            for k in ["base_policy", "allowlist", "ima_log"]:
++                cli_args.extend(case.get(k, []))
++
++            args = parser.parse_args(cli_args)
++            expected_algo = case["expected_algo"]
++            expected_source = case["expected_source"]
++
++            with keylimePolicyAssertLogs() as logs:
++                _policy = create_runtime_policy.create_runtime_policy(args)
++                if case["expected_mismatch"]:
++                    self.assertIn(
++                        f"The digest algorithm in the IMA measurement list does not match the previously set '{expected_algo}' algorithm",
++                        logs.getvalue(),
++                    )
++                else:
++                    self.assertIn(
++                        f"Using digest algorithm '{expected_algo}' obtained from the {expected_source}",
++                        logs.getvalue(),
++                    )
++
++    def test_mixed_algorithms_sources(self):
++        """Test that mixing digests from different algorithms is not allowed"""
++        test_cases = []
++
++        policy_sha1 = os.path.join(HELPER_DIR, "policy-sha1")
++        allowlist_sha1 = os.path.join(HELPER_DIR, "allowlist-sha1")
++        ima_log_sha1 = os.path.join(HELPER_DIR, "ima-log-sha1")
++
++        rootfs = os.path.join(HELPER_DIR, "rootfs")
++
++        base_test = {
++            "algo_opt": ["--algo", "sha1"],
++            "base policy": ["--base-policy", policy_sha1],
++            "allowlist": ["--allowlist", allowlist_sha1],
++            "IMA measurement list": ["--ima-measurement-list", ima_log_sha1],
++            "rootfs": ["--rootfs", rootfs],
++            "source": "",
++        }
++
++        rootfs = os.path.join(HELPER_DIR, "rootfs")
++        # Prepare test cases
++        for algo in ["sha256", "sha384", "sha512", "sm3_256"]:
++            base_policy = ["--base-policy", os.path.join(HELPER_DIR, f"policy-{algo}")]
++            allowlist = ["--allowlist", os.path.join(HELPER_DIR, f"allowlist-{algo}")]
++            ima_log = [
++                "--ima-measurement-list",
++                os.path.join(HELPER_DIR, f"ima-log-{algo}"),
++            ]
++
++            for source, argument in [
++                ("base policy", base_policy),
++                ("allowlist", allowlist),
++                ("IMA measurement list", ima_log),
++            ]:
++                case = copy.deepcopy(base_test)
++                case[source] = argument
++                case["source"] = source
++                test_cases.append(case)
++
++        # Create an argument parser
++        parent_parser = argparse.ArgumentParser(add_help=False)
++        main_parser = argparse.ArgumentParser()
++        subparser = main_parser.add_subparsers(title="actions")
++        parser = create_runtime_policy.get_arg_parser(subparser, parent_parser)
++
++        for case in test_cases:
++            cli_args = []
++            # Prepare argument input
++            for k in ["algo_opt", "base policy", "allowlist", "IMA measurement list", "rootfs"]:
++                cli_args.extend(case.get(k, []))
++
++            args = parser.parse_args(cli_args)
++
++            with keylimePolicyAssertLogs() as logs:
++                policy = create_runtime_policy.create_runtime_policy(args)
++                self.assertIn(
++                    f"The digest algorithm in the {case['source']} does not match the previously set 'sha1' algorithm",
++                    logs.getvalue(),
++                )
++                self.assertEqual(policy, None)
++
++    def test_unknown_algorithm_sources(self):
++        """Test that input with digests from unknown algorithms are not allowed"""
++
++        test_cases = []
++
++        policy_sha1 = os.path.join(HELPER_DIR, "policy-sha1")
++        allowlist_sha1 = os.path.join(HELPER_DIR, "allowlist-sha1")
++        ima_log_sha1 = os.path.join(HELPER_DIR, "ima-log-sha1")
++
++        policy_unknown = ["--base-policy", os.path.join(HELPER_DIR, "policy-unknown")]
++        allowlist_unknown = ["--allowlist", os.path.join(HELPER_DIR, "allowlist-unknown")]
++        ima_log_unknown = [
++            "--ima-measurement-list",
++            os.path.join(HELPER_DIR, "ima-log-unknown"),
++        ]
++
++        rootfs = os.path.join(HELPER_DIR, "rootfs")
++
++        base_test = {
++            "algo_opt": ["--algo", "sha1"],
++            "base policy": ["--base-policy", policy_sha1],
++            "allowlist": ["--allowlist", allowlist_sha1],
++            "IMA measurement list": ["--ima-measurement-list", ima_log_sha1],
++            "rootfs": ["--rootfs", rootfs],
++            "source": "",
++        }
++
++        rootfs = os.path.join(HELPER_DIR, "rootfs")
++        # Prepare test cases
++        for source, argument in [
++            ("base policy", policy_unknown),
++            ("allowlist", allowlist_unknown),
++            ("IMA measurement list", ima_log_unknown),
++        ]:
++            case = copy.deepcopy(base_test)
++            case[source] = argument
++            case["source"] = source
++            test_cases.append(case)
++
++        # Create an argument parser
++        parent_parser = argparse.ArgumentParser(add_help=False)
++        main_parser = argparse.ArgumentParser()
++        subparser = main_parser.add_subparsers(title="actions")
++        parser = create_runtime_policy.get_arg_parser(subparser, parent_parser)
++
++        for case in test_cases:
++            cli_args = ["--verbose"]
++            # Prepare argument input
++            for k in ["algo_opt", "base policy", "allowlist", "IMA measurement list", "rootfs"]:
++                cli_args.extend(case.get(k, []))
++
++            args = parser.parse_args(cli_args)
++
++            with keylimePolicyAssertLogs() as logs:
++                policy = create_runtime_policy.create_runtime_policy(args)
++                self.assertIn(
++                    f"Invalid digest algorithm found in the {case['source']}",
++                    logs.getvalue(),
++                )
++                self.assertEqual(policy, None)
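Before the signing tests, a hedged usage sketch of the helpers they exercise; every name and signature here appears in the test file that follows, while the file paths are placeholders:

    from keylime.policy import sign_runtime_policy
    from keylime.signing import verify_dsse_envelope

    # _get_signer builds an ECDSA signer from a PEM EC private key.
    signer = sign_runtime_policy._get_signer(
        backend="ecdsa",
        in_ec_keyfile_path="ec-p521-private.pem",  # placeholder path
        out_keyfile_path=None,
        out_certfile=None,
    )

    # _sign_policy wraps the policy JSON in a DSSE envelope, which the
    # matching public key can then verify.
    signed = sign_runtime_policy._sign_policy(signer, "runtime-policy.json")
    with open("ec-p521-public.pem", "rb") as f:
        assert verify_dsse_envelope(signed.encode("UTF-8"), f.read()) is not None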
++""" ++ ++import argparse ++import os ++import tempfile ++import unittest ++ ++from cryptography.hazmat.backends import default_backend ++from cryptography.hazmat.primitives.asymmetric import ec ++from cryptography.hazmat.primitives.serialization import load_pem_private_key ++ ++from keylime.cert_utils import is_x509_cert ++from keylime.policy import sign_runtime_policy ++from keylime.policy.logger import Logger ++from keylime.signing import verify_dsse_envelope ++ ++DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "data", "sign-runtime-policy")) ++EC_PRIVKEY = os.path.join(DATA_DIR, "ec-p521-private.pem") ++EC_PUBKEY = os.path.join(DATA_DIR, "ec-p521-public.pem") ++RSA_PRIVKEY = os.path.join(DATA_DIR, "rsa-4096-private.pem") ++ ++POLICY = os.path.join(DATA_DIR, "runtime-policy.json") ++POLICY_EMPTY = os.path.join(DATA_DIR, "runtime-policy-empty.json") ++POLICY_BOGUS = os.path.join(DATA_DIR, "runtime-policy-bogus.json") ++ ++ ++class SignRuntimePolicy_Test(unittest.TestCase): ++ def test__get_signer(self): ++ # Enable verbose logging, so we see the debug messages. ++ Logger().enableVerbose() ++ ++ test_cases = [ ++ {"backend": "", "keyfile": "", "keypath": "", "outcertfile": "", "valid": False}, ++ {"backend": None, "keyfile": None, "keypath": None, "outcertfile": None, "valid": False}, ++ {"backend": "x509", "keyfile": "foo", "keypath": "bar", "outcertfile": None, "valid": False}, ++ {"backend": "ecdsa", "keyfile": EC_PRIVKEY, "keypath": None, "outcertfile": None, "valid": True}, ++ {"backend": "ecdsa", "keyfile": RSA_PRIVKEY, "keypath": None, "outcertfile": None, "valid": False}, ++ { ++ "backend": "ecdsa", ++ "keyfile": EC_PRIVKEY, ++ "keypath": "something here", ++ "outcertfile": None, ++ "valid": False, ++ }, ++ {"backend": "ecdsa", "keyfile": None, "keypath": None, "outcertfile": None, "valid": True}, ++ {"backend": "x509", "keyfile": None, "keypath": None, "outcertfile": None, "valid": False}, ++ {"backend": "x509", "keyfile": None, "keypath": None, "outcertfile": "cert.x509", "valid": True}, ++ {"backend": "x509", "keyfile": EC_PRIVKEY, "keypath": None, "outcertfile": "cert.x509", "valid": True}, ++ {"backend": "x509", "keyfile": RSA_PRIVKEY, "keypath": None, "outcertfile": "cert.x509", "valid": False}, ++ ] ++ ++ cwd = os.getcwd() ++ try: ++ with tempfile.TemporaryDirectory() as temp_dir: ++ os.chdir(temp_dir) ++ ++ for c in test_cases: ++ keypath = None ++ if c["keypath"] is not None and c["keypath"] != "": ++ keypath = os.path.join(temp_dir, c["keypath"]) ++ ++ out_certfile = None ++ if c["outcertfile"] is not None and c["outcertfile"] != "": ++ out_certfile = os.path.join(temp_dir, c["outcertfile"]) ++ ++ # pylint: disable=protected-access ++ signer = sign_runtime_policy._get_signer( ++ backend=c["backend"], ++ in_ec_keyfile_path=c["keyfile"], ++ out_keyfile_path=keypath, ++ out_certfile=out_certfile, ++ ) ++ ++ self.assertEqual(signer is not None, c["valid"]) ++ ++ if c["valid"] and keypath: ++ self.assertTrue(os.path.exists(keypath)) ++ ++ # Now let us check it is actually an EC privkey. ++ with open(keypath, "rb") as f: ++ pem_data = f.read() ++ key = load_pem_private_key(pem_data, None, default_backend()) ++ ++ self.assertTrue(isinstance(key, ec.EllipticCurvePrivateKey)) ++ ++ if c["valid"] and out_certfile: ++ self.assertTrue(os.path.exists(out_certfile)) ++ ++ # And now we make sure it is a valid x509 cert. 
++                        with open(out_certfile, "rb") as f:
++                            cert_data = f.read()
++                            self.assertTrue(is_x509_cert(cert_data))
++        finally:
++            os.chdir(cwd)
++
++    def test__sign_policy(self):
++        # Enable verbose logging, so we see the debug messages.
++        Logger().enableVerbose()
++
++        signer_params = [
++            {"backend": "ecdsa", "keyfile": EC_PRIVKEY, "keypath": None, "out_certfile": None},
++            {"backend": "x509", "keyfile": EC_PRIVKEY, "keypath": None, "out_certfile": "cert.x509"},
++        ]
++
++        test_cases = [
++            {"policy": "some-non-existing-file", "valid": False},
++            {"policy": POLICY_BOGUS, "valid": False},
++            {"policy": POLICY_EMPTY, "valid": True},
++            {"policy": POLICY, "valid": True},
++        ]
++
++        with open(EC_PUBKEY, "rb") as f:
++            ec_pubkey = f.read()
++
++        cwd = os.getcwd()
++        try:
++            with tempfile.TemporaryDirectory() as temp_dir:
++                os.chdir(temp_dir)
++
++                signers = {}
++                for c in signer_params:
++                    # pylint: disable=protected-access
++                    signers[c["backend"]] = sign_runtime_policy._get_signer(
++                        backend=c["backend"],
++                        in_ec_keyfile_path=c["keyfile"],
++                        out_keyfile_path=c["keypath"],
++                        out_certfile=c["out_certfile"],
++                    )
++
++                for c in test_cases:
++                    for backend, signer in signers.items():
++                        # pylint: disable=protected-access
++                        signed = sign_runtime_policy._sign_policy(signer, c["policy"])
++                        self.assertEqual(signed is not None, c["valid"], msg=f"backend = {backend}, policy = {c['policy']}")
++
++                        # Let's also check that the policy was properly signed.
++                        if signed:
++                            verified = verify_dsse_envelope(signed.encode("UTF-8"), ec_pubkey)
++                            self.assertTrue(verified is not None)
++        finally:
++            os.chdir(cwd)
++
++    def test_sign_runtime_policy(self):
++        # Create an argument parser
++        parent_parser = argparse.ArgumentParser(add_help=False)
++        main_parser = argparse.ArgumentParser()
++        subparser = main_parser.add_subparsers(title="actions")
++        parser = sign_runtime_policy.get_arg_parser(subparser, parent_parser)
++
++        test_cases = [
++            {"valid": False, "missing_params": True},
++            {"--runtime-policy": POLICY, "valid": True, "missing_params": False},
++            {
++                "--runtime-policy": POLICY,
++                "valid": False,
++                "--keyfile": "foo",
++                "--keypath": "bar",
++                "missing_params": False,
++            },
++            {"--runtime-policy": POLICY, "valid": True, "missing_params": False, "--keyfile": EC_PRIVKEY},
++            {"--runtime-policy": POLICY, "valid": False, "missing_params": False, "--keyfile": RSA_PRIVKEY},
++        ]
++
++        cwd = os.getcwd()
++        try:
++            with tempfile.TemporaryDirectory() as temp_dir:
++                os.chdir(temp_dir)
++
++                for case in test_cases:
++                    expected = case["valid"]
++                    del case["valid"]
++                    missing_params = case["missing_params"]
++                    del case["missing_params"]
++
++                    # pylint: disable=consider-using-dict-items
++                    cli_args = " ".join(f"{arg} {case[arg]}" for arg in case).split()
++
++                    args = None
++                    if missing_params:
++                        # When required params are missing, it exits with SystemExit.
++                        with self.assertRaises(SystemExit):
++                            args = parser.parse_args(cli_args)
++                    else:
++                        args = parser.parse_args(cli_args)
++                        self.assertTrue(args is not None)
++
++                        signed = sign_runtime_policy.sign_runtime_policy(args)
++                        self.assertEqual(signed is not None, expected, msg=f"args = {args}")
++
++        finally:
++            os.chdir(cwd)
+diff --git a/test/utils.py b/test/utils.py
+new file mode 100644
+index 0000000..e14f3d8
+--- /dev/null
++++ b/test/utils.py
+@@ -0,0 +1,64 @@
++import sys
++import unittest
++from io import StringIO
++
++from keylime.policy import logger
++
++
++def assertDigestsEqual(d1, d2):
++    # Ensuring we have only unique values in the digest lists.
++    d1_unique = {k: sorted(list(set(v))) for k, v in d1.items()}
++    d2_unique = {k: sorted(list(set(v))) for k, v in d2.items()}
++
++    unittest.TestCase().assertEqual(len(d1_unique), len(d2_unique), msg="number of files must match")
++
++    for file in d1_unique:
++        unittest.TestCase().assertTrue(file in d2_unique)
++        unittest.TestCase().assertEqual(
++            len(d1_unique[file]),
++            len(d2_unique[file]),
++            msg=f"number of files/digests for {file}",
++        )
++
++        for d in d1_unique[file]:
++            unittest.TestCase().assertTrue(d in d2_unique[file], msg=f"file={file} digest={d}")
++
++
++# keylime policy logging.
++class _KeylimePolicyAssertLogsContext:
++    """A context manager for assertLogs() and assertNoLogs()"""
++
++    def __init__(self, no_logs):
++        self.logger = logger.Logger(verbose=True)
++        self.no_logs = no_logs
++        self.stderr = StringIO()
++
++    def __enter__(self):
++        self.logger.setStream(self.stderr)
++        return self.stderr
++
++    def __exit__(self, exc_type, exc_value, _tb):
++        self.logger.setStream(sys.stderr)
++
++        if exc_type is not None:
++            # Let unexpected exceptions pass through.
++            return False
++
++        logs = self.stderr.getvalue()
++
++        if self.no_logs:
++            # assertNoLogs
++            if len(logs) > 0:
++                raise ValueError(f"Unexpected logs found: {logs}")
++        else:
++            if len(logs) == 0:
++                raise ValueError("No logs triggered on keylime-policy")
++        return True
++
++
++def keylimePolicyAssertNoLogs():
++    return _KeylimePolicyAssertLogsContext(no_logs=True)
++
++
++def keylimePolicyAssertLogs():
++    return _KeylimePolicyAssertLogsContext(no_logs=False)
+--
+2.43.5
+
diff --git a/SOURCES/0016-Use-TLS-on-revocation-notification-webhook.patch b/SOURCES/0016-Use-TLS-on-revocation-notification-webhook.patch
new file mode 100644
index 0000000..6544844
--- /dev/null
+++ b/SOURCES/0016-Use-TLS-on-revocation-notification-webhook.patch
@@ -0,0 +1,167 @@
+From 4bd644b74719fdbb6c521d3d5eb2430d8dc18b36 Mon Sep 17 00:00:00 2001
+From: Sergio Correia
+Date: Wed, 5 Feb 2025 16:16:25 +0000
+Subject: [PATCH 16/16] Use TLS on revocation notification webhook
+
+---
+ keylime/requests_client.py | 5 ++
+ keylime/revocation_notifier.py | 91 +++++++++++++++++++++++-----------
+ 2 files changed, 68 insertions(+), 28 deletions(-)
+
+diff --git a/keylime/requests_client.py b/keylime/requests_client.py
+index 85a175c..e993fbc 100644
+--- a/keylime/requests_client.py
++++ b/keylime/requests_client.py
+@@ -1,3 +1,4 @@
++import re
+ import ssl
+ from typing import Any, Dict, Optional
+ 
+@@ -15,6 +16,10 @@ class RequestsClient:
+         ignore_hostname: bool = True,
+         **kwargs: Any,
+     ) -> None:
++        # Remove any leading "http(s)://" from the base url
++        if base_url.startswith("http"):
++            base_url = re.sub(r"https?://", "", base_url)
++
+         if tls_enabled:
+             self.base_url = f"https://{base_url}"
+         else:
+diff --git a/keylime/revocation_notifier.py b/keylime/revocation_notifier.py
+index 5cc8b1a..434bf64 100644
+--- a/keylime/revocation_notifier.py
++++ b/keylime/revocation_notifier.py
+@@ -9,8 +9,9 @@ from typing import Any, Callable, Dict, Optional, Set
+ 
+ import requests
+ 
+-from keylime import config, crypto, json, keylime_logging
++from keylime import config, crypto, json, keylime_logging, web_util
+ from keylime.common import retry
++from keylime.requests_client import RequestsClient
+ 
+ logger = keylime_logging.init_logging("revocation_notifier")
+ broker_proc: Optional[Process] = None
+@@ -109,7 +110,10 @@ def notify(tosend: Dict[str, Any]) -> None:
+             exponential_backoff = config.getboolean("verifier", "exponential_backoff")
+             next_retry = retry.retry_time(exponential_backoff, interval, i, logger)
+             logger.debug(
+-                "Unable to publish revocation message %d times, trying again in %f seconds: %s", i, next_retry, e
++                "Unable to publish revocation message %d times, trying again in %f seconds: %s",
++                i,
++                next_retry,
++                e,
+             )
+             time.sleep(next_retry)
+     mysock.close()
+@@ -132,30 +136,50 @@ def notify_webhook(tosend: Dict[str, Any]) -> None:
+     def worker_webhook(tosend: Dict[str, Any], url: str) -> None:
+         interval = config.getfloat("verifier", "retry_interval")
+         exponential_backoff = config.getboolean("verifier", "exponential_backoff")
+-        with requests.Session() as session:
+-            logger.info("Sending revocation event via webhook...")
+-            for i in range(config.getint("verifier", "max_retries")):
+-                next_retry = retry.retry_time(exponential_backoff, interval, i, logger)
++
++        max_retries = config.getint("verifier", "max_retries")
++        if max_retries <= 0:
++            logger.info("Invalid value found in 'max_retries' option for verifier, using default value")
++            max_retries = 5
++
++        # Get TLS options from the configuration
++        (cert, key, trusted_ca, key_password), verify_server_cert = web_util.get_tls_options(
++            "verifier", is_client=True, logger=logger
++        )
++
++        # Generate the TLS context using the obtained options
++        tls_context = web_util.generate_tls_context(cert, key, trusted_ca, key_password, is_client=True, logger=logger)
++
++        logger.info("Sending revocation event via webhook to %s ...", url)
++        for i in range(max_retries):
++            next_retry = retry.retry_time(exponential_backoff, interval, i, logger)
++
++            with RequestsClient(
++                url,
++                verify_server_cert,
++                tls_context,
++            ) as client:
+                 try:
+-                    response = session.post(url, json=tosend, timeout=5)
+-                    if response.status_code in [200, 202]:
+-                        break
+-
+-                    logger.debug(
+-                        "Unable to publish revocation message %d times via webhook, "
+-                        "trying again in %d seconds. "
+-                        "Server returned status code: %s",
+-                        i,
+-                        next_retry,
+-                        response.status_code,
+-                    )
+-                except requests.exceptions.RequestException as e:
+-                    logger.debug(
+-                        "Unable to publish revocation message %d times via webhook, trying again in %d seconds: %s",
+-                        i,
+-                        next_retry,
+-                        e,
+-                    )
++                    res = client.post("", json=tosend, timeout=5)
++                except requests.exceptions.SSLError as ssl_error:
++                    if "TLSV1_ALERT_UNKNOWN_CA" in str(ssl_error):
++                        logger.warning(
++                            "Keylime does not recognize certificate from peer. Check if verifier 'trusted_server_ca' is configured correctly"
++                        )
++
++                    raise ssl_error from ssl_error
++
++                if res and res.status_code in [200, 202]:
++                    break
++
++                logger.debug(
++                    "Unable to publish revocation message %d times via webhook, "
++                    "trying again in %d seconds. "
++                    "Server returned status code: %s",
++                    i + 1,
++                    next_retry,
++                    res.status_code,
++                )
+ 
+             time.sleep(next_retry)
+ 
+@@ -167,7 +191,11 @@ def notify_webhook(tosend: Dict[str, Any]) -> None:
+ cert_key = None
+ 
+ 
+-def process_revocation(revocation: Dict[str, Any], callback: Callable[[Dict[str, Any]], None], cert_path: str) -> None:
++def process_revocation(
++    revocation: Dict[str, Any],
++    callback: Callable[[Dict[str, Any]], None],
++    cert_path: str,
++) -> None:
+     global cert_key
+ 
+     if cert_key is None:
+@@ -179,10 +207,17 @@ def process_revocation(revocation: Dict[str, Any], callback: Callable[[Dict[str,
+         cert_key = crypto.x509_import_pubkey(certpem)
+ 
+     if cert_key is None:
+-        logger.warning("Unable to check signature of revocation message: %s not available", cert_path)
++        logger.warning(
++            "Unable to check signature of revocation message: %s not available",
++            cert_path,
++        )
+     elif "signature" not in revocation or revocation["signature"] == "none":
+         logger.warning("No signature on revocation message from server")
+-    elif not crypto.rsa_verify(cert_key, revocation["msg"].encode("utf-8"), revocation["signature"].encode("utf-8")):
++    elif not crypto.rsa_verify(
++        cert_key,
++        revocation["msg"].encode("utf-8"),
++        revocation["signature"].encode("utf-8"),
++    ):
+         logger.error("Invalid revocation message siganture %s", revocation)
+     else:
+         message = json.loads(revocation["msg"])
+--
+2.47.1
+
diff --git a/SPECS/keylime.spec b/SPECS/keylime.spec
index f106cb3..792f10a 100644
--- a/SPECS/keylime.spec
+++ b/SPECS/keylime.spec
@@ -9,7 +9,7 @@
 Name: keylime
 Version: 7.3.0
-Release: 9%{?dist}
+Release: 15%{?dist}
 Summary: Open source TPM software for Bootstrapping and Maintaining Trust
 
 URL: https://github.com/keylime/keylime
 
@@ -28,6 +28,11 @@
 Patch: 0008-verifier-should-read-parameters-from-verifier.conf-o.patch
 Patch: 0009-CVE-2023-38201.patch
 Patch: 0010-CVE-2023-38200.patch
 Patch: 0011-Automatically-update-agent-API-version.patch
+Patch: 0012-Restore-create-allowlist.patch
+Patch: 0013-Set-generator-and-timestamp-in-create-policy.patch
+Patch: 0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch
+Patch: 0015-Backport-keylime-policy-tool.patch
+Patch: 0016-Use-TLS-on-revocation-notification-webhook.patch
 
 License: ASL 2.0 and MIT
 
@@ -100,6 +105,7 @@
 Requires: python3-lark-parser
 Requires: python3-pyasn1
 Requires: python3-pyasn1-modules
 Requires: python3-jsonschema
+Requires: python3-psutil
 Requires: tpm2-tools
 Requires: openssl
 
@@ -183,13 +189,19 @@
 done
 
 # Ship some scripts.
 mkdir -p %{buildroot}/%{_datadir}/%{srcname}/scripts
-for s in create_runtime_policy.sh \
-	create_mb_refstate \
+for s in create_mb_refstate \
 	ek-openssl-verify; do
 	install -Dpm 755 scripts/${s} \
 		%{buildroot}/%{_datadir}/%{srcname}/scripts/${s}
 done
 
+# On RHEL 9.3, install create_runtime_policy.sh as create_allowlist.sh
+# The convert_runtime_policy.py script to convert allowlist and excludelist into
+# runtime policy is not called anymore.
+# See: https://issues.redhat.com/browse/RHEL-11866
+install -Dpm 755 scripts/create_runtime_policy.sh \
+	%{buildroot}/%{_datadir}/%{srcname}/scripts/create_allowlist.sh
+
 # Ship configuration templates.
 cp -r ./templates %{buildroot}%{_datadir}/%{srcname}/templates/
 
@@ -339,6 +351,7 @@
 %{_bindir}/keylime_create_policy
 %{_bindir}/keylime_sign_runtime_policy
 %{_bindir}/keylime_userdata_encrypt
+%{_bindir}/keylime-policy
 
 %files base
 %license LICENSE
@@ -353,7 +366,7 @@
 %attr(400,%{srcname},%{srcname}) %{_sharedstatedir}/%{srcname}/tpm_cert_store/*.pem
 %{_tmpfilesdir}/%{srcname}.conf
 %{_sysusersdir}/%{srcname}.conf
-%{_datadir}/%{srcname}/scripts/create_runtime_policy.sh
+%{_datadir}/%{srcname}/scripts/create_allowlist.sh
 %{_datadir}/%{srcname}/scripts/ek-openssl-verify
 %{_datadir}/%{srcname}/templates
 %{_bindir}/keylime_upgrade_config
@@ -362,6 +375,37 @@
 %license LICENSE
 
 %changelog
+* Wed Feb 05 2025 Sergio Correia - 7.3.0-15
+- Use TLS on revocation notification webhook
+- Include system installed CA certificates when verifying webhook
+  server certificate
+- Include the CA certificates added via configuration file option
+  'trusted_server_ca'
+  Resolves: RHEL-78057
+  Resolves: RHEL-78313
+  Resolves: RHEL-78316
+
+* Fri Jan 10 2025 Sergio Correia - 7.3.0-14
+- Backport keylime-policy tool
+  Resolves: RHEL-75797
+
+* Fri Jan 05 2024 Sergio Correia - 7.3.0-13
+- Backport fix for CVE-2023-3674
+  Resolves: RHEL-21013
+
+* Tue Oct 17 2023 Anderson Toshiyuki Sasaki - 7.3.0-12
+- Set the generator and timestamp in create_policy.py
+  Related: RHEL-11866
+
+* Mon Oct 09 2023 Anderson Toshiyuki Sasaki - 7.3.0-11
+- Suppress unnecessary error message
+  Related: RHEL-11866
+
+* Fri Oct 06 2023 Anderson Toshiyuki Sasaki - 7.3.0-10
+- Restore allowlist generation script
+  Resolves: RHEL-11866
+  Resolves: RHEL-11867
+
 * Wed Sep 06 2023 Sergio Correia - 7.3.0-9
 - Rebuild for properly tagging the resulting build
   Resolves: RHEL-1898
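
For context on the RequestsClient change carried by patch 0016, here is a minimal
standalone sketch of the base-URL normalization it introduces, assuming only the
Python standard library. normalize_base_url is a hypothetical helper written for
this illustration; it is not part of keylime itself.

    import re

    def normalize_base_url(base_url: str, tls_enabled: bool) -> str:
        # Mirrors the constructor logic added by patch 0016: strip any
        # "http://" or "https://" prefix, then re-add a scheme matching
        # the tls_enabled flag, so a caller-supplied scheme cannot
        # conflict with the TLS configuration.
        if base_url.startswith("http"):
            base_url = re.sub(r"https?://", "", base_url)
        scheme = "https" if tls_enabled else "http"
        return f"{scheme}://{base_url}"

    # A webhook URL configured with an explicit scheme ends up with
    # exactly one scheme prefix after normalization.
    assert normalize_base_url("https://hook.example.com:8080", True) == "https://hook.example.com:8080"
    assert normalize_base_url("hook.example.com:8080", False) == "http://hook.example.com:8080"

This is why worker_webhook can pass the configured webhook URL straight to
RequestsClient: with TLS enabled, the resulting request goes to https:// even if
the operator configured the URL with a bare host or an http:// prefix.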