From de71c1e8d0087edb4f77384a806b8d7ab44f020a Mon Sep 17 00:00:00 2001 From: eabdullin Date: Tue, 11 Nov 2025 18:06:58 +0000 Subject: [PATCH] import CS keylime-7.12.1-11.el9_7.2 --- .gitignore | 4 +- .keylime.metadata | 4 +- ...e-keylime-compatible-with-python-3.9.patch | 628 +++++ ...e-of-Required-NotRequired-typing_ext.patch | 104 - ...rver_t-tcp-connect-to-several-domain.patch | 27 - ...epo-tests-from-create-runtime-policy.patch | 58 + ...as-the-minimum-for-the-configuration.patch | 51 - ...red-boot-related-tests-for-s390x-and.patch | 52 + ...-str_to_version-for-the-upgrade-tool.patch | 88 - ...ate-str_to_version-in-the-adjust-scr.patch | 52 + ...HEL-9-version-of-create_allowlist.sh.patch | 404 +++ ...le-add-ignores-for-EV_PLATFORM_CONFI.patch | 50 - ...erver_key_password-for-verifier-regi.patch | 66 + SOURCES/0006-Revert-mapping-changes.patch | 43 - ...ession-close-using-a-session-manager.patch | 90 - SOURCES/0007-fix_db_connection_leaks.patch | 2208 +++++++++++++++++ ...EV_EFI_HANDOFF_TABLES-events-on-PCR1.patch | 29 + ...read-parameters-from-verifier.conf-o.patch | 31 - SOURCES/0009-CVE-2023-38201.patch | 48 - ...r_db-as-logged-by-newer-shim-version.patch | 356 +++ SOURCES/0010-CVE-2023-38200.patch | 69 - ...rifier-Gracefully-shutdown-on-signal.patch | 42 + ...tomatically-update-agent-API-version.patch | 244 -- ...ry-to-send-notifications-on-shutdown.patch | 308 +++ SOURCES/0012-Restore-create-allowlist.patch | 59 - ...close-the-session-at-the-end-of-the-.patch | 45 + ...rator-and-timestamp-in-create-policy.patch | 44 - .../0013-fix-malformed-certs-workaround.patch | 1265 ++++++++++ ...-a-logger.error-with-an-Exception-in.patch | 80 - SOURCES/keylime.tmpfiles | 39 + SPECS/keylime.spec | 205 +- 31 files changed, 5721 insertions(+), 1072 deletions(-) create mode 100644 SOURCES/0001-Make-keylime-compatible-with-python-3.9.patch delete mode 100644 SOURCES/0001-Remove-usage-of-Required-NotRequired-typing_ext.patch delete mode 100644 SOURCES/0002-Allow-keylime_server_t-tcp-connect-to-several-domain.patch create mode 100644 SOURCES/0002-tests-fix-rpm-repo-tests-from-create-runtime-policy.patch delete mode 100644 SOURCES/0003-Use-version-2.0-as-the-minimum-for-the-configuration.patch create mode 100644 SOURCES/0003-tests-skip-measured-boot-related-tests-for-s390x-and.patch delete mode 100644 SOURCES/0004-Duplicate-str_to_version-for-the-upgrade-tool.patch create mode 100644 SOURCES/0004-templates-duplicate-str_to_version-in-the-adjust-scr.patch create mode 100644 SOURCES/0005-Restore-RHEL-9-version-of-create_allowlist.sh.patch delete mode 100644 SOURCES/0005-elchecking-example-add-ignores-for-EV_PLATFORM_CONFI.patch create mode 100644 SOURCES/0006-Revert-default-server_key_password-for-verifier-regi.patch delete mode 100644 SOURCES/0006-Revert-mapping-changes.patch delete mode 100644 SOURCES/0007-Handle-session-close-using-a-session-manager.patch create mode 100644 SOURCES/0007-fix_db_connection_leaks.patch create mode 100644 SOURCES/0008-mb-support-EV_EFI_HANDOFF_TABLES-events-on-PCR1.patch delete mode 100644 SOURCES/0008-verifier-should-read-parameters-from-verifier.conf-o.patch delete mode 100644 SOURCES/0009-CVE-2023-38201.patch create mode 100644 SOURCES/0009-mb-support-vendor_db-as-logged-by-newer-shim-version.patch delete mode 100644 SOURCES/0010-CVE-2023-38200.patch create mode 100644 SOURCES/0010-verifier-Gracefully-shutdown-on-signal.patch delete mode 100644 SOURCES/0011-Automatically-update-agent-API-version.patch create mode 100644 
SOURCES/0011-revocations-Try-to-send-notifications-on-shutdown.patch delete mode 100644 SOURCES/0012-Restore-create-allowlist.patch create mode 100644 SOURCES/0012-requests_client-close-the-session-at-the-end-of-the-.patch delete mode 100644 SOURCES/0013-Set-generator-and-timestamp-in-create-policy.patch create mode 100644 SOURCES/0013-fix-malformed-certs-workaround.patch delete mode 100644 SOURCES/0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch create mode 100644 SOURCES/keylime.tmpfiles diff --git a/.gitignore b/.gitignore index 39b0b2b..aaf9c11 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ -SOURCES/keylime-selinux-1.2.0.tar.gz -SOURCES/v7.3.0.tar.gz +SOURCES/keylime-selinux-42.1.2.tar.gz +SOURCES/v7.12.1.tar.gz diff --git a/.keylime.metadata b/.keylime.metadata index 89a2596..8274479 100644 --- a/.keylime.metadata +++ b/.keylime.metadata @@ -1,2 +1,2 @@ -9130beade415b8e3b02aac8d06678f2c45b939fe SOURCES/keylime-selinux-1.2.0.tar.gz -400e2b019060b8a6cc255dbfc14c582121acbee1 SOURCES/v7.3.0.tar.gz +36672155770ce6690e59d97764072f9629af716d SOURCES/keylime-selinux-42.1.2.tar.gz +3db2aa10ee0a005bf5d0a1214cd08e2604da0429 SOURCES/v7.12.1.tar.gz diff --git a/SOURCES/0001-Make-keylime-compatible-with-python-3.9.patch b/SOURCES/0001-Make-keylime-compatible-with-python-3.9.patch new file mode 100644 index 0000000..7239692 --- /dev/null +++ b/SOURCES/0001-Make-keylime-compatible-with-python-3.9.patch @@ -0,0 +1,628 @@ +From f7c32aec9c44a176124d982d942391ed3d50e846 Mon Sep 17 00:00:00 2001 +From: Sergio Correia +Date: Tue, 3 Jun 2025 21:23:09 +0100 +Subject: [PATCH 1/6] Make keylime compatible with python 3.9 + +Signed-off-by: Sergio Correia +--- + keylime/ima/types.py | 33 ++++---- + keylime/models/base/basic_model.py | 4 +- + keylime/models/base/basic_model_meta.py | 4 +- + keylime/models/base/field.py | 4 +- + keylime/models/base/persistable_model.py | 4 +- + keylime/models/base/type.py | 4 +- + keylime/models/base/types/base64_bytes.py | 4 +- + keylime/models/base/types/certificate.py | 92 +++++++++++---------- + keylime/models/base/types/dictionary.py | 4 +- + keylime/models/base/types/one_of.py | 6 +- + keylime/models/registrar/registrar_agent.py | 31 +++---- + keylime/policy/create_runtime_policy.py | 2 +- + keylime/registrar_client.py | 8 +- + keylime/web/base/action_handler.py | 7 +- + keylime/web/base/controller.py | 78 ++++++++--------- + tox.ini | 10 +++ + 16 files changed, 154 insertions(+), 141 deletions(-) + +diff --git a/keylime/ima/types.py b/keylime/ima/types.py +index 99f0aa7..a0fffdf 100644 +--- a/keylime/ima/types.py ++++ b/keylime/ima/types.py +@@ -6,11 +6,6 @@ if sys.version_info >= (3, 8): + else: + from typing_extensions import Literal, TypedDict + +-if sys.version_info >= (3, 11): +- from typing import NotRequired, Required +-else: +- from typing_extensions import NotRequired, Required +- + ### Types for tpm_dm.py + + RuleAttributeType = Optional[Union[int, str, bool]] +@@ -51,7 +46,7 @@ class Rule(TypedDict): + + + class Policies(TypedDict): +- version: Required[int] ++ version: int + match_on: MatchKeyType + rules: Dict[str, Rule] + +@@ -60,27 +55,27 @@ class Policies(TypedDict): + + + class RPMetaType(TypedDict): +- version: Required[int] +- generator: NotRequired[int] +- timestamp: NotRequired[str] ++ version: int ++ generator: int ++ timestamp: str + + + class RPImaType(TypedDict): +- ignored_keyrings: Required[List[str]] +- log_hash_alg: Required[Literal["sha1", "sha256", "sha384", "sha512"]] ++ ignored_keyrings: List[str] ++ log_hash_alg: 
Literal["sha1", "sha256", "sha384", "sha512"] + dm_policy: Optional[Policies] + + + RuntimePolicyType = TypedDict( + "RuntimePolicyType", + { +- "meta": Required[RPMetaType], +- "release": NotRequired[int], +- "digests": Required[Dict[str, List[str]]], +- "excludes": Required[List[str]], +- "keyrings": Required[Dict[str, List[str]]], +- "ima": Required[RPImaType], +- "ima-buf": Required[Dict[str, List[str]]], +- "verification-keys": Required[str], ++ "meta": RPMetaType, ++ "release": int, ++ "digests": Dict[str, List[str]], ++ "excludes": List[str], ++ "keyrings": Dict[str, List[str]], ++ "ima": RPImaType, ++ "ima-buf": Dict[str, List[str]], ++ "verification-keys": str, + }, + ) +diff --git a/keylime/models/base/basic_model.py b/keylime/models/base/basic_model.py +index 68a126e..6f5de83 100644 +--- a/keylime/models/base/basic_model.py ++++ b/keylime/models/base/basic_model.py +@@ -407,7 +407,9 @@ class BasicModel(ABC, metaclass=BasicModelMeta): + if max and length > max: + self._add_error(field, msg or f"should be at most {length} {element_type}(s)") + +- def validate_number(self, field: str, *expressions: tuple[str, int | float], msg: Optional[str] = None) -> None: ++ def validate_number( ++ self, field: str, *expressions: tuple[str, Union[int, float]], msg: Optional[str] = None ++ ) -> None: + value = self.values.get(field) + + if not value: +diff --git a/keylime/models/base/basic_model_meta.py b/keylime/models/base/basic_model_meta.py +index 353e004..84617d4 100644 +--- a/keylime/models/base/basic_model_meta.py ++++ b/keylime/models/base/basic_model_meta.py +@@ -1,6 +1,6 @@ + from abc import ABCMeta + from types import MappingProxyType +-from typing import Any, Callable, Mapping, TypeAlias, Union ++from typing import Any, Callable, Mapping, Union + + from sqlalchemy.types import TypeEngine + +@@ -40,7 +40,7 @@ class BasicModelMeta(ABCMeta): + + # pylint: disable=bad-staticmethod-argument, no-value-for-parameter, using-constant-test + +- DeclaredFieldType: TypeAlias = Union[ModelType, TypeEngine, type[ModelType], type[TypeEngine]] ++ DeclaredFieldType = Union[ModelType, TypeEngine, type[ModelType], type[TypeEngine]] + + @classmethod + def _is_model_class(mcs, cls: type) -> bool: # type: ignore[reportSelfClassParameterName] +diff --git a/keylime/models/base/field.py b/keylime/models/base/field.py +index 7fb3dcb..d1e3bc3 100644 +--- a/keylime/models/base/field.py ++++ b/keylime/models/base/field.py +@@ -1,6 +1,6 @@ + import re + from inspect import isclass +-from typing import TYPE_CHECKING, Any, Optional, TypeAlias, Union ++from typing import TYPE_CHECKING, Any, Optional, Union + + from sqlalchemy.types import TypeEngine + +@@ -23,7 +23,7 @@ class ModelField: + [2] https://docs.python.org/3/library/functions.html#property + """ + +- DeclaredFieldType: TypeAlias = Union[ModelType, TypeEngine, type[ModelType], type[TypeEngine]] ++ DeclaredFieldType = Union[ModelType, TypeEngine, type[ModelType], type[TypeEngine]] + + FIELD_NAME_REGEX = re.compile(r"^[A-Za-z_]+[A-Za-z0-9_]*$") + +diff --git a/keylime/models/base/persistable_model.py b/keylime/models/base/persistable_model.py +index 18f7d0d..015d661 100644 +--- a/keylime/models/base/persistable_model.py ++++ b/keylime/models/base/persistable_model.py +@@ -1,4 +1,4 @@ +-from typing import Any, Mapping, Optional, Sequence ++from typing import Any, Mapping, Optional, Sequence, Union + + from keylime.models.base.basic_model import BasicModel + from keylime.models.base.db import db_manager +@@ -165,7 +165,7 @@ class PersistableModel(BasicModel, 
metaclass=PersistableModelMeta): + else: + return None + +- def __init__(self, data: Optional[dict | object] = None, process_associations: bool = True) -> None: ++ def __init__(self, data: Optional[Union[dict, object]] = None, process_associations: bool = True) -> None: + if isinstance(data, type(self).db_mapping): + super().__init__({}, process_associations) + self._init_from_mapping(data, process_associations) +diff --git a/keylime/models/base/type.py b/keylime/models/base/type.py +index 2520f72..e4d924c 100644 +--- a/keylime/models/base/type.py ++++ b/keylime/models/base/type.py +@@ -1,7 +1,7 @@ + from decimal import Decimal + from inspect import isclass + from numbers import Real +-from typing import Any, TypeAlias, Union ++from typing import Any, Union + + from sqlalchemy.engine.interfaces import Dialect + from sqlalchemy.types import TypeEngine +@@ -99,7 +99,7 @@ class ModelType: + you should instead set ``_type_engine`` to ``None`` and override the ``get_db_type`` method. + """ + +- DeclaredTypeEngine: TypeAlias = Union[TypeEngine, type[TypeEngine]] ++ DeclaredTypeEngine = Union[TypeEngine, type[TypeEngine]] + + def __init__(self, type_engine: DeclaredTypeEngine) -> None: + if isclass(type_engine) and issubclass(type_engine, TypeEngine): +diff --git a/keylime/models/base/types/base64_bytes.py b/keylime/models/base/types/base64_bytes.py +index b9b4b13..a1eeced 100644 +--- a/keylime/models/base/types/base64_bytes.py ++++ b/keylime/models/base/types/base64_bytes.py +@@ -1,6 +1,6 @@ + import base64 + import binascii +-from typing import Optional, TypeAlias, Union ++from typing import Optional, Union + + from sqlalchemy.types import Text + +@@ -62,7 +62,7 @@ class Base64Bytes(ModelType): + b64_str = Base64Bytes().cast("MIIE...") + """ + +- IncomingValue: TypeAlias = Union[bytes, str, None] ++ IncomingValue = Union[bytes, str, None] + + def __init__(self) -> None: + super().__init__(Text) +diff --git a/keylime/models/base/types/certificate.py b/keylime/models/base/types/certificate.py +index 2c27603..0f03169 100644 +--- a/keylime/models/base/types/certificate.py ++++ b/keylime/models/base/types/certificate.py +@@ -1,7 +1,7 @@ + import base64 + import binascii + import io +-from typing import Optional, TypeAlias, Union ++from typing import Optional, Union + + import cryptography.x509 + from cryptography.hazmat.primitives.serialization import Encoding +@@ -78,7 +78,7 @@ class Certificate(ModelType): + cert = Certificate().cast("-----BEGIN CERTIFICATE-----\nMIIE...") + """ + +- IncomingValue: TypeAlias = Union[cryptography.x509.Certificate, bytes, str, None] ++ IncomingValue = Union[cryptography.x509.Certificate, bytes, str, None] + + def __init__(self) -> None: + super().__init__(Text) +@@ -195,18 +195,19 @@ class Certificate(ModelType): + """ + + try: +- match self.infer_encoding(value): +- case "decoded": +- return None +- case "der": +- cryptography.x509.load_der_x509_certificate(value) # type: ignore[reportArgumentType, arg-type] +- case "pem": +- cryptography.x509.load_pem_x509_certificate(value) # type: ignore[reportArgumentType, arg-type] +- case "base64": +- der_value = base64.b64decode(value, validate=True) # type: ignore[reportArgumentType, arg-type] +- cryptography.x509.load_der_x509_certificate(der_value) +- case _: +- raise Exception ++ encoding_inf = self.infer_encoding(value) ++ if encoding_inf == "decoded": ++ return None ++ ++ if encoding_inf == "der": ++ cryptography.x509.load_der_x509_certificate(value) # type: ignore[reportArgumentType, arg-type] ++ elif encoding_inf == 
"pem": ++ cryptography.x509.load_pem_x509_certificate(value) # type: ignore[reportArgumentType, arg-type] ++ elif encoding_inf == "base64": ++ der_value = base64.b64decode(value, validate=True) # type: ignore[reportArgumentType, arg-type] ++ cryptography.x509.load_der_x509_certificate(der_value) ++ else: ++ raise Exception + except Exception: + return False + +@@ -227,37 +228,38 @@ class Certificate(ModelType): + if not value: + return None + +- match self.infer_encoding(value): +- case "decoded": +- return value # type: ignore[reportReturnType, return-value] +- case "der": +- try: +- return self._load_der_cert(value) # type: ignore[reportArgumentType, arg-type] +- except PyAsn1Error as err: +- raise ValueError( +- f"value cast to certificate appears DER encoded but cannot be deserialized as such: {value!r}" +- ) from err +- case "pem": +- try: +- return self._load_pem_cert(value) # type: ignore[reportArgumentType, arg-type] +- except PyAsn1Error as err: +- raise ValueError( +- f"value cast to certificate appears PEM encoded but cannot be deserialized as such: " +- f"'{str(value)}'" +- ) from err +- case "base64": +- try: +- return self._load_der_cert(base64.b64decode(value, validate=True)) # type: ignore[reportArgumentType, arg-type] +- except (binascii.Error, PyAsn1Error) as err: +- raise ValueError( +- f"value cast to certificate appears Base64 encoded but cannot be deserialized as such: " +- f"'{str(value)}'" +- ) from err +- case _: +- raise TypeError( +- f"value cast to certificate is of type '{value.__class__.__name__}' but should be one of 'str', " +- f"'bytes' or 'cryptography.x509.Certificate': '{str(value)}'" +- ) ++ encoding_inf = self.infer_encoding(value) ++ if encoding_inf == "decoded": ++ return value # type: ignore[reportReturnType, return-value] ++ ++ if encoding_inf == "der": ++ try: ++ return self._load_der_cert(value) # type: ignore[reportArgumentType, arg-type] ++ except PyAsn1Error as err: ++ raise ValueError( ++ f"value cast to certificate appears DER encoded but cannot be deserialized as such: {value!r}" ++ ) from err ++ elif encoding_inf == "pem": ++ try: ++ return self._load_pem_cert(value) # type: ignore[reportArgumentType, arg-type] ++ except PyAsn1Error as err: ++ raise ValueError( ++ f"value cast to certificate appears PEM encoded but cannot be deserialized as such: " ++ f"'{str(value)}'" ++ ) from err ++ elif encoding_inf == "base64": ++ try: ++ return self._load_der_cert(base64.b64decode(value, validate=True)) # type: ignore[reportArgumentType, arg-type] ++ except (binascii.Error, PyAsn1Error) as err: ++ raise ValueError( ++ f"value cast to certificate appears Base64 encoded but cannot be deserialized as such: " ++ f"'{str(value)}'" ++ ) from err ++ else: ++ raise TypeError( ++ f"value cast to certificate is of type '{value.__class__.__name__}' but should be one of 'str', " ++ f"'bytes' or 'cryptography.x509.Certificate': '{str(value)}'" ++ ) + + def generate_error_msg(self, _value: IncomingValue) -> str: + return "must be a valid X.509 certificate in PEM format or otherwise encoded using Base64" +diff --git a/keylime/models/base/types/dictionary.py b/keylime/models/base/types/dictionary.py +index 7d9e811..d9ffec3 100644 +--- a/keylime/models/base/types/dictionary.py ++++ b/keylime/models/base/types/dictionary.py +@@ -1,5 +1,5 @@ + import json +-from typing import Optional, TypeAlias, Union ++from typing import Optional, Union + + from sqlalchemy.types import Text + +@@ -50,7 +50,7 @@ class Dictionary(ModelType): + kv_pairs = Dictionary().cast('{"key": 
"value"}') + """ + +- IncomingValue: TypeAlias = Union[dict, str, None] ++ IncomingValue = Union[dict, str, None] + + def __init__(self) -> None: + super().__init__(Text) +diff --git a/keylime/models/base/types/one_of.py b/keylime/models/base/types/one_of.py +index 479d417..faf097d 100644 +--- a/keylime/models/base/types/one_of.py ++++ b/keylime/models/base/types/one_of.py +@@ -1,6 +1,6 @@ + from collections import Counter + from inspect import isclass +-from typing import Any, Optional, TypeAlias, Union ++from typing import Any, Optional, Union + + from sqlalchemy.engine.interfaces import Dialect + from sqlalchemy.types import Float, Integer, String, TypeEngine +@@ -65,8 +65,8 @@ class OneOf(ModelType): + incoming PEM value would not be cast to a certificate object and remain a string. + """ + +- Declaration: TypeAlias = Union[str, int, float, ModelType, TypeEngine, type[ModelType], type[TypeEngine]] +- PermittedList: TypeAlias = list[Union[str, int, float, ModelType]] ++ Declaration = Union[str, int, float, ModelType, TypeEngine, type[ModelType], type[TypeEngine]] ++ PermittedList = list[Union[str, int, float, ModelType]] + + def __init__(self, *args: Declaration) -> None: + # pylint: disable=super-init-not-called +diff --git a/keylime/models/registrar/registrar_agent.py b/keylime/models/registrar/registrar_agent.py +index 560c188..b232049 100644 +--- a/keylime/models/registrar/registrar_agent.py ++++ b/keylime/models/registrar/registrar_agent.py +@@ -153,21 +153,22 @@ class RegistrarAgent(PersistableModel): + names = ", ".join(non_compliant_certs) + names = " and".join(names.rsplit(",", 1)) + +- match config.get("registrar", "malformed_cert_action"): +- case "ignore": +- return +- case "reject": +- logger.error( +- "Certificate(s) %s may not conform to strict ASN.1 DER encoding rules and were rejected due to " +- "config ('malformed_cert_action = reject')", +- names, +- ) +- case _: +- logger.warning( +- "Certificate(s) %s may not conform to strict ASN.1 DER encoding rules and were re-encoded before " +- "parsing by python-cryptography", +- names, +- ) ++ cfg = config.get("registrar", "malformed_cert_action") ++ if cfg == "ignore": ++ return ++ ++ if cfg == "reject": ++ logger.error( ++ "Certificate(s) %s may not conform to strict ASN.1 DER encoding rules and were rejected due to " ++ "config ('malformed_cert_action = reject')", ++ names, ++ ) ++ else: ++ logger.warning( ++ "Certificate(s) %s may not conform to strict ASN.1 DER encoding rules and were re-encoded before " ++ "parsing by python-cryptography", ++ names, ++ ) + + def _bind_ak_to_iak(self, iak_attest, iak_sign): + # The ak-iak binding should only be verified when either aik_tpm or iak_tpm is changed +diff --git a/keylime/policy/create_runtime_policy.py b/keylime/policy/create_runtime_policy.py +index 6a412c4..8e1c687 100644 +--- a/keylime/policy/create_runtime_policy.py ++++ b/keylime/policy/create_runtime_policy.py +@@ -972,7 +972,7 @@ def create_runtime_policy(args: argparse.Namespace) -> Optional[RuntimePolicyTyp + ) + abort = True + else: +- if a not in algorithms.Hash: ++ if a not in set(algorithms.Hash): + if a == SHA256_OR_SM3: + algo = a + else: +diff --git a/keylime/registrar_client.py b/keylime/registrar_client.py +index 705ff12..97fbc2a 100644 +--- a/keylime/registrar_client.py ++++ b/keylime/registrar_client.py +@@ -13,12 +13,6 @@ if sys.version_info >= (3, 8): + else: + from typing_extensions import TypedDict + +-if sys.version_info >= (3, 11): +- from typing import NotRequired +-else: +- from typing_extensions 
import NotRequired +- +- + class RegistrarData(TypedDict): + ip: Optional[str] + port: Optional[str] +@@ -27,7 +21,7 @@ class RegistrarData(TypedDict): + aik_tpm: str + ek_tpm: str + ekcert: Optional[str] +- provider_keys: NotRequired[Dict[str, str]] ++ provider_keys: Dict[str, str] + + + logger = keylime_logging.init_logging("registrar_client") +diff --git a/keylime/web/base/action_handler.py b/keylime/web/base/action_handler.py +index b20de89..e7b5888 100644 +--- a/keylime/web/base/action_handler.py ++++ b/keylime/web/base/action_handler.py +@@ -1,4 +1,5 @@ + import re ++import sys + import time + import traceback + from inspect import iscoroutinefunction +@@ -48,7 +49,11 @@ class ActionHandler(RequestHandler): + + # Take the list of strings returned by format_exception, where each string ends in a newline and may contain + # internal newlines, and split the concatenation of all the strings by newline +- message = "".join(traceback.format_exception(err)) ++ if sys.version_info < (3, 10): ++ message = "".join(traceback.format_exception(err, None, None)) ++ else: ++ message = "".join(traceback.format_exception(err)) ++ + lines = message.split("\n") + + for line in lines: +diff --git a/keylime/web/base/controller.py b/keylime/web/base/controller.py +index f1ac3c5..153535e 100644 +--- a/keylime/web/base/controller.py ++++ b/keylime/web/base/controller.py +@@ -2,7 +2,7 @@ import http.client + import json + import re + from types import MappingProxyType +-from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, TypeAlias, Union ++from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Union + + from tornado.escape import parse_qs_bytes + from tornado.httputil import parse_body_arguments +@@ -15,14 +15,16 @@ if TYPE_CHECKING: + from keylime.models.base.basic_model import BasicModel + from keylime.web.base.action_handler import ActionHandler + +-PathParams: TypeAlias = Mapping[str, str] +-QueryParams: TypeAlias = Mapping[str, str | Sequence[str]] +-MultipartParams: TypeAlias = Mapping[str, Union[str, bytes, Sequence[str | bytes]]] +-FormParams: TypeAlias = Union[QueryParams, MultipartParams] +-JSONConvertible: TypeAlias = Union[str, int, float, bool, None, "JSONObjectConvertible", "JSONArrayConvertible"] +-JSONObjectConvertible: TypeAlias = Mapping[str, JSONConvertible] +-JSONArrayConvertible: TypeAlias = Sequence[JSONConvertible] # pyright: ignore[reportInvalidTypeForm] +-Params: TypeAlias = Mapping[str, Union[str, bytes, Sequence[str | bytes], JSONObjectConvertible, JSONArrayConvertible]] ++PathParams = Mapping[str, str] ++QueryParams = Mapping[str, Union[str, Sequence[str]]] ++MultipartParams = Mapping[str, Union[str, bytes, Union[Sequence[str], Sequence[bytes]]]] ++FormParams = Union[QueryParams, MultipartParams] ++JSONConvertible = Union[str, int, float, bool, None, "JSONObjectConvertible", "JSONArrayConvertible"] ++JSONObjectConvertible = Mapping[str, JSONConvertible] ++JSONArrayConvertible = Sequence[JSONConvertible] # pyright: ignore[reportInvalidTypeForm] ++Params = Mapping[ ++ str, Union[str, bytes, Union[Sequence[str], Sequence[bytes]], JSONObjectConvertible, JSONArrayConvertible] ++] + + + class Controller: +@@ -77,7 +79,7 @@ class Controller: + VERSION_REGEX = re.compile("^\\/v(\\d+)(?:\\.(\\d+))*") + + @staticmethod +- def decode_url_query(query: str | bytes) -> QueryParams: ++ def decode_url_query(query: Union[str, bytes]) -> QueryParams: + """Parses a binary query string (whether from a URL or HTTP body) into a dict of Unicode strings. 
If multiple + instances of the same key are present in the string, their values are collected into a list. + +@@ -135,8 +137,8 @@ class Controller: + + @staticmethod + def prepare_http_body( +- body: Union[str, JSONObjectConvertible | JSONArrayConvertible, Any], content_type: Optional[str] = None +- ) -> tuple[Optional[bytes | Any], Optional[str]]: ++ body: Union[str, Union[JSONObjectConvertible, JSONArrayConvertible], Any], content_type: Optional[str] = None ++ ) -> tuple[Optional[Union[bytes, Any]], Optional[str]]: + """Prepares an object to be included in the body of an HTTP request or response and infers the appropriate + media type unless provided. ``body`` will be serialised into JSON if it contains a ``dict`` or ``list`` which is + serialisable unless a ``content_type`` other than ``"application/json"`` is provided. +@@ -155,32 +157,34 @@ class Controller: + if content_type: + content_type = content_type.lower().strip() + +- body_out: Optional[bytes | Any] +- content_type_out: Optional[str] +- +- match (body, content_type): +- case (None, _): +- body_out = None +- content_type_out = content_type +- case ("", _): +- body_out = b"" +- content_type_out = "text/plain; charset=utf-8" +- case (_, "text/plain"): ++ body_out: Optional[bytes | Any] = None ++ content_type_out: Optional[str] = None ++ ++ if body is None: ++ body_out = None ++ content_type_out = content_type ++ elif body == "": ++ body_out = b"" ++ content_type_out = "text/plain; charset=utf-8" ++ else: ++ if content_type == "text/plain": + body_out = str(body).encode("utf-8") + content_type_out = "text/plain; charset=utf-8" +- case (_, "application/json") if isinstance(body, str): +- body_out = body.encode("utf-8") +- content_type_out = "application/json" +- case (_, "application/json"): +- body_out = json.dumps(body, allow_nan=False, indent=4).encode("utf-8") +- content_type_out = "application/json" +- case (_, None) if isinstance(body, str): +- body_out = body.encode("utf-8") +- content_type_out = "text/plain; charset=utf-8" +- case (_, None) if isinstance(body, (dict, list)): +- body_out = json.dumps(body, allow_nan=False, indent=4).encode("utf-8") +- content_type_out = "application/json" +- case (_, _): ++ elif content_type == "application/json": ++ if isinstance(body, str): ++ body_out = body.encode("utf-8") ++ content_type_out = "application/json" ++ else: ++ body_out = json.dumps(body, allow_nan=False, indent=4).encode("utf-8") ++ content_type_out = "application/json" ++ elif content_type is None: ++ if isinstance(body, str): ++ body_out = body.encode("utf-8") ++ content_type_out = "text/plain; charset=utf-8" ++ elif isinstance(body, (dict, list)): ++ body_out = json.dumps(body, allow_nan=False, indent=4).encode("utf-8") ++ content_type_out = "application/json" ++ else: + body_out = body + content_type_out = content_type + +@@ -248,7 +252,7 @@ class Controller: + self, + code: int = 200, + status: Optional[str] = None, +- data: Optional[JSONObjectConvertible | JSONArrayConvertible] = None, ++ data: Optional[Union[JSONObjectConvertible, JSONArrayConvertible]] = None, + ) -> None: + """Converts a Python data structure to JSON and wraps it in the following boilerplate JSON object which is + returned by all v2 endpoints: +diff --git a/tox.ini b/tox.ini +index 031ac54..ce3974c 100644 +--- a/tox.ini ++++ b/tox.ini +@@ -51,3 +51,13 @@ commands = black --diff ./keylime ./test + deps = + isort + commands = isort --diff --check ./keylime ./test ++ ++ ++[testenv:pylint39] ++basepython = python3.9 ++deps = ++ 
-r{toxinidir}/requirements.txt ++ -r{toxinidir}/test-requirements.txt ++ pylint ++commands = bash scripts/check_codestyle.sh ++allowlist_externals = bash +-- +2.47.1 + diff --git a/SOURCES/0001-Remove-usage-of-Required-NotRequired-typing_ext.patch b/SOURCES/0001-Remove-usage-of-Required-NotRequired-typing_ext.patch deleted file mode 100644 index 5782252..0000000 --- a/SOURCES/0001-Remove-usage-of-Required-NotRequired-typing_ext.patch +++ /dev/null @@ -1,104 +0,0 @@ -Subject: [PATCH] Remove usage of Required/NotRequired typing_ext - -Since we do not yet have typing_extensions packaged, let us not -use its functionality yet. ---- - keylime/ima/types.py | 33 ++++++++++++++------------------- - keylime/registrar_client.py | 8 +------- - 2 files changed, 15 insertions(+), 26 deletions(-) - -diff --git a/keylime/ima/types.py b/keylime/ima/types.py -index 99f0aa7..a0fffdf 100644 ---- a/keylime/ima/types.py -+++ b/keylime/ima/types.py -@@ -6,11 +6,6 @@ if sys.version_info >= (3, 8): - else: - from typing_extensions import Literal, TypedDict - --if sys.version_info >= (3, 11): -- from typing import NotRequired, Required --else: -- from typing_extensions import NotRequired, Required -- - ### Types for tpm_dm.py - - RuleAttributeType = Optional[Union[int, str, bool]] -@@ -51,7 +46,7 @@ class Rule(TypedDict): - - - class Policies(TypedDict): -- version: Required[int] -+ version: int - match_on: MatchKeyType - rules: Dict[str, Rule] - -@@ -60,27 +55,27 @@ class Policies(TypedDict): - - - class RPMetaType(TypedDict): -- version: Required[int] -- generator: NotRequired[int] -- timestamp: NotRequired[str] -+ version: int -+ generator: int -+ timestamp: str - - - class RPImaType(TypedDict): -- ignored_keyrings: Required[List[str]] -- log_hash_alg: Required[Literal["sha1", "sha256", "sha384", "sha512"]] -+ ignored_keyrings: List[str] -+ log_hash_alg: Literal["sha1", "sha256", "sha384", "sha512"] - dm_policy: Optional[Policies] - - - RuntimePolicyType = TypedDict( - "RuntimePolicyType", - { -- "meta": Required[RPMetaType], -- "release": NotRequired[int], -- "digests": Required[Dict[str, List[str]]], -- "excludes": Required[List[str]], -- "keyrings": Required[Dict[str, List[str]]], -- "ima": Required[RPImaType], -- "ima-buf": Required[Dict[str, List[str]]], -- "verification-keys": Required[str], -+ "meta": RPMetaType, -+ "release": int, -+ "digests": Dict[str, List[str]], -+ "excludes": List[str], -+ "keyrings": Dict[str, List[str]], -+ "ima": RPImaType, -+ "ima-buf": Dict[str, List[str]], -+ "verification-keys": str, - }, - ) -diff --git a/keylime/registrar_client.py b/keylime/registrar_client.py -index ab28977..ea5341b 100644 ---- a/keylime/registrar_client.py -+++ b/keylime/registrar_client.py -@@ -13,12 +13,6 @@ if sys.version_info >= (3, 8): - else: - from typing_extensions import TypedDict - --if sys.version_info >= (3, 11): -- from typing import NotRequired --else: -- from typing_extensions import NotRequired -- -- - class RegistrarData(TypedDict): - ip: Optional[str] - port: Optional[str] -@@ -27,7 +21,7 @@ class RegistrarData(TypedDict): - aik_tpm: str - ek_tpm: str - ekcert: Optional[str] -- provider_keys: NotRequired[Dict[str, str]] -+ provider_keys: Dict[str, str] - - - logger = keylime_logging.init_logging("registrar_client") --- -2.41.0 - diff --git a/SOURCES/0002-Allow-keylime_server_t-tcp-connect-to-several-domain.patch b/SOURCES/0002-Allow-keylime_server_t-tcp-connect-to-several-domain.patch deleted file mode 100644 index c54b161..0000000 --- 
a/SOURCES/0002-Allow-keylime_server_t-tcp-connect-to-several-domain.patch +++ /dev/null @@ -1,27 +0,0 @@ -From e8a1fa55ff0892ee2380e832ac94abc629b401d6 Mon Sep 17 00:00:00 2001 -From: Patrik Koncity -Date: Thu, 10 Aug 2023 07:47:04 -0400 -Subject: [PATCH 2/2] Allow keylime_server_t tcp connect to several domains - ---- - keylime-selinux-1.2.0/keylime.te | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/keylime-selinux-1.2.0/keylime.te b/keylime-selinux-1.2.0/keylime.te -index 8d47d26..8e6487b 100644 ---- a/keylime-selinux-1.2.0/keylime.te -+++ b/keylime-selinux-1.2.0/keylime.te -@@ -83,6 +83,10 @@ allow keylime_server_t self:udp_socket create_stream_socket_perms; - manage_dirs_pattern(keylime_server_t, keylime_log_t, keylime_log_t) - manage_files_pattern(keylime_server_t, keylime_log_t, keylime_log_t) - -+corenet_tcp_connect_http_cache_port(keylime_server_t) -+corenet_tcp_connect_mysqld_port(keylime_server_t) -+corenet_tcp_connect_postgresql_port(keylime_server_t) -+ - fs_getattr_all_fs(keylime_server_t) - fs_rw_inherited_tmpfs_files(keylime_server_t) - --- -2.39.3 - diff --git a/SOURCES/0002-tests-fix-rpm-repo-tests-from-create-runtime-policy.patch b/SOURCES/0002-tests-fix-rpm-repo-tests-from-create-runtime-policy.patch new file mode 100644 index 0000000..5735f6c --- /dev/null +++ b/SOURCES/0002-tests-fix-rpm-repo-tests-from-create-runtime-policy.patch @@ -0,0 +1,58 @@ +From 5c5c7f7f7180111485b24061af4c0395476958b5 Mon Sep 17 00:00:00 2001 +From: Sergio Correia +Date: Thu, 22 May 2025 11:25:15 -0400 +Subject: [PATCH 2/6] tests: fix rpm repo tests from create-runtime-policy + +Signed-off-by: Sergio Correia +--- + .../create-runtime-policy/setup-rpm-tests | 28 +++++++++++++------ + 1 file changed, 20 insertions(+), 8 deletions(-) + +diff --git a/test/data/create-runtime-policy/setup-rpm-tests b/test/data/create-runtime-policy/setup-rpm-tests +index 708438c..b62729b 100755 +--- a/test/data/create-runtime-policy/setup-rpm-tests ++++ b/test/data/create-runtime-policy/setup-rpm-tests +@@ -217,20 +217,32 @@ create_rpm() { + # https://github.com/rpm-software-management/rpm/commit/96467dce18f264b278e17ffe1859c88d9b5aa4b6 + _pkgname="DUMMY-${_name}-${_version}-${_rel}.noarch.rpm" + +- _expected_pkg="${RPMSDIR}/noarch/${_pkgname}" +- [ -e "${_expected_pkg}" ] && return 0 ++ # For some reason, it may not store the built package within the ++ # noarch directory, but directly in RPMS, so let's check both ++ # locations. ++ _expected_pkg="${RPMSDIR}/noarch/${_pkgname} ${RPMSDIR}/${_pkgname}" ++ for _expected in ${_expected_pkg}; do ++ if [ -e "${_expected}" ]; then ++ echo "(create_rpm) CREATED RPM: ${_expected}" >&2 ++ return 0 ++ fi ++ done + + # OK, the package was not built where it should. Let us see if + # it was built in ~/rpmbuild instead, and if that is the case, + # copy it to the expected location. +- _bad_location_pkg="${HOME}/rpmbuild/RPMS/noarch/${_pkgname}" +- if [ -e "${_bad_location_pkg}" ]; then +- echo "WARNING: the package ${_pkgname} was built into ~/rpmbuild despite rpmbuild being instructed to build it at a different location. 
Probably a fallout from https://github.com/rpm-software-management/rpm/commit/96467dce" >&2 +- install -D -m644 "${_bad_location_pkg}" "${_expected_pkg}" +- return 0 +- fi ++ _bad_location_pkg="${HOME}/rpmbuild/RPMS/noarch/${_pkgname} ${HOME}/rpmbuild/RPMS/${_pkgname}" ++ for _bad_l in ${_bad_location_pkg}; do ++ if [ -e "${_bad_l}" ]; then ++ echo "WARNING: the package ${_pkgname} was built into ~/rpmbuild despite rpmbuild being instructed to build it at a different location. Probably a fallout from https://github.com/rpm-software-management/rpm/commit/96467dce" >&2 ++ install -D -m644 "${_bad_l}" "${RPMSDIR}/noarch/${_pkgname}" ++ echo "(create_rpm) CREATED RPM: ${RPMSDIR}/noarch/${_pkgname}" >&2 ++ return 0 ++ fi ++ done + + # Should not be here. ++ echo "create_rpm() ended with error; probably an issue with the location where the RPMs were built" >&2 + return 1 + } + +-- +2.47.1 + diff --git a/SOURCES/0003-Use-version-2.0-as-the-minimum-for-the-configuration.patch b/SOURCES/0003-Use-version-2.0-as-the-minimum-for-the-configuration.patch deleted file mode 100644 index 4ecd144..0000000 --- a/SOURCES/0003-Use-version-2.0-as-the-minimum-for-the-configuration.patch +++ /dev/null @@ -1,51 +0,0 @@ -From b8e26ca5e98e1b842db2fc21411962d40f27c557 Mon Sep 17 00:00:00 2001 -From: rpm-build -Date: Tue, 15 Aug 2023 07:19:28 -0400 -Subject: [PATCH 3/4] Use version 2.0 as the minimum for the configuration - ---- - keylime/cmd/convert_config.py | 16 +++++++++++----- - 1 file changed, 11 insertions(+), 5 deletions(-) - -diff --git a/keylime/cmd/convert_config.py b/keylime/cmd/convert_config.py -index ac28151..1d71b99 100755 ---- a/keylime/cmd/convert_config.py -+++ b/keylime/cmd/convert_config.py -@@ -191,7 +191,13 @@ def output(components: List[str], config: RawConfigParser, templates: str, outdi - - # Check that there are templates for all components - for component in components: -- version = config[component]["version"].strip('" ') -+ # Minimum version. 
-+ version = '2.0' -+ if "version" in config[component]: -+ version = config[component]["version"].strip('" ') -+ else: -+ config[component]["version"] = version -+ - version_dir = os.path.join(templates, version) - if not os.path.isdir(version_dir): - raise Exception(f"Could not find directory {version_dir}") -@@ -292,15 +298,15 @@ def process_mapping( - raise Exception("Invalid version number found in old configuration") - - except (configparser.NoOptionError, configparser.NoSectionError): -- print(f"No version found in old configuration for {component}, using '1.0'") -- old_version = (1, 0) -+ print(f"No version found in old configuration for {component}, using '2.0'") -+ old_version = (2, 0) - else: - # If the old_version does not contain the component from the - # mapping, use the minimum version to use defaults -- old_version = (1, 0) -+ old_version = (2, 0) - - # Skip versions lower than the current version -- if old_version >= new_version: -+ if old_version >= new_version and component in old_config: - new[component] = old_config[component] - continue - --- -2.39.3 - diff --git a/SOURCES/0003-tests-skip-measured-boot-related-tests-for-s390x-and.patch b/SOURCES/0003-tests-skip-measured-boot-related-tests-for-s390x-and.patch new file mode 100644 index 0000000..8cf9b37 --- /dev/null +++ b/SOURCES/0003-tests-skip-measured-boot-related-tests-for-s390x-and.patch @@ -0,0 +1,52 @@ +From 4e7cd6b75de27897ecc8e7329732cd945f7adfd0 Mon Sep 17 00:00:00 2001 +From: Sergio Correia +Date: Thu, 22 May 2025 18:27:04 +0100 +Subject: [PATCH 3/6] tests: skip measured-boot related tests for s390x and + ppc64le + +Signed-off-by: Sergio Correia +--- + test/test_create_mb_policy.py | 2 ++ + test/test_mba_parsing.py | 2 ++ + 2 files changed, 4 insertions(+) + +diff --git a/test/test_create_mb_policy.py b/test/test_create_mb_policy.py +index eaed0e3..b00d8e7 100644 +--- a/test/test_create_mb_policy.py ++++ b/test/test_create_mb_policy.py +@@ -5,6 +5,7 @@ Copyright 2024 Red Hat, Inc. 
+ + import argparse + import os ++import platform + import unittest + + from keylime.policy import create_mb_policy +@@ -12,6 +13,7 @@ from keylime.policy import create_mb_policy + DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "data", "create-mb-policy")) + + ++@unittest.skipIf(platform.machine() in ["ppc64le", "s390x"], "ppc64le and s390x are not supported") + class CreateMeasuredBootPolicy_Test(unittest.TestCase): + def test_event_to_sha256(self): + test_cases = [ +diff --git a/test/test_mba_parsing.py b/test/test_mba_parsing.py +index 670a602..e157116 100644 +--- a/test/test_mba_parsing.py ++++ b/test/test_mba_parsing.py +@@ -1,10 +1,12 @@ + import os ++import platform + import unittest + + from keylime.common.algorithms import Hash + from keylime.mba import mba + + ++@unittest.skipIf(platform.machine() in ["ppc64le", "s390x"], "ppc64le and s390x are not supported") + class TestMBAParsing(unittest.TestCase): + def test_parse_bootlog(self): + """Test parsing binary measured boot event log""" +-- +2.47.1 + diff --git a/SOURCES/0004-Duplicate-str_to_version-for-the-upgrade-tool.patch b/SOURCES/0004-Duplicate-str_to_version-for-the-upgrade-tool.patch deleted file mode 100644 index 66cb11b..0000000 --- a/SOURCES/0004-Duplicate-str_to_version-for-the-upgrade-tool.patch +++ /dev/null @@ -1,88 +0,0 @@ -From dbd521e8e8f0ffd9ace79c7b9b888f4cb89488f9 Mon Sep 17 00:00:00 2001 -From: rpm-build -Date: Tue, 15 Aug 2023 06:09:37 -0400 -Subject: [PATCH 4/4] Duplicate str_to_version for the upgrade tool - -So it does not depend on python-keylime ---- - keylime/cmd/convert_config.py | 24 ++++++++++++++++++++++-- - templates/2.0/adjust.py | 22 ++++++++++++++++++++-- - 2 files changed, 42 insertions(+), 4 deletions(-) - -diff --git a/keylime/cmd/convert_config.py b/keylime/cmd/convert_config.py -index c1c6180..cad5e31 100755 ---- a/keylime/cmd/convert_config.py -+++ b/keylime/cmd/convert_config.py -@@ -84,13 +84,33 @@ import importlib.util - import itertools - import json - import os -+import re - import shutil - from configparser import RawConfigParser --from typing import List, Optional, Tuple -+from typing import List, Optional, Tuple, Union - - from jinja2 import Template - --from keylime.common.version import str_to_version -+ -+def str_to_version(v_str: str) -> Union[Tuple[int, int], None]: -+ """ -+ Validates the string format and converts the provided string to a tuple of -+ ints which can be sorted and compared. -+ -+ :returns: Tuple with version number parts converted to int. In case of -+ invalid version string, returns None -+ """ -+ -+ # Strip to remove eventual quotes and spaces -+ v_str = v_str.strip('" ') -+ -+ m = re.match(r"^(\d+)\.(\d+)$", v_str) -+ -+ if not m: -+ return None -+ -+ return (int(m.group(1)), int(m.group(2))) -+ - - COMPONENTS = ["agent", "verifier", "tenant", "registrar", "ca", "logging"] - -diff --git a/templates/2.0/adjust.py b/templates/2.0/adjust.py -index 312b790..c1e582a 100644 ---- a/templates/2.0/adjust.py -+++ b/templates/2.0/adjust.py -@@ -2,9 +2,27 @@ import ast - import configparser - import re - from configparser import RawConfigParser --from typing import Dict, List, Optional, Tuple -+from typing import Dict, List, Optional, Tuple, Union - --from keylime.common.version import str_to_version -+ -+def str_to_version(v_str: str) -> Union[Tuple[int, int], None]: -+ """ -+ Validates the string format and converts the provided string to a tuple of -+ ints which can be sorted and compared. 
-+ -+ :returns: Tuple with version number parts converted to int. In case of -+ invalid version string, returns None -+ """ -+ -+ # Strip to remove eventual quotes and spaces -+ v_str = v_str.strip('" ') -+ -+ m = re.match(r"^(\d+)\.(\d+)$", v_str) -+ -+ if not m: -+ return None -+ -+ return (int(m.group(1)), int(m.group(2))) - - - def adjust(config: RawConfigParser, mapping: Dict) -> None: # pylint: disable=unused-argument --- -2.39.3 - diff --git a/SOURCES/0004-templates-duplicate-str_to_version-in-the-adjust-scr.patch b/SOURCES/0004-templates-duplicate-str_to_version-in-the-adjust-scr.patch new file mode 100644 index 0000000..3432ee9 --- /dev/null +++ b/SOURCES/0004-templates-duplicate-str_to_version-in-the-adjust-scr.patch @@ -0,0 +1,52 @@ +From 7ca86e1c0d68f45915d9f583ffaf149285905005 Mon Sep 17 00:00:00 2001 +From: Sergio Correia +Date: Tue, 3 Jun 2025 10:50:48 +0100 +Subject: [PATCH 4/6] templates: duplicate str_to_version() in the adjust + script + +As a follow-up of upstream PR#1486, duplicate the str_to_version() +method in adjust.py so that we do not need the keylime modules in +order for the configuration upgrade script to run. + +Signed-off-by: Sergio Correia +--- + templates/2.0/adjust.py | 22 ++++++++++++++++++++-- + 1 file changed, 20 insertions(+), 2 deletions(-) + +diff --git a/templates/2.0/adjust.py b/templates/2.0/adjust.py +index 6008e4c..24ba898 100644 +--- a/templates/2.0/adjust.py ++++ b/templates/2.0/adjust.py +@@ -4,9 +4,27 @@ import logging + import re + from configparser import RawConfigParser + from logging import Logger +-from typing import Dict, List, Optional, Tuple ++from typing import Dict, Tuple, Union + +-from keylime.common.version import str_to_version ++ ++def str_to_version(v_str: str) -> Union[Tuple[int, int], None]: ++ """ ++ Validates the string format and converts the provided string to a tuple of ++ ints which can be sorted and compared. ++ ++ :returns: Tuple with version number parts converted to int. In case of ++ invalid version string, returns None ++ """ ++ ++ # Strip to remove eventual quotes and spaces ++ v_str = v_str.strip('" ') ++ ++ m = re.match(r"^(\d+)\.(\d+)$", v_str) ++ ++ if not m: ++ return None ++ ++ return (int(m.group(1)), int(m.group(2))) + + + def adjust( +-- +2.47.1 + diff --git a/SOURCES/0005-Restore-RHEL-9-version-of-create_allowlist.sh.patch b/SOURCES/0005-Restore-RHEL-9-version-of-create_allowlist.sh.patch new file mode 100644 index 0000000..bebd40f --- /dev/null +++ b/SOURCES/0005-Restore-RHEL-9-version-of-create_allowlist.sh.patch @@ -0,0 +1,404 @@ +From c60460eccab93863dbd1fd0b748e5a275c8e6737 Mon Sep 17 00:00:00 2001 +From: Sergio Correia +Date: Tue, 3 Jun 2025 21:29:15 +0100 +Subject: [PATCH 5/6] Restore RHEL-9 version of create_allowlist.sh + +Signed-off-by: Sergio Correia +--- + scripts/create_runtime_policy.sh | 335 ++++++++++--------------------- + 1 file changed, 104 insertions(+), 231 deletions(-) + +diff --git a/scripts/create_runtime_policy.sh b/scripts/create_runtime_policy.sh +index 90ba50b..c0b641d 100755 +--- a/scripts/create_runtime_policy.sh ++++ b/scripts/create_runtime_policy.sh +@@ -1,282 +1,155 @@ +-#!/usr/bin/env bash ++#!/usr/bin/bash + ################################################################################ + # SPDX-License-Identifier: Apache-2.0 + # Copyright 2017 Massachusetts Institute of Technology. 
+ ################################################################################ + +- +-if [ $0 != "-bash" ] ; then +- pushd `dirname "$0"` > /dev/null 2>&1 +-fi +-KCRP_BASE_DIR=$(pwd) +-if [ $0 != "-bash" ] ; then +- popd 2>&1 > /dev/null +-fi +-KCRP_BASE_DIR=$KCRP_BASE_DIR/.. +- +-function detect_hash { +- local hashstr=$1 +- +- case "${#hashstr}" in +- 32) hashalgo=md5sum ;; +- 40) hashalgo=sha1sum ;; +- 64) hashalgo=sha256sum ;; +- 128) hashalgo=sha512sum ;; +- *) hashalgo="na";; +- esac +- +- echo $hashalgo +-} +- +-function announce { +- # 1 - MESSAGE +- +- MESSAGE=$(echo "${1}" | tr '\n' ' ') +- MESSAGE=$(echo $MESSAGE | sed "s/\t\t*/ /g") +- +- echo "==> $(date) - ${0} - $MESSAGE" +-} +- +-function valid_algo { +- local algo=$1 +- +- [[ " ${ALGO_LIST[@]} " =~ " ${algo} " ]] +-} +- + # Configure the installer here + INITRAMFS_TOOLS_GIT=https://salsa.debian.org/kernel-team/initramfs-tools.git + INITRAMFS_TOOLS_VER="master" + +-# All defaults +-ALGO=sha1sum +-WORK_DIR=/tmp/kcrp +-OUTPUT_DIR=${WORK_DIR}/output +-ALLOWLIST_DIR=${WORK_DIR}/allowlist +-INITRAMFS_LOC="/boot/" +-INITRAMFS_STAGING_DIR=${WORK_DIR}/ima_ramfs/ +-INITRAMFS_TOOLS_DIR=${WORK_DIR}/initramfs-tools +-BOOT_AGGREGATE_LOC="/sys/kernel/security/ima/ascii_runtime_measurements" +-ROOTFS_LOC="/" +-EXCLUDE_LIST="none" +-SKIP_PATH="none" +-ALGO_LIST=("sha1sum" "sha256sum" "sha512sum") ++WORKING_DIR=$(readlink -f "$0") ++WORKING_DIR=$(dirname "$WORKING_DIR") + + # Grabs Debian's initramfs_tools from Git repo if no other options exist + if [[ ! `command -v unmkinitramfs` && ! -x "/usr/lib/dracut/skipcpio" ]] ; then + # Create temp dir for pulling in initramfs-tools +- announce "INFO: Downloading initramfs-tools: $INITRAMFS_TOOLS_DIR" ++ TMPDIR=`mktemp -d` || exit 1 ++ echo "INFO: Downloading initramfs-tools: $TMPDIR" + +- mkdir -p $INITRAMFS_TOOLS_DIR + # Clone initramfs-tools repo +- pushd $INITRAMFS_TOOLS_DIR > /dev/null 2>&1 +- git clone $INITRAMFS_TOOLS_GIT initramfs-tools > /dev/null 2>&1 +- pushd initramfs-tools > /dev/null 2>&1 +- git checkout $INITRAMFS_TOOLS_VER > /dev/null 2>&1 +- popd > /dev/null 2>&1 +- popd > /dev/null 2>&1 ++ pushd $TMPDIR ++ git clone $INITRAMFS_TOOLS_GIT initramfs-tools ++ pushd initramfs-tools ++ git checkout $INITRAMFS_TOOLS_VER ++ popd # $TMPDIR ++ popd + + shopt -s expand_aliases +- alias unmkinitramfs=$INITRAMFS_TOOLS_DIR/initramfs-tools/unmkinitramfs +- +- which unmkinitramfs > /dev/null 2>&1 || exit 1 ++ alias unmkinitramfs=$TMPDIR/initramfs-tools/unmkinitramfs + fi + ++ + if [[ $EUID -ne 0 ]]; then + echo "This script must be run as root" 1>&2 + exit 1 + fi + +-USAGE=$(cat <<-END +- Usage: $0 -o/--output_file FILENAME [-a/--algo ALGO] [-x/--ramdisk-location PATH] [-y/--boot_aggregate-location PATH] [-z/--rootfs-location PATH] [-e/--exclude_list FILENAME] [-s/--skip-path PATH] [-h/--help] ++if [ $# -lt 1 ] ++then ++ echo "No arguments provided" >&2 ++ echo "Usage: `basename $0` -o [filename] -h [hash-algo]" >&2 ++ exit $NOARGS; ++fi + +- optional arguments: +- -a/--algo (checksum algorithm to be used, default: $ALGO) +- -x/--ramdisk-location (path to initramdisk, default: $INITRAMFS_LOC, set to "none" to skip) +- -y/--boot_aggregate-location (path for IMA log, used for boot aggregate extraction, default: $BOOT_AGGREGATE_LOC, set to "none" to skip) +- -z/--rootfs-location (path to root filesystem, default: $ROOTFS_LOC, cannot be skipped) +- -e/--exclude_list (filename containing a list of paths to be excluded (i.e., verifier will not try to match checksums, default: $EXCLUDE_LIST) +- 
-s/--skip-path (comma-separated path list, files found there will not have checksums calculated, default: $SKIP_PATH) +- -h/--help (show this message and exit) +-END +-) ++ALGO=sha256sum + +-while [[ $# -gt 0 ]] +-do +- key="$1" ++ALGO_LIST=("sha1sum" "sha256sum" "sha512sum") ++ ++valid_algo() { ++ local algo=$1 ++ ++ [[ " ${ALGO_LIST[@]} " =~ " ${algo} " ]] ++} + +- case $key in +- -a|--algo) +- ALGO="$2" +- shift +- ;; +- -a=*|--algo=*) +- ALGO=$(echo $key | cut -d '=' -f 2) +- ;; +- -x|--ramdisk-location) +- INITRAMFS_LOC="$2" +- shift +- ;; +- -x=*|--ramdisk-location=*) +- INITRAMFS_LOC=$(echo $key | cut -d '=' -f 2) +- ;; +- -y|--boot_aggregate-location) +- BOOT_AGGREGATE_LOC=$2 +- shift +- ;; +- -y=*|--boot_aggregate-location=*) +- BOOT_AGGREGATE_LOC=$(echo $key | cut -d '=' -f 2) +- ;; +- -z|--rootfs-location) +- ROOTFS_LOC=$2 +- shift +- ;; +- -z=*|--rootfs-location=*) +- ROOTFS_LOC=$(echo $key | cut -d '=' -f 2) +- ;; +- -e|--exclude_list) +- EXCLUDE_LIST=$2 +- shift +- ;; +- -e=*|--exclude_list=*) +- EXCLUDE_LIST=$(echo $key | cut -d '=' -f 2) +- ;; +- -o=*|--output_file=*) +- OUTPUT=$(echo $key | cut -d '=' -f 2) +- ;; +- -o|--output_file) +- OUTPUT=$2 +- shift +- ;; +- -s=*|--skip-path=*) +- SKIP_PATH=$(echo $key | cut -d '=' -f 2) +- ;; +- -s|--skip-path) +- SKIP_PATH=$2 +- shift +- ;; +- -h|--help) +- printf "%s\n" "$USAGE" +- exit 0 +- shift +- ;; +- *) +- # unknown option +- ;; +- esac +- shift ++while getopts ":o:h:" opt; do ++ case $opt in ++ o) ++ OUTPUT=$(readlink -f $OPTARG) ++ rm -f $OUTPUT ++ ;; ++ h) ++ if valid_algo $OPTARG; then ++ ALGO=$OPTARG ++ else ++ echo "Invalid hash function argument: use sha1sum, sha256sum, or sha512sum" ++ exit 1 ++ fi ++ ;; ++ esac + done + +-if ! valid_algo $ALGO ++if [ ! "$OUTPUT" ] + then +- echo "Invalid hash function argument: pick from \"${ALGO_LIST[@]}\"" ++ echo "Missing argument for -o" >&2; ++ echo "Usage: $0 -o [filename] -h [hash-algo]" >&2; + exit 1 + fi + +-if [[ -z $OUTPUT ]] +-then +- printf "%s\n" "$USAGE" +- exit 1 ++ ++# Where to look for initramfs image ++INITRAMFS_LOC="/boot" ++if [ -d "/ostree" ]; then ++ # If we are on an ostree system change where we look for initramfs image ++ loc=$(grep -E "/ostree/[^/]([^/]*)" -o /proc/cmdline | head -n 1 | cut -d / -f 3) ++ INITRAMFS_LOC="/boot/ostree/${loc}/" + fi + +-rm -rf $ALLOWLIST_DIR +-rm -rf $INITRAMFS_STAGING_DIR +-rm -rf $OUTPUT_DIR + +-announce "Writing allowlist $ALLOWLIST_DIR/${OUTPUT} with $ALGO..." +-mkdir -p $ALLOWLIST_DIR ++echo "Writing allowlist to $OUTPUT with $ALGO..." + +-if [[ $BOOT_AGGREGATE_LOC != "none" ]] +-then +- announce "--- Adding boot agregate from $BOOT_AGGREGATE_LOC on allowlist $ALLOWLIST_DIR/${OUTPUT} ..." + # Add boot_aggregate from /sys/kernel/security/ima/ascii_runtime_measurements (IMA Log) file. + # The boot_aggregate measurement is always the first line in the IMA Log file. + # The format of the log lines is the following: + # + # File_Digest may start with the digest algorithm specified (e.g "sha1:", "sha256:") depending on the template used. +- head -n 1 $BOOT_AGGREGATE_LOC | awk '{ print $4 " boot_aggregate" }' | sed 's/.*://' >> $ALLOWLIST_DIR/${OUTPUT} ++head -n 1 /sys/kernel/security/ima/ascii_runtime_measurements | awk '{ print $4 " boot_aggregate" }' | sed 's/.*://' >> $OUTPUT + +- bagghash=$(detect_hash $(cat $ALLOWLIST_DIR/${OUTPUT} | cut -d ' ' -f 1)) +- if [[ $ALGO != $bagghash ]] +- then +- announce "ERROR: \"boot aggregate\" has was calculated with $bagghash, but files will be calculated with $ALGO. 
Use option -a $bagghash" +- exit 1 +- fi +-else +- announce "--- Skipping boot aggregate..." +-fi +- +-announce "--- Adding all appropriate files from $ROOTFS_LOC on allowlist $ALLOWLIST_DIR/${OUTPUT} ..." + # Add all appropriate files under root FS to allowlist +-pushd $ROOTFS_LOC > /dev/null 2>&1 +-BASE_EXCLUDE_DIRS="\bsys\b\|\brun\b\|\bproc\b\|\blost+found\b\|\bdev\b\|\bmedia\b\|\bsnap\b\|\bmnt\b\|\bvar\b\|\btmp\b" +-ROOTFS_FILE_LIST=$(ls | grep -v $BASE_EXCLUDE_DIRS) +-if [[ $SKIP_PATH != "none" ]] +-then +- SKIP_PATH=$(echo $SKIP_PATH | sed -e "s#^$ROOTFS_LOC##g" -e "s#,$ROOTFS_LOC##g" -e "s#,#\\\|#g") +- ROOTFS_FILE_LIST=$(echo "$ROOTFS_FILE_LIST" | grep -v "$SKIP_PATH") +-fi +-find $ROOTFS_FILE_LIST \( -fstype rootfs -o -xtype f -type l -o -type f \) -uid 0 -exec $ALGO "$ROOTFS_LOC/{}" >> $ALLOWLIST_DIR/${OUTPUT} \; +-popd > /dev/null 2>&1 ++cd / ++find `ls / | grep -v "\bsys\b\|\brun\b\|\bproc\b\|\blost+found\b\|\bdev\b\|\bmedia\b\|\bsnap\b\|mnt"` \( -fstype rootfs -o -xtype f -type l -o -type f \) -uid 0 -exec $ALGO '/{}' >> $OUTPUT \; + + # Create staging area for init ram images +-mkdir -p $INITRAMFS_STAGING_DIR ++rm -rf /tmp/ima/ ++mkdir -p /tmp/ima + +-if [[ $INITRAMFS_LOC != "none" ]] +-then +- # Where to look for initramfs image +- if [[ -d "/ostree" ]] +- then +- X=$INITRAMFS_LOC +- # If we are on an ostree system change where we look for initramfs image +- loc=$(grep -E "/ostree/[^/]([^/]*)" -o /proc/cmdline | head -n 1 | cut -d / -f 3) +- INITRAMFS_LOC="/boot/ostree/${loc}/" +- announce "--- The location of initramfs was overriden from \"${X}\" to \"$INITRAMFS_LOC\"" +- fi +- +- announce "--- Creating allowlist for init ram disks found under \"$INITRAMFS_LOC\" to $ALLOWLIST_DIR/${OUTPUT} ..." +- for i in $(ls ${INITRAMFS_LOC}/initr* 2> /dev/null) +- do +- announce " extracting $i" +- mkdir -p $INITRAMFS_STAGING_DIR/$i-extracted +- cd $INITRAMFS_STAGING_DIR/$i-extracted +- +- # platform-specific handling of init ram disk images +- if [[ `command -v unmkinitramfs` ]] ; then +- mkdir -p $INITRAMFS_STAGING_DIR/$i-extracted-unmk +- unmkinitramfs $i $INITRAMFS_STAGING_DIR/$i-extracted-unmk +- if [[ -d "$INITRAMFS_STAGING_DIR/$i-extracted-unmk/main/" ]] ; then +- cp -r $INITRAMFS_STAGING_DIR/$i-extracted-unmk/main/. /tmp/ima/$i-extracted +- else +- cp -r $INITRAMFS_STAGING_DIR/$i-extracted-unmk/. /tmp/ima/$i-extracted +- fi +- elif [[ -x "/usr/lib/dracut/skipcpio" ]] ; then +- /usr/lib/dracut/skipcpio $i | gunzip -c | cpio -i -d 2> /dev/null ++# Iterate through init ram disks and add files to allowlist ++echo "Creating allowlist for init ram disk" ++for i in `ls ${INITRAMFS_LOC}/initr*` ++do ++ echo "extracting $i" ++ mkdir -p /tmp/ima/$i-extracted ++ cd /tmp/ima/$i-extracted ++ ++ # platform-specific handling of init ram disk images ++ if [[ `command -v unmkinitramfs` ]] ; then ++ mkdir -p /tmp/ima/$i-extracted-unmk ++ unmkinitramfs $i /tmp/ima/$i-extracted-unmk ++ if [[ -d "/tmp/ima/$i-extracted-unmk/main/" ]] ; then ++ cp -r /tmp/ima/$i-extracted-unmk/main/. /tmp/ima/$i-extracted + else +- announce "ERROR: No tools for initramfs image processing found!" +- exit 1 ++ cp -r /tmp/ima/$i-extracted-unmk/. /tmp/ima/$i-extracted + fi ++ elif [[ -x "/usr/lib/dracut/skipcpio" ]] ; then ++ /usr/lib/dracut/skipcpio $i | gunzip -c 2> /dev/null | cpio -i -d 2> /dev/null ++ else ++ echo "ERROR: No tools for initramfs image processing found!" 
++ break ++ fi + +- find -type f -exec $ALGO "./{}" \; | sed "s| \./\./| /|" >> $ALLOWLIST_DIR/${OUTPUT} +- done +-fi +- +-# Non-critical cleanup on the resulting file (when ROOTFS_LOC = '/', the path starts on allowlist ends up with double '//' ) +-sed -i "s^ //^ /^g" $ALLOWLIST_DIR/${OUTPUT} +-# A bit of cleanup on the resulting file (among other problems, sha256sum might output a hash with the prefix '\\') +-sed -i "s/^\\\//g" $ALLOWLIST_DIR/${OUTPUT} +- +-# Convert to runtime policy +-mkdir -p $OUTPUT_DIR +-announce "Converting created allowlist ($ALLOWLIST_DIR/${OUTPUT}) to Keylime runtime policy ($OUTPUT_DIR/${OUTPUT}) ..." +-CONVERT_CMD_OPTS="--allowlist $ALLOWLIST_DIR/${OUTPUT} --output_file $OUTPUT_DIR/${OUTPUT}" +-[ -f $EXCLUDE_LIST ] && CONVERT_CMD_OPTS="$CONVERT_CMD_OPTS --excludelist "$(readlink -f -- "${EXCLUDE_LIST}")"" ++ find -type f -exec $ALGO "./{}" \; | sed "s| \./\./| /|" >> $OUTPUT ++done + +-pushd $KCRP_BASE_DIR > /dev/null 2>&1 +-export PYTHONPATH=$KCRP_BASE_DIR:$PYTHONPATH +-# only 3 dependencies required: pip3 install cryptography lark packaging +-python3 ./keylime/cmd/convert_runtime_policy.py $CONVERT_CMD_OPTS; echo " " +-if [[ $? -eq 0 ]] +-then +- announce "Done, new runtime policy file present at ${OUTPUT_DIR}/$OUTPUT. It can be used on the tenant keylime host with \"keylime_tenant -c add --runtime-policy ${OUTPUT_DIR}/$OUTPUT " +-fi +-popd > /dev/null 2>&1 ++# when ROOTFS_LOC = '/', the path starts on allowlist ends up with double '//' ++# ++# Example: ++# ++# b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c //bar ++# ++# Replace the unwanted '//' with a single '/' ++sed -i 's| /\+| /|g' $ALLOWLIST_DIR/${OUTPUT} ++ ++# When the file name contains newlines or backslashes, the output of sha256sum ++# adds a backslash at the beginning of the line. ++# ++# Example: ++# ++# $ echo foo > ba\\r ++# $ sha256sum ba\\r ++# \b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c ba\\r ++# ++# Remove the unwanted backslash prefix ++sed -i 's/^\\//g' $ALLOWLIST_DIR/${OUTPUT} ++ ++# Clean up ++rm -rf /tmp/ima +-- +2.47.1 + diff --git a/SOURCES/0005-elchecking-example-add-ignores-for-EV_PLATFORM_CONFI.patch b/SOURCES/0005-elchecking-example-add-ignores-for-EV_PLATFORM_CONFI.patch deleted file mode 100644 index 18a242d..0000000 --- a/SOURCES/0005-elchecking-example-add-ignores-for-EV_PLATFORM_CONFI.patch +++ /dev/null @@ -1,50 +0,0 @@ -From f2432efbeb7b6305067111bb3a77ef5d7da4eb5b Mon Sep 17 00:00:00 2001 -From: Thore Sommer -Date: Thu, 10 Aug 2023 16:15:57 +0300 -Subject: [PATCH 5/6] elchecking/example: add ignores for - EV_PLATFORM_CONFIG_FLAGS - -These are generated by edk2 when used with QEMU, but we do not have a -reference for them. 
- -Signed-off-by: Thore Sommer ---- - keylime/mba/elchecking/example.py | 15 ++++++++++++++- - 1 file changed, 14 insertions(+), 1 deletion(-) - -diff --git a/keylime/mba/elchecking/example.py b/keylime/mba/elchecking/example.py -index 8885227..921db4e 100644 ---- a/keylime/mba/elchecking/example.py -+++ b/keylime/mba/elchecking/example.py -@@ -75,7 +75,6 @@ shim_authcode_sha256_no_secureboot = tests.obj_test( - kernel_cmdline=tests.type_test(str), - ) - -- - allowed_kernel_list_test_no_secureboot = tests.list_test(shim_authcode_sha256_no_secureboot) - - -@@ -303,6 +302,20 @@ class Example(policies.Policy): - ), - ), - ) -+ # edk2 measures up to 4 of those events, where we do not have a good way to get a reference -+ # See: -+ # - https://github.com/keylime/keylime/issues/1393 -+ # - https://github.com/tianocore/edk2/commit/935343cf1639a28530904a1e8d73d6517a07cbff -+ dispatcher.set( -+ (1, "EV_PLATFORM_CONFIG_FLAGS"), -+ tests.Or( -+ tests.OnceTest(tests.AcceptAll()), -+ tests.OnceTest(tests.AcceptAll()), -+ tests.OnceTest(tests.AcceptAll()), -+ tests.OnceTest(tests.AcceptAll()), -+ ), -+ ) -+ - dispatcher.set((4, "EV_EFI_ACTION"), tests.EvEfiActionTest(4)) - for pcr in range(8): - dispatcher.set((pcr, "EV_SEPARATOR"), tests.EvSeperatorTest()) --- -2.39.3 - diff --git a/SOURCES/0006-Revert-default-server_key_password-for-verifier-regi.patch b/SOURCES/0006-Revert-default-server_key_password-for-verifier-regi.patch new file mode 100644 index 0000000..48d8420 --- /dev/null +++ b/SOURCES/0006-Revert-default-server_key_password-for-verifier-regi.patch @@ -0,0 +1,66 @@ +From 733db4036f2142152795fc51b761f05e39594b08 Mon Sep 17 00:00:00 2001 +From: Sergio Correia +Date: Tue, 27 May 2025 09:31:54 +0000 +Subject: [PATCH 6/6] Revert "default" server_key_password for + verifier/registrar + +Signed-off-by: Sergio Correia +--- + templates/2.0/mapping.json | 4 ++-- + templates/2.1/mapping.json | 6 +++--- + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/templates/2.0/mapping.json b/templates/2.0/mapping.json +index 80dcdde..8fce124 100644 +--- a/templates/2.0/mapping.json ++++ b/templates/2.0/mapping.json +@@ -232,7 +232,7 @@ + "server_key_password": { + "section": "cloud_verifier", + "option": "private_key_pw", +- "default": "" ++ "default": "default" + }, + "enable_agent_mtls": { + "section": "cloud_verifier", +@@ -563,7 +563,7 @@ + "server_key_password": { + "section": "registrar", + "option": "private_key_pw", +- "default": "" ++ "default": "default" + }, + "server_cert": { + "section": "registrar", +diff --git a/templates/2.1/mapping.json b/templates/2.1/mapping.json +index 956a53a..88e3fb6 100644 +--- a/templates/2.1/mapping.json ++++ b/templates/2.1/mapping.json +@@ -262,7 +262,7 @@ + "server_key_password": { + "section": "verifier", + "option": "server_key_password", +- "default": "" ++ "default": "default" + }, + "enable_agent_mtls": { + "section": "verifier", +@@ -593,7 +593,7 @@ + "server_key_password": { + "section": "registrar", + "option": "server_key_password", +- "default": "" ++ "default": "default" + }, + "server_cert": { + "section": "registrar", +@@ -835,4 +835,4 @@ + "handler_consoleHandler": "logging", + "logger_keylime": "logging" + } +-} +\ No newline at end of file ++} +-- +2.47.1 + diff --git a/SOURCES/0006-Revert-mapping-changes.patch b/SOURCES/0006-Revert-mapping-changes.patch deleted file mode 100644 index e06a1c0..0000000 --- a/SOURCES/0006-Revert-mapping-changes.patch +++ /dev/null @@ -1,43 +0,0 @@ -From ed213b9533535ceae5026b2fab274f80bcc58cb8 Mon Sep 17 
00:00:00 2001 -From: rpm-build -Date: Tue, 15 Aug 2023 09:18:32 -0400 -Subject: [PATCH 6/6] Revert mapping changes - ---- - templates/2.0/mapping.json | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/templates/2.0/mapping.json b/templates/2.0/mapping.json -index 66addbc..0036b63 100644 ---- a/templates/2.0/mapping.json -+++ b/templates/2.0/mapping.json -@@ -207,7 +207,7 @@ - "registrar_port": { - "section": "cloud_verifier", - "option": "registrar_port", -- "default": "8881" -+ "default": "8891" - }, - "tls_dir": { - "section": "cloud_verifier", -@@ -232,7 +232,7 @@ - "server_key_password": { - "section": "cloud_verifier", - "option": "private_key_pw", -- "default": "" -+ "default": "default" - }, - "enable_agent_mtls": { - "section": "cloud_verifier", -@@ -558,7 +558,7 @@ - "server_key_password": { - "section": "registrar", - "option": "private_key_pw", -- "default": "" -+ "default": "default" - }, - "server_cert": { - "section": "registrar", --- -2.39.3 - diff --git a/SOURCES/0007-Handle-session-close-using-a-session-manager.patch b/SOURCES/0007-Handle-session-close-using-a-session-manager.patch deleted file mode 100644 index ead77ea..0000000 --- a/SOURCES/0007-Handle-session-close-using-a-session-manager.patch +++ /dev/null @@ -1,90 +0,0 @@ -From 3dc40e8b1878d84045ee80cb6d216348713c048a Mon Sep 17 00:00:00 2001 -From: Karel Srot -Date: Tue, 15 Aug 2023 10:00:50 +0200 -Subject: [PATCH 7/7] Handle session close using a session manager - -Resolves https://github.com/keylime/keylime/issues/1455 - -Signed-off-by: Karel Srot ---- - keylime/revocation_notifier.py | 50 +++++++++++++++++----------------- - packit-ci.fmf | 1 + - 2 files changed, 26 insertions(+), 25 deletions(-) - -diff --git a/keylime/revocation_notifier.py b/keylime/revocation_notifier.py -index 31a3095..5cc8b1a 100644 ---- a/keylime/revocation_notifier.py -+++ b/keylime/revocation_notifier.py -@@ -132,32 +132,32 @@ def notify_webhook(tosend: Dict[str, Any]) -> None: - def worker_webhook(tosend: Dict[str, Any], url: str) -> None: - interval = config.getfloat("verifier", "retry_interval") - exponential_backoff = config.getboolean("verifier", "exponential_backoff") -- session = requests.session() -- logger.info("Sending revocation event via webhook...") -- for i in range(config.getint("verifier", "max_retries")): -- next_retry = retry.retry_time(exponential_backoff, interval, i, logger) -- try: -- response = session.post(url, json=tosend, timeout=5) -- if response.status_code in [200, 202]: -- break -- -- logger.debug( -- "Unable to publish revocation message %d times via webhook, " -- "trying again in %d seconds. " -- "Server returned status code: %s", -- i, -- next_retry, -- response.status_code, -- ) -- except requests.exceptions.RequestException as e: -- logger.debug( -- "Unable to publish revocation message %d times via webhook, trying again in %d seconds: %s", -- i, -- next_retry, -- e, -- ) -+ with requests.Session() as session: -+ logger.info("Sending revocation event via webhook...") -+ for i in range(config.getint("verifier", "max_retries")): -+ next_retry = retry.retry_time(exponential_backoff, interval, i, logger) -+ try: -+ response = session.post(url, json=tosend, timeout=5) -+ if response.status_code in [200, 202]: -+ break -+ -+ logger.debug( -+ "Unable to publish revocation message %d times via webhook, " -+ "trying again in %d seconds. 
" -+ "Server returned status code: %s", -+ i, -+ next_retry, -+ response.status_code, -+ ) -+ except requests.exceptions.RequestException as e: -+ logger.debug( -+ "Unable to publish revocation message %d times via webhook, trying again in %d seconds: %s", -+ i, -+ next_retry, -+ e, -+ ) - -- time.sleep(next_retry) -+ time.sleep(next_retry) - - w = functools.partial(worker_webhook, tosend, url) - t = threading.Thread(target=w, daemon=True) -diff --git a/packit-ci.fmf b/packit-ci.fmf -index f4d2dae..7abe313 100644 ---- a/packit-ci.fmf -+++ b/packit-ci.fmf -@@ -108,6 +108,7 @@ adjust: - - /setup/configure_tpm_emulator - - /setup/install_upstream_keylime - - /setup/install_rust_keylime_from_copr -+ - /setup/configure_kernel_ima_module/ima_policy_simple - - /functional/basic-attestation-on-localhost - - /functional/basic-attestation-with-custom-certificates - - /functional/basic-attestation-without-mtls --- -2.41.0 - diff --git a/SOURCES/0007-fix_db_connection_leaks.patch b/SOURCES/0007-fix_db_connection_leaks.patch new file mode 100644 index 0000000..64be967 --- /dev/null +++ b/SOURCES/0007-fix_db_connection_leaks.patch @@ -0,0 +1,2208 @@ +diff --git a/keylime/cloud_verifier_tornado.py b/keylime/cloud_verifier_tornado.py +index 8ab81d1..7553ac8 100644 +--- a/keylime/cloud_verifier_tornado.py ++++ b/keylime/cloud_verifier_tornado.py +@@ -7,7 +7,8 @@ import sys + import traceback + from concurrent.futures import ThreadPoolExecutor + from multiprocessing import Process +-from typing import Any, Dict, List, Optional, Tuple, Union, cast ++from contextlib import contextmanager ++from typing import Any, Dict, Iterator, List, Optional, Tuple, Union, cast + + import tornado.httpserver + import tornado.ioloop +@@ -34,7 +35,7 @@ from keylime.agentstates import AgentAttestState, AgentAttestStates + from keylime.common import retry, states, validators + from keylime.common.version import str_to_version + from keylime.da import record +-from keylime.db.keylime_db import DBEngineManager, SessionManager ++from keylime.db.keylime_db import SessionManager, make_engine + from keylime.db.verifier_db import VerfierMain, VerifierAllowlist, VerifierMbpolicy + from keylime.failure import MAX_SEVERITY_LABEL, Component, Event, Failure, set_severity_config + from keylime.ima import ima +@@ -47,7 +48,7 @@ GLOBAL_POLICY_CACHE: Dict[str, Dict[str, str]] = {} + set_severity_config(config.getlist("verifier", "severity_labels"), config.getlist("verifier", "severity_policy")) + + try: +- engine = DBEngineManager().make_engine("cloud_verifier") ++ engine = make_engine("cloud_verifier") + except SQLAlchemyError as err: + logger.error("Error creating SQL engine or session: %s", err) + sys.exit(1) +@@ -61,8 +62,17 @@ except record.RecordManagementException as rme: + sys.exit(1) + + +-def get_session() -> Session: +- return SessionManager().make_session(engine) ++@contextmanager ++def session_context() -> Iterator[Session]: ++ """ ++ Context manager for database sessions that ensures proper cleanup. 
++ To use: ++ with session_context() as session: ++ # use session ++ """ ++ session_manager = SessionManager() ++ with session_manager.session_context(engine) as session: ++ yield session + + + def get_AgentAttestStates() -> AgentAttestStates: +@@ -130,19 +140,18 @@ def _from_db_obj(agent_db_obj: VerfierMain) -> Dict[str, Any]: + return agent_dict + + +-def verifier_read_policy_from_cache(stored_agent: VerfierMain) -> str: +- checksum = "" +- name = "empty" +- agent_id = str(stored_agent.agent_id) ++def verifier_read_policy_from_cache(ima_policy_data: Dict[str, str]) -> str: ++ checksum = ima_policy_data.get("checksum", "") ++ name = ima_policy_data.get("name", "empty") ++ agent_id = ima_policy_data.get("agent_id", "") ++ ++ if not agent_id: ++ return "" + + if agent_id not in GLOBAL_POLICY_CACHE: + GLOBAL_POLICY_CACHE[agent_id] = {} + GLOBAL_POLICY_CACHE[agent_id][""] = "" + +- if stored_agent.ima_policy: +- checksum = str(stored_agent.ima_policy.checksum) +- name = stored_agent.ima_policy.name +- + if checksum not in GLOBAL_POLICY_CACHE[agent_id]: + if len(GLOBAL_POLICY_CACHE[agent_id]) > 1: + # Perform a cleanup of the contents, IMA policy checksum changed +@@ -162,8 +171,9 @@ def verifier_read_policy_from_cache(stored_agent: VerfierMain) -> str: + checksum, + agent_id, + ) +- # Actually contacts the database and load the (large) ima_policy column for "allowlists" table +- ima_policy = stored_agent.ima_policy.ima_policy ++ ++ # Get the large ima_policy content - it's already loaded in ima_policy_data ++ ima_policy = ima_policy_data.get("ima_policy", "") + assert isinstance(ima_policy, str) + GLOBAL_POLICY_CACHE[agent_id][checksum] = ima_policy + +@@ -182,22 +192,19 @@ def store_attestation_state(agentAttestState: AgentAttestState) -> None: + # Only store if IMA log was evaluated + if agentAttestState.get_ima_pcrs(): + agent_id = agentAttestState.agent_id +- session = get_session() + try: +- update_agent = session.query(VerfierMain).get(agentAttestState.get_agent_id()) +- assert update_agent +- update_agent.boottime = agentAttestState.get_boottime() +- update_agent.next_ima_ml_entry = agentAttestState.get_next_ima_ml_entry() +- ima_pcrs_dict = agentAttestState.get_ima_pcrs() +- update_agent.ima_pcrs = list(ima_pcrs_dict.keys()) +- for pcr_num, value in ima_pcrs_dict.items(): +- setattr(update_agent, f"pcr{pcr_num}", value) +- update_agent.learned_ima_keyrings = agentAttestState.get_ima_keyrings().to_json() +- try: ++ with session_context() as session: ++ update_agent = session.query(VerfierMain).get(agentAttestState.get_agent_id()) ++ assert update_agent ++ update_agent.boottime = agentAttestState.get_boottime() ++ update_agent.next_ima_ml_entry = agentAttestState.get_next_ima_ml_entry() ++ ima_pcrs_dict = agentAttestState.get_ima_pcrs() ++ update_agent.ima_pcrs = list(ima_pcrs_dict.keys()) ++ for pcr_num, value in ima_pcrs_dict.items(): ++ setattr(update_agent, f"pcr{pcr_num}", value) ++ update_agent.learned_ima_keyrings = agentAttestState.get_ima_keyrings().to_json() + session.add(update_agent) +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error on storing attestation state for agent %s: %s", agent_id, e) +- session.commit() ++ # session.commit() is automatically called by context manager + except SQLAlchemyError as e: + logger.error("SQLAlchemy Error on storing attestation state for agent %s: %s", agent_id, e) + +@@ -354,45 +361,17 @@ class AgentsHandler(BaseHandler): + was not found, it either completed successfully, or failed. 
If found, the agent_id is still polling + to contact the Cloud Agent. + """ +- session = get_session() +- + rest_params, agent_id = self.__validate_input("GET") + if not rest_params: + return + +- if (agent_id is not None) and (agent_id != ""): +- # If the agent ID is not valid (wrong set of characters), +- # just do nothing. +- agent = None +- try: +- agent = ( +- session.query(VerfierMain) +- .options( # type: ignore +- joinedload(VerfierMain.ima_policy).load_only( +- VerifierAllowlist.checksum, VerifierAllowlist.generator # pyright: ignore +- ) +- ) +- .options( # type: ignore +- joinedload(VerfierMain.mb_policy).load_only(VerifierMbpolicy.mb_policy) # pyright: ignore +- ) +- .filter_by(agent_id=agent_id) +- .one_or_none() +- ) +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) +- +- if agent is not None: +- response = cloud_verifier_common.process_get_status(agent) +- web_util.echo_json_response(self, 200, "Success", response) +- else: +- web_util.echo_json_response(self, 404, "agent id not found") +- else: +- json_response = None +- if "bulk" in rest_params: +- agent_list = None +- +- if ("verifier" in rest_params) and (rest_params["verifier"] != ""): +- agent_list = ( ++ with session_context() as session: ++ if (agent_id is not None) and (agent_id != ""): ++ # If the agent ID is not valid (wrong set of characters), ++ # just do nothing. ++ agent = None ++ try: ++ agent = ( + session.query(VerfierMain) + .options( # type: ignore + joinedload(VerfierMain.ima_policy).load_only( +@@ -402,39 +381,70 @@ class AgentsHandler(BaseHandler): + .options( # type: ignore + joinedload(VerfierMain.mb_policy).load_only(VerifierMbpolicy.mb_policy) # pyright: ignore + ) +- .filter_by(verifier_id=rest_params["verifier"]) +- .all() ++ .filter_by(agent_id=agent_id) ++ .one_or_none() + ) ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) ++ ++ if agent is not None: ++ response = cloud_verifier_common.process_get_status(agent) ++ web_util.echo_json_response(self, 200, "Success", response) + else: +- agent_list = ( +- session.query(VerfierMain) +- .options( # type: ignore +- joinedload(VerfierMain.ima_policy).load_only( +- VerifierAllowlist.checksum, VerifierAllowlist.generator # pyright: ignore ++ web_util.echo_json_response(self, 404, "agent id not found") ++ else: ++ json_response = None ++ if "bulk" in rest_params: ++ agent_list = None ++ ++ if ("verifier" in rest_params) and (rest_params["verifier"] != ""): ++ agent_list = ( ++ session.query(VerfierMain) ++ .options( # type: ignore ++ joinedload(VerfierMain.ima_policy).load_only( ++ VerifierAllowlist.checksum, VerifierAllowlist.generator # pyright: ignore ++ ) + ) ++ .options( # type: ignore ++ joinedload(VerfierMain.mb_policy).load_only( ++ VerifierMbpolicy.mb_policy # type: ignore[arg-type] ++ ) ++ ) ++ .filter_by(verifier_id=rest_params["verifier"]) ++ .all() + ) +- .options( # type: ignore +- joinedload(VerfierMain.mb_policy).load_only(VerifierMbpolicy.mb_policy) # pyright: ignore ++ else: ++ agent_list = ( ++ session.query(VerfierMain) ++ .options( # type: ignore ++ joinedload(VerfierMain.ima_policy).load_only( ++ VerifierAllowlist.checksum, VerifierAllowlist.generator # pyright: ignore ++ ) ++ ) ++ .options( # type: ignore ++ joinedload(VerfierMain.mb_policy).load_only( ++ VerifierMbpolicy.mb_policy # type: ignore[arg-type] ++ ) ++ ) ++ .all() + ) +- .all() +- ) + +- json_response = {} +- for agent in agent_list: +- json_response[agent.agent_id] = 
cloud_verifier_common.process_get_status(agent) ++ json_response = {} ++ for agent in agent_list: ++ json_response[agent.agent_id] = cloud_verifier_common.process_get_status(agent) + +- web_util.echo_json_response(self, 200, "Success", json_response) +- else: +- if ("verifier" in rest_params) and (rest_params["verifier"] != ""): +- json_response_list = ( +- session.query(VerfierMain.agent_id).filter_by(verifier_id=rest_params["verifier"]).all() +- ) ++ web_util.echo_json_response(self, 200, "Success", json_response) + else: +- json_response_list = session.query(VerfierMain.agent_id).all() ++ if ("verifier" in rest_params) and (rest_params["verifier"] != ""): ++ json_response_list = ( ++ session.query(VerfierMain.agent_id).filter_by(verifier_id=rest_params["verifier"]).all() ++ ) ++ else: ++ json_response_list = session.query(VerfierMain.agent_id).all() + +- web_util.echo_json_response(self, 200, "Success", {"uuids": json_response_list}) ++ web_util.echo_json_response(self, 200, "Success", {"uuids": json_response_list}) + +- logger.info("GET returning 200 response for agent_id list") ++ logger.info("GET returning 200 response for agent_id list") + + def delete(self) -> None: + """This method handles the DELETE requests to remove agents from the Cloud Verifier. +@@ -442,59 +452,55 @@ class AgentsHandler(BaseHandler): + Currently, only agents resources are available for DELETEing, i.e. /agents. All other DELETE uri's will return errors. + agents requests require a single agent_id parameter which identifies the agent to be deleted. + """ +- session = get_session() +- + rest_params, agent_id = self.__validate_input("DELETE") + if not rest_params or not agent_id: + return + +- agent = None +- try: +- agent = session.query(VerfierMain).filter_by(agent_id=agent_id).first() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) ++ with session_context() as session: ++ agent = None ++ try: ++ agent = session.query(VerfierMain).filter_by(agent_id=agent_id).first() ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) + +- if agent is None: +- web_util.echo_json_response(self, 404, "agent id not found") +- logger.info("DELETE returning 404 response. agent id: %s not found.", agent_id) +- return ++ if agent is None: ++ web_util.echo_json_response(self, 404, "agent id not found") ++ logger.info("DELETE returning 404 response. agent id: %s not found.", agent_id) ++ return + +- verifier_id = config.get("verifier", "uuid", fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID) +- if verifier_id != agent.verifier_id: +- web_util.echo_json_response(self, 404, "agent id associated to this verifier") +- logger.info("DELETE returning 404 response. agent id: %s not associated to this verifer.", agent_id) +- return ++ verifier_id = config.get("verifier", "uuid", fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID) ++ if verifier_id != agent.verifier_id: ++ web_util.echo_json_response(self, 404, "agent id associated to this verifier") ++ logger.info("DELETE returning 404 response. agent id: %s not associated to this verifer.", agent_id) ++ return + +- # Cleanup the cache when the agent is deleted. Do it early. +- if agent_id in GLOBAL_POLICY_CACHE: +- del GLOBAL_POLICY_CACHE[agent_id] +- logger.debug( +- "Cleaned up policy cache from all entries used by agent %s", +- agent_id, +- ) ++ # Cleanup the cache when the agent is deleted. Do it early. 
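++ # GLOBAL_POLICY_CACHE is keyed by agent_id, so dropping the key clears every cached checksum/policy pair for this agent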
++ if agent_id in GLOBAL_POLICY_CACHE: ++ del GLOBAL_POLICY_CACHE[agent_id] ++ logger.debug( ++ "Cleaned up policy cache from all entries used by agent %s", ++ agent_id, ++ ) + +- op_state = agent.operational_state +- if op_state in (states.SAVED, states.FAILED, states.TERMINATED, states.TENANT_FAILED, states.INVALID_QUOTE): +- try: +- verifier_db_delete_agent(session, agent_id) +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- web_util.echo_json_response(self, 200, "Success") +- logger.info("DELETE returning 200 response for agent id: %s", agent_id) +- else: +- try: +- update_agent = session.query(VerfierMain).get(agent_id) +- assert update_agent +- update_agent.operational_state = states.TERMINATED ++ op_state = agent.operational_state ++ if op_state in (states.SAVED, states.FAILED, states.TERMINATED, states.TENANT_FAILED, states.INVALID_QUOTE): + try: ++ verifier_db_delete_agent(session, agent_id) ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ web_util.echo_json_response(self, 200, "Success") ++ logger.info("DELETE returning 200 response for agent id: %s", agent_id) ++ else: ++ try: ++ update_agent = session.query(VerfierMain).get(agent_id) ++ assert update_agent ++ update_agent.operational_state = states.TERMINATED + session.add(update_agent) ++ # session.commit() is automatically called by context manager ++ web_util.echo_json_response(self, 202, "Accepted") ++ logger.info("DELETE returning 202 response for agent id: %s", agent_id) + except SQLAlchemyError as e: + logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) +- session.commit() +- web_util.echo_json_response(self, 202, "Accepted") +- logger.info("DELETE returning 202 response for agent id: %s", agent_id) +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) + + def post(self) -> None: + """This method handles the POST requests to add agents to the Cloud Verifier. +@@ -502,7 +508,6 @@ class AgentsHandler(BaseHandler): + Currently, only agents resources are available for POSTing, i.e. /agents. All other POST uri's will return errors. + agents requests require a json block sent in the body + """ +- session = get_session() + # TODO: exception handling needs fixing + # Maybe handle exceptions with if/else if/else blocks ... simple and avoids nesting + try: # pylint: disable=too-many-nested-blocks +@@ -585,201 +590,208 @@ class AgentsHandler(BaseHandler): + runtime_policy = base64.b64decode(json_body.get("runtime_policy")).decode() + runtime_policy_stored = None + +- if runtime_policy_name: ++ with session_context() as session: ++ if runtime_policy_name: ++ try: ++ runtime_policy_stored = ( ++ session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).one_or_none() ++ ) ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) ++ raise ++ ++ # Prevent overwriting existing IMA policies with name provided in request ++ if runtime_policy and runtime_policy_stored: ++ web_util.echo_json_response( ++ self, ++ 409, ++ f"IMA policy with name {runtime_policy_name} already exists. 
Please use a different name or delete the allowlist from the verifier.", ++ ) ++ logger.warning("IMA policy with name %s already exists", runtime_policy_name) ++ return ++ ++ # Return an error code if the named allowlist does not exist in the database ++ if not runtime_policy and not runtime_policy_stored: ++ web_util.echo_json_response( ++ self, 404, f"Could not find IMA policy with name {runtime_policy_name}!" ++ ) ++ logger.warning("Could not find IMA policy with name %s", runtime_policy_name) ++ return ++ ++ # Prevent overwriting existing agents with UUID provided in request + try: +- runtime_policy_stored = ( +- session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).one_or_none() +- ) ++ new_agent_count = session.query(VerfierMain).filter_by(agent_id=agent_id).count() + except SQLAlchemyError as e: + logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) +- raise ++ raise e + +- # Prevent overwriting existing IMA policies with name provided in request +- if runtime_policy and runtime_policy_stored: ++ if new_agent_count > 0: + web_util.echo_json_response( + self, + 409, +- f"IMA policy with name {runtime_policy_name} already exists. Please use a different name or delete the allowlist from the verifier.", ++ f"Agent of uuid {agent_id} already exists. Please use delete or update.", + ) +- logger.warning("IMA policy with name %s already exists", runtime_policy_name) ++ logger.warning("Agent of uuid %s already exists", agent_id) + return + +- # Return an error code if the named allowlist does not exist in the database +- if not runtime_policy and not runtime_policy_stored: +- web_util.echo_json_response( +- self, 404, f"Could not find IMA policy with name {runtime_policy_name}!" +- ) +- logger.warning("Could not find IMA policy with name %s", runtime_policy_name) +- return +- +- # Prevent overwriting existing agents with UUID provided in request +- try: +- new_agent_count = session.query(VerfierMain).filter_by(agent_id=agent_id).count() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) +- raise e +- +- if new_agent_count > 0: +- web_util.echo_json_response( +- self, +- 409, +- f"Agent of uuid {agent_id} already exists. Please use delete or update.", +- ) +- logger.warning("Agent of uuid %s already exists", agent_id) +- return +- +- # Write IMA policy to database if needed +- if not runtime_policy_name and not runtime_policy: +- logger.info("IMA policy data not provided with request! Using default empty IMA policy.") +- runtime_policy = json.dumps(cast(Dict[str, Any], ima.EMPTY_RUNTIME_POLICY)) ++ # Write IMA policy to database if needed ++ if not runtime_policy_name and not runtime_policy: ++ logger.info("IMA policy data not provided with request! 
Using default empty IMA policy.") ++ runtime_policy = json.dumps(cast(Dict[str, Any], ima.EMPTY_RUNTIME_POLICY)) + +- if runtime_policy: +- runtime_policy_key_bytes = signing.get_runtime_policy_keys( +- runtime_policy.encode(), +- json_body.get("runtime_policy_key"), +- ) +- +- try: +- ima.verify_runtime_policy( ++ if runtime_policy: ++ runtime_policy_key_bytes = signing.get_runtime_policy_keys( + runtime_policy.encode(), +- runtime_policy_key_bytes, +- verify_sig=config.getboolean( +- "verifier", "require_allow_list_signatures", fallback=False +- ), ++ json_body.get("runtime_policy_key"), + ) +- except ima.ImaValidationError as e: +- web_util.echo_json_response(self, e.code, e.message) +- logger.warning(e.message) +- return + +- if not runtime_policy_name: +- runtime_policy_name = agent_id +- +- try: +- runtime_policy_db_format = ima.runtime_policy_db_contents( +- runtime_policy_name, runtime_policy +- ) +- except ima.ImaValidationError as e: +- message = f"Runtime policy is malformatted: {e.message}" +- web_util.echo_json_response(self, e.code, message) +- logger.warning(message) +- return +- +- try: +- runtime_policy_stored = ( +- session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).one_or_none() +- ) +- except SQLAlchemyError as e: +- logger.error( +- "SQLAlchemy Error while retrieving stored ima policy for agent ID %s: %s", agent_id, e +- ) +- raise +- try: +- if runtime_policy_stored is None: +- runtime_policy_stored = VerifierAllowlist(**runtime_policy_db_format) +- session.add(runtime_policy_stored) ++ try: ++ ima.verify_runtime_policy( ++ runtime_policy.encode(), ++ runtime_policy_key_bytes, ++ verify_sig=config.getboolean( ++ "verifier", "require_allow_list_signatures", fallback=False ++ ), ++ ) ++ except ima.ImaValidationError as e: ++ web_util.echo_json_response(self, e.code, e.message) ++ logger.warning(e.message) ++ return ++ ++ if not runtime_policy_name: ++ runtime_policy_name = agent_id ++ ++ try: ++ runtime_policy_db_format = ima.runtime_policy_db_contents( ++ runtime_policy_name, runtime_policy ++ ) ++ except ima.ImaValidationError as e: ++ message = f"Runtime policy is malformatted: {e.message}" ++ web_util.echo_json_response(self, e.code, message) ++ logger.warning(message) ++ return ++ ++ try: ++ runtime_policy_stored = ( ++ session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).one_or_none() ++ ) ++ except SQLAlchemyError as e: ++ logger.error( ++ "SQLAlchemy Error while retrieving stored ima policy for agent ID %s: %s", ++ agent_id, ++ e, ++ ) ++ raise ++ try: ++ if runtime_policy_stored is None: ++ runtime_policy_stored = VerifierAllowlist(**runtime_policy_db_format) ++ session.add(runtime_policy_stored) ++ session.commit() ++ except SQLAlchemyError as e: ++ logger.error( ++ "SQLAlchemy Error while updating ima policy for agent ID %s: %s", agent_id, e ++ ) ++ raise ++ ++ # Handle measured boot policy ++ # - No name, mb_policy : store mb_policy using agent UUID as name ++ # - Name, no mb_policy : fetch existing mb_policy from DB ++ # - Name, mb_policy : store mb_policy using name ++ ++ mb_policy_name = json_body["mb_policy_name"] ++ mb_policy = json_body["mb_policy"] ++ mb_policy_stored = None ++ ++ if mb_policy_name: ++ try: ++ mb_policy_stored = ( ++ session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).one_or_none() ++ ) ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) ++ raise ++ ++ # Prevent overwriting existing mb_policy with name provided in request ++ if mb_policy and 
mb_policy_stored: ++ web_util.echo_json_response( ++ self, ++ 409, ++ f"mb_policy with name {mb_policy_name} already exists. Please use a different name or delete the mb_policy from the verifier.", ++ ) ++ logger.warning("mb_policy with name %s already exists", mb_policy_name) ++ return ++ ++ # Return error if the mb_policy is neither provided nor stored. ++ if not mb_policy and not mb_policy_stored: ++ web_util.echo_json_response( ++ self, 404, f"Could not find mb_policy with name {mb_policy_name}!" ++ ) ++ logger.warning("Could not find mb_policy with name %s", mb_policy_name) ++ return ++ ++ else: ++ # Use the UUID of the agent ++ mb_policy_name = agent_id ++ try: ++ mb_policy_stored = ( ++ session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).one_or_none() ++ ) ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) ++ raise ++ ++ # Prevent overwriting existing mb_policy ++ if mb_policy and mb_policy_stored: ++ web_util.echo_json_response( ++ self, ++ 409, ++ f"mb_policy with name {mb_policy_name} already exists. You can delete the mb_policy from the verifier.", ++ ) ++ logger.warning("mb_policy with name %s already exists", mb_policy_name) ++ return ++ ++ # Store the policy into database if not stored ++ if mb_policy_stored is None: ++ try: ++ mb_policy_db_format = mba.mb_policy_db_contents(mb_policy_name, mb_policy) ++ mb_policy_stored = VerifierMbpolicy(**mb_policy_db_format) ++ session.add(mb_policy_stored) + session.commit() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error while updating ima policy for agent ID %s: %s", agent_id, e) +- raise +- +- # Handle measured boot policy +- # - No name, mb_policy : store mb_policy using agent UUID as name +- # - Name, no mb_policy : fetch existing mb_policy from DB +- # - Name, mb_policy : store mb_policy using name +- +- mb_policy_name = json_body["mb_policy_name"] +- mb_policy = json_body["mb_policy"] +- mb_policy_stored = None ++ except SQLAlchemyError as e: ++ logger.error( ++ "SQLAlchemy Error while updating mb_policy for agent ID %s: %s", agent_id, e ++ ) ++ raise + +- if mb_policy_name: ++ # Write the agent to the database, attaching associated stored ima_policy and mb_policy + try: +- mb_policy_stored = ( +- session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).one_or_none() ++ assert runtime_policy_stored ++ assert mb_policy_stored ++ session.add( ++ VerfierMain(**agent_data, ima_policy=runtime_policy_stored, mb_policy=mb_policy_stored) + ) ++ session.commit() + except SQLAlchemyError as e: + logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) +- raise ++ raise e + +- # Prevent overwriting existing mb_policy with name provided in request +- if mb_policy and mb_policy_stored: +- web_util.echo_json_response( +- self, +- 409, +- f"mb_policy with name {mb_policy_name} already exists. Please use a different name or delete the mb_policy from the verifier.", +- ) +- logger.warning("mb_policy with name %s already exists", mb_policy_name) +- return ++ # add default fields that are ephemeral ++ for key, val in exclude_db.items(): ++ agent_data[key] = val + +- # Return error if the mb_policy is neither provided nor stored. +- if not mb_policy and not mb_policy_stored: +- web_util.echo_json_response( +- self, 404, f"Could not find mb_policy with name {mb_policy_name}!" 
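++ # the SSL context is kept in memory only; it is set up after the database writes, so it is never persisted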
++ # Prepare SSLContext for mTLS connections ++ agent_data["ssl_context"] = None ++ if agent_mtls_cert_enabled: ++ agent_data["ssl_context"] = web_util.generate_agent_tls_context( ++ "verifier", agent_data["mtls_cert"], logger=logger + ) +- logger.warning("Could not find mb_policy with name %s", mb_policy_name) +- return + +- else: +- # Use the UUID of the agent +- mb_policy_name = agent_id +- try: +- mb_policy_stored = ( +- session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).one_or_none() +- ) +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) +- raise +- +- # Prevent overwriting existing mb_policy +- if mb_policy and mb_policy_stored: +- web_util.echo_json_response( +- self, +- 409, +- f"mb_policy with name {mb_policy_name} already exists. You can delete the mb_policy from the verifier.", +- ) +- logger.warning("mb_policy with name %s already exists", mb_policy_name) +- return +- +- # Store the policy into database if not stored +- if mb_policy_stored is None: +- try: +- mb_policy_db_format = mba.mb_policy_db_contents(mb_policy_name, mb_policy) +- mb_policy_stored = VerifierMbpolicy(**mb_policy_db_format) +- session.add(mb_policy_stored) +- session.commit() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error while updating mb_policy for agent ID %s: %s", agent_id, e) +- raise ++ if agent_data["ssl_context"] is None: ++ logger.warning("Connecting to agent without mTLS: %s", agent_id) + +- # Write the agent to the database, attaching associated stored ima_policy and mb_policy +- try: +- assert runtime_policy_stored +- assert mb_policy_stored +- session.add( +- VerfierMain(**agent_data, ima_policy=runtime_policy_stored, mb_policy=mb_policy_stored) +- ) +- session.commit() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) +- raise e +- +- # add default fields that are ephemeral +- for key, val in exclude_db.items(): +- agent_data[key] = val +- +- # Prepare SSLContext for mTLS connections +- agent_data["ssl_context"] = None +- if agent_mtls_cert_enabled: +- agent_data["ssl_context"] = web_util.generate_agent_tls_context( +- "verifier", agent_data["mtls_cert"], logger=logger +- ) +- +- if agent_data["ssl_context"] is None: +- logger.warning("Connecting to agent without mTLS: %s", agent_id) +- +- asyncio.ensure_future(process_agent(agent_data, states.GET_QUOTE)) +- web_util.echo_json_response(self, 200, "Success") +- logger.info("POST returning 200 response for adding agent id: %s", agent_id) ++ asyncio.ensure_future(process_agent(agent_data, states.GET_QUOTE)) ++ web_util.echo_json_response(self, 200, "Success") ++ logger.info("POST returning 200 response for adding agent id: %s", agent_id) + else: + web_util.echo_json_response(self, 400, "uri not supported") + logger.warning("POST returning 400 response. uri not supported") +@@ -794,54 +806,54 @@ class AgentsHandler(BaseHandler): + Currently, only agents resources are available for PUTing, i.e. /agents. All other PUT uri's will return errors. 
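+ Supported agent operations are "reactivate" and "stop".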
+ agents requests require a json block sent in the body + """ +- session = get_session() + try: + rest_params, agent_id = self.__validate_input("PUT") + if not rest_params: + return + +- try: +- verifier_id = config.get("verifier", "uuid", fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID) +- db_agent = session.query(VerfierMain).filter_by(agent_id=agent_id, verifier_id=verifier_id).one() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) +- raise e ++ with session_context() as session: ++ try: ++ verifier_id = config.get("verifier", "uuid", fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID) ++ db_agent = session.query(VerfierMain).filter_by(agent_id=agent_id, verifier_id=verifier_id).one() ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) ++ raise e + +- if db_agent is None: +- web_util.echo_json_response(self, 404, "agent id not found") +- logger.info("PUT returning 404 response. agent id: %s not found.", agent_id) +- return ++ if db_agent is None: ++ web_util.echo_json_response(self, 404, "agent id not found") ++ logger.info("PUT returning 404 response. agent id: %s not found.", agent_id) ++ return + +- if "reactivate" in rest_params: +- agent = _from_db_obj(db_agent) ++ if "reactivate" in rest_params: ++ agent = _from_db_obj(db_agent) + +- if agent["mtls_cert"] and agent["mtls_cert"] != "disabled": +- agent["ssl_context"] = web_util.generate_agent_tls_context( +- "verifier", agent["mtls_cert"], logger=logger +- ) +- if agent["ssl_context"] is None: +- logger.warning("Connecting to agent without mTLS: %s", agent_id) ++ if agent["mtls_cert"] and agent["mtls_cert"] != "disabled": ++ agent["ssl_context"] = web_util.generate_agent_tls_context( ++ "verifier", agent["mtls_cert"], logger=logger ++ ) ++ if agent["ssl_context"] is None: ++ logger.warning("Connecting to agent without mTLS: %s", agent_id) + +- agent["operational_state"] = states.START +- asyncio.ensure_future(process_agent(agent, states.GET_QUOTE)) +- web_util.echo_json_response(self, 200, "Success") +- logger.info("PUT returning 200 response for agent id: %s", agent_id) +- elif "stop" in rest_params: +- # do stuff for terminate +- logger.debug("Stopping polling on %s", agent_id) +- try: +- session.query(VerfierMain).filter(VerfierMain.agent_id == agent_id).update( # pyright: ignore +- {"operational_state": states.TENANT_FAILED} +- ) +- session.commit() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) ++ agent["operational_state"] = states.START ++ asyncio.ensure_future(process_agent(agent, states.GET_QUOTE)) ++ web_util.echo_json_response(self, 200, "Success") ++ logger.info("PUT returning 200 response for agent id: %s", agent_id) ++ elif "stop" in rest_params: ++ # do stuff for terminate ++ logger.debug("Stopping polling on %s", agent_id) ++ try: ++ session.query(VerfierMain).filter(VerfierMain.agent_id == agent_id).update( # pyright: ignore ++ {"operational_state": states.TENANT_FAILED} ++ ) ++ # session.commit() is automatically called by context manager ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) + +- web_util.echo_json_response(self, 200, "Success") +- logger.info("PUT returning 200 response for agent id: %s", agent_id) +- else: +- web_util.echo_json_response(self, 400, "uri not supported") +- logger.warning("PUT returning 400 response. 
uri not supported") ++ web_util.echo_json_response(self, 200, "Success") ++ logger.info("PUT returning 200 response for agent id: %s", agent_id) ++ else: ++ web_util.echo_json_response(self, 400, "uri not supported") ++ logger.warning("PUT returning 400 response. uri not supported") + + except Exception as e: + web_util.echo_json_response(self, 400, f"Exception error: {str(e)}") +@@ -887,36 +899,36 @@ class AllowlistHandler(BaseHandler): + if not params_valid: + return + +- session = get_session() +- if allowlist_name is None: +- try: +- names_allowlists = session.query(VerifierAllowlist.name).all() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- web_util.echo_json_response(self, 500, "Failed to get names of allowlists") +- raise ++ with session_context() as session: ++ if allowlist_name is None: ++ try: ++ names_allowlists = session.query(VerifierAllowlist.name).all() ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ web_util.echo_json_response(self, 500, "Failed to get names of allowlists") ++ raise + +- names_response = [] +- for name in names_allowlists: +- names_response.append(name[0]) +- web_util.echo_json_response(self, 200, "Success", {"runtimepolicy names": names_response}) ++ names_response = [] ++ for name in names_allowlists: ++ names_response.append(name[0]) ++ web_util.echo_json_response(self, 200, "Success", {"runtimepolicy names": names_response}) + +- else: +- try: +- allowlist = session.query(VerifierAllowlist).filter_by(name=allowlist_name).one() +- except NoResultFound: +- web_util.echo_json_response(self, 404, f"Runtime policy {allowlist_name} not found") +- return +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- web_util.echo_json_response(self, 500, "Failed to get allowlist") +- raise ++ else: ++ try: ++ allowlist = session.query(VerifierAllowlist).filter_by(name=allowlist_name).one() ++ except NoResultFound: ++ web_util.echo_json_response(self, 404, f"Runtime policy {allowlist_name} not found") ++ return ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ web_util.echo_json_response(self, 500, "Failed to get allowlist") ++ raise + +- response = {} +- for field in ("name", "tpm_policy"): +- response[field] = getattr(allowlist, field, None) +- response["runtime_policy"] = getattr(allowlist, "ima_policy", None) +- web_util.echo_json_response(self, 200, "Success", response) ++ response = {} ++ for field in ("name", "tmp_policy"): ++ response[field] = getattr(allowlist, field, None) ++ response["runtime_policy"] = getattr(allowlist, "ima_policy", None) ++ web_util.echo_json_response(self, 200, "Success", response) + + def delete(self) -> None: + """Delete an allowlist +@@ -928,45 +940,44 @@ class AllowlistHandler(BaseHandler): + if not params_valid or allowlist_name is None: + return + +- session = get_session() +- try: +- runtime_policy = session.query(VerifierAllowlist).filter_by(name=allowlist_name).one() +- except NoResultFound: +- web_util.echo_json_response(self, 404, f"Runtime policy {allowlist_name} not found") +- return +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- web_util.echo_json_response(self, 500, "Failed to get allowlist") +- raise ++ with session_context() as session: ++ try: ++ runtime_policy = session.query(VerifierAllowlist).filter_by(name=allowlist_name).one() ++ except NoResultFound: ++ web_util.echo_json_response(self, 404, f"Runtime policy {allowlist_name} not found") ++ return ++ except SQLAlchemyError 
as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ web_util.echo_json_response(self, 500, "Failed to get allowlist") ++ raise + +- try: +- agent = session.query(VerfierMain).filter_by(ima_policy_id=runtime_policy.id).one_or_none() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- raise +- if agent is not None: +- web_util.echo_json_response( +- self, +- 409, +- f"Can't delete allowlist as it's currently in use by agent {agent.agent_id}", +- ) +- return ++ try: ++ agent = session.query(VerfierMain).filter_by(ima_policy_id=runtime_policy.id).one_or_none() ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ raise ++ if agent is not None: ++ web_util.echo_json_response( ++ self, ++ 409, ++ f"Can't delete allowlist as it's currently in use by agent {agent.agent_id}", ++ ) ++ return + +- try: +- session.query(VerifierAllowlist).filter_by(name=allowlist_name).delete() +- session.commit() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- session.close() +- web_util.echo_json_response(self, 500, f"Database error: {e}") +- raise ++ try: ++ session.query(VerifierAllowlist).filter_by(name=allowlist_name).delete() ++ # session.commit() is automatically called by context manager ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ web_util.echo_json_response(self, 500, f"Database error: {e}") ++ raise + +- # NOTE(kaifeng) 204 Can not have response body, but current helper +- # doesn't support this case. +- self.set_status(204) +- self.set_header("Content-Type", "application/json") +- self.finish() +- logger.info("DELETE returning 204 response for allowlist: %s", allowlist_name) ++ # NOTE(kaifeng) 204 Can not have response body, but current helper ++ # doesn't support this case. 
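++ # send the 204 manually: set the status and headers, then finish with an empty body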
++ self.set_status(204) ++ self.set_header("Content-Type", "application/json") ++ self.finish() ++ logger.info("DELETE returning 204 response for allowlist: %s", allowlist_name) + + def __get_runtime_policy_db_format(self, runtime_policy_name: str) -> Dict[str, Any]: + """Get the IMA policy from the request and return it in Db format""" +@@ -1022,28 +1033,30 @@ class AllowlistHandler(BaseHandler): + if not runtime_policy_db_format: + return + +- session = get_session() +- # don't allow overwritting +- try: +- runtime_policy_count = session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).count() +- if runtime_policy_count > 0: +- web_util.echo_json_response(self, 409, f"Runtime policy with name {runtime_policy_name} already exists") +- logger.warning("Runtime policy with name %s already exists", runtime_policy_name) +- return +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- raise ++ with session_context() as session: ++ # don't allow overwritting ++ try: ++ runtime_policy_count = session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).count() ++ if runtime_policy_count > 0: ++ web_util.echo_json_response( ++ self, 409, f"Runtime policy with name {runtime_policy_name} already exists" ++ ) ++ logger.warning("Runtime policy with name %s already exists", runtime_policy_name) ++ return ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ raise + +- try: +- # Add the agent and data +- session.add(VerifierAllowlist(**runtime_policy_db_format)) +- session.commit() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- raise ++ try: ++ # Add the agent and data ++ session.add(VerifierAllowlist(**runtime_policy_db_format)) ++ # session.commit() is automatically called by context manager ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ raise + +- web_util.echo_json_response(self, 201) +- logger.info("POST returning 201") ++ web_util.echo_json_response(self, 201) ++ logger.info("POST returning 201") + + def put(self) -> None: + """Update an allowlist +@@ -1060,32 +1073,34 @@ class AllowlistHandler(BaseHandler): + if not runtime_policy_db_format: + return + +- session = get_session() +- # don't allow creating a new policy +- try: +- runtime_policy_count = session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).count() +- if runtime_policy_count != 1: +- web_util.echo_json_response( +- self, 409, f"Runtime policy with name {runtime_policy_name} does not already exist" +- ) +- logger.warning("Runtime policy with name %s does not already exist", runtime_policy_name) +- return +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- raise ++ with session_context() as session: ++ # don't allow creating a new policy ++ try: ++ runtime_policy_count = session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).count() ++ if runtime_policy_count != 1: ++ web_util.echo_json_response( ++ self, ++ 404, ++ f"Runtime policy with name {runtime_policy_name} does not already exist, use POST to create", ++ ) ++ logger.warning("Runtime policy with name %s does not already exist", runtime_policy_name) ++ return ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ raise + +- try: +- # Update the named runtime policy +- session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).update( +- runtime_policy_db_format # pyright: ignore +- ) +- session.commit() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy 
Error: %s", e) +- raise ++ try: ++ # Update the named runtime policy ++ session.query(VerifierAllowlist).filter_by(name=runtime_policy_name).update( ++ runtime_policy_db_format # pyright: ignore ++ ) ++ # session.commit() is automatically called by context manager ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ raise + +- web_util.echo_json_response(self, 201) +- logger.info("PUT returning 201") ++ web_util.echo_json_response(self, 201) ++ logger.info("PUT returning 201") + + def data_received(self, chunk: Any) -> None: + raise NotImplementedError() +@@ -1113,8 +1128,6 @@ class VerifyIdentityHandler(BaseHandler): + + This is useful for 3rd party tools and integrations to independently verify the state of an agent. + """ +- session = get_session() +- + # validate the parameters of our request + if self.request.uri is None: + web_util.echo_json_response(self, 400, "URI not specified") +@@ -1159,36 +1172,37 @@ class VerifyIdentityHandler(BaseHandler): + return + + # get the agent information from the DB +- agent = None +- try: +- agent = ( +- session.query(VerfierMain) +- .options( # type: ignore +- joinedload(VerfierMain.ima_policy).load_only( +- VerifierAllowlist.checksum, VerifierAllowlist.generator # pyright: ignore ++ with session_context() as session: ++ agent = None ++ try: ++ agent = ( ++ session.query(VerfierMain) ++ .options( # type: ignore ++ joinedload(VerfierMain.ima_policy).load_only( ++ VerifierAllowlist.checksum, VerifierAllowlist.generator # pyright: ignore ++ ) + ) ++ .filter_by(agent_id=agent_id) ++ .one_or_none() + ) +- .filter_by(agent_id=agent_id) +- .one_or_none() +- ) +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error for agent ID %s: %s", agent_id, e) + +- if agent is not None: +- agentAttestState = get_AgentAttestStates().get_by_agent_id(agent_id) +- failure = cloud_verifier_common.process_verify_identity_quote( +- agent, quote, nonce, hash_alg, agentAttestState +- ) +- if failure: +- failure_contexts = "; ".join(x.context for x in failure.events) +- web_util.echo_json_response(self, 200, "Success", {"valid": 0, "reason": failure_contexts}) +- logger.info("GET returning 200, but validation failed") ++ if agent is not None: ++ agentAttestState = get_AgentAttestStates().get_by_agent_id(agent_id) ++ failure = cloud_verifier_common.process_verify_identity_quote( ++ agent, quote, nonce, hash_alg, agentAttestState ++ ) ++ if failure: ++ failure_contexts = "; ".join(x.context for x in failure.events) ++ web_util.echo_json_response(self, 200, "Success", {"valid": 0, "reason": failure_contexts}) ++ logger.info("GET returning 200, but validation failed") ++ else: ++ web_util.echo_json_response(self, 200, "Success", {"valid": 1}) ++ logger.info("GET returning 200, validation successful") + else: +- web_util.echo_json_response(self, 200, "Success", {"valid": 1}) +- logger.info("GET returning 200, validation successful") +- else: +- web_util.echo_json_response(self, 404, "agent id not found") +- logger.info("GET returning 404, agaent not found") ++ web_util.echo_json_response(self, 404, "agent id not found") ++ logger.info("GET returning 404, agaent not found") + + def data_received(self, chunk: Any) -> None: + raise NotImplementedError() +@@ -1231,35 +1245,35 @@ class MbpolicyHandler(BaseHandler): + if not params_valid: + return + +- session = get_session() +- if mb_policy_name is None: +- try: +- names_mbpolicies = 
session.query(VerifierMbpolicy.name).all() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- web_util.echo_json_response(self, 500, "Failed to get names of mbpolicies") +- raise ++ with session_context() as session: ++ if mb_policy_name is None: ++ try: ++ names_mbpolicies = session.query(VerifierMbpolicy.name).all() ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ web_util.echo_json_response(self, 500, "Failed to get names of mbpolicies") ++ raise + +- names_response = [] +- for name in names_mbpolicies: +- names_response.append(name[0]) +- web_util.echo_json_response(self, 200, "Success", {"mbpolicy names": names_response}) ++ names_response = [] ++ for name in names_mbpolicies: ++ names_response.append(name[0]) ++ web_util.echo_json_response(self, 200, "Success", {"mbpolicy names": names_response}) + +- else: +- try: +- mbpolicy = session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).one() +- except NoResultFound: +- web_util.echo_json_response(self, 404, f"Measured boot policy {mb_policy_name} not found") +- return +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- web_util.echo_json_response(self, 500, "Failed to get mb_policy") +- raise ++ else: ++ try: ++ mbpolicy = session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).one() ++ except NoResultFound: ++ web_util.echo_json_response(self, 404, f"Measured boot policy {mb_policy_name} not found") ++ return ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ web_util.echo_json_response(self, 500, "Failed to get mb_policy") ++ raise + +- response = {} +- response["name"] = getattr(mbpolicy, "name", None) +- response["mb_policy"] = getattr(mbpolicy, "mb_policy", None) +- web_util.echo_json_response(self, 200, "Success", response) ++ response = {} ++ response["name"] = getattr(mbpolicy, "name", None) ++ response["mb_policy"] = getattr(mbpolicy, "mb_policy", None) ++ web_util.echo_json_response(self, 200, "Success", response) + + def delete(self) -> None: + """Delete a mb_policy +@@ -1271,45 +1285,44 @@ class MbpolicyHandler(BaseHandler): + if not params_valid or mb_policy_name is None: + return + +- session = get_session() +- try: +- mbpolicy = session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).one() +- except NoResultFound: +- web_util.echo_json_response(self, 404, f"Measured boot policy {mb_policy_name} not found") +- return +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- web_util.echo_json_response(self, 500, "Failed to get mb_policy") +- raise ++ with session_context() as session: ++ try: ++ mbpolicy = session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).one() ++ except NoResultFound: ++ web_util.echo_json_response(self, 404, f"Measured boot policy {mb_policy_name} not found") ++ return ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ web_util.echo_json_response(self, 500, "Failed to get mb_policy") ++ raise + +- try: +- agent = session.query(VerfierMain).filter_by(mb_policy_id=mbpolicy.id).one_or_none() +- except SQLAlchemyError as e: +- logger.error("SQLAlchemy Error: %s", e) +- raise +- if agent is not None: +- web_util.echo_json_response( +- self, +- 409, +- f"Can't delete mb_policy as it's currently in use by agent {agent.agent_id}", +- ) +- return ++ try: ++ agent = session.query(VerfierMain).filter_by(mb_policy_id=mbpolicy.id).one_or_none() ++ except SQLAlchemyError as e: ++ logger.error("SQLAlchemy Error: %s", e) ++ raise ++ 
if agent is not None:
++                web_util.echo_json_response(
++                    self,
++                    409,
++                    f"Can't delete mb_policy as it's currently in use by agent {agent.agent_id}",
++                )
++                return
+
+-        try:
+-            session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).delete()
+-            session.commit()
+-        except SQLAlchemyError as e:
+-            logger.error("SQLAlchemy Error: %s", e)
+-            session.close()
+-            web_util.echo_json_response(self, 500, f"Database error: {e}")
+-            raise
++            try:
++                session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).delete()
++                # session.commit() is automatically called by context manager
++            except SQLAlchemyError as e:
++                logger.error("SQLAlchemy Error: %s", e)
++                web_util.echo_json_response(self, 500, f"Database error: {e}")
++                raise
+
+-        # NOTE(kaifeng) 204 Can not have response body, but current helper
+-        # doesn't support this case.
+-        self.set_status(204)
+-        self.set_header("Content-Type", "application/json")
+-        self.finish()
+-        logger.info("DELETE returning 204 response for mb_policy: %s", mb_policy_name)
++            # NOTE(kaifeng) 204 Can not have response body, but current helper
++            # doesn't support this case.
++            self.set_status(204)
++            self.set_header("Content-Type", "application/json")
++            self.finish()
++            logger.info("DELETE returning 204 response for mb_policy: %s", mb_policy_name)
+
+     def __get_mb_policy_db_format(self, mb_policy_name: str) -> Dict[str, Any]:
+         """Get the measured boot policy from the request and return it in Db format"""
+@@ -1341,30 +1354,30 @@ class MbpolicyHandler(BaseHandler):
+         if not mb_policy_db_format:
+             return
+
+-        session = get_session()
+-        # don't allow overwritting
+-        try:
+-            mbpolicy_count = session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).count()
+-            if mbpolicy_count > 0:
+-                web_util.echo_json_response(
+-                    self, 409, f"Measured boot policy with name {mb_policy_name} already exists"
+-                )
+-                logger.warning("Measured boot policy with name %s already exists", mb_policy_name)
+-                return
+-        except SQLAlchemyError as e:
+-            logger.error("SQLAlchemy Error: %s", e)
+-            raise
++        with session_context() as session:
++            # don't allow overwritting
++            try:
++                mbpolicy_count = session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).count()
++                if mbpolicy_count > 0:
++                    web_util.echo_json_response(
++                        self, 409, f"Measured boot policy with name {mb_policy_name} already exists"
++                    )
++                    logger.warning("Measured boot policy with name %s already exists", mb_policy_name)
++                    return
++            except SQLAlchemyError as e:
++                logger.error("SQLAlchemy Error: %s", e)
++                raise
+
+-        try:
+-            # Add the data
+-            session.add(VerifierMbpolicy(**mb_policy_db_format))
+-            session.commit()
+-        except SQLAlchemyError as e:
+-            logger.error("SQLAlchemy Error: %s", e)
+-            raise
++            try:
++                # Add the data
++                session.add(VerifierMbpolicy(**mb_policy_db_format))
++                # session.commit() is automatically called by context manager
++            except SQLAlchemyError as e:
++                logger.error("SQLAlchemy Error: %s", e)
++                raise
+
+-        web_util.echo_json_response(self, 201)
+-        logger.info("POST returning 201")
++            web_util.echo_json_response(self, 201)
++            logger.info("POST returning 201")
+
+     def put(self) -> None:
+         """Update an mb_policy
+@@ -1381,32 +1394,32 @@
+         if not mb_policy_db_format:
+             return
+
+-        session = get_session()
+-        # don't allow creating a new policy
+-        try:
+-            mbpolicy_count = session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).count()
+-            if mbpolicy_count != 1:
+-                web_util.echo_json_response(
+-                    self, 409, f"Measured boot policy with name {mb_policy_name} does not already exist"
+-                )
+-                logger.warning("Measured boot policy with name %s does not already exist", mb_policy_name)
+-                return
+-        except SQLAlchemyError as e:
+-            logger.error("SQLAlchemy Error: %s", e)
+-            raise
++        with session_context() as session:
++            # don't allow creating a new policy
++            try:
++                mbpolicy_count = session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).count()
++                if mbpolicy_count != 1:
++                    web_util.echo_json_response(
++                        self, 409, f"Measured boot policy with name {mb_policy_name} does not already exist"
++                    )
++                    logger.warning("Measured boot policy with name %s does not already exist", mb_policy_name)
++                    return
++            except SQLAlchemyError as e:
++                logger.error("SQLAlchemy Error: %s", e)
++                raise
+
+-        try:
+-            # Update the named mb_policy
+-            session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).update(
+-                mb_policy_db_format  # pyright: ignore
+-            )
+-            session.commit()
+-        except SQLAlchemyError as e:
+-            logger.error("SQLAlchemy Error: %s", e)
+-            raise
++            try:
++                # Update the named mb_policy
++                session.query(VerifierMbpolicy).filter_by(name=mb_policy_name).update(
++                    mb_policy_db_format  # pyright: ignore
++                )
++                # session.commit() is automatically called by context manager
++            except SQLAlchemyError as e:
++                logger.error("SQLAlchemy Error: %s", e)
++                raise
+
+-        web_util.echo_json_response(self, 201)
+-        logger.info("PUT returning 201")
++            web_util.echo_json_response(self, 201)
++            logger.info("PUT returning 201")
+
+     def data_received(self, chunk: Any) -> None:
+         raise NotImplementedError()
+@@ -1460,17 +1473,18 @@ async def update_agent_api_version(agent: Dict[str, Any], timeout: float = 60.0)
+             return None
+
+         logger.info("Agent %s new API version %s is supported", agent_id, new_version)
+-        session = get_session()
+-        agent["supported_version"] = new_version
+
+-        # Remove keys that should not go to the DB
+-        agent_db = dict(agent)
+-        for key in exclude_db:
+-            if key in agent_db:
+-                del agent_db[key]
++        with session_context() as session:
++            agent["supported_version"] = new_version
+
+-        session.query(VerfierMain).filter_by(agent_id=agent_id).update(agent_db)  # pyright: ignore
+-        session.commit()
++            # Remove keys that should not go to the DB
++            agent_db = dict(agent)
++            for key in exclude_db:
++                if key in agent_db:
++                    del agent_db[key]
++
++            session.query(VerfierMain).filter_by(agent_id=agent_id).update(agent_db)  # pyright: ignore
++            # session.commit() is automatically called by context manager
+     else:
+         logger.warning("Agent %s new API version %s is not supported", agent_id, new_version)
+         return None
+@@ -1718,50 +1732,68 @@ async def notify_error(
+         revocation_notifier.notify(tosend)
+     if "agent" in notifiers:
+         verifier_id = config.get("verifier", "uuid", fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID)
+-        session = get_session()
+-        agents = session.query(VerfierMain).filter_by(verifier_id=verifier_id).all()
+-        futures = []
+-        loop = asyncio.get_event_loop()
+-        # Notify all agents asynchronously through a thread pool
+-        with ThreadPoolExecutor() as pool:
+-            for agent_db_obj in agents:
+-                if agent_db_obj.agent_id != agent["agent_id"]:
+-                    agent = _from_db_obj(agent_db_obj)
+-                    if agent["mtls_cert"] and agent["mtls_cert"] != "disabled":
+-                        agent["ssl_context"] = web_util.generate_agent_tls_context(
+-                            "verifier", agent["mtls_cert"], logger=logger
+-                        )
+-                    func = functools.partial(invoke_notify_error, agent, tosend, timeout=timeout)
+-                    futures.append(await loop.run_in_executor(pool, func))
+-        # Wait for all tasks complete in 60 seconds
+-        try:
+-            for f in asyncio.as_completed(futures, timeout=60):
+-                await f
+-        except asyncio.TimeoutError as e:
+-            logger.error("Timeout during notifying error to agents: %s", e)
++        with session_context() as session:
++            agents = session.query(VerfierMain).filter_by(verifier_id=verifier_id).all()
++            futures = []
++            loop = asyncio.get_event_loop()
++            # Notify all agents asynchronously through a thread pool
++            with ThreadPoolExecutor() as pool:
++                for agent_db_obj in agents:
++                    if agent_db_obj.agent_id != agent["agent_id"]:
++                        agent = _from_db_obj(agent_db_obj)
++                        if agent["mtls_cert"] and agent["mtls_cert"] != "disabled":
++                            agent["ssl_context"] = web_util.generate_agent_tls_context(
++                                "verifier", agent["mtls_cert"], logger=logger
++                            )
++                        func = functools.partial(invoke_notify_error, agent, tosend, timeout=timeout)
++                        futures.append(await loop.run_in_executor(pool, func))
++            # Wait for all tasks complete in 60 seconds
++            try:
++                for f in asyncio.as_completed(futures, timeout=60):
++                    await f
++            except asyncio.TimeoutError as e:
++                logger.error("Timeout during notifying error to agents: %s", e)
+
+
+ async def process_agent(
+     agent: Dict[str, Any], new_operational_state: int, failure: Failure = Failure(Component.INTERNAL, ["verifier"])
+ ) -> None:
+-    session = get_session()
+     try:  # pylint: disable=R1702
+         main_agent_operational_state = agent["operational_state"]
+         stored_agent = None
+-        try:
+-            stored_agent = (
+-                session.query(VerfierMain)
+-                .options(  # type: ignore
+-                    joinedload(VerfierMain.ima_policy).load_only(VerifierAllowlist.checksum)  # pyright: ignore
+-                )
+-                .options(  # type: ignore
+-                    joinedload(VerfierMain.mb_policy).load_only(VerifierMbpolicy.mb_policy)  # pyright: ignore
++
++        # First database operation - read agent data and extract all needed data within session context
++        ima_policy_data = {}
++        mb_policy_data = None
++        with session_context() as session:
++            try:
++                stored_agent = (
++                    session.query(VerfierMain)
++                    .options(  # type: ignore
++                        joinedload(VerfierMain.ima_policy)  # Load full IMA policy object including content
++                    )
++                    .options(  # type: ignore
++                        joinedload(VerfierMain.mb_policy).load_only(VerifierMbpolicy.mb_policy)  # pyright: ignore
++                    )
++                    .filter_by(agent_id=str(agent["agent_id"]))
++                    .first()
+                 )
+-                .filter_by(agent_id=str(agent["agent_id"]))
+-                .first()
+-            )
+-        except SQLAlchemyError as e:
+-            logger.error("SQLAlchemy Error for agent ID %s: %s", agent["agent_id"], e)
++
++                # Extract IMA policy data within session context to avoid DetachedInstanceError
++                if stored_agent and stored_agent.ima_policy:
++                    ima_policy_data = {
++                        "checksum": str(stored_agent.ima_policy.checksum),
++                        "name": stored_agent.ima_policy.name,
++                        "agent_id": str(stored_agent.agent_id),
++                        "ima_policy": stored_agent.ima_policy.ima_policy,  # Extract the large content too
++                    }
++
++                # Extract MB policy data within session context
++                if stored_agent and stored_agent.mb_policy:
++                    mb_policy_data = stored_agent.mb_policy.mb_policy
++
++            except SQLAlchemyError as e:
++                logger.error("SQLAlchemy Error for agent ID %s: %s", agent["agent_id"], e)
+
+         # if the stored agent could not be recovered from the database, stop polling
+         if not stored_agent:
+@@ -1775,7 +1807,10 @@ async def process_agent(
+             logger.warning("Agent %s terminated by user.", agent["agent_id"])
+             if agent["pending_event"] is not None:
+                 tornado.ioloop.IOLoop.current().remove_timeout(agent["pending_event"])
+-            verifier_db_delete_agent(session, agent["agent_id"])
++
++            # Second database operation - delete agent
++            with session_context() as session:
++                verifier_db_delete_agent(session, agent["agent_id"])
+             return
+
+     # if the user tells us to stop polling because the tenant quote check failed
+@@ -1808,11 +1843,16 @@
+         if not failure.recoverable or failure.highest_severity == MAX_SEVERITY_LABEL:
+             if agent["pending_event"] is not None:
+                 tornado.ioloop.IOLoop.current().remove_timeout(agent["pending_event"])
+-            for key in exclude_db:
+-                if key in agent:
+-                    del agent[key]
+-            session.query(VerfierMain).filter_by(agent_id=agent["agent_id"]).update(agent)  # pyright: ignore
+-            session.commit()
++
++            # Third database operation - update agent with failure state
++            with session_context() as session:
++                for key in exclude_db:
++                    if key in agent:
++                        del agent[key]
++                session.query(VerfierMain).filter_by(agent_id=agent["agent_id"]).update(
++                    agent  # type: ignore[arg-type]
++                )
++                # session.commit() is automatically called by context manager
+
+         # propagate all state, but remove none DB keys first (using exclude_db)
+         try:
+@@ -1821,18 +1861,18 @@
+             if key in agent_db:
+                 del agent_db[key]
+
+-        session.query(VerfierMain).filter_by(agent_id=agent_db["agent_id"]).update(agent_db)  # pyright: ignore
+-        session.commit()
++        # Fourth database operation - update agent state
++        with session_context() as session:
++            session.query(VerfierMain).filter_by(agent_id=agent_db["agent_id"]).update(agent_db)  # pyright: ignore
++            # session.commit() is automatically called by context manager
+     except SQLAlchemyError as e:
+         logger.error("SQLAlchemy Error for agent ID %s: %s", agent["agent_id"], e)
+
+     # Load agent's IMA policy
+-    runtime_policy = verifier_read_policy_from_cache(stored_agent)
++    runtime_policy = verifier_read_policy_from_cache(ima_policy_data)
+
+     # Get agent's measured boot policy
+-    mb_policy = None
+-    if stored_agent.mb_policy is not None:
+-        mb_policy = stored_agent.mb_policy.mb_policy
++    mb_policy = mb_policy_data
+
+     # If agent was in a failed state we check if we either stop polling
+     # or just add it again to the event loop
+@@ -1876,7 +1916,14 @@
+             )
+
+             pending = tornado.ioloop.IOLoop.current().call_later(
+-                interval, invoke_get_quote, agent, mb_policy, runtime_policy, False, timeout=timeout  # type: ignore  # due to python <3.9
++                # type: ignore  # due to python <3.9
++                interval,
++                invoke_get_quote,
++                agent,
++                mb_policy,
++                runtime_policy,
++                False,
++                timeout=timeout,
+             )
+             agent["pending_event"] = pending
+             return
+@@ -1911,7 +1958,14 @@
+                 next_retry,
+             )
+             tornado.ioloop.IOLoop.current().call_later(
+-                next_retry, invoke_get_quote, agent, mb_policy, runtime_policy, True, timeout=timeout  # type: ignore  # due to python <3.9
++                # type: ignore  # due to python <3.9
++                next_retry,
++                invoke_get_quote,
++                agent,
++                mb_policy,
++                runtime_policy,
++                True,
++                timeout=timeout,
+             )
+             return
+
+@@ -1980,9 +2034,9 @@ async def activate_agents(agents: List[VerfierMain], verifier_ip: str, verifier_
+
+
+ def get_agents_by_verifier_id(verifier_id: str) -> List[VerfierMain]:
+-    session = get_session()
+     try:
+-        return session.query(VerfierMain).filter_by(verifier_id=verifier_id).all()
++        with session_context() as session:
++            return session.query(VerfierMain).filter_by(verifier_id=verifier_id).all()
+     except SQLAlchemyError as e:
+         logger.error("SQLAlchemy Error: %s", e)
+     return []
+@@ -2007,20 +2061,20 @@ def main() -> None:
+         os.umask(0o077)
+
+     VerfierMain.metadata.create_all(engine, checkfirst=True)  # pyright: ignore
+-    session = get_session()
+-    try:
+-        query_all = session.query(VerfierMain).all()
+-        for row in query_all:
+-            if row.operational_state in states.APPROVED_REACTIVATE_STATES:
+-                row.operational_state = states.START  # pyright: ignore
+-        session.commit()
+-    except SQLAlchemyError as e:
+-        logger.error("SQLAlchemy Error: %s", e)
++    with session_context() as session:
++        try:
++            query_all = session.query(VerfierMain).all()
++            for row in query_all:
++                if row.operational_state in states.APPROVED_REACTIVATE_STATES:
++                    row.operational_state = states.START  # pyright: ignore
++            # session.commit() is automatically called by context manager
++        except SQLAlchemyError as e:
++            logger.error("SQLAlchemy Error: %s", e)
+
+-    num = session.query(VerfierMain.agent_id).count()
+-    if num > 0:
+-        agent_ids = session.query(VerfierMain.agent_id).all()
+-        logger.info("Agent ids in db loaded from file: %s", agent_ids)
++        num = session.query(VerfierMain.agent_id).count()
++        if num > 0:
++            agent_ids = session.query(VerfierMain.agent_id).all()
++            logger.info("Agent ids in db loaded from file: %s", agent_ids)
+
+     logger.info("Starting Cloud Verifier (tornado) on port %s, use <Ctrl-C> to stop", verifier_port)
+
+diff --git a/keylime/da/examples/sqldb.py b/keylime/da/examples/sqldb.py
+index 8efc84e..04a8afb 100644
+--- a/keylime/da/examples/sqldb.py
++++ b/keylime/da/examples/sqldb.py
+@@ -1,7 +1,10 @@
+ import time
++from contextlib import contextmanager
++from typing import Iterator
+
+ import sqlalchemy
+ import sqlalchemy.ext.declarative
++from sqlalchemy.orm import sessionmaker
+
+ from keylime import keylime_logging
+ from keylime.da.record import BaseRecordManagement, base_build_key_list
+@@ -45,23 +48,23 @@ class RecordManagement(BaseRecordManagement):
+         BaseRecordManagement.__init__(self, service)
+
+         self.engine = sqlalchemy.create_engine(self.ps_url._replace(fragment="").geturl(), pool_recycle=1800)
+-        sm = sqlalchemy.orm.sessionmaker()
+-        self.session = sqlalchemy.orm.scoped_session(sm)
+-        self.session.configure(bind=self.engine)
+-        TableBase.metadata.create_all(self.engine)
+-
+-    def agent_list_retrieval(self, record_prefix="auto", service="auto"):
+-        if record_prefix == "auto":
+-            record_prefix = ""
+-
+-        agent_list = []
++        self.SessionLocal = sessionmaker(bind=self.engine)
+
+-        recordtype = self.get_record_type(service)
+-        tbl = type2table(recordtype)
+-        for agentid in self.session.query(tbl.agentid).distinct():  # pylint: disable=no-member
+-            agent_list.append(agentid[0])
++        # Create tables if they don't exist
++        TableBase.metadata.create_all(self.engine)
+
+-        return agent_list
++    @contextmanager
++    def session_context(self) -> Iterator:
++        """Context manager for database sessions that ensures proper cleanup."""
++        session = self.SessionLocal()
++        try:
++            yield session
++            session.commit()
++        except Exception:
++            session.rollback()
++            raise
++        finally:
++            session.close()
+
+     def record_create(
+         self,
+@@ -84,8 +87,9 @@
+         d = {"time": recordtime, "agentid": agentid, "record": rcrd}
+
+         try:
+-            self.session.add((type2table(recordtype))(**d))  # pylint: disable=no-member
+-            self.session.commit()  # pylint: disable=no-member
++            with self.session_context() as session:
++                session.add((type2table(recordtype))(**d))
++                # session.commit() is automatically called by context manager
+         except Exception as e:
+             logger.error("Failed to create attestation record: %s", e)
+
+@@ -106,23 +110,23 @@
+         if f"{end_date}" == "auto":
+             end_date = self.end_of_times
+
+-        if self.only_last_record_wanted(start_date, end_date):
+-            attestion_record_rows = (
+-                self.session.query(tbl)  # pylint: disable=no-member
+-                .filter(tbl.agentid == record_identifier)
+-                .order_by(sqlalchemy.desc(tbl.time))
+-                .limit(1)
+-            )
+-
+-        else:
+-            attestion_record_rows = self.session.query(tbl).filter(  # pylint: disable=no-member
+-                tbl.agentid == record_identifier
+-            )
+-
+-        for row in attestion_record_rows:
+-            decoded_record_object = self.record_deserialize(row.record)
+-            self.record_signature_check(decoded_record_object, record_identifier)
+-            record_list.append(decoded_record_object)
++        with self.session_context() as session:
++            if self.only_last_record_wanted(start_date, end_date):
++                attestion_record_rows = (
++                    session.query(tbl)
++                    .filter(tbl.agentid == record_identifier)
++                    .order_by(sqlalchemy.desc(tbl.time))
++                    .limit(1)
++                )
++
++            else:
++                attestion_record_rows = session.query(tbl).filter(tbl.agentid == record_identifier)
++
++            for row in attestion_record_rows:
++                decoded_record_object = self.record_deserialize(row.record)
++                self.record_signature_check(decoded_record_object, record_identifier)
++                record_list.append(decoded_record_object)
++
+         return record_list
+
+     def build_key_list(self, agent_identifier, service="auto"):
+diff --git a/keylime/db/keylime_db.py b/keylime/db/keylime_db.py
+index 5620a28..aa49e51 100644
+--- a/keylime/db/keylime_db.py
++++ b/keylime/db/keylime_db.py
+@@ -1,7 +1,8 @@
+ import os
+ from configparser import NoOptionError
++from contextlib import contextmanager
+ from sqlite3 import Connection as SQLite3Connection
+-from typing import Any, Dict, Optional, cast
++from typing import Any, Iterator, Optional, cast
+
+ from sqlalchemy import create_engine, event
+ from sqlalchemy.engine import Engine
+@@ -22,90 +23,108 @@ def _set_sqlite_pragma(dbapi_connection: SQLite3Connection, _) -> None:
+         cursor.close()
+
+
+-class DBEngineManager:
+-    service: Optional[str]
+-
+-    def __init__(self) -> None:
+-        self.service = None
+-
+-    def make_engine(self, service: str) -> Engine:
+-        """
+-        To use: engine = self.make_engine('cloud_verifier')
+-        """
+-
+-        # Keep DB related stuff as it is, but read configuration from new
+-        # configs
+-        if service == "cloud_verifier":
+-            config_service = "verifier"
+-        else:
+-            config_service = service
+-
+-        self.service = service
+-
+-        try:
+-            p_sz_m_ovfl = config.get(config_service, "database_pool_sz_ovfl")
+-            p_sz, m_ovfl = p_sz_m_ovfl.split(",")
+-        except NoOptionError:
+-            p_sz = "5"
+-            m_ovfl = "10"
+-
+-        engine_args: Dict[str, Any] = {}
+-
+-        url = config.get(config_service, "database_url")
+-        if url:
+-            logger.info("database_url is set, using it to establish database connection")
+-
+-            # If the keyword sqlite is provided as the database url, use the
+-            # cv_data.sqlite for the verifier or the file reg_data.sqlite for
+-            # the registrar, located at the config.WORK_DIR directory
+-            if url == "sqlite":
++def make_engine(service: str, **engine_args: Any) -> Engine:
++    """Create a database engine for a keylime service."""
++    # Keep DB related stuff as it is, but read configuration from new
++    # configs
++    if service == "cloud_verifier":
++        config_service = "verifier"
++    else:
++        config_service = service
++
++    url = config.get(config_service, "database_url")
++    if url:
++        logger.info("database_url is set, using it to establish database connection")
++
++        # If the keyword sqlite is provided as the database url, use the
++        # cv_data.sqlite for the verifier or the file reg_data.sqlite for
++        # the registrar, located at the config.WORK_DIR directory
++        if url == "sqlite":
++            logger.info(
++                "database_url is set as 'sqlite' keyword, using default values to establish database connection"
++            )
++            if service == "cloud_verifier":
++                database = "cv_data.sqlite"
++            elif service == "registrar":
++                database = "reg_data.sqlite"
++            else:
++                logger.error("Tried to setup database access for unknown service '%s'", service)
++                raise Exception(f"Unknown service '{service}' for database setup")
++
++            database_file = os.path.abspath(os.path.join(config.WORK_DIR, database))
++            url = f"sqlite:///{database_file}"
++
++            kl_dir = os.path.dirname(os.path.abspath(database_file))
++            if not os.path.exists(kl_dir):
++                os.makedirs(kl_dir, 0o700)
++
++            engine_args["connect_args"] = {"check_same_thread": False}
++
++    if not url.count("sqlite:"):
++        # sqlite does not support setting pool size and max overflow, only
++        # read from the config when it is going to be used
++        try:
++            p_sz_m_ovfl = config.get(config_service, "database_pool_sz_ovfl")
++            p_sz, m_ovfl = p_sz_m_ovfl.split(",")
++            logger.info("database_pool_sz_ovfl is set, pool size = %s, max overflow = %s", p_sz, m_ovfl)
++        except NoOptionError:
++            p_sz = "5"
++            m_ovfl = "10"
+             logger.info(
+-                "database_url is set as 'sqlite' keyword, using default values to establish database connection"
++                "database_pool_sz_ovfl is not set, using default pool size = %s, max overflow = %s", p_sz, m_ovfl
+             )
+-            if service == "cloud_verifier":
+-                database = "cv_data.sqlite"
+-            elif service == "registrar":
+-                database = "reg_data.sqlite"
+-            else:
+-                logger.error("Tried to setup database access for unknown service '%s'", service)
+-                raise Exception(f"Unknown service '{service}' for database setup")
+-
+-            database_file = os.path.abspath(os.path.join(config.WORK_DIR, database))
+-            url = f"sqlite:///{database_file}"
+-
+-            kl_dir = os.path.dirname(os.path.abspath(database_file))
+-            if not os.path.exists(kl_dir):
+-                os.makedirs(kl_dir, 0o700)
+-
+-            engine_args["connect_args"] = {"check_same_thread": False}
+
+-        if not url.count("sqlite:"):
+-            engine_args["pool_size"] = int(p_sz)
+-            engine_args["max_overflow"] = int(m_ovfl)
+-            engine_args["pool_pre_ping"] = True
++        engine_args["pool_size"] = int(p_sz)
++        engine_args["max_overflow"] = int(m_ovfl)
++        engine_args["pool_pre_ping"] = True
+
+-        # Enable DB debugging
+-        if config.DEBUG_DB and config.INSECURE_DEBUG:
+-            engine_args["echo"] = True
++    # Enable DB debugging
++    if config.DEBUG_DB and config.INSECURE_DEBUG:
++        engine_args["echo"] = True
+
+-        engine = create_engine(url, **engine_args)
+-        return engine
++    engine = create_engine(url, **engine_args)
++    return engine
+
+
+ class SessionManager:
+     engine: Optional[Engine]
++    _scoped_session: Optional[scoped_session]
+
+     def __init__(self) -> None:
+         self.engine = None
++        self._scoped_session = None
+
+     def make_session(self, engine: Engine) -> Session:
+         """
+         To use: session = self.make_session(engine)
+         """
+         self.engine = engine
+-        my_session = scoped_session(sessionmaker())
++        if self._scoped_session is None:
++            self._scoped_session = scoped_session(sessionmaker())
+         try:
+-            my_session.configure(bind=self.engine)  # type: ignore
++            self._scoped_session.configure(bind=self.engine)  # type: ignore
++            self._scoped_session.configure(expire_on_commit=False)  # type: ignore
+         except SQLAlchemyError as err:
+             logger.error("Error creating SQL session manager %s", err)
+-        return cast(Session, my_session())
++        return cast(Session, self._scoped_session())
++
++    @contextmanager
++    def session_context(self, engine: Engine) -> Iterator[Session]:
++        """
++        Context manager for database sessions that ensures proper cleanup.
++        To use:
++            with session_manager.session_context(engine) as session:
++                # use session
++        """
++        session = self.make_session(engine)
++        try:
++            yield session
++            session.commit()
++        except Exception:
++            session.rollback()
++            raise
++        finally:
++            # Important: remove the session from the scoped session registry
++            # to prevent connection leaks with scoped_session
++            if self._scoped_session is not None:
++                self._scoped_session.remove()  # type: ignore[no-untyped-call]
+diff --git a/keylime/migrations/env.py b/keylime/migrations/env.py
+index ac98349..a1881f2 100644
+--- a/keylime/migrations/env.py
++++ b/keylime/migrations/env.py
+@@ -8,7 +8,7 @@ import sys
+
+ from alembic import context
+
+-from keylime.db.keylime_db import DBEngineManager
++from keylime.db.keylime_db import make_engine
+ from keylime.db.registrar_db import Base as RegistrarBase
+ from keylime.db.verifier_db import Base as VerifierBase
+
+@@ -74,7 +74,7 @@ def run_migrations_offline():
+         logger.info("Writing output to %s", file_)
+
+         with open(file_, "w", encoding="utf-8") as buffer:
+-            engine = DBEngineManager().make_engine(name)
++            engine = make_engine(name)
+             connection = engine.connect()
+             context.configure(
+                 connection=connection,
+@@ -102,7 +102,7 @@ def run_migrations_online():
+     engines = {}
+     for name in re.split(r",\s*", db_names):
+         engines[name] = rec = {}
+-        rec["engine"] = DBEngineManager().make_engine(name)
++        rec["engine"] = make_engine(name)
+
+     for name, rec in engines.items():
+         engine = rec["engine"]
+diff --git a/keylime/models/base/db.py b/keylime/models/base/db.py
+index dd47d63..0229765 100644
+--- a/keylime/models/base/db.py
++++ b/keylime/models/base/db.py
+@@ -41,13 +41,6 @@ class DBManager:
+
+         self._service = service
+
+-        try:
+-            p_sz_m_ovfl = config.get(config_service, "database_pool_sz_ovfl")
+-            p_sz, m_ovfl = p_sz_m_ovfl.split(",")
+-        except NoOptionError:
+-            p_sz = "5"
+-            m_ovfl = "10"
+-
+         engine_args: Dict[str, Any] = {}
+
+         url = config.get(config_service, "database_url")
+@@ -79,6 +72,21 @@
+                 engine_args["connect_args"] = {"check_same_thread": False}
+
+         if not url.count("sqlite:"):
++            # sqlite does not support setting pool size and max overflow, only
++            # read from the config when it is going to be used
++            try:
++                p_sz_m_ovfl = config.get(config_service, "database_pool_sz_ovfl")
++                p_sz, m_ovfl = p_sz_m_ovfl.split(",")
++                logger.info("database_pool_sz_ovfl is set, pool size = %s, max overflow = %s", p_sz, m_ovfl)
++            except NoOptionError:
++                p_sz = "5"
++                m_ovfl = "10"
++                logger.info(
++                    "database_pool_sz_ovfl is not set, using default pool size = %s, max overflow = %s",
++                    p_sz,
++                    m_ovfl,
++                )
++
+             engine_args["pool_size"] = int(p_sz)
+             engine_args["max_overflow"] = int(m_ovfl)
+             engine_args["pool_pre_ping"] = True
+diff --git a/keylime/models/base/persistable_model.py b/keylime/models/base/persistable_model.py
+index 18f7d0d..a779f0b 100644
+--- a/keylime/models/base/persistable_model.py
++++ b/keylime/models/base/persistable_model.py
+@@ -207,10 +207,16 @@ class PersistableModel(BasicModel, metaclass=PersistableModelMeta):
+             setattr(self._db_mapping_inst, name, field.data_type.db_dump(value, db_manager.engine.dialect))
+
+         with db_manager.session_context() as session:
+-            session.add(self._db_mapping_inst)
++            # Merge the potentially detached object into the new session
++            merged_instance = session.merge(self._db_mapping_inst)
++            session.add(merged_instance)
++            # Update our reference to the merged instance
++            self._db_mapping_inst = merged_instance  # pylint: disable=attribute-defined-outside-init
+
+         self.clear_changes()
+
+     def delete(self) -> None:
+         with db_manager.session_context() as session:
+-            session.delete(self._db_mapping_inst)  # type: ignore[no-untyped-call]
++            # Merge the potentially detached object into the new session before deleting
++            merged_instance = session.merge(self._db_mapping_inst)
++            session.delete(merged_instance)  # type: ignore[no-untyped-call]
+diff --git a/packit-ci.fmf b/packit-ci.fmf
+index 2d1e5e5..cb64faf 100644
+--- a/packit-ci.fmf
++++ b/packit-ci.fmf
+@@ -101,6 +101,7 @@ adjust:
+         - /regression/CVE-2023-3674
+         - /regression/issue-1380-agent-removed-and-re-added
+         - /regression/keylime-agent-option-override-through-envvar
++        - /regression/db-connection-leak-reproducer
+         - /sanity/keylime-secure_mount
+         - /sanity/opened-conf-files
+         - /upstream/run_keylime_tests
+diff --git a/test/test_verifier_db.py b/test/test_verifier_db.py
+index ad72fa6..aae8f8a 100644
+--- a/test/test_verifier_db.py
++++ b/test/test_verifier_db.py
+@@ -172,3 +172,102 @@ class TestVerfierDB(unittest.TestCase):
+
+     def tearDown(self):
+         self.session.close()
++
++    def test_11_relationship_access_after_session_commit(self):
++        """Test that relationships can be accessed after session commits (DetachedInstanceError fix)"""
++        # This test reproduces the problematic pattern from cloud_verifier_tornado.py
++        # where objects are loaded with joinedload and then accessed after session closes
++
++        # Create a new session manager and context (like in cloud_verifier_tornado.py)
++        session_manager = SessionManager()
++
++        # First, load the agent with eager loading for relationships
++        stored_agent = None
++        with session_manager.session_context(self.engine) as session:
++            stored_agent = (
++                session.query(VerfierMain)
++                .options(joinedload(VerfierMain.ima_policy))
++                .options(joinedload(VerfierMain.mb_policy))
++                .filter_by(agent_id=agent_id)
++                .first()
++            )
++            # Verify agent was loaded correctly
++            self.assertIsNotNone(stored_agent)
++            # session.commit() is automatically called by context manager when exiting
++
++        # Now verify we can access relationships AFTER the session has been closed
++        # This would previously trigger DetachedInstanceError
++
++        # Ensure stored_agent is not None before proceeding
++        assert stored_agent is not None
++
++        # Test accessing ima_policy relationship
++        self.assertIsNotNone(stored_agent.ima_policy)
++        assert stored_agent.ima_policy is not None  # Type narrowing for linter
++        self.assertEqual(stored_agent.ima_policy.name, "test-allowlist")
++        # checksum is not set in test data
++        self.assertEqual(stored_agent.ima_policy.checksum, None)
++
++        # Test accessing the ima_policy.ima_policy attribute (similar to verifier_read_policy_from_cache)
++        ima_policy_content = stored_agent.ima_policy.ima_policy
++        self.assertEqual(ima_policy_content, test_allowlist_data["ima_policy"])
++
++        # Test accessing mb_policy relationship
++        self.assertIsNotNone(stored_agent.mb_policy)
++        assert stored_agent.mb_policy is not None  # Type narrowing for linter
++        self.assertEqual(stored_agent.mb_policy.name, "test-mbpolicy")
++
++        # Test accessing the mb_policy.mb_policy attribute (similar to process_agent function)
++        mb_policy_content = stored_agent.mb_policy.mb_policy
++        self.assertEqual(mb_policy_content, test_mbpolicy_data["mb_policy"])
++
++        # Test that we can access these relationships multiple times without issues
++        for _ in range(3):
++            self.assertIsNotNone(stored_agent.ima_policy.ima_policy)
++            self.assertIsNotNone(stored_agent.mb_policy.mb_policy)
++
++    def test_12_persistable_model_cross_session_fix(self):
++        """Test that PersistableModel can handle cross-session operations safely"""
++        # This test would previously fail with DetachedInstanceError before the fix
++        # Note: This is a conceptual test since we don't have actual PersistableModel
++        # subclasses in the test environment, but demonstrates the pattern
++
++        # Simulate creating a SQLAlchemy object in one session
++        session_manager = SessionManager()
++
++        # Load an object in one session context
++        test_agent = None
++        with session_manager.session_context(self.engine) as session:
++            test_agent = session.query(VerfierMain).filter_by(agent_id=agent_id).first()
++            self.assertIsNotNone(test_agent)
++            # Session closes here
++
++        # Ensure test_agent is not None before proceeding
++        assert test_agent is not None
++
++        # Now simulate using this object in a different session context
++        # This tests the pattern where PersistableModel would use session.add() or session.delete()
++        # on a cross-session object
++        with session_manager.session_context(self.engine) as session:
++            # Before the fix, this would cause DetachedInstanceError
++            # The fix uses session.merge() to handle detached objects safely
++            merged_agent = session.merge(test_agent)
++            assert merged_agent is not None  # Type narrowing for linter
++
++            # Test that we can modify and save the merged object
++            original_port = merged_agent.port
++            # Use setattr to avoid linter issues with Column assignment
++            setattr(merged_agent, "port", 9999)
++            session.add(merged_agent)
++            # session.commit() called automatically by context manager
++
++        # Verify the change was persisted
++        with session_manager.session_context(self.engine) as session:
++            updated_agent = session.query(VerfierMain).filter_by(agent_id=agent_id).first()
++            assert updated_agent is not None  # Type narrowing for linter
++            self.assertEqual(updated_agent.port, 9999)
++
++            # Restore original value
++            # Use setattr to avoid linter issues
++            setattr(updated_agent, "port", original_port)
++            session.add(updated_agent)
diff --git a/SOURCES/0008-mb-support-EV_EFI_HANDOFF_TABLES-events-on-PCR1.patch b/SOURCES/0008-mb-support-EV_EFI_HANDOFF_TABLES-events-on-PCR1.patch
new file mode 100644
index 0000000..80fdde9
--- /dev/null
+++ b/SOURCES/0008-mb-support-EV_EFI_HANDOFF_TABLES-events-on-PCR1.patch
@@ -0,0 +1,29 @@
+From d14e0a132cfedd081bffa7a990b9401d5e257cac Mon Sep 17 00:00:00 2001
+From: Sergio Correia
+Date: Fri, 8 Aug 2025 16:40:01 +0100
+Subject: [PATCH 8/9] mb: support EV_EFI_HANDOFF_TABLES events on PCR1
+
+Allow EV_EFI_HANDOFF_TABLES events on PCR1 alongside the existing
+EV_EFI_HANDOFF_TABLES2 support to handle different firmware
+implementations, in the example policy.
+
+Signed-off-by: Sergio Correia
+---
+ keylime/mba/elchecking/example.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/keylime/mba/elchecking/example.py b/keylime/mba/elchecking/example.py
+index 2c6f699..a3d918a 100644
+--- a/keylime/mba/elchecking/example.py
++++ b/keylime/mba/elchecking/example.py
+@@ -185,6 +185,7 @@ class Example(policies.Policy):
+         # We only expect one EV_NO_ACTION event at the start.
+         dispatcher.set((0, "EV_NO_ACTION"), tests.OnceTest(tests.AcceptAll()))
+         dispatcher.set((1, "EV_CPU_MICROCODE"), tests.OnceTest(tests.AcceptAll()))
++        dispatcher.set((1, "EV_EFI_HANDOFF_TABLES"), tests.OnceTest(tests.AcceptAll()))
+         dispatcher.set((1, "EV_EFI_HANDOFF_TABLES2"), tests.OnceTest(tests.AcceptAll()))
+         dispatcher.set((0, "EV_S_CRTM_VERSION"), events_final.get("s_crtms"))
+         dispatcher.set((0, "EV_EFI_PLATFORM_FIRMWARE_BLOB"), events_final.get("platform_firmware_blobs"))
+--
+2.47.3
+
diff --git a/SOURCES/0008-verifier-should-read-parameters-from-verifier.conf-o.patch b/SOURCES/0008-verifier-should-read-parameters-from-verifier.conf-o.patch
deleted file mode 100644
index efb3a2c..0000000
--- a/SOURCES/0008-verifier-should-read-parameters-from-verifier.conf-o.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From aa891f456d5cf0fc23e16d87fb28efc79a0d8073 Mon Sep 17 00:00:00 2001
-From: Marcio Silva
-Date: Wed, 23 Aug 2023 11:24:59 -0300
-Subject: [PATCH 8/8] verifier: should read parameters from verifier.conf only
-
-Single-line fix for #1446
-
-The verifier should read "durable attestation" backend imports from
-verifier.conf (and NOT from registrar.conf)
-
-Signed-off-by: Marcio Silva
----
- keylime/cloud_verifier_tornado.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/keylime/cloud_verifier_tornado.py b/keylime/cloud_verifier_tornado.py
-index d65cb63..261022a 100644
---- a/keylime/cloud_verifier_tornado.py
-+++ b/keylime/cloud_verifier_tornado.py
-@@ -51,7 +51,7 @@ except SQLAlchemyError as err:
-     sys.exit(1)
- 
- try:
--    rmc = record.get_record_mgt_class(config.get("registrar", "durable_attestation_import", fallback=""))
-+    rmc = record.get_record_mgt_class(config.get("verifier", "durable_attestation_import", fallback=""))
-     if rmc:
-         rmc = rmc("verifier")
- except record.RecordManagementException as rme:
---
-2.41.0
-
diff --git a/SOURCES/0009-CVE-2023-38201.patch b/SOURCES/0009-CVE-2023-38201.patch
deleted file mode 100644
index cd1847d..0000000
--- a/SOURCES/0009-CVE-2023-38201.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From 9e5ac9f25cd400b16d5969f531cee28290543f2a Mon Sep 17 00:00:00 2001
-From: Marcio Silva
-Date: Wed, 12 Jul 2023 12:05:47 -0300
-Subject: [PATCH] Fix for CVE-2023-38201 (Security Advisory
- GHSA-f4r5-q63f-gcww)
-
-In addition to remove the offending message, this patch also ensures
-deletion of an agent's record from the database in case of failure after
-a single attempt.
-
-Signed-off-by: Marcio Silva
----
- keylime/registrar_common.py | 15 +++++++++++++--
- 1 file changed, 13 insertions(+), 2 deletions(-)
-
-diff --git a/keylime/registrar_common.py b/keylime/registrar_common.py
-index 1fd97cd0c..7f15ae430 100644
---- a/keylime/registrar_common.py
-+++ b/keylime/registrar_common.py
-@@ -250,7 +250,9 @@ def get_network_params(
-     try:
-         port = int(port)
-         if port < 1 or port > 65535:
--            logger.warning("Contact port for agent %s is not a number between 1 and got: %s.", agent_id, port)
-+            logger.warning(
-+                "Contact port for agent %s is not a number between 1 and 65535 got: %s.", agent_id, port
-+            )
-             port = None
-     except ValueError:
-         logger.warning("Contact port for agent %s is not a valid number got: %s.", agent_id, port)
-@@ -447,7 +449,16 @@ def do_PUT(self) -> None:
-                 logger.error("SQLAlchemy Error: %s", e)
-                 raise
-         else:
--            raise Exception(f"Auth tag {auth_tag} does not match expected value {ex_mac}")
-+            if agent_id and session.query(RegistrarMain).filter_by(agent_id=agent_id).delete():
-+                try:
-+                    session.commit()
-+                except SQLAlchemyError as e:
-+                    logger.error("SQLAlchemy Error: %s", e)
-+                    raise
-+
-+            raise Exception(
-+                f"Auth tag {auth_tag} for agent {agent_id} does not match expected value. The agent has been deleted from database, and a restart of it will be required"
-+            )
- 
-         web_util.echo_json_response(self, 200, "Success")
-         logger.info("PUT activated: %s", agent_id)
diff --git a/SOURCES/0009-mb-support-vendor_db-as-logged-by-newer-shim-version.patch b/SOURCES/0009-mb-support-vendor_db-as-logged-by-newer-shim-version.patch
new file mode 100644
index 0000000..bcc024e
--- /dev/null
+++ b/SOURCES/0009-mb-support-vendor_db-as-logged-by-newer-shim-version.patch
@@ -0,0 +1,356 @@
+From 607b97ac8d414cb57b1ca89925631d41bd7ac04c Mon Sep 17 00:00:00 2001
+From: Sergio Correia
+Date: Fri, 8 Aug 2025 16:41:54 +0100
+Subject: [PATCH 9/9] mb: support vendor_db as logged by newer shim versions
+
+- Updated example policy to properly handle different event structures
+  for vendor_db validation:
+  - KeySubsetMulti for EV_EFI_VARIABLE_DRIVER_CONFIG (has SignatureType field)
+  - SignatureSetMember for EV_EFI_VARIABLE_AUTHORITY (direct signature format)
+
+- Added method to extract vendor_db from EV_EFI_VARIABLE_AUTHORITY events
+  in reference state generation (keylime-policy create measured-boot and
+  the legacy create_mb_refstate script)
+- Made vendor_db optional for backward compatibility
+
+This fixes attestation failures when vendor_db variables are present but
+missing from reference states or validated with incorrect test types.
+
+See: https://github.com/rhboot/shim/pull/728
+Signed-off-by: Sergio Correia
+---
+ keylime/mba/elchecking/example.py  |  45 +++++++++
+ keylime/policy/create_mb_policy.py |  30 ++++++
+ scripts/create_mb_refstate         |  30 ++++++
+ test/test_create_mb_policy.py      | 142 +++++++++++++++++++++++++++++
+ 4 files changed, 247 insertions(+)
+
+diff --git a/keylime/mba/elchecking/example.py b/keylime/mba/elchecking/example.py
+index a3d918a..5a933ac 100644
+--- a/keylime/mba/elchecking/example.py
++++ b/keylime/mba/elchecking/example.py
+@@ -21,6 +21,7 @@ from . import policies, tests
+ # kek - list of allowed KEK keys
+ # db - list of allowed db keys
+ # dbx - list of required dbx keys
++# vendor_db - list of allowed vendor_db keys (optional, for newer shim versions)
+ # mokdig - list of allowed digests of MoKList (PCR 14 EV_IPL)
+ # mokxdig - list of allowed digests of MoKListX (PCR 14 EV_IPL)
+ # kernels - list of allowed {
+@@ -121,6 +122,10 @@ class Example(policies.Policy):
+         if req not in refstate:
+             raise Exception(f"refstate lacks {req}")
+
++        # vendor_db is optional for backward compatibility
++        if "vendor_db" not in refstate:
++            refstate["vendor_db"] = []
++
+         dispatcher = tests.Dispatcher(("PCRIndex", "EventType"))
+         vd_driver_config = tests.VariableDispatch()
+         vd_authority = tests.VariableDispatch()
+@@ -268,6 +273,34 @@ class Example(policies.Policy):
+             "db",
+             db_test,
+         )
++        # Support vendor_db as logged by newer shim versions
++        # See: https://github.com/rhboot/shim/pull/728
++        if not has_secureboot and not refstate["vendor_db"]:
++            vendor_db_test = tests.OnceTest(tests.AcceptAll())
++        else:
++            vendor_db_test = tests.OnceTest(
++                tests.Or(
++                    tests.KeySubsetMulti(
++                        ["a159c0a5-e494-a74a-87b5-ab155c2bf072", "2616c4c1-4c50-9240-aca9-41f936934328"],
++                        sigs_strip0x(refstate["vendor_db"]),
++                    ),
++                    tests.KeySubsetMulti(
++                        ["a5c059a1-94e4-4aa7-87b5-ab155c2bf072", "c1c41626-504c-4092-aca9-41f936934328"],
++                        sigs_strip0x(refstate["vendor_db"]),
++                    ),
++                )
++            )
++
++        vd_driver_config.set(
++            "cbb219d7-3a3d-9645-a3bc-dad00e67656f",
++            "vendor_db",
++            vendor_db_test,
++        )
++        vd_driver_config.set(
++            "d719b2cb-3d3a-4596-a3bc-dad00e67656f",
++            "vendor_db",
++            vendor_db_test,
++        )
+
+         if not has_secureboot and not refstate["dbx"]:
+             dbx_test = tests.OnceTest(tests.AcceptAll())
+@@ -295,6 +328,18 @@ class Example(policies.Policy):
+             vd_db_test = tests.OnceTest(tests.AcceptAll())
+         vd_authority.set("cbb219d7-3a3d-9645-a3bc-dad00e67656f", "db", vd_db_test)
+         vd_authority.set("d719b2cb-3d3a-4596-a3bc-dad00e67656f", "db", vd_db_test)
++        # Support vendor_db as logged by newer shim versions in EV_EFI_VARIABLE_AUTHORITY events
++        # See: https://github.com/rhboot/shim/pull/728
++        # EV_EFI_VARIABLE_AUTHORITY events have different structure than EV_EFI_VARIABLE_DRIVER_CONFIG
++        # They contain direct signature data without SignatureType field
++        if not has_secureboot and not refstate["vendor_db"]:
++            vendor_db_authority_test = tests.OnceTest(tests.AcceptAll())
++        else:
++            vendor_db_authority_test = tests.OnceTest(
++                tests.IterateTest(tests.SignatureSetMember(sigs_strip0x(refstate["vendor_db"])))
++            )
++        vd_authority.set("cbb219d7-3a3d-9645-a3bc-dad00e67656f", "vendor_db", vendor_db_authority_test)
++        vd_authority.set("d719b2cb-3d3a-4596-a3bc-dad00e67656f", "vendor_db", vendor_db_authority_test)
+         # Accept all SbatLevels of the Shim, because we already checked the hash of the Shim itself.
+         vd_sbat_level_test = tests.OnceTest(tests.AcceptAll())
+         vd_authority.set("50ab5d60-46e0-0043-abb6-3dd810dd8b23", "SbatLevel", vd_sbat_level_test)
+diff --git a/keylime/policy/create_mb_policy.py b/keylime/policy/create_mb_policy.py
+index 859e652..b2b48f7 100644
+--- a/keylime/policy/create_mb_policy.py
++++ b/keylime/policy/create_mb_policy.py
+@@ -93,6 +93,35 @@ def get_keys(events: List[Dict[str, Any]]) -> Dict[str, List[Any]]:
+     return out
+ 
+ 
++def get_vendor_db(events: List[Dict[str, Any]]) -> Dict[str, List[Any]]:
++    """Get vendor_db signatures from EV_EFI_VARIABLE_AUTHORITY events."""
++    out: Dict[str, List[Any]] = {"vendor_db": []}
++
++    for event in events:
++        if "EventType" not in event:
++            continue
++        if event["EventType"] != "EV_EFI_VARIABLE_AUTHORITY":
++            continue
++        if "Event" not in event or "UnicodeName" not in event["Event"]:
++            continue
++
++        event_name = event["Event"]["UnicodeName"].lower()
++        if event_name == "vendor_db":
++            data = None
++            if "VariableData" in event["Event"]:
++                data = event["Event"]["VariableData"]
++
++            if data is not None:
++                # VariableData for EV_EFI_VARIABLE_AUTHORITY is a list of signatures
++                for entry in data:
++                    if "SignatureOwner" in entry and "SignatureData" in entry:
++                        out["vendor_db"].append(
++                            {"SignatureOwner": entry["SignatureOwner"], "SignatureData": f"0x{entry['SignatureData']}"}
++                        )
++
++    return out
++
++
+ def get_kernel(events: List[Dict[str, Any]], secure_boot: bool) -> Dict[str, List[Dict[str, Any]]]:
+     """Extract digest for Shim, Grub, Linux Kernel and initrd."""
+     out = []
+@@ -259,6 +288,7 @@ def create_mb_refstate(args: argparse.Namespace) -> Optional[Dict[str, object]]:
+             }
+         ],
+         **get_keys(events),
++        **get_vendor_db(events),
+         **get_mok(events),
+         **get_kernel(events, has_secureboot),
+     }
+diff --git a/scripts/create_mb_refstate b/scripts/create_mb_refstate
+index 23cafb9..c98e61d 100755
+--- a/scripts/create_mb_refstate
++++ b/scripts/create_mb_refstate
+@@ -78,6 +78,35 @@ def get_keys(events):
+     return out
+ 
+ 
++def get_vendor_db(events):
++    """Get vendor_db signatures from EV_EFI_VARIABLE_AUTHORITY events."""
++    out = {"vendor_db": []}
++
++    for event in events:
++        if "EventType" not in event:
++            continue
++        if event["EventType"] != "EV_EFI_VARIABLE_AUTHORITY":
++            continue
++        if "Event" not in event or "UnicodeName" not in event["Event"]:
++            continue
++
++        event_name = event["Event"]["UnicodeName"].lower()
++        if event_name == "vendor_db":
++            data = None
++            if "VariableData" in event["Event"]:
++                data = event["Event"]["VariableData"]
++
++            if data is not None:
++                # VariableData for EV_EFI_VARIABLE_AUTHORITY is a list of signatures
++                for entry in data:
++                    if "SignatureOwner" in entry and "SignatureData" in entry:
++                        out["vendor_db"].append(
++                            {"SignatureOwner": entry["SignatureOwner"], "SignatureData": f"0x{entry['SignatureData']}"}
++                        )
++
++    return out
++
++
+ def get_kernel(events, secure_boot):
+     """
+     Extract digest for Shim, Grub, Linux Kernel and initrd.
+@@ -197,6 +226,7 @@ def main():
+             }
+         ],
+         **get_keys(events),
++        **get_vendor_db(events),
+         **get_mok(events),
+         **get_kernel(events, has_secureboot),
+     }
+diff --git a/test/test_create_mb_policy.py b/test/test_create_mb_policy.py
+index b00d8e7..cd32bda 100644
+--- a/test/test_create_mb_policy.py
++++ b/test/test_create_mb_policy.py
+@@ -364,6 +364,148 @@ class CreateMeasuredBootPolicy_Test(unittest.TestCase):
+         for c in test_cases:
+             self.assertDictEqual(create_mb_policy.get_mok(c["events"]), c["expected"])
+ 
++    def test_get_vendor_db(self):
++        test_cases = [
++            {"events": [], "expected": {"vendor_db": []}},
++            # No EV_EFI_VARIABLE_AUTHORITY events.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Event": {"UnicodeName": "vendor_db", "VariableData": []},
++                    }
++                ],
++                "expected": {"vendor_db": []},
++            },
++            # Good vendor_db event with EV_EFI_VARIABLE_AUTHORITY.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_AUTHORITY",
++                        "Event": {
++                            "UnicodeName": "vendor_db",
++                            "VariableData": [
++                                {
++                                    "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
++                                    "SignatureData": "sig-data-1",
++                                }
++                            ],
++                        },
++                    }
++                ],
++                "expected": {
++                    "vendor_db": [
++                        {"SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b", "SignatureData": "0xsig-data-1"}
++                    ]
++                },
++            },
++            # Multiple vendor_db signatures.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_AUTHORITY",
++                        "Event": {
++                            "UnicodeName": "vendor_db",
++                            "VariableData": [
++                                {
++                                    "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
++                                    "SignatureData": "sig-data-1",
++                                },
++                                {
++                                    "SignatureOwner": "77fa9abd-0359-4d32-bd60-28f4e78f784b",
++                                    "SignatureData": "sig-data-2",
++                                },
++                            ],
++                        },
++                    }
++                ],
++                "expected": {
++                    "vendor_db": [
++                        {"SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b", "SignatureData": "0xsig-data-1"},
++                        {"SignatureOwner": "77fa9abd-0359-4d32-bd60-28f4e78f784b", "SignatureData": "0xsig-data-2"},
++                    ]
++                },
++            },
++            # Missing EventType.
++            {
++                "events": [
++                    {
++                        "Event": {
++                            "UnicodeName": "vendor_db",
++                            "VariableData": [
++                                {
++                                    "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
++                                    "SignatureData": "sig-data-1",
++                                }
++                            ],
++                        }
++                    }
++                ],
++                "expected": {"vendor_db": []},
++            },
++            # Wrong EventType.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
++                        "Event": {
++                            "UnicodeName": "vendor_db",
++                            "VariableData": [
++                                {
++                                    "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
++                                    "SignatureData": "sig-data-1",
++                                }
++                            ],
++                        },
++                    }
++                ],
++                "expected": {"vendor_db": []},
++            },
++            # Missing Event.
++            {
++                "events": [{"EventType": "EV_EFI_VARIABLE_AUTHORITY"}],
++                "expected": {"vendor_db": []},
++            },
++            # Missing UnicodeName.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_AUTHORITY",
++                        "Event": {
++                            "VariableData": [
++                                {
++                                    "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
++                                    "SignatureData": "sig-data-1",
++                                }
++                            ]
++                        },
++                    }
++                ],
++                "expected": {"vendor_db": []},
++            },
++            # Wrong UnicodeName.
++            {
++                "events": [
++                    {
++                        "EventType": "EV_EFI_VARIABLE_AUTHORITY",
++                        "Event": {
++                            "UnicodeName": "db",
++                            "VariableData": [
++                                {
++                                    "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
++                                    "SignatureData": "sig-data-1",
++                                }
++                            ],
++                        },
++                    }
++                ],
++                "expected": {"vendor_db": []},
++            },
++        ]
++
++        for c in test_cases:
++            self.assertDictEqual(create_mb_policy.get_vendor_db(c["events"]), c["expected"])
++
+     def test_get_kernel(self):
+         test_cases = [
+             {"events": [], "secureboot": False, "expected": {}},
+--
+2.47.3
+
diff --git a/SOURCES/0010-CVE-2023-38200.patch b/SOURCES/0010-CVE-2023-38200.patch
deleted file mode 100644
index 7c06151..0000000
--- a/SOURCES/0010-CVE-2023-38200.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From e17d5a6a47c1405a799a06754d3e905856e3035d Mon Sep 17 00:00:00 2001
-From: florian <264356+flozilla@users.noreply.github.com>
-Date: Tue, 11 Jul 2023 21:31:27 +0200
-Subject: [PATCH 10/10] CVE-2023-38200
-
-Extend Registrar SSL socket to be non-blocking
-
-Fixes: CVE-2023-38200
-
-Upstream:
- - https://github.com/keylime/keylime/commit/c68d8f0b7
- - https://github.com/keylime/keylime/commit/27d515f4b
----
- keylime/registrar_common.py | 23 ++++++++++++++++++++++-
- 1 file changed, 22 insertions(+), 1 deletion(-)
-
-diff --git a/keylime/registrar_common.py b/keylime/registrar_common.py
-index d1d20dd..6441e3b 100644
---- a/keylime/registrar_common.py
-+++ b/keylime/registrar_common.py
-@@ -2,8 +2,10 @@ import base64
- import http.server
- import ipaddress
- import os
-+import select
- import signal
- import socket
-+import ssl
- import sys
- import threading
- from http.server import BaseHTTPRequestHandler, HTTPServer
-@@ -77,6 +79,25 @@ class BaseHandler(BaseHTTPRequestHandler, SessionManager):
- 
- 
- class ProtectedHandler(BaseHandler):
-+    def handle(self) -> None:
-+        """Need to perform SSL handshake here, as
-+        do_handshake_on_connect=False for non-blocking SSL socket"""
-+        while True:
-+            try:
-+                self.request.do_handshake()
-+                break
-+            except ssl.SSLWantReadError:
-+                select.select([self.request], [], [])
-+            except ssl.SSLWantWriteError:
-+                select.select([], [self.request], [])
-+            except ssl.SSLError as e:
-+                logger.error("SSL connection error: %s", e)
-+                return
-+            except Exception as e:
-+                logger.error("General communication failure: %s", e)
-+                return
-+        BaseHTTPRequestHandler.handle(self)
-+
-     def do_HEAD(self) -> None:
-         """HEAD not supported"""
-         web_util.echo_json_response(self, 405, "HEAD not supported")
-@@ -494,7 +515,7 @@ def start(host: str, tlsport: int, port: int) -> None:
-     protected_server = RegistrarServer((host, tlsport), ProtectedHandler)
-     context = web_util.init_mtls("registrar", logger=logger)
-     if context is not None:
--        protected_server.socket = context.wrap_socket(protected_server.socket, server_side=True)
-+        protected_server.socket = context.wrap_socket(protected_server.socket, server_side=True, do_handshake_on_connect=False)
-     thread_protected_server = threading.Thread(target=protected_server.serve_forever)
- 
-     # Set up the unprotected registrar server
---
-2.41.0
-
diff --git a/SOURCES/0010-verifier-Gracefully-shutdown-on-signal.patch b/SOURCES/0010-verifier-Gracefully-shutdown-on-signal.patch
new file mode 100644
index 0000000..df7bb23
--- /dev/null
+++ b/SOURCES/0010-verifier-Gracefully-shutdown-on-signal.patch
@@ -0,0 +1,42 @@
+From 1b7191098ca3f6d72c6ad218564ae0938a87efd4 Mon Sep 17 00:00:00 2001
+From: Anderson Toshiyuki Sasaki
+Date: Mon, 18 Aug 2025 12:22:55 +0000
+Subject: [PATCH 10/13] verifier: Gracefully shutdown on signal
+
+Wait for the processes to finish when interrupted by a signal. Do not
+call exit(0) in the signal handler.
+
+Assisted-by: Claude 4 Sonnet
+Signed-off-by: Anderson Toshiyuki Sasaki
+---
+ keylime/cloud_verifier_tornado.py | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/keylime/cloud_verifier_tornado.py b/keylime/cloud_verifier_tornado.py
+index 7553ac8..7065661 100644
+--- a/keylime/cloud_verifier_tornado.py
++++ b/keylime/cloud_verifier_tornado.py
+@@ -2138,7 +2138,7 @@ def main() -> None:
+             revocation_notifier.stop_broker()
+         for p in processes:
+             p.join()
+-        sys.exit(0)
++        # Do not call sys.exit(0) here as it interferes with multiprocessing cleanup
+
+     signal.signal(signal.SIGINT, sig_handler)
+     signal.signal(signal.SIGTERM, sig_handler)
+@@ -2159,3 +2159,11 @@ def main() -> None:
+         process = Process(target=server_process, args=(task_id, active_agents))
+         process.start()
+         processes.append(process)
++
++    # Wait for all worker processes to complete
++    try:
++        for p in processes:
++            p.join()
++    except KeyboardInterrupt:
++        # Signal handler will take care of cleanup
++        pass
+--
+2.47.3
+
diff --git a/SOURCES/0011-Automatically-update-agent-API-version.patch b/SOURCES/0011-Automatically-update-agent-API-version.patch
deleted file mode 100644
index c87b309..0000000
--- a/SOURCES/0011-Automatically-update-agent-API-version.patch
+++ /dev/null
@@ -1,244 +0,0 @@
-From b0cf69c9db20eb319ea2e90c22f500e09b704224 Mon Sep 17 00:00:00 2001
-From: Anderson Toshiyuki Sasaki
-Date: Wed, 23 Aug 2023 16:24:15 +0200
-Subject: [PATCH] Implement automatic agent API version bump
-
-Automatically update the agent supported API version in the database if
-the agent is updated and its API version is bumped.
-
-Previously, if an agent was added to a verifier while it used an old API
-version, and then it is updated with an API version bump, the
-attestation would fail as the verifier would try to reach the agent
-using the old API version.
- -Fixes #1457 - -Signed-off-by: Anderson Toshiyuki Sasaki ---- - keylime/cloud_verifier_tornado.py | 185 +++++++++++++++++++++++++++--- - 1 file changed, 167 insertions(+), 18 deletions(-) - -diff --git a/keylime/cloud_verifier_tornado.py b/keylime/cloud_verifier_tornado.py -index 261022ac6..31e6f7159 100644 ---- a/keylime/cloud_verifier_tornado.py -+++ b/keylime/cloud_verifier_tornado.py -@@ -32,6 +32,7 @@ - ) - from keylime.agentstates import AgentAttestState, AgentAttestStates - from keylime.common import retry, states, validators -+from keylime.common.version import str_to_version - from keylime.da import record - from keylime.db.keylime_db import DBEngineManager, SessionManager - from keylime.db.verifier_db import VerfierMain, VerifierAllowlist -@@ -998,6 +999,80 @@ def data_received(self, chunk: Any) -> None: - raise NotImplementedError() - - -+async def update_agent_api_version(agent: Dict[str, Any], timeout: float = 60.0) -> Union[Dict[str, Any], None]: -+ agent_id = agent["agent_id"] -+ -+ logger.info("Agent %s API version bump detected, trying to update stored API version", agent_id) -+ kwargs = {} -+ if agent["ssl_context"]: -+ kwargs["context"] = agent["ssl_context"] -+ -+ res = tornado_requests.request( -+ "GET", -+ f"http://{agent['ip']}:{agent['port']}/version", -+ **kwargs, -+ timeout=timeout, -+ ) -+ response = await res -+ -+ if response.status_code != 200: -+ logger.warning( -+ "Could not get agent %s supported API version, Error: %s", -+ agent["agent_id"], -+ response.status_code, -+ ) -+ return None -+ -+ try: -+ json_response = json.loads(response.body) -+ new_version = json_response["results"]["supported_version"] -+ old_version = agent["supported_version"] -+ -+ # Only update the API version to use if it is supported by the verifier -+ if new_version in keylime_api_version.all_versions(): -+ new_version_tuple = str_to_version(new_version) -+ old_version_tuple = str_to_version(old_version) -+ -+ assert new_version_tuple, f"Agent {agent_id} version {new_version} is invalid" -+ assert old_version_tuple, f"Agent {agent_id} version {old_version} is invalid" -+ -+ # Check that the new version is greater than current version -+ if new_version_tuple <= old_version_tuple: -+ logger.warning( -+ "Agent %s API version %s is lower or equal to previous version %s", -+ agent_id, -+ new_version, -+ old_version, -+ ) -+ return None -+ -+ logger.info("Agent %s new API version %s is supported", agent_id, new_version) -+ session = get_session() -+ agent["supported_version"] = new_version -+ -+ # Remove keys that should not go to the DB -+ agent_db = dict(agent) -+ for key in exclude_db: -+ if key in agent_db: -+ del agent_db[key] -+ -+ session.query(VerfierMain).filter_by(agent_id=agent_id).update(agent_db) # pyright: ignore -+ session.commit() -+ else: -+ logger.warning("Agent %s new API version %s is not supported", agent_id, new_version) -+ return None -+ -+ except SQLAlchemyError as e: -+ logger.error("SQLAlchemy Error updating API version for agent %s: %s", agent_id, e) -+ return None -+ except Exception as e: -+ logger.exception(e) -+ return None -+ -+ logger.info("Agent %s API version updated to %s", agent["agent_id"], agent["supported_version"]) -+ return agent -+ -+ - async def invoke_get_quote( - agent: Dict[str, Any], runtime_policy: str, need_pubkey: bool, timeout: float = 60.0 - ) -> None: -@@ -1028,15 +1103,43 @@ async def invoke_get_quote( - # this is a connection error, retry get quote - if response.status_code in [408, 500, 599]: - 
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE_RETRY)) -- else: -- # catastrophic error, do not continue -- logger.critical( -- "Unexpected Get Quote response error for cloud agent %s, Error: %s", -- agent["agent_id"], -- response.status_code, -- ) -- failure.add_event("no_quote", "Unexpected Get Quote reponse from agent", False) -- asyncio.ensure_future(process_agent(agent, states.FAILED, failure)) -+ return -+ -+ if response.status_code == 400: -+ try: -+ json_response = json.loads(response.body) -+ if "API version not supported" in json_response["status"]: -+ update = update_agent_api_version(agent) -+ updated = await update -+ -+ if updated: -+ asyncio.ensure_future(process_agent(updated, states.GET_QUOTE_RETRY)) -+ else: -+ logger.warning("Could not update stored agent %s API version", agent["agent_id"]) -+ failure.add_event( -+ "version_not_supported", -+ {"context": "Agent API version not supported", "data": json_response}, -+ False, -+ ) -+ asyncio.ensure_future(process_agent(agent, states.FAILED, failure)) -+ return -+ -+ except Exception as e: -+ logger.exception(e) -+ failure.add_event( -+ "exception", {"context": "Agent caused the verifier to throw an exception", "data": str(e)}, False -+ ) -+ asyncio.ensure_future(process_agent(agent, states.FAILED, failure)) -+ return -+ -+ # catastrophic error, do not continue -+ logger.critical( -+ "Unexpected Get Quote response error for cloud agent %s, Error: %s", -+ agent["agent_id"], -+ response.status_code, -+ ) -+ failure.add_event("no_quote", "Unexpected Get Quote reponse from agent", False) -+ asyncio.ensure_future(process_agent(agent, states.FAILED, failure)) - else: - try: - json_response = json.loads(response.body) -@@ -1100,15 +1203,43 @@ async def invoke_provide_v(agent: Dict[str, Any], timeout: float = 60.0) -> None - if response.status_code != 200: - if response.status_code in [408, 500, 599]: - asyncio.ensure_future(process_agent(agent, states.PROVIDE_V_RETRY)) -- else: -- # catastrophic error, do not continue -- logger.critical( -- "Unexpected Provide V response error for cloud agent %s, Error: %s", -- agent["agent_id"], -- response.status_code, -- ) -- failure.add_event("no_v", {"message": "Unexpected provide V response", "data": response.status_code}, False) -- asyncio.ensure_future(process_agent(agent, states.FAILED, failure)) -+ return -+ -+ if response.status_code == 400: -+ try: -+ json_response = json.loads(response.body) -+ if "API version not supported" in json_response["status"]: -+ update = update_agent_api_version(agent) -+ updated = await update -+ -+ if updated: -+ asyncio.ensure_future(process_agent(updated, states.PROVIDE_V_RETRY)) -+ else: -+ logger.warning("Could not update stored agent %s API version", agent["agent_id"]) -+ failure.add_event( -+ "version_not_supported", -+ {"context": "Agent API version not supported", "data": json_response}, -+ False, -+ ) -+ asyncio.ensure_future(process_agent(agent, states.FAILED, failure)) -+ return -+ -+ except Exception as e: -+ logger.exception(e) -+ failure.add_event( -+ "exception", {"context": "Agent caused the verifier to throw an exception", "data": str(e)}, False -+ ) -+ asyncio.ensure_future(process_agent(agent, states.FAILED, failure)) -+ return -+ -+ # catastrophic error, do not continue -+ logger.critical( -+ "Unexpected Provide V response error for cloud agent %s, Error: %s", -+ agent["agent_id"], -+ response.status_code, -+ ) -+ failure.add_event("no_v", {"message": "Unexpected provide V response", "data": response.status_code}, False) -+ 
asyncio.ensure_future(process_agent(agent, states.FAILED, failure))
- else:
- asyncio.ensure_future(process_agent(agent, states.GET_QUOTE))
-
-@@ -1134,6 +1265,24 @@ async def invoke_notify_error(agent: Dict[str, Any], tosend: Dict[str, Any], tim
- agent["agent_id"],
- )
- elif response.status_code != 200:
-+ if response.status_code == 400:
-+ try:
-+ json_response = json.loads(response.body)
-+ if "API version not supported" in json_response["status"]:
-+ update = update_agent_api_version(agent)
-+ updated = await update
-+
-+ if updated:
-+ asyncio.ensure_future(invoke_notify_error(updated, tosend))
-+ else:
-+ logger.warning("Could not update stored agent %s API version", agent["agent_id"])
-+
-+ return
-+
-+ except Exception as e:
-+ logger.exception(e)
-+ return
-+
- logger.warning(
- "Unexpected Notify Revocation response error for cloud agent %s, Error: %s",
- agent["agent_id"],
diff --git a/SOURCES/0011-revocations-Try-to-send-notifications-on-shutdown.patch b/SOURCES/0011-revocations-Try-to-send-notifications-on-shutdown.patch
new file mode 100644
index 0000000..b36836e
--- /dev/null
+++ b/SOURCES/0011-revocations-Try-to-send-notifications-on-shutdown.patch
@@ -0,0 +1,308 @@
+From af9ac50f5acf1a7d4ad285956b60e60c3c4416b7 Mon Sep 17 00:00:00 2001
+From: Anderson Toshiyuki Sasaki
+Date: Wed, 23 Jul 2025 15:39:49 +0200
+Subject: [PATCH 11/13] revocations: Try to send notifications on shutdown
+
+During verifier shutdown, try to send any pending revocation
+notification in a best-effort manner. In the future, the pending revocation
+notifications should be persisted so they can be processed at the next startup.
+
+Assisted-by: Claude 4 Sonnet
+Signed-off-by: Anderson Toshiyuki Sasaki
+---
+ keylime/cloud_verifier_tornado.py | 7 +
+ keylime/revocation_notifier.py | 239 ++++++++++++++++++++++--------
+ 2 files changed, 184 insertions(+), 62 deletions(-)
+
+diff --git a/keylime/cloud_verifier_tornado.py b/keylime/cloud_verifier_tornado.py
+index 7065661..89aa703 100644
+--- a/keylime/cloud_verifier_tornado.py
++++ b/keylime/cloud_verifier_tornado.py
+@@ -2109,6 +2109,10 @@ def main() -> None:
+ # Stop server to not accept new incoming connections
+ server.stop()
+
++ # Gracefully shut down webhook workers to prevent connection errors
++ if "webhook" in revocation_notifier.get_notifiers():
++ revocation_notifier.shutdown_webhook_workers()
++
+ # Wait for all connections to be closed and then stop ioloop
+ async def stop() -> None:
+ await server.close_all_connections()
+@@ -2136,6 +2140,9 @@ def main() -> None:
+ def sig_handler(*_: Any) -> None:
+ if run_revocation_notifier:
+ revocation_notifier.stop_broker()
++ # Gracefully shut down webhook workers to prevent connection errors
++ if "webhook" in revocation_notifier.get_notifiers():
++ revocation_notifier.shutdown_webhook_workers()
+ for p in processes:
+ p.join()
+ # Do not call sys.exit(0) here as it interferes with multiprocessing cleanup
+diff --git a/keylime/revocation_notifier.py b/keylime/revocation_notifier.py
+index 5a7cc4b..c154028 100644
+--- a/keylime/revocation_notifier.py
++++ b/keylime/revocation_notifier.py
+@@ -18,6 +18,174 @@ broker_proc: Optional[Process] = None
+
+ _SOCKET_PATH = "/var/run/keylime/keylime.verifier.ipc"
+
++# Global webhook manager instance (initialized when needed)
++_webhook_manager: Optional["WebhookNotificationManager"] = None
++
++
++class WebhookNotificationManager:
++ """Manages webhook worker threads and graceful shutdown for revocation notifications."""
++
++ def __init__(self) -> None:
++
self._shutdown_event = threading.Event() ++ self._workers: Set[threading.Thread] = set() ++ self._workers_lock = threading.Lock() ++ ++ def notify_webhook(self, tosend: Dict[str, Any]) -> None: ++ """Send webhook notification with worker thread management.""" ++ url = config.get("verifier", "webhook_url", section="revocations", fallback="") ++ # Check if a url was specified ++ if url == "": ++ return ++ ++ # Similarly to notify(), let's convert `tosend' to str to prevent ++ # possible issues with json handling by python-requests. ++ tosend = json.bytes_to_str(tosend) ++ ++ def worker_webhook(tosend: Dict[str, Any], url: str) -> None: ++ is_shutdown_mode = False ++ try: ++ interval = config.getfloat("verifier", "retry_interval") ++ exponential_backoff = config.getboolean("verifier", "exponential_backoff") ++ ++ max_retries = config.getint("verifier", "max_retries") ++ if max_retries <= 0: ++ logger.info("Invalid value found in 'max_retries' option for verifier, using default value") ++ max_retries = 5 ++ ++ # During shutdown, use fewer retries but still make best effort ++ if self._shutdown_event.is_set(): ++ is_shutdown_mode = True ++ max_retries = min(max_retries, 3) # Reduce retries during shutdown but still try ++ logger.info( ++ "Shutdown mode: attempting to send critical revocation notification with %d retries", ++ max_retries, ++ ) ++ ++ # Get TLS options from the configuration ++ (cert, key, trusted_ca, key_password), verify_server_cert = web_util.get_tls_options( ++ "verifier", is_client=True, logger=logger ++ ) ++ ++ # Generate the TLS context using the obtained options ++ tls_context = web_util.generate_tls_context( ++ cert, key, trusted_ca, key_password, is_client=True, logger=logger ++ ) ++ ++ logger.info("Sending revocation event via webhook to %s ...", url) ++ for i in range(max_retries): ++ next_retry = retry.retry_time(exponential_backoff, interval, i, logger) ++ ++ with RequestsClient( ++ url, ++ verify_server_cert, ++ tls_context, ++ ) as client: ++ try: ++ res = client.post("", json=tosend, timeout=5) ++ except requests.exceptions.SSLError as ssl_error: ++ if "TLSV1_ALERT_UNKNOWN_CA" in str(ssl_error): ++ logger.warning( ++ "Keylime does not recognize certificate from peer. Check if verifier 'trusted_server_ca' is configured correctly" ++ ) ++ ++ raise ssl_error from ssl_error ++ except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e: ++ # During shutdown, only suppress errors on the final attempt after all retries exhausted ++ if is_shutdown_mode and i == max_retries - 1: ++ logger.warning( ++ "Final attempt to send revocation notification failed during shutdown: %s", e ++ ) ++ return ++ # Otherwise, let the retry logic handle it ++ raise e ++ ++ if res and res.status_code in [200, 202]: ++ if is_shutdown_mode: ++ logger.info("Successfully sent revocation notification during shutdown") ++ break ++ ++ logger.debug( ++ "Unable to publish revocation message %d times via webhook, " ++ "trying again in %d seconds. 
" ++ "Server returned status code: %s", ++ i + 1, ++ next_retry, ++ res.status_code, ++ ) ++ ++ # During shutdown, use shorter retry intervals to complete faster ++ if is_shutdown_mode: ++ next_retry = min(next_retry, 2.0) # Cap retry interval during shutdown ++ ++ time.sleep(next_retry) ++ ++ except Exception as e: ++ # Only suppress errors during final shutdown phase and log appropriately ++ if is_shutdown_mode: ++ logger.warning("Failed to send revocation notification during shutdown: %s", e) ++ else: ++ logger.error("Error in webhook worker: %s", e) ++ finally: ++ # Remove this worker from the active set ++ current_thread = threading.current_thread() ++ with self._workers_lock: ++ self._workers.discard(current_thread) ++ ++ w = functools.partial(worker_webhook, tosend, url) ++ t = threading.Thread(target=w, daemon=True) ++ ++ # Add this worker to the active set ++ with self._workers_lock: ++ self._workers.add(t) ++ ++ t.start() ++ ++ def shutdown_workers(self) -> None: ++ """Signal webhook workers to shut down gracefully and wait for them to complete. ++ ++ This gives workers time to complete their critical revocation notifications ++ before the service shuts down completely. ++ """ ++ logger.info("Shutting down webhook workers gracefully...") ++ self._shutdown_event.set() ++ ++ # Give workers generous time to complete critical revocation notifications ++ timeout = 30.0 # Increased timeout for critical security notifications ++ end_time = time.time() + timeout ++ ++ with self._workers_lock: ++ workers_to_wait = list(self._workers) ++ ++ if workers_to_wait: ++ logger.info("Waiting for %d webhook workers to complete revocation notifications...", len(workers_to_wait)) ++ ++ for worker in workers_to_wait: ++ remaining_time = max(0, end_time - time.time()) ++ if remaining_time > 0: ++ logger.debug( ++ "Waiting for webhook worker %s to complete (timeout: %.1f seconds)", worker.name, remaining_time ++ ) ++ worker.join(timeout=remaining_time) ++ if worker.is_alive(): ++ logger.warning("Webhook worker %s did not complete within timeout", worker.name) ++ else: ++ logger.warning("Timeout exceeded while waiting for webhook workers") ++ break ++ ++ # Clean up completed workers ++ with self._workers_lock: ++ self._workers.clear() ++ ++ logger.info("Webhook workers shutdown complete") ++ ++ ++def _get_webhook_manager() -> WebhookNotificationManager: ++ """Get the global webhook manager instance, creating it if needed.""" ++ global _webhook_manager ++ if _webhook_manager is None: ++ _webhook_manager = WebhookNotificationManager() ++ return _webhook_manager ++ + + # return the revocation notification methods for cloud verifier + def get_notifiers() -> Set[str]: +@@ -83,6 +251,12 @@ def stop_broker() -> None: + broker_proc.kill() # pylint: disable=E1101 + + ++def shutdown_webhook_workers() -> None: ++ """Convenience function to shutdown webhook workers using the global manager.""" ++ manager = _get_webhook_manager() ++ manager.shutdown_workers() ++ ++ + def notify(tosend: Dict[str, Any]) -> None: + assert "zeromq" in get_notifiers() + try: +@@ -127,68 +301,9 @@ def notify(tosend: Dict[str, Any]) -> None: + + + def notify_webhook(tosend: Dict[str, Any]) -> None: +- url = config.get("verifier", "webhook_url", section="revocations", fallback="") +- # Check if a url was specified +- if url == "": +- return +- +- # Similarly to notify(), let's convert `tosend' to str to prevent +- # possible issues with json handling by python-requests. 
+- tosend = json.bytes_to_str(tosend) +- +- def worker_webhook(tosend: Dict[str, Any], url: str) -> None: +- interval = config.getfloat("verifier", "retry_interval") +- exponential_backoff = config.getboolean("verifier", "exponential_backoff") +- +- max_retries = config.getint("verifier", "max_retries") +- if max_retries <= 0: +- logger.info("Invalid value found in 'max_retries' option for verifier, using default value") +- max_retries = 5 +- +- # Get TLS options from the configuration +- (cert, key, trusted_ca, key_password), verify_server_cert = web_util.get_tls_options( +- "verifier", is_client=True, logger=logger +- ) +- +- # Generate the TLS context using the obtained options +- tls_context = web_util.generate_tls_context(cert, key, trusted_ca, key_password, is_client=True, logger=logger) +- +- logger.info("Sending revocation event via webhook to %s ...", url) +- for i in range(max_retries): +- next_retry = retry.retry_time(exponential_backoff, interval, i, logger) +- +- with RequestsClient( +- url, +- verify_server_cert, +- tls_context, +- ) as client: +- try: +- res = client.post("", json=tosend, timeout=5) +- except requests.exceptions.SSLError as ssl_error: +- if "TLSV1_ALERT_UNKNOWN_CA" in str(ssl_error): +- logger.warning( +- "Keylime does not recognize certificate from peer. Check if verifier 'trusted_server_ca' is configured correctly" +- ) +- +- raise ssl_error from ssl_error +- +- if res and res.status_code in [200, 202]: +- break +- +- logger.debug( +- "Unable to publish revocation message %d times via webhook, " +- "trying again in %d seconds. " +- "Server returned status code: %s", +- i + 1, +- next_retry, +- res.status_code, +- ) +- +- time.sleep(next_retry) +- +- w = functools.partial(worker_webhook, tosend, url) +- t = threading.Thread(target=w, daemon=True) +- t.start() ++ """Send webhook notification using the global webhook manager.""" ++ manager = _get_webhook_manager() ++ manager.notify_webhook(tosend) + + + cert_key = None +-- +2.47.3 + diff --git a/SOURCES/0012-Restore-create-allowlist.patch b/SOURCES/0012-Restore-create-allowlist.patch deleted file mode 100644 index 1e065ff..0000000 --- a/SOURCES/0012-Restore-create-allowlist.patch +++ /dev/null @@ -1,59 +0,0 @@ ---- a/scripts/create_runtime_policy.sh 2023-10-09 17:04:26.121194607 +0200 -+++ b/scripts/create_runtime_policy.sh 2023-10-09 17:06:02.089855614 +0200 -@@ -42,7 +42,7 @@ - exit $NOARGS; - fi - --ALGO=sha1sum -+ALGO=sha256sum - - ALGO_LIST=("sha1sum" "sha256sum" "sha512sum") - -@@ -78,7 +78,7 @@ - - - # Where to look for initramfs image --INITRAMFS_LOC="/boot/" -+INITRAMFS_LOC="/boot" - if [ -d "/ostree" ]; then - # If we are on an ostree system change where we look for initramfs image - loc=$(grep -E "/ostree/[^/]([^/]*)" -o /proc/cmdline | head -n 1 | cut -d / -f 3) -@@ -121,7 +121,7 @@ - cp -r /tmp/ima/$i-extracted-unmk/. /tmp/ima/$i-extracted - fi - elif [[ -x "/usr/lib/dracut/skipcpio" ]] ; then -- /usr/lib/dracut/skipcpio $i | gunzip -c | cpio -i -d 2> /dev/null -+ /usr/lib/dracut/skipcpio $i | gunzip -c 2> /dev/null | cpio -i -d 2> /dev/null - else - echo "ERROR: No tools for initramfs image processing found!" 
- break
-@@ -130,9 +130,26 @@
- find -type f -exec $ALGO "./{}" \; | sed "s| \./\./| /|" >> $OUTPUT
- done
-
--# Convert to runtime policy
--echo "Converting created allowlist to Keylime runtime policy"
--python3 $WORKING_DIR/../keylime/cmd/convert_runtime_policy.py -a $OUTPUT -o $OUTPUT
-+# when ROOTFS_LOC = '/', the path starts on allowlist ends up with double '//'
-+#
-+# Example:
-+#
-+# b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c //bar
-+#
-+# Replace the unwanted '//' with a single '/'
-+sed -i 's| /\+| /|g' $ALLOWLIST_DIR/${OUTPUT}
-+
-+# When the file name contains newlines or backslashes, the output of sha256sum
-+# adds a backslash at the beginning of the line.
-+#
-+# Example:
-+#
-+# $ echo foo > ba\\r
-+# $ sha256sum ba\\r
-+# \b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c ba\\r
-+#
-+# Remove the unwanted backslash prefix
-+sed -i 's/^\\//g' $ALLOWLIST_DIR/${OUTPUT}
-
- # Clean up
- rm -rf /tmp/ima
diff --git a/SOURCES/0012-requests_client-close-the-session-at-the-end-of-the-.patch b/SOURCES/0012-requests_client-close-the-session-at-the-end-of-the-.patch
new file mode 100644
index 0000000..ba24d60
--- /dev/null
+++ b/SOURCES/0012-requests_client-close-the-session-at-the-end-of-the-.patch
@@ -0,0 +1,45 @@
+From 5fb4484b07a7ba3fcdf451bf816b5f07a40d6d97 Mon Sep 17 00:00:00 2001
+From: Sergio Correia
+Date: Wed, 4 Jun 2025 19:52:37 +0100
+Subject: [PATCH 12/13] requests_client: close the session at the end of the
+ resource manager
+
+We had an issue in the past in which the webhook worker would not
+properly close the opened session. This was fixed in #1456 (Close
+session in worker_webhook function).
+
+At some later point, in #1566 (revocation_notifier: Take into account CA
+certificates added via configuration), some refactoring around the
+webhook_worker() in revocation_notifier happened and it started using
+the RequestsClient resource manager.
+
+However, the RequestsClient does not close the session at its end, which
+in turn means that the old issue of the session not being properly
+closed in webhook_worker() returned.
+
+We now issue a session.close() at the end of the RequestsClient context manager.
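+
+As a rough usage sketch (the argument and variable names mirror the
+webhook worker in revocation_notifier.py; the call itself is
+illustrative), the context-manager pattern this fix relies on is:
+
+    with RequestsClient(url, verify_server_cert, tls_context) as client:
+        res = client.post("", json=tosend, timeout=5)
+    # Leaving the "with" block now runs __exit__(), which closes the
+    # underlying requests.Session and releases its pooled connections
+    # instead of leaking them.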
+ +Signed-off-by: Sergio Correia +--- + keylime/requests_client.py | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/keylime/requests_client.py b/keylime/requests_client.py +index 16615f7..b7da484 100644 +--- a/keylime/requests_client.py ++++ b/keylime/requests_client.py +@@ -40,7 +40,10 @@ class RequestsClient: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: +- pass ++ try: ++ self.session.close() ++ except Exception: ++ pass + + def request(self, method: str, url: str, **kwargs: Any) -> requests.Response: + return self.session.request(method, self.base_url + url, **kwargs) +-- +2.47.3 + diff --git a/SOURCES/0013-Set-generator-and-timestamp-in-create-policy.patch b/SOURCES/0013-Set-generator-and-timestamp-in-create-policy.patch deleted file mode 100644 index 2c40991..0000000 --- a/SOURCES/0013-Set-generator-and-timestamp-in-create-policy.patch +++ /dev/null @@ -1,44 +0,0 @@ -diff --git a/keylime/cloud_verifier_common.py b/keylime/cloud_verifier_common.py -index a7399d2..c0f416d 100644 ---- a/keylime/cloud_verifier_common.py -+++ b/keylime/cloud_verifier_common.py -@@ -8,7 +8,7 @@ from keylime.agentstates import AgentAttestState, AgentAttestStates, TPMClockInf - from keylime.common import algorithms - from keylime.db.verifier_db import VerfierMain - from keylime.failure import Component, Event, Failure --from keylime.ima import file_signatures -+from keylime.ima import file_signatures, ima - from keylime.ima.types import RuntimePolicyType - from keylime.tpm import tpm_util - from keylime.tpm.tpm_main import Tpm -@@ -271,7 +271,7 @@ def process_get_status(agent: VerfierMain) -> Dict[str, Any]: - logger.debug('The contents of the agent %s attribute "mb_refstate" are %s', agent.agent_id, agent.mb_refstate) - - has_runtime_policy = 0 -- if agent.ima_policy.generator and agent.ima_policy.generator > 1: -+ if agent.ima_policy.generator and agent.ima_policy.generator > ima.RUNTIME_POLICY_GENERATOR.EmptyAllowList: - has_runtime_policy = 1 - - response = { -diff --git a/keylime/cmd/create_policy.py b/keylime/cmd/create_policy.py -index 0841d64..086b92a 100755 ---- a/keylime/cmd/create_policy.py -+++ b/keylime/cmd/create_policy.py -@@ -6,6 +6,7 @@ import argparse - import binascii - import collections - import copy -+import datetime - import gzip - import json - import multiprocessing -@@ -580,6 +581,9 @@ def main() -> None: - policy["excludes"] = sorted(list(set(policy["excludes"]))) - policy["ima"]["ignored_keyrings"] = sorted(list(set(policy["ima"]["ignored_keyrings"]))) - -+ policy["meta"]["generator"] = ima.RUNTIME_POLICY_GENERATOR.LegacyAllowList -+ policy["meta"]["timestamp"] = str(datetime.datetime.now()) -+ - try: - ima.validate_runtime_policy(policy) - except ima.ImaValidationError as ex: diff --git a/SOURCES/0013-fix-malformed-certs-workaround.patch b/SOURCES/0013-fix-malformed-certs-workaround.patch new file mode 100644 index 0000000..05c3f41 --- /dev/null +++ b/SOURCES/0013-fix-malformed-certs-workaround.patch @@ -0,0 +1,1265 @@ +diff --git a/keylime/certificate_wrapper.py b/keylime/certificate_wrapper.py +new file mode 100644 +index 0000000..899a19a +--- /dev/null ++++ b/keylime/certificate_wrapper.py +@@ -0,0 +1,99 @@ ++""" ++X.509 Certificate wrapper that preserves original bytes for malformed certificates. ++ ++This module provides a wrapper around cryptography.x509.Certificate that preserves ++the original certificate bytes when the certificate required pyasn1 re-encoding ++due to ASN.1 DER non-compliance. 
This ensures signature validity is maintained ++throughout the database lifecycle. ++""" ++ ++import base64 ++from typing import Any, Dict, Optional ++ ++import cryptography.x509 ++from cryptography.hazmat.primitives.serialization import Encoding ++ ++ ++class CertificateWrapper: ++ """ ++ A wrapper around cryptography.x509.Certificate that preserves original bytes ++ when malformed certificates require pyasn1 re-encoding. ++ ++ This class wraps a cryptography.x509.Certificate and adds the ability ++ to store the original certificate bytes when the certificate was malformed ++ and required re-encoding using pyasn1. This ensures that signature validation ++ works correctly even for certificates that don't strictly follow ASN.1 DER. ++ """ ++ ++ def __init__(self, cert: cryptography.x509.Certificate, original_bytes: Optional[bytes] = None): ++ """ ++ Initialize the wrapper certificate. ++ ++ :param cert: The cryptography.x509.Certificate object ++ :param original_bytes: The original DER bytes if certificate was re-encoded, None otherwise ++ """ ++ self._cert = cert ++ self._original_bytes = original_bytes ++ ++ def __getattr__(self, name: str) -> Any: ++ """Delegate attribute access to the wrapped certificate.""" ++ return getattr(self._cert, name) ++ ++ def __setstate__(self, state: Dict[str, Any]) -> None: ++ """Support for pickling.""" ++ self.__dict__.update(state) ++ ++ def __getstate__(self) -> Dict[str, Any]: ++ """Support for pickling.""" ++ return self.__dict__ ++ ++ @property ++ def has_original_bytes(self) -> bool: ++ """Check if this certificate has preserved original bytes.""" ++ return self._original_bytes is not None ++ ++ @property ++ def original_bytes(self) -> Optional[bytes]: ++ """Return the preserved original bytes if available.""" ++ return self._original_bytes ++ ++ def public_bytes(self, encoding: Encoding) -> bytes: ++ """ ++ Return certificate bytes, using original bytes when available. ++ ++ For certificates with preserved original bytes, this method always uses ++ the original DER bytes to maintain signature validity. For PEM encoding, ++ it converts the original DER bytes to PEM format. ++ """ ++ if self.has_original_bytes: ++ if encoding == Encoding.DER: ++ return self._original_bytes # type: ignore[return-value] ++ if encoding == Encoding.PEM: ++ # Convert original DER bytes to PEM format ++ der_b64 = base64.b64encode(self._original_bytes).decode("utf-8") # type: ignore[arg-type] ++ # Split into 64-character lines per PEM specification (RFC 1421) ++ lines = [der_b64[i : i + 64] for i in range(0, len(der_b64), 64)] ++ # Create PEM format with proper headers ++ pem_content = "\n".join(["-----BEGIN CERTIFICATE-----"] + lines + ["-----END CERTIFICATE-----"]) + "\n" ++ return pem_content.encode("utf-8") ++ ++ # For certificates without original bytes, use standard method ++ return self._cert.public_bytes(encoding) ++ ++ # Delegate common certificate methods to maintain full compatibility ++ def __str__(self) -> str: ++ return f"CertificateWrapper(subject={self._cert.subject})" ++ ++ def __repr__(self) -> str: ++ return f"CertificateWrapper(subject={self._cert.subject}, has_original_bytes={self.has_original_bytes})" ++ ++ ++def wrap_certificate(cert: cryptography.x509.Certificate, original_bytes: Optional[bytes] = None) -> CertificateWrapper: ++ """ ++ Factory function to create a wrapped certificate. 
++ ++ :param cert: The cryptography.x509.Certificate object ++ :param original_bytes: The original DER bytes if certificate was re-encoded ++ :returns: Wrapped certificate that preserves original bytes ++ """ ++ return CertificateWrapper(cert, original_bytes) +diff --git a/keylime/models/base/types/certificate.py b/keylime/models/base/types/certificate.py +index 0f03169..f6cdd48 100644 +--- a/keylime/models/base/types/certificate.py ++++ b/keylime/models/base/types/certificate.py +@@ -12,6 +12,7 @@ from pyasn1_modules import pem as pyasn1_pem + from pyasn1_modules import rfc2459 as pyasn1_rfc2459 + from sqlalchemy.types import Text + ++from keylime.certificate_wrapper import CertificateWrapper, wrap_certificate + from keylime.models.base.type import ModelType + + +@@ -78,19 +79,20 @@ class Certificate(ModelType): + cert = Certificate().cast("-----BEGIN CERTIFICATE-----\nMIIE...") + """ + +- IncomingValue = Union[cryptography.x509.Certificate, bytes, str, None] ++ IncomingValue = Union[cryptography.x509.Certificate, CertificateWrapper, bytes, str, None] + + def __init__(self) -> None: + super().__init__(Text) + +- def _load_der_cert(self, der_cert_data: bytes) -> cryptography.x509.Certificate: +- """Loads a binary x509 certificate encoded using ASN.1 DER as a ``cryptography.x509.Certificate`` object. This ++ def _load_der_cert(self, der_cert_data: bytes) -> CertificateWrapper: ++ """Loads a binary x509 certificate encoded using ASN.1 DER as a ``CertificateWrapper`` object. This + method does not require strict adherence to ASN.1 DER thereby making it possible to accept certificates which do + not follow every detail of the spec (this is the case for a number of TPM certs) [1,2]. + + It achieves this by first using the strict parser provided by python-cryptography. If that fails, it decodes the + certificate and re-encodes it using the more-forgiving pyasn1 library. The re-encoded certificate is then +- re-parsed by python-cryptography. ++ re-parsed by python-cryptography. For malformed certificates requiring re-encoding, the original bytes are ++ preserved in the wrapper to maintain signature validity. + + This method is equivalent to the ``cert_utils.x509_der_cert`` function but does not produce a warning when the + backup parser is used, allowing this condition to be optionally detected and handled by the model where +@@ -106,24 +108,28 @@ class Certificate(ModelType): + + :raises: :class:`SubstrateUnderrunError`: cert could not be deserialized even using the fallback pyasn1 parser + +- :returns: A ``cryptography.x509.Certificate`` object ++ :returns: A ``CertificateWrapper`` object + """ + + try: +- return cryptography.x509.load_der_x509_certificate(der_cert_data) ++ cert = cryptography.x509.load_der_x509_certificate(der_cert_data) ++ return wrap_certificate(cert, None) + except Exception: + pyasn1_cert = pyasn1_decoder.decode(der_cert_data, asn1Spec=pyasn1_rfc2459.Certificate())[0] +- return cryptography.x509.load_der_x509_certificate(pyasn1_encoder.encode(pyasn1_cert)) ++ cert = cryptography.x509.load_der_x509_certificate(pyasn1_encoder.encode(pyasn1_cert)) ++ # Preserve the original bytes when re-encoding is necessary ++ return wrap_certificate(cert, der_cert_data) + +- def _load_pem_cert(self, pem_cert_data: str) -> cryptography.x509.Certificate: ++ def _load_pem_cert(self, pem_cert_data: str) -> CertificateWrapper: + """Loads a text x509 certificate encoded using PEM (Base64ed DER with header and footer) as a +- ``cryptography.x509.Certificate`` object. 
This method does not require strict adherence to ASN.1 DER thereby ++ ``CertificateWrapper`` object. This method does not require strict adherence to ASN.1 DER thereby + making it possible to accept certificates which do not follow every detail of the spec (this is the case for + a number of TPM certs) [1,2]. + + It achieves this by first using the strict parser provided by python-cryptography. If that fails, it decodes the + certificate and re-encodes it using the more-forgiving pyasn1 library. The re-encoded certificate is then +- re-parsed by python-cryptography. ++ re-parsed by python-cryptography. For malformed certificates requiring re-encoding, the original DER bytes are ++ preserved in the wrapper to maintain signature validity. + + This method is equivalent to the ``cert_utils.x509_der_cert`` function but does not produce a warning when the + backup parser is used, allowing this condition to be optionally detected and handled by the model where +@@ -135,19 +141,24 @@ class Certificate(ModelType): + [2] https://github.com/pyca/cryptography/issues/7189 + [3] https://github.com/keylime/keylime/issues/1559 + +- :param der_cert_data: the DER bytes of the certificate ++ :param pem_cert_data: the PEM text of the certificate + + :raises: :class:`SubstrateUnderrunError`: cert could not be deserialized even using the fallback pyasn1 parser + +- :returns: A ``cryptography.x509.Certificate`` object ++ :returns: A ``CertificateWrapper`` object + """ + + try: +- return cryptography.x509.load_pem_x509_certificate(pem_cert_data.encode("utf-8")) ++ cert = cryptography.x509.load_pem_x509_certificate(pem_cert_data.encode("utf-8")) ++ return wrap_certificate(cert, None) + except Exception: + der_data = pyasn1_pem.readPemFromFile(io.StringIO(pem_cert_data)) + pyasn1_cert = pyasn1_decoder.decode(der_data, asn1Spec=pyasn1_rfc2459.Certificate())[0] +- return cryptography.x509.load_der_x509_certificate(pyasn1_encoder.encode(pyasn1_cert)) ++ cert = cryptography.x509.load_der_x509_certificate(pyasn1_encoder.encode(pyasn1_cert)) ++ # Only preserve original bytes if we have valid DER data ++ original_bytes = der_data if isinstance(der_data, bytes) and der_data else None ++ # Preserve the original bytes when re-encoding is necessary ++ return wrap_certificate(cert, original_bytes) + + def infer_encoding(self, value: IncomingValue) -> Optional[str]: + """Tries to infer the certificate encoding from the given value based on the data type and other surface-level +@@ -159,15 +170,21 @@ class Certificate(ModelType): + :returns: ``"der"`` when the value appears to be DER encoded + :returns: ``"pem"`` when the value appears to be PEM encoded + :returns: ``"base64"`` when the value appears to be Base64(DER) encoded (without PEM headers) ++ :returns: ``"wrapped"`` when the value is already a ``CertificateWrapper`` object + :returns: ``"decoded"`` when the value is already a ``cryptography.x509.Certificate`` object ++ :returns: ``"disabled"`` when the value is the string "disabled" + :returns: ``None`` when the encoding cannot be inferred + """ + # pylint: disable=no-else-return + +- if isinstance(value, cryptography.x509.Certificate): ++ if isinstance(value, CertificateWrapper): ++ return "wrapped" ++ elif isinstance(value, cryptography.x509.Certificate): + return "decoded" + elif isinstance(value, bytes): + return "der" ++ elif isinstance(value, str) and value == "disabled": ++ return "disabled" + elif isinstance(value, str) and value.startswith("-----BEGIN CERTIFICATE-----"): + return "pem" + elif isinstance(value, 
str): +@@ -190,19 +207,25 @@ class Certificate(ModelType): + :param value: The value in DER, Base64(DER), or PEM format (or an already deserialized certificate object) + + :returns: ``"True"`` if the value can be deserialized by python-cryptography and is ASN.1 DER compliant ++ :returns: ``"True"`` if the value is the string "disabled" (considered compliant as it's a valid field value) + :returns: ``"False"`` if the value cannot be deserialized by python-cryptography + :returns: ``None`` if the value is already a deserialized certificate of type ``cryptography.x509.Certificate`` + """ + + try: + encoding_inf = self.infer_encoding(value) ++ if encoding_inf == "wrapped": ++ # For CertificateWrapper objects, check if they have original bytes (indicating re-encoding was needed) ++ return not value.has_original_bytes # type: ignore[union-attr] + if encoding_inf == "decoded": + return None ++ if encoding_inf == "disabled": ++ return True + + if encoding_inf == "der": + cryptography.x509.load_der_x509_certificate(value) # type: ignore[reportArgumentType, arg-type] + elif encoding_inf == "pem": +- cryptography.x509.load_pem_x509_certificate(value) # type: ignore[reportArgumentType, arg-type] ++ cryptography.x509.load_pem_x509_certificate(value.encode("utf-8")) # type: ignore[reportArgumentType, arg-type, union-attr] + elif encoding_inf == "base64": + der_value = base64.b64decode(value, validate=True) # type: ignore[reportArgumentType, arg-type] + cryptography.x509.load_der_x509_certificate(der_value) +@@ -213,25 +236,27 @@ class Certificate(ModelType): + + return True + +- def cast(self, value: IncomingValue) -> Optional[cryptography.x509.Certificate]: ++ def cast(self, value: IncomingValue) -> Optional[CertificateWrapper]: + """Tries to interpret the given value as an X.509 certificate and convert it to a +- ``cryptography.x509.Certificate`` object. Values which do not require conversion are returned unchanged. ++ ``CertificateWrapper`` object. Values which do not require conversion are returned unchanged. 
+ + :param value: The value to convert (may be in DER, Base64(DER), or PEM format) + + :raises: :class:`TypeError`: ``value`` is of an unexpected data type + :raises: :class:`ValueError`: ``value`` does not contain data which is interpretable as a certificate + +- :returns: A ``cryptography.x509.Certificate`` object or None if an empty value is given ++ :returns: A ``CertificateWrapper`` object or None if an empty value is given + """ + + if not value: + return None + + encoding_inf = self.infer_encoding(value) ++ if encoding_inf == "wrapped": ++ return value # type: ignore[return-value] + if encoding_inf == "decoded": +- return value # type: ignore[reportReturnType, return-value] +- ++ # Wrap raw cryptography certificate without original bytes ++ return wrap_certificate(value, None) # type: ignore[arg-type] + if encoding_inf == "der": + try: + return self._load_der_cert(value) # type: ignore[reportArgumentType, arg-type] +@@ -271,7 +296,6 @@ class Certificate(ModelType): + if not cert: + return None + +- # Save as Base64-encoded value (without the PEM "BEGIN" and "END" header/footer for efficiency) + return base64.b64encode(cert.public_bytes(Encoding.DER)).decode("utf-8") + + def render(self, value: IncomingValue) -> Optional[str]: +@@ -281,9 +305,8 @@ class Certificate(ModelType): + if not cert: + return None + +- # Render certificate in PEM format + return cert.public_bytes(Encoding.PEM).decode("utf-8") # type: ignore[no-any-return] + + @property + def native_type(self) -> type: +- return cryptography.x509.Certificate ++ return CertificateWrapper +diff --git a/keylime/models/registrar/registrar_agent.py b/keylime/models/registrar/registrar_agent.py +index b232049..680316b 100644 +--- a/keylime/models/registrar/registrar_agent.py ++++ b/keylime/models/registrar/registrar_agent.py +@@ -1,7 +1,6 @@ + import base64 + import hmac + +-import cryptography.x509 + from cryptography.hazmat.primitives.asymmetric import ec, rsa + from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat + +@@ -116,35 +115,35 @@ class RegistrarAgent(PersistableModel): + if not cert_utils.verify_cert(cert, trust_store, cert_type): + self._add_error(cert_field, "must contain a certificate issued by a CA present in the trust store") + +- def _check_cert_compliance(self, cert_field, raw_cert): ++ def _check_cert_compliance(self, cert_field): + new_cert = self.changes.get(cert_field) + old_cert = self.values.get(cert_field) + + # If the certificate field has not been changed, no need to perform check +- if not raw_cert or not new_cert: ++ if not new_cert: ++ return True ++ ++ # If the certificate field is set as "disabled" (for mtls_cert) ++ if new_cert == "disabled": + return True + + # If the new certificate value is the same as the old certificate value, no need to perform check +- if ( +- isinstance(new_cert, cryptography.x509.Certificate) +- and isinstance(old_cert, cryptography.x509.Certificate) +- and new_cert.public_bytes(Encoding.DER) == old_cert.public_bytes(Encoding.DER) +- ): ++ if old_cert and new_cert.public_bytes(Encoding.DER) == old_cert.public_bytes(Encoding.DER): + return True + +- compliant = Certificate().asn1_compliant(raw_cert) ++ compliant = Certificate().asn1_compliant(new_cert) + + if not compliant: + if config.get("registrar", "malformed_cert_action") == "reject": +- self._add_error(cert_field, Certificate().generate_error_msg(raw_cert)) ++ self._add_error(cert_field, Certificate().generate_error_msg(new_cert)) + + return compliant + +- def _check_all_cert_compliance(self, 
data): ++ def _check_all_cert_compliance(self): + non_compliant_certs = [] + + for field_name in ("ekcert", "iak_cert", "idevid_cert", "mtls_cert"): +- if not self._check_cert_compliance(field_name, data.get(field_name)): ++ if not self._check_cert_compliance(field_name): + non_compliant_certs.append(f"'{field_name}'") + + if not non_compliant_certs: +@@ -291,7 +290,7 @@ class RegistrarAgent(PersistableModel): + # Ensure either an EK or IAK/IDevID is present, depending on configuration + self._check_root_identity_presence() + # Handle certificates which are not fully compliant with ASN.1 DER +- self._check_all_cert_compliance(data) ++ self._check_all_cert_compliance() + + # Basic validation of values + self.validate_required(["aik_tpm"]) +diff --git a/test/test_certificate_modeltype.py b/test/test_certificate_modeltype.py +new file mode 100644 +index 0000000..335ae0f +--- /dev/null ++++ b/test/test_certificate_modeltype.py +@@ -0,0 +1,197 @@ ++""" ++Unit tests for the Certificate ModelType class. ++ ++This module tests the certificate model type functionality including ++encoding inference and ASN.1 compliance checking. ++""" ++ ++import base64 ++import unittest ++ ++import cryptography.x509 ++from cryptography.hazmat.primitives.serialization import Encoding ++ ++from keylime.certificate_wrapper import CertificateWrapper, wrap_certificate ++from keylime.models.base.types.certificate import Certificate ++ ++ ++class TestCertificateModelType(unittest.TestCase): ++ """Test cases for Certificate ModelType class.""" ++ ++ def setUp(self): ++ """Set up test fixtures.""" ++ self.cert_type = Certificate() ++ ++ # Compliant certificate for testing (loads fine with python-cryptography) ++ self.compliant_cert_pem = """-----BEGIN CERTIFICATE----- ++MIIClzCCAX+gAwIBAgIBATANBgkqhkiG9w0BAQsFADAPMQ0wCwYDVQQDDARUZXN0 ++MB4XDTI1MDkxMTEyNDU1MVoXDTI2MDkxMTEyNDU1MVowDzENMAsGA1UEAwwEVGVz ++dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAO2V27HsMnKczHCaLgf9 ++FtxuorvkA5OMkz6KsW1eyryHr0TJ801prLpeNnMZ3U4pqLMqocMc7T2KO6nPZJxO ++7zRzehyo9pBBVO4pUR1QMGoTWuJQbqNieDQ4V9dW67N5wp/UWEkK6CNNd6aXjswb ++dVaDbIfDL8hMX6Lil3+pTysRWGqjRvBGJxS9r/mYRAvbz1JHPjfegSc0uxnUE+qZ ++SrbWa3TN82LX6jw6tKk0Z3CcPJC6QN+ijCxxAoHyLRYUIgZbAKe/FGRbjO0fuW11 ++L7TcE1k3eaC7RkvotIaCOW/RMOkwKu1MbCzFEA2YRYf9covEwdItzI4FE++ZJrsz ++LhUCAwEAaTANBgkqhkiG9w0BAQsFAAOCAQEAeqqJT0LnmAluAjrsCSK/eYYjwjhZ ++aKMi/iBO10zfb+GvT4yqEL5gnuWxJEx4TTcDww1clvOC1EcPUZFaKR3GIBGy0ZgJ ++zGCfg+sC6liyZ+4PSWSJHD2dT5N3IGp4/hPsrhKnVb9fYbRc0Bc5VHeS9QQoSJDH ++f9EbxCcwdErVllRter29OZCb4XnEEbTqLIKRYVrbsu/t4C+vzi0tmKg5HZXf9PMo ++D28zJGsCAr8sKW/iUKObqDOHEn56lk12NTJmJmi+g6rEikk/0czJlRjSGnJQLjUg ++d4wslruibXBsLPtJw2c6vTC2SV2F1PXwy5j1OKU+D6nxaaItQvWADEjcTg== ++-----END CERTIFICATE-----""" ++ ++ # Malformed certificate that requires pyasn1 re-encoding (fails with python-cryptography) ++ self.malformed_cert_b64 = ( ++ "MIIDUjCCAvegAwIBAgILAI5xYHQ14nH5hdYwCgYIKoZIzj0EAwIwVTFTMB8GA1UEAxMYTnV2b3Rv" ++ "biBUUE0gUm9vdCBDQSAyMTExMCUGA1UEChMeTnV2b3RvbiBUZWNobm9sb2d5IENvcnBvcmF0aW9u" ++ "MAkGA1UEBhMCVFcwHhcNMTkwNzIzMTcxNTEzWhcNMzkwNzE5MTcxNTEzWjAAMIIBIjANBgkqhkiG" ++ "9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk8kCj7srY/Zlvm1795fVXdyX44w5qsd1m5VywMDgSOavzPKO" ++ "kgbHgQNx6Ak5+4Q43EJ/5qsaDBv59F8W7K69maUwcMNq1xpuq0V/LiwgJVAtc3CdvlxtwQrn7+Uq" ++ "ieIGf+i8sGxpeUCSmYHJPTHNHqjQnvUtdGoy/+WO0i7WsAvX3k/gHHr4p58a8urjJ1RG2Lk1g48D" ++ "ESwl+D7atQEPWzgjr6vK/s5KpLrn7M+dh97TUbG1510AOWBPP35MtT8IZbqC4hs2Ol16gT1M3a9e" ++ "+GaMZkItLUwV76vKDNEgTZG8M1C9OItA/xwzlfXbPepzpxWb4kzHS4qZoQtl4vBZrQIDAQABo4IB" ++ 
"NjCCATIwUAYDVR0RAQH/BEYwRKRCMEAxPjAUBgVngQUCARMLaWQ6NEU1NDQzMDAwEAYFZ4EFAgIT" ++ "B05QQ1Q3NXgwFAYFZ4EFAgMTC2lkOjAwMDcwMDAyMAwGA1UdEwEB/wQCMAAwEAYDVR0lBAkwBwYF" ++ "Z4EFCAEwHwYDVR0jBBgwFoAUI/TiKtO+N0pEl3KVSqKDrtdSVy4wDgYDVR0PAQH/BAQDAgUgMCIG" ++ "A1UdCQQbMBkwFwYFZ4EFAhAxDjAMDAMyLjACAQACAgCKMGkGCCsGAQUFBwEBBF0wWzBZBggrBgEF" ++ "BQcwAoZNaHR0cHM6Ly93d3cubnV2b3Rvbi5jb20vc2VjdXJpdHkvTlRDLVRQTS1FSy1DZXJ0L051" ++ "dm90b24gVFBNIFJvb3QgQ0EgMjExMS5jZXIwCgYIKoZIzj0EAwIDSQAwRgIhAPHOFiBDZd0dfml2" ++ "a/KlPFhmX7Ahpd0Wq11ZUW1/ixviAiEAlex8BB5nsR6w8QrANwCxc7fH/YnbjXfMCFiWzeZH7ps=" ++ ) ++ ++ # Load certificates for testing ++ self.compliant_cert = cryptography.x509.load_pem_x509_certificate(self.compliant_cert_pem.encode()) ++ self.malformed_cert_der = base64.b64decode(self.malformed_cert_b64) ++ ++ def test_infer_encoding_wrapped_certificate(self): ++ """Test that CertificateWrapper objects are identified as 'wrapped'.""" ++ wrapped_cert = wrap_certificate(self.compliant_cert, None) ++ encoding = self.cert_type.infer_encoding(wrapped_cert) ++ self.assertEqual(encoding, "wrapped") ++ ++ def test_infer_encoding_raw_certificate(self): ++ """Test that raw cryptography.x509.Certificate objects are identified as 'decoded'.""" ++ encoding = self.cert_type.infer_encoding(self.compliant_cert) ++ self.assertEqual(encoding, "decoded") ++ ++ def test_infer_encoding_der_bytes(self): ++ """Test that DER bytes are identified as 'der'.""" ++ der_bytes = self.compliant_cert.public_bytes(Encoding.DER) ++ encoding = self.cert_type.infer_encoding(der_bytes) ++ self.assertEqual(encoding, "der") ++ ++ def test_infer_encoding_pem_string(self): ++ """Test that PEM strings are identified as 'pem'.""" ++ encoding = self.cert_type.infer_encoding(self.compliant_cert_pem) ++ self.assertEqual(encoding, "pem") ++ ++ def test_infer_encoding_base64_string(self): ++ """Test that Base64 strings are identified as 'base64'.""" ++ encoding = self.cert_type.infer_encoding(self.malformed_cert_b64) ++ self.assertEqual(encoding, "base64") ++ ++ def test_infer_encoding_none_for_invalid(self): ++ """Test that invalid types return None.""" ++ encoding = self.cert_type.infer_encoding(12345) # type: ignore[arg-type] # Testing invalid type ++ self.assertIsNone(encoding) ++ ++ def test_asn1_compliant_wrapped_without_original_bytes(self): ++ """Test that CertificateWrapper without original bytes is ASN.1 compliant.""" ++ wrapped_cert = wrap_certificate(self.compliant_cert, None) ++ compliant = self.cert_type.asn1_compliant(wrapped_cert) ++ self.assertTrue(compliant) ++ ++ def test_asn1_compliant_wrapped_with_original_bytes(self): ++ """Test that CertificateWrapper with original bytes is not ASN.1 compliant.""" ++ wrapped_cert = wrap_certificate(self.compliant_cert, b"fake_original_bytes") ++ compliant = self.cert_type.asn1_compliant(wrapped_cert) ++ self.assertFalse(compliant) ++ ++ def test_asn1_compliant_raw_certificate(self): ++ """Test that raw cryptography.x509.Certificate returns None (already decoded).""" ++ compliant = self.cert_type.asn1_compliant(self.compliant_cert) ++ self.assertIsNone(compliant) ++ ++ def test_asn1_compliant_pem_strings(self): ++ """Test ASN.1 compliance checking on PEM strings.""" ++ # The regular certificate and TPM certificate from test_registrar_db.py are actually ASN.1 compliant ++ # and can be loaded directly by python-cryptography without requiring pyasn1 re-encoding ++ compliant_regular = self.cert_type.asn1_compliant(self.compliant_cert_pem) ++ # Only test one certificate since both are the same type (ASN.1 
compliant) ++ ++ # Should be ASN.1 compliant (True) since it loads fine with python-cryptography ++ self.assertTrue(compliant_regular) ++ ++ def test_asn1_compliant_der_and_base64(self): ++ """Test ASN.1 compliance checking on DER and Base64 formats.""" ++ # Test DER bytes - regular certificate should be compliant ++ der_bytes = self.compliant_cert.public_bytes(Encoding.DER) ++ compliant_der = self.cert_type.asn1_compliant(der_bytes) ++ self.assertTrue(compliant_der) ++ ++ # Test Base64 string - regular certificate should be compliant ++ b64_string = base64.b64encode(der_bytes).decode("utf-8") ++ compliant_b64 = self.cert_type.asn1_compliant(b64_string) ++ self.assertTrue(compliant_b64) ++ ++ def test_asn1_compliant_malformed_certificate(self): ++ """Test ASN.1 compliance checking on a truly malformed certificate.""" ++ # Test the malformed certificate that requires pyasn1 re-encoding ++ compliant = self.cert_type.asn1_compliant(self.malformed_cert_b64) ++ self.assertFalse(compliant) # Should be non-compliant since it needs pyasn1 fallback ++ ++ def test_asn1_compliant_invalid_data(self): ++ """Test that invalid certificate data is not ASN.1 compliant.""" ++ compliant = self.cert_type.asn1_compliant("invalid_certificate_data") ++ self.assertFalse(compliant) ++ ++ def test_cast_wrapped_certificate(self): ++ """Test that CertificateWrapper objects are returned unchanged.""" ++ wrapped_cert = wrap_certificate(self.compliant_cert, None) ++ result = self.cert_type.cast(wrapped_cert) ++ self.assertIs(result, wrapped_cert) ++ ++ def test_cast_raw_certificate_to_wrapped(self): ++ """Test that raw certificates are wrapped without original bytes.""" ++ result = self.cert_type.cast(self.compliant_cert) ++ self.assertIsInstance(result, CertificateWrapper) ++ assert result is not None # For type checker ++ self.assertFalse(result.has_original_bytes) ++ ++ def test_cast_pem_strings(self): ++ """Test casting PEM strings to CertificateWrapper.""" ++ # Test regular certificate - should be ASN.1 compliant, no original bytes needed ++ result_regular = self.cert_type.cast(self.compliant_cert_pem) ++ self.assertIsInstance(result_regular, CertificateWrapper) ++ assert result_regular is not None # For type checker ++ self.assertFalse(result_regular.has_original_bytes) ++ ++ # Note: Only testing compliant certificate since we now use one consistent certificate for all compliant scenarios ++ ++ def test_cast_malformed_certificate(self): ++ """Test casting the malformed certificate that requires pyasn1 re-encoding.""" ++ result = self.cert_type.cast(self.malformed_cert_b64) ++ self.assertIsInstance(result, CertificateWrapper) ++ assert result is not None # For type checker ++ # Malformed certificate should have original bytes since it needs re-encoding ++ self.assertTrue(result.has_original_bytes) ++ ++ def test_cast_der_bytes(self): ++ """Test casting DER bytes to CertificateWrapper.""" ++ der_bytes = self.compliant_cert.public_bytes(Encoding.DER) ++ result = self.cert_type.cast(der_bytes) ++ self.assertIsInstance(result, CertificateWrapper) ++ ++ def test_cast_none_value(self): ++ """Test that None values return None.""" ++ result = self.cert_type.cast(None) ++ self.assertIsNone(result) ++ ++ def test_cast_empty_string(self): ++ """Test that empty strings return None.""" ++ result = self.cert_type.cast("") ++ self.assertIsNone(result) ++ ++ ++if __name__ == "__main__": ++ unittest.main() +diff --git a/test/test_certificate_wrapper.py b/test/test_certificate_wrapper.py +new file mode 100644 +index 0000000..6b47260 
+--- /dev/null ++++ b/test/test_certificate_wrapper.py +@@ -0,0 +1,385 @@ ++""" ++Unit tests for the CertificateWrapper class. ++ ++This module tests the certificate wrapper functionality that preserves original bytes ++for malformed certificates requiring pyasn1 re-encoding. ++""" ++ ++import base64 ++import subprocess ++import tempfile ++import unittest ++from unittest.mock import Mock ++ ++import cryptography.x509 ++from cryptography.hazmat.primitives.serialization import Encoding ++from pyasn1.codec.der import decoder as pyasn1_decoder ++from pyasn1.codec.der import encoder as pyasn1_encoder ++from pyasn1_modules import rfc2459 as pyasn1_rfc2459 ++ ++from keylime.certificate_wrapper import CertificateWrapper, wrap_certificate ++ ++ ++class TestCertificateWrapper(unittest.TestCase): ++ """Test cases for CertificateWrapper class.""" ++ ++ def setUp(self): ++ """Set up test fixtures.""" ++ # Malformed certificate (Base64 encoded) that requires pyasn1 re-encoding ++ # This is a real TPM certificate that doesn't strictly follow ASN.1 DER rules ++ self.malformed_cert_b64 = ( ++ "MIIDUjCCAvegAwIBAgILAI5xYHQ14nH5hdYwCgYIKoZIzj0EAwIwVTFTMB8GA1UEAxMYTnV2b3Rv" ++ "biBUUE0gUm9vdCBDQSAyMTExMCUGA1UEChMeTnV2b3RvbiBUZWNobm9sb2d5IENvcnBvcmF0aW9u" ++ "MAkGA1UEBhMCVFcwHhcNMTkwNzIzMTcxNTEzWhcNMzkwNzE5MTcxNTEzWjAAMIIBIjANBgkqhkiG" ++ "9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk8kCj7srY/Zlvm1795fVXdyX44w5qsd1m5VywMDgSOavzPKO" ++ "kgbHgQNx6Ak5+4Q43EJ/5qsaDBv59F8W7K69maUwcMNq1xpuq0V/LiwgJVAtc3CdvlxtwQrn7+Uq" ++ "ieIGf+i8sGxpeUCSmYHJPTHNHqjQnvUtdGoy/+WO0i7WsAvX3k/gHHr4p58a8urjJ1RG2Lk1g48D" ++ "ESwl+D7atQEPWzgjr6vK/s5KpLrn7M+dh97TUbG1510AOWBPP35MtT8IZbqC4hs2Ol16gT1M3a9e" ++ "+GaMZkItLUwV76vKDNEgTZG8M1C9OItA/xwzlfXbPepzpxWb4kzHS4qZoQtl4vBZrQIDAQABo4IB" ++ "NjCCATIwUAYDVR0RAQH/BEYwRKRCMEAxPjAUBgVngQUCARMLaWQ6NEU1NDQzMDAwEAYFZ4EFAgIT" ++ "B05QQ1Q3NXgwFAYFZ4EFAgMTC2lkOjAwMDcwMDAyMAwGA1UdEwEB/wQCMAAwEAYDVR0lBAkwBwYF" ++ "Z4EFCAEwHwYDVR0jBBgwFoAUI/TiKtO+N0pEl3KVSqKDrtdSVy4wDgYDVR0PAQH/BAQDAgUgMCIG" ++ "A1UdCQQbMBkwFwYFZ4EFAhAxDjAMDAMyLjACAQACAgCKMGkGCCsGAQUFBwEBBF0wWzBZBggrBgEF" ++ "BQcwAoZNaHR0cHM6Ly93d3cubnV2b3Rvbi5jb20vc2VjdXJpdHkvTlRDLVRQTS1FSy1DZXJ0L051" ++ "dm90b24gVFBNIFJvb3QgQ0EgMjExMS5jZXIwCgYIKoZIzj0EAwIDSQAwRgIhAPHOFiBDZd0dfml2" ++ "a/KlPFhmX7Ahpd0Wq11ZUW1/ixviAiEAlex8BB5nsR6w8QrANwCxc7fH/YnbjXfMCFiWzeZH7ps=" ++ ) ++ self.malformed_cert_der = base64.b64decode(self.malformed_cert_b64) ++ ++ # Create a mock certificate for testing ++ self.mock_cert = Mock(spec=cryptography.x509.Certificate) ++ self.mock_cert.subject = Mock() ++ self.mock_cert.subject.__str__ = Mock(return_value="CN=Test Certificate") ++ self.mock_cert.public_bytes.return_value = b"mock_der_data" ++ ++ def test_init_without_original_bytes(self): ++ """Test wrapper initialization without original bytes.""" ++ wrapper = CertificateWrapper(self.mock_cert) ++ ++ # Test through public interface ++ self.assertFalse(wrapper.has_original_bytes) ++ self.assertIsNone(wrapper.original_bytes) ++ # Test delegation works ++ self.assertEqual(wrapper.subject, self.mock_cert.subject) ++ ++ def test_init_with_original_bytes(self): ++ """Test wrapper initialization with original bytes.""" ++ original_data = b"original_certificate_data" ++ wrapper = CertificateWrapper(self.mock_cert, original_data) ++ ++ # Test through public interface ++ self.assertTrue(wrapper.has_original_bytes) ++ self.assertEqual(wrapper.original_bytes, original_data) ++ # Test delegation works ++ self.assertEqual(wrapper.subject, self.mock_cert.subject) ++ ++ def test_getattr_delegation(self): ++ 
"""Test that attributes are properly delegated to the wrapped certificate.""" ++ wrapper = CertificateWrapper(self.mock_cert) ++ ++ # Access an attribute that should be delegated ++ result = wrapper.subject ++ self.assertEqual(result, self.mock_cert.subject) ++ ++ def test_public_bytes_der_without_original(self): ++ """Test public_bytes DER encoding without original bytes.""" ++ wrapper = CertificateWrapper(self.mock_cert) ++ ++ result = wrapper.public_bytes(Encoding.DER) ++ ++ self.mock_cert.public_bytes.assert_called_once_with(Encoding.DER) ++ self.assertEqual(result, b"mock_der_data") ++ ++ def test_public_bytes_der_with_original(self): ++ """Test public_bytes DER encoding with original bytes.""" ++ original_data = b"original_certificate_data" ++ wrapper = CertificateWrapper(self.mock_cert, original_data) ++ ++ result = wrapper.public_bytes(Encoding.DER) ++ ++ # Should return original bytes, not call the wrapped certificate ++ self.mock_cert.public_bytes.assert_not_called() ++ self.assertEqual(result, original_data) ++ ++ def test_public_bytes_pem_without_original(self): ++ """Test public_bytes PEM encoding without original bytes.""" ++ self.mock_cert.public_bytes.return_value = b"-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----\n" ++ wrapper = CertificateWrapper(self.mock_cert) ++ ++ result = wrapper.public_bytes(Encoding.PEM) ++ ++ self.mock_cert.public_bytes.assert_called_once_with(Encoding.PEM) ++ self.assertEqual(result, b"-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----\n") ++ ++ def test_public_bytes_pem_with_original(self): ++ """Test public_bytes PEM encoding with original bytes.""" ++ original_data = self.malformed_cert_der ++ wrapper = CertificateWrapper(self.mock_cert, original_data) ++ ++ result = wrapper.public_bytes(Encoding.PEM) ++ ++ # Should not call the wrapped certificate's method ++ self.mock_cert.public_bytes.assert_not_called() ++ ++ # Result should be PEM format derived from original bytes ++ self.assertIsInstance(result, bytes) ++ result_str = result.decode("utf-8") ++ self.assertTrue(result_str.startswith("-----BEGIN CERTIFICATE-----")) ++ self.assertTrue(result_str.endswith("-----END CERTIFICATE-----\n")) ++ ++ # Verify that the PEM content can be converted back to the original DER ++ pem_lines = result_str.strip().split("\n") ++ pem_content = "".join(pem_lines[1:-1]) # Remove headers and join ++ recovered_der = base64.b64decode(pem_content) ++ self.assertEqual(recovered_der, original_data) ++ ++ def test_pem_line_length_compliance(self): ++ """Test that PEM output follows RFC 1421 line length requirements (64 chars).""" ++ original_data = self.malformed_cert_der ++ wrapper = CertificateWrapper(self.mock_cert, original_data) ++ ++ result = wrapper.public_bytes(Encoding.PEM) ++ result_str = result.decode("utf-8") ++ ++ lines = result_str.strip().split("\n") ++ # Check that content lines (excluding headers) are max 64 chars ++ for line in lines[1:-1]: # Skip header and footer ++ self.assertLessEqual(len(line), 64) ++ ++ def test_str_representation(self): ++ """Test string representation of the wrapper.""" ++ wrapper = CertificateWrapper(self.mock_cert) ++ ++ result = str(wrapper) ++ ++ expected = f"CertificateWrapper(subject={self.mock_cert.subject})" ++ self.assertEqual(result, expected) ++ ++ def test_repr_representation_without_original(self): ++ """Test repr representation without original bytes.""" ++ wrapper = CertificateWrapper(self.mock_cert) ++ ++ result = repr(wrapper) ++ ++ expected = 
f"CertificateWrapper(subject={self.mock_cert.subject}, has_original_bytes=False)" ++ self.assertEqual(result, expected) ++ ++ def test_repr_representation_with_original(self): ++ """Test repr representation with original bytes.""" ++ original_data = b"original_data" ++ wrapper = CertificateWrapper(self.mock_cert, original_data) ++ ++ result = repr(wrapper) ++ ++ expected = f"CertificateWrapper(subject={self.mock_cert.subject}, has_original_bytes=True)" ++ self.assertEqual(result, expected) ++ ++ def test_pickling_support(self): ++ """Test that the wrapper supports pickling operations.""" ++ original_data = b"test_data" ++ wrapper = CertificateWrapper(self.mock_cert, original_data) ++ ++ # Test getstate ++ state = wrapper.__getstate__() ++ self.assertIsInstance(state, dict) ++ self.assertIn("_cert", state) ++ self.assertIn("_original_bytes", state) ++ ++ # Test setstate ++ new_wrapper = CertificateWrapper(Mock(), None) ++ new_wrapper.__setstate__(state) ++ # Verify state was restored correctly through public interface ++ self.assertTrue(new_wrapper.has_original_bytes) ++ self.assertEqual(new_wrapper.original_bytes, original_data) ++ ++ def test_wrap_certificate_function_without_original(self): ++ """Test the wrap_certificate factory function without original bytes.""" ++ wrapper = wrap_certificate(self.mock_cert) ++ ++ self.assertIsInstance(wrapper, CertificateWrapper) ++ self.assertFalse(wrapper.has_original_bytes) ++ self.assertIsNone(wrapper.original_bytes) ++ ++ def test_wrap_certificate_function_with_original(self): ++ """Test the wrap_certificate factory function with original bytes.""" ++ original_data = b"original_certificate_data" ++ wrapper = wrap_certificate(self.mock_cert, original_data) ++ ++ self.assertIsInstance(wrapper, CertificateWrapper) ++ self.assertTrue(wrapper.has_original_bytes) ++ self.assertEqual(wrapper.original_bytes, original_data) ++ ++ def test_real_malformed_certificate_handling(self): ++ """Test with a real malformed certificate that requires pyasn1 re-encoding.""" ++ # This test simulates the scenario where a malformed certificate is processed ++ ++ # Mock the scenario where cryptography fails but pyasn1 succeeds ++ mock_reencoded_cert = Mock(spec=cryptography.x509.Certificate) ++ mock_reencoded_cert.subject = Mock() ++ mock_reencoded_cert.subject.__str__ = Mock(return_value="CN=Nuvoton TPM") ++ ++ # Create wrapper as if it came from the certificate loading process ++ wrapper = wrap_certificate(mock_reencoded_cert, self.malformed_cert_der) ++ ++ # Test that original bytes are preserved ++ self.assertTrue(wrapper.has_original_bytes) ++ self.assertEqual(wrapper.original_bytes, self.malformed_cert_der) ++ ++ # Test DER output uses original bytes ++ der_output = wrapper.public_bytes(Encoding.DER) ++ self.assertEqual(der_output, self.malformed_cert_der) ++ ++ # Test PEM output is derived from original bytes ++ pem_output = wrapper.public_bytes(Encoding.PEM) ++ self.assertIsInstance(pem_output, bytes) ++ ++ # Verify PEM can be converted back to original DER ++ pem_str = pem_output.decode("utf-8") ++ lines = pem_str.strip().split("\n") ++ content = "".join(lines[1:-1]) ++ recovered_der = base64.b64decode(content) ++ self.assertEqual(recovered_der, self.malformed_cert_der) ++ ++ def test_unsupported_encoding_fallback(self): ++ """Test that unsupported encoding types fall back to wrapped certificate.""" ++ # Create a custom encoding that's not DER or PEM ++ custom_encoding = Mock() ++ custom_encoding.name = "CUSTOM" ++ ++ original_data = b"original_data" ++ wrapper = 
CertificateWrapper(self.mock_cert, original_data) ++ ++ # Should fall back to wrapped certificate for unknown encoding ++ wrapper.public_bytes(custom_encoding) ++ self.mock_cert.public_bytes.assert_called_once_with(custom_encoding) ++ ++ def test_malformed_certificate_cryptography_failure_and_verification(self): ++ """ ++ Comprehensive test demonstrating that the malformed certificate: ++ 1. Fails to load with python-cryptography ++ 2. Can be verified with OpenSSL ++ 3. Is successfully handled by our wrapper after pyasn1 re-encoding ++ """ ++ # Test 1: Demonstrate that python-cryptography fails to load the malformed certificate ++ with self.assertRaises(Exception) as context: ++ cryptography.x509.load_der_x509_certificate(self.malformed_cert_der) ++ ++ # The specific exception type may vary, but it should fail ++ self.assertIsInstance(context.exception, Exception) ++ ++ # Test 2: Demonstrate that pyasn1 can handle the malformed certificate ++ try: ++ # Decode and re-encode using pyasn1 (simulating what the Certificate type does) ++ pyasn1_cert = pyasn1_decoder.decode(self.malformed_cert_der, asn1Spec=pyasn1_rfc2459.Certificate())[0] ++ reencoded_der = pyasn1_encoder.encode(pyasn1_cert) ++ ++ # Now cryptography should be able to load the re-encoded certificate ++ reencoded_cert = cryptography.x509.load_der_x509_certificate(reencoded_der) ++ self.assertIsNotNone(reencoded_cert) ++ ++ except Exception as e: ++ self.fail(f"pyasn1 should handle the malformed certificate, but got: {e}") ++ ++ # Test 3: Verify that our wrapper preserves the original bytes correctly ++ wrapper = wrap_certificate(reencoded_cert, self.malformed_cert_der) ++ ++ # The wrapper should preserve original bytes ++ self.assertTrue(wrapper.has_original_bytes) ++ self.assertEqual(wrapper.original_bytes, self.malformed_cert_der) ++ ++ # DER output should use original bytes ++ der_output = wrapper.public_bytes(Encoding.DER) ++ self.assertEqual(der_output, self.malformed_cert_der) ++ ++ # PEM output should be derived from original bytes ++ pem_output = wrapper.public_bytes(Encoding.PEM) ++ pem_str = pem_output.decode("utf-8") ++ ++ # Verify PEM format is correct ++ self.assertTrue(pem_str.startswith("-----BEGIN CERTIFICATE-----")) ++ self.assertTrue(pem_str.endswith("-----END CERTIFICATE-----\n")) ++ ++ # Test 4: Demonstrate OpenSSL can verify the certificate structure ++ # (Even without the root CA, OpenSSL should be able to parse the certificate) ++ try: ++ with tempfile.NamedTemporaryFile(mode="wb", suffix=".der", delete=False) as temp_file: ++ temp_file.write(self.malformed_cert_der) ++ temp_file.flush() ++ ++ # Use OpenSSL to parse the certificate (should succeed) ++ result = subprocess.run( ++ ["openssl", "x509", "-in", temp_file.name, "-inform", "DER", "-text", "-noout"], ++ capture_output=True, ++ text=True, ++ check=False, ++ ) ++ ++ # OpenSSL should successfully parse the certificate ++ self.assertEqual(result.returncode, 0) ++ self.assertIn("Nuvoton TPM Root CA 2111", result.stdout) ++ self.assertIn("Certificate:", result.stdout) ++ ++ except (subprocess.CalledProcessError, FileNotFoundError) as e: ++ # Skip if OpenSSL is not available, but don't fail the test ++ self.skipTest(f"OpenSSL not available for verification test: {e}") ++ ++ # Test 5: Verify certificate details are accessible through wrapper ++ # The subject should be empty (as shown in the OpenSSL output) ++ self.assertEqual(len(reencoded_cert.subject), 0) ++ ++ # The issuer should contain Nuvoton information ++ issuer_attrs = {} ++ for attr in 
reencoded_cert.issuer: ++ # Use dotted string representation to avoid accessing private _name ++ oid_name = attr.oid.dotted_string ++ if oid_name == "2.5.4.3": # Common Name OID ++ issuer_attrs["commonName"] = attr.value ++ self.assertIn("commonName", issuer_attrs) ++ self.assertEqual(issuer_attrs["commonName"], "Nuvoton TPM Root CA 2111") ++ ++ # Test 6: Demonstrate that even re-encoded certificates may have parsing issues ++ # This shows why preserving original bytes is crucial ++ try: ++ # Try to access extensions - this may fail due to malformed ASN.1 ++ extensions = list(reencoded_cert.extensions) ++ # If it succeeds, verify it has the expected Subject Alternative Name ++ # Subject Alternative Name OID is 2.5.29.17 ++ has_subject_alt_name = any(ext.oid.dotted_string == "2.5.29.17" for ext in extensions) ++ self.assertTrue(has_subject_alt_name, "EK certificate should have Subject Alternative Name extension") ++ except (ValueError, Exception) as e: ++ # This is actually expected for malformed certificates! ++ # Even after pyasn1 re-encoding, some parsing issues may remain ++ self.assertIn("parsing asn1", str(e).lower(), f"Expected ASN.1 parsing error, got: {e}") ++ # This demonstrates why our wrapper preserves original bytes - ++ # they maintain signature validity even when parsing has issues ++ ++ def test_certificate_chain_verification_simulation(self): ++ """ ++ Test that simulates certificate chain verification where original bytes matter. ++ This demonstrates why preserving original bytes is crucial for signature validation. ++ """ ++ # Create a wrapper with the malformed certificate ++ mock_reencoded_cert = Mock(spec=cryptography.x509.Certificate) ++ mock_reencoded_cert.subject = Mock() ++ mock_reencoded_cert.public_key.return_value = Mock() ++ ++ wrapper = wrap_certificate(mock_reencoded_cert, self.malformed_cert_der) ++ ++ # Simulate signature verification scenario ++ # In real verification, the signature is computed over the exact DER bytes ++ original_bytes_for_verification = wrapper.public_bytes(Encoding.DER) ++ ++ # Should get the original malformed bytes (preserving signature validity) ++ self.assertEqual(original_bytes_for_verification, self.malformed_cert_der) ++ ++ # If we didn't preserve original bytes, we'd get re-encoded bytes which would ++ # invalidate the signature even though the certificate content is the same ++ mock_reencoded_cert.public_bytes.return_value = b"reencoded_different_bytes" ++ ++ # Verify that using the wrapper gets original bytes, not re-encoded bytes ++ self.assertNotEqual(original_bytes_for_verification, b"reencoded_different_bytes") ++ self.assertEqual(original_bytes_for_verification, self.malformed_cert_der) ++ ++ ++if __name__ == "__main__": ++ unittest.main() +diff --git a/test/test_registrar_agent_cert_compliance.py b/test/test_registrar_agent_cert_compliance.py +new file mode 100644 +index 0000000..ede9b9f +--- /dev/null ++++ b/test/test_registrar_agent_cert_compliance.py +@@ -0,0 +1,289 @@ ++""" ++Integration tests for RegistrarAgent certificate compliance functionality. ++ ++This module tests the simplified certificate compliance checking methods ++to ensure they work correctly with the new CertificateWrapper-based approach. 
++""" ++ ++import types ++import unittest ++from unittest.mock import Mock, patch ++ ++import cryptography.x509 ++ ++from keylime.certificate_wrapper import wrap_certificate ++from keylime.models.base.types.certificate import Certificate ++from keylime.models.registrar.registrar_agent import RegistrarAgent ++ ++ ++class TestRegistrarAgentCertCompliance(unittest.TestCase): ++ """Test cases for RegistrarAgent certificate compliance methods.""" ++ ++ # pylint: disable=protected-access,not-callable # Testing protected methods and dynamic method binding ++ ++ def setUp(self): ++ """Set up test fixtures.""" ++ # Create a test certificate ++ self.valid_cert_pem = """-----BEGIN CERTIFICATE----- ++MIIEnzCCA4egAwIBAgIEMV64bDANBgkqhkiG9w0BAQUFADBtMQswCQYDVQQGEwJE ++RTEQMA4GA1UECBMHQmF2YXJpYTEhMB8GA1UEChMYSW5maW5lb24gVGVjaG5vbG9n ++aWVzIEFHMQwwCgYDVQQLEwNBSU0xGzAZBgNVBAMTEklGWCBUUE0gRUsgUm9vdCBD ++QTAeFw0wNTEwMjAxMzQ3NDNaFw0yNTEwMjAxMzQ3NDNaMHcxCzAJBgNVBAYTAkRF ++MQ8wDQYDVQQIEwZTYXhvbnkxITAfBgNVBAoTGEluZmluZW9uIFRlY2hub2xvZ2ll ++cyBBRzEMMAoGA1UECxMDQUlNMSYwJAYDVQQDEx1JRlggVFBNIEVLIEludGVybWVk ++aWF0ZSBDQSAwMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALftPhYN ++t4rE+JnU/XOPICbOBLvfo6iA7nuq7zf4DzsAWBdsZEdFJQfaK331ihG3IpQnlQ2i ++YtDim289265f0J4OkPFpKeFU27CsfozVaNUm6UR/uzwA8ncxFc3iZLRMRNLru/Al ++VG053ULVDQMVx2iwwbBSAYO9pGiGbk1iMmuZaSErMdb9v0KRUyZM7yABiyDlM3cz ++UQX5vLWV0uWqxdGoHwNva5u3ynP9UxPTZWHZOHE6+14rMzpobs6Ww2RR8BgF96rh ++4rRAZEl8BXhwiQq4STvUXkfvdpWH4lzsGcDDtrB6Nt3KvVNvsKz+b07Dk+Xzt+EH ++NTf3Byk2HlvX+scCAwEAAaOCATswggE3MB0GA1UdDgQWBBQ4k8292HPEIzMV4bE7 ++qWoNI8wQxzAOBgNVHQ8BAf8EBAMCAgQwEgYDVR0TAQH/BAgwBgEB/wIBADBYBgNV ++HSABAf8ETjBMMEoGC2CGSAGG+EUBBy8BMDswOQYIKwYBBQUHAgEWLWh0dHA6Ly93 ++d3cudmVyaXNpZ24uY29tL3JlcG9zaXRvcnkvaW5kZXguaHRtbDCBlwYDVR0jBIGP ++MIGMgBRW65FEhWPWcrOu1EWWC/eUDlRCpqFxpG8wbTELMAkGA1UEBhMCREUxEDAO ++BgNVBAgTB0JhdmFyaWExITAfBgNVBAoTGEluZmluZW9uIFRlY2hub2xvZ2llcyBB ++RzEMMAoGA1UECxMDQUlNMRswGQYDVQQDExJJRlggVFBNIEVLIFJvb3QgQ0GCAQMw ++DQYJKoZIhvcNAQEFBQADggEBABJ1+Ap3rNlxZ0FW0aIgdzktbNHlvXWNxFdYIBbM ++OKjmbOos0Y4O60eKPu259XmMItCUmtbzF3oKYXq6ybARUT2Lm+JsseMF5VgikSlU ++BJALqpKVjwAds81OtmnIQe2LSu4xcTSavpsL4f52cUAu/maMhtSgN9mq5roYptq9 ++DnSSDZrX4uYiMPl//rBaNDBflhJ727j8xo9CCohF3yQUoQm7coUgbRMzyO64yMIO ++3fhb+Vuc7sNwrMOz3VJN14C3JMoGgXy0c57IP/kD5zGRvljKEvrRC2I147+fPeLS ++DueRMS6lblvRKiZgmGAg7YaKOkOaEmVDMQ+fTo2Po7hI5wc= ++-----END CERTIFICATE-----""" ++ ++ self.valid_cert = cryptography.x509.load_pem_x509_certificate(self.valid_cert_pem.encode()) ++ ++ # Malformed certificate that actually requires pyasn1 re-encoding ++ self.malformed_cert_b64 = ( ++ "MIIDUjCCAvegAwIBAgILAI5xYHQ14nH5hdYwCgYIKoZIzj0EAwIwVTFTMB8GA1UEAxMYTnV2b3Rv" ++ "biBUUE0gUm9vdCBDQSAyMTExMCUGA1UEChMeTnV2b3RvbiBUZWNobm9sb2d5IENvcnBvcmF0aW9u" ++ "MAkGA1UEBhMCVFcwHhcNMTkwNzIzMTcxNTEzWhcNMzkwNzE5MTcxNTEzWjAAMIIBIjANBgkqhkiG" ++ "9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk8kCj7srY/Zlvm1795fVXdyX44w5qsd1m5VywMDgSOavzPKO" ++ "kgbHgQNx6Ak5+4Q43EJ/5qsaDBv59F8W7K69maUwcMNq1xpuq0V/LiwgJVAtc3CdvlxtwQrn7+Uq" ++ "ieIGf+i8sGxpeUCSmYHJPTHNHqjQnvUtdGoy/+WO0i7WsAvX3k/gHHr4p58a8urjJ1RG2Lk1g48D" ++ "ESwl+D7atQEPWzgjr6vK/s5KpLrn7M+dh97TUbG1510AOWBPP35MtT8IZbqC4hs2Ol16gT1M3a9e" ++ "+GaMZkItLUwV76vKDNEgTZG8M1C9OItA/xwzlfXbPepzpxWb4kzHS4qZoQtl4vBZrQIDAQABo4IB" ++ "NjCCATIwUAYDVR0RAQH/BEYwRKRCMEAxPjAUBgVngQUCARMLaWQ6NEU1NDQzMDAwEAYFZ4EFAgIT" ++ "B05QQ1Q3NXgwFAYFZ4EFAgMTC2lkOjAwMDcwMDAyMAwGA1UdEwEB/wQCMAAwEAYDVR0lBAkwBwYF" ++ "Z4EFCAEwHwYDVR0jBBgwFoAUI/TiKtO+N0pEl3KVSqKDrtdSVy4wDgYDVR0PAQH/BAQDAgUgMCIG" ++ 
"A1UdCQQbMBkwFwYFZ4EFAhAxDjAMDAMyLjACAQACAgCKMGkGCCsGAQUFBwEBBF0wWzBZBggrBgEF" ++ "BQcwAoZNaHR0cHM6Ly93d3cubnV2b3Rvbi5jb20vc2VjdXJpdHkvTlRDLVRQTS1FSy1DZXJ0L051" ++ "dm90b24gVFBNIFJvb3QgQ0EgMjExMS5jZXIwCgYIKoZIzj0EAwIDSQAwRgIhAPHOFiBDZd0dfml2" ++ "a/KlPFhmX7Ahpd0Wq11ZUW1/ixviAiEAlex8BB5nsR6w8QrANwCxc7fH/YnbjXfMCFiWzeZH7ps=" ++ ) ++ ++ # Create wrapped certificates for testing using Certificate type to ensure proper behavior ++ cert_type = Certificate() ++ ++ # Create compliant certificate (no original bytes needed) ++ self.compliant_wrapped_cert = wrap_certificate(self.valid_cert, None) ++ ++ # Create non-compliant certificate using the malformed cert data ++ self.non_compliant_wrapped_cert = cert_type.cast(self.malformed_cert_b64) ++ ++ def create_mock_registrar_agent(self): ++ """Create a mock RegistrarAgent with necessary attributes.""" ++ agent = Mock() ++ agent.changes = {} ++ agent.values = {} ++ agent._add_error = Mock() ++ ++ # Bind the actual methods to the mock instance ++ agent._check_cert_compliance = types.MethodType(RegistrarAgent._check_cert_compliance, agent) ++ agent._check_all_cert_compliance = types.MethodType(RegistrarAgent._check_all_cert_compliance, agent) ++ ++ return agent ++ ++ def test_check_cert_compliance_no_new_cert(self): ++ """Test _check_cert_compliance when no new certificate is provided.""" ++ agent = self.create_mock_registrar_agent() ++ agent.changes = {} # No new certificate ++ ++ result = agent._check_cert_compliance("ekcert") ++ self.assertTrue(result) ++ agent._add_error.assert_not_called() ++ ++ def test_check_cert_compliance_same_cert(self): ++ """Test _check_cert_compliance when new cert is same as old cert.""" ++ agent = self.create_mock_registrar_agent() ++ agent.changes = {"ekcert": self.compliant_wrapped_cert} ++ agent.values = {"ekcert": self.compliant_wrapped_cert} ++ ++ result = agent._check_cert_compliance("ekcert") ++ self.assertTrue(result) ++ agent._add_error.assert_not_called() ++ ++ def test_check_cert_compliance_different_cert_same_der(self): ++ """Test _check_cert_compliance when certificates have same DER bytes.""" ++ agent = self.create_mock_registrar_agent() ++ # Create two different wrapper objects but with same underlying certificate ++ cert1 = wrap_certificate(self.valid_cert, None) ++ cert2 = wrap_certificate(self.valid_cert, None) ++ ++ agent.changes = {"ekcert": cert1} ++ agent.values = {"ekcert": cert2} ++ ++ result = agent._check_cert_compliance("ekcert") ++ self.assertTrue(result) ++ agent._add_error.assert_not_called() ++ ++ @patch("keylime.config.get") ++ def test_check_cert_compliance_compliant_cert(self, mock_config): ++ """Test _check_cert_compliance with ASN.1 compliant certificate.""" ++ mock_config.return_value = "warn" # Default action ++ ++ agent = self.create_mock_registrar_agent() ++ agent.changes = {"ekcert": self.compliant_wrapped_cert} ++ agent.values = {} # No old certificate ++ ++ result = agent._check_cert_compliance("ekcert") ++ self.assertTrue(result) ++ agent._add_error.assert_not_called() ++ ++ @patch("keylime.config.get") ++ def test_check_cert_compliance_non_compliant_cert_warn(self, mock_config): ++ """Test _check_cert_compliance with non-compliant certificate (warn mode).""" ++ mock_config.return_value = "warn" # Warn action ++ ++ agent = self.create_mock_registrar_agent() ++ agent.changes = {"ekcert": self.non_compliant_wrapped_cert} ++ agent.values = {} # No old certificate ++ ++ result = agent._check_cert_compliance("ekcert") ++ self.assertFalse(result) ++ agent._add_error.assert_not_called() 
# Should not add error in warn mode ++ ++ @patch("keylime.config.get") ++ def test_check_cert_compliance_non_compliant_cert_reject(self, mock_config): ++ """Test _check_cert_compliance with non-compliant certificate (reject mode).""" ++ mock_config.return_value = "reject" # Reject action ++ ++ agent = self.create_mock_registrar_agent() ++ agent.changes = {"ekcert": self.non_compliant_wrapped_cert} ++ agent.values = {} # No old certificate ++ ++ result = agent._check_cert_compliance("ekcert") ++ self.assertFalse(result) ++ agent._add_error.assert_called_once() # Should add error in reject mode ++ ++ @patch("keylime.config.get") ++ def test_check_all_cert_compliance_no_non_compliant(self, mock_config): ++ """Test _check_all_cert_compliance when all certificates are compliant.""" ++ mock_config.return_value = "warn" ++ ++ agent = self.create_mock_registrar_agent() ++ agent.changes = { ++ "ekcert": self.compliant_wrapped_cert, ++ "iak_cert": self.compliant_wrapped_cert, ++ } ++ agent.values = {} ++ ++ # Should not raise any exceptions or log warnings ++ with patch("keylime.models.registrar.registrar_agent.logger") as mock_logger: ++ agent._check_all_cert_compliance() ++ mock_logger.warning.assert_not_called() ++ mock_logger.error.assert_not_called() ++ ++ @patch("keylime.config.get") ++ def test_check_all_cert_compliance_with_non_compliant_warn(self, mock_config): ++ """Test _check_all_cert_compliance with non-compliant certificates (warn mode).""" ++ mock_config.return_value = "warn" ++ ++ agent = self.create_mock_registrar_agent() ++ agent.changes = { ++ "ekcert": self.non_compliant_wrapped_cert, ++ "iak_cert": self.compliant_wrapped_cert, ++ "idevid_cert": self.non_compliant_wrapped_cert, ++ } ++ agent.values = {} ++ ++ with patch("keylime.models.registrar.registrar_agent.logger") as mock_logger: ++ agent._check_all_cert_compliance() ++ # Should log warning for non-compliant certificates ++ mock_logger.warning.assert_called_once() ++ format_string = mock_logger.warning.call_args[0][0] ++ cert_names = mock_logger.warning.call_args[0][1] ++ self.assertIn("Certificate(s) %s may not conform", format_string) ++ self.assertEqual("'ekcert' and 'idevid_cert'", cert_names) ++ ++ @patch("keylime.config.get") ++ def test_check_all_cert_compliance_with_non_compliant_reject(self, mock_config): ++ """Test _check_all_cert_compliance with non-compliant certificates (reject mode).""" ++ mock_config.return_value = "reject" ++ ++ agent = self.create_mock_registrar_agent() ++ agent.changes = { ++ "ekcert": self.non_compliant_wrapped_cert, ++ "mtls_cert": self.non_compliant_wrapped_cert, ++ } ++ agent.values = {} ++ ++ with patch("keylime.models.registrar.registrar_agent.logger") as mock_logger: ++ agent._check_all_cert_compliance() ++ # Should log error for non-compliant certificates ++ mock_logger.error.assert_called_once() ++ format_string = mock_logger.error.call_args[0][0] ++ cert_names = mock_logger.error.call_args[0][1] ++ self.assertIn("Certificate(s) %s may not conform", format_string) ++ self.assertIn("were rejected due to config", format_string) ++ self.assertEqual("'ekcert' and 'mtls_cert'", cert_names) ++ ++ @patch("keylime.config.get") ++ def test_check_all_cert_compliance_ignore_mode(self, mock_config): ++ """Test _check_all_cert_compliance with ignore mode.""" ++ mock_config.return_value = "ignore" ++ ++ agent = self.create_mock_registrar_agent() ++ agent.changes = { ++ "ekcert": self.non_compliant_wrapped_cert, ++ "iak_cert": self.non_compliant_wrapped_cert, ++ } ++ agent.values = {} ++ ++ with 
patch("keylime.models.registrar.registrar_agent.logger") as mock_logger: ++ agent._check_all_cert_compliance() ++ # Should not log anything in ignore mode ++ mock_logger.warning.assert_not_called() ++ mock_logger.error.assert_not_called() ++ ++ def test_check_all_cert_compliance_single_non_compliant(self): ++ """Test _check_all_cert_compliance message formatting for single certificate.""" ++ agent = self.create_mock_registrar_agent() ++ agent.changes = {"ekcert": self.non_compliant_wrapped_cert} ++ agent.values = {} ++ ++ with patch("keylime.config.get", return_value="warn"): ++ with patch("keylime.models.registrar.registrar_agent.logger") as mock_logger: ++ agent._check_all_cert_compliance() ++ # Should format message correctly for single certificate ++ format_string = mock_logger.warning.call_args[0][0] ++ cert_names = mock_logger.warning.call_args[0][1] ++ self.assertIn("Certificate(s) %s may not conform", format_string) ++ self.assertEqual("'ekcert'", cert_names) ++ self.assertNotIn(" and", cert_names) # Should not have "and" for single cert ++ ++ def test_field_names_coverage(self): ++ """Test that all expected certificate field names are checked.""" ++ agent = self.create_mock_registrar_agent() ++ agent.changes = { ++ "ekcert": self.non_compliant_wrapped_cert, ++ "iak_cert": self.non_compliant_wrapped_cert, ++ "idevid_cert": self.non_compliant_wrapped_cert, ++ "mtls_cert": self.non_compliant_wrapped_cert, ++ } ++ agent.values = {} ++ ++ with patch("keylime.config.get", return_value="warn"): ++ with patch("keylime.models.registrar.registrar_agent.logger") as mock_logger: ++ agent._check_all_cert_compliance() ++ # Should check all four certificate fields ++ format_string = mock_logger.warning.call_args[0][0] ++ cert_names = mock_logger.warning.call_args[0][1] ++ self.assertIn("Certificate(s) %s may not conform", format_string) ++ expected_names = "'ekcert', 'iak_cert', 'idevid_cert' and 'mtls_cert'" ++ self.assertEqual(expected_names, cert_names) ++ ++ ++if __name__ == "__main__": ++ unittest.main() diff --git a/SOURCES/0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch b/SOURCES/0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch deleted file mode 100644 index 7b1ee26..0000000 --- a/SOURCES/0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch +++ /dev/null @@ -1,80 +0,0 @@ -From add9847988e963fd124863736592fc16cc8c716b Mon Sep 17 00:00:00 2001 -From: Stefan Berger -Date: Tue, 11 Jul 2023 18:03:28 -0400 -Subject: [PATCH 14/14] tpm_util: Replace a logger.error with an Exception in - case of invalid signature - -This fixes a possibly severe issue in 7.2.5 & 7.3.0. 
- -Signed-off-by: Stefan Berger ---- - keylime/tpm/tpm_util.py | 6 +----- - keylime/tpm/tpm_util_test.py | 21 +++++++++++++++++++++ - 2 files changed, 22 insertions(+), 5 deletions(-) - -diff --git a/keylime/tpm/tpm_util.py b/keylime/tpm/tpm_util.py -index ce2ce0f..58a1a04 100644 ---- a/keylime/tpm/tpm_util.py -+++ b/keylime/tpm/tpm_util.py -@@ -3,7 +3,6 @@ import string - import struct - from typing import Any, Dict, List, Optional, Tuple, Union - --from cryptography.exceptions import InvalidSignature - from cryptography.hazmat import backends - from cryptography.hazmat.primitives import hashes, hmac, serialization - from cryptography.hazmat.primitives.asymmetric import ec, padding -@@ -155,10 +154,7 @@ def checkquote( - digest.update(quoteblob) - quote_digest = digest.finalize() - -- try: -- verify(pubkey, signature, quote_digest, hashfunc) -- except InvalidSignature: -- logger.error("Invalid quote signature!") -+ verify(pubkey, signature, quote_digest, hashfunc) - - # Check that reported nonce is expected one - retDict = tpm2_objects.unmarshal_tpms_attest(quoteblob) -diff --git a/keylime/tpm/tpm_util_test.py b/keylime/tpm/tpm_util_test.py -index aaf16cd..2c73997 100644 ---- a/keylime/tpm/tpm_util_test.py -+++ b/keylime/tpm/tpm_util_test.py -@@ -2,6 +2,7 @@ import base64 - import unittest - from unittest import mock - -+from cryptography.exceptions import InvalidSignature - from cryptography.hazmat.primitives.asymmetric.ec import ( - SECP256R1, - EllipticCurve, -@@ -60,6 +61,26 @@ class TestTpmUtil(unittest.TestCase): - except Exception as e: - self.fail(f"checkquote failed with {e}") - -+ # test bad input -+ bad_quoteblob = bytearray(quoteblob) -+ bad_quoteblob[5] ^= 0x1 -+ with self.assertRaises(InvalidSignature): -+ checkquote(aikblob, nonce, sigblob, bad_quoteblob, pcrblob, "sha256") -+ -+ l = list(nonce) -+ l[0] = "a" -+ bad_nonce = "".join(l) -+ with self.assertRaises(Exception): -+ checkquote(aikblob, bad_nonce, sigblob, quoteblob, pcrblob, "sha256") -+ -+ bad_pcrblob = bytearray(pcrblob) -+ bad_pcrblob[5] ^= 0x1 -+ with self.assertRaises(Exception): -+ checkquote(aikblob, nonce, sigblob, quoteblob, bad_pcrblob, "sha256") -+ -+ with self.assertRaises(ValueError): -+ checkquote(aikblob, nonce, sigblob, quoteblob, pcrblob, "sha1") -+ - @staticmethod - def not_random(numbytes: int) -> bytes: - return b"\x12" * numbytes --- -2.41.0 - diff --git a/SOURCES/keylime.tmpfiles b/SOURCES/keylime.tmpfiles new file mode 100644 index 0000000..8aa7ecd --- /dev/null +++ b/SOURCES/keylime.tmpfiles @@ -0,0 +1,39 @@ +d /run/keylime 0700 keylime keylime - + +d /var/lib/keylime 0700 keylime keylime - + +d /etc/keylime 0500 keylime keylime - +d /etc/keylime/logging.conf.d 0500 keylime keylime - +d /etc/keylime/verifier.conf.d 0500 keylime keylime - +d /etc/keylime/registrar.conf.d 0500 keylime keylime - +d /etc/keylime/tenant.conf.d 0500 keylime keylime - +d /etc/keylime/agent.conf.d 0500 keylime keylime - + +# TPM certificate store. +# Copy the cert store from /usr/share/keylime/tpm_cert_store +# to /var/lib/keylime/tpm_cert_store. +# Files inside /var/lib/keylime/tpm_cert_store/ have +# 0400 permission and are owned by keylime/keylime, +# while /var/lib/keylime/tpm_cert_store/ itself has +# permission 0500, also owned by keylime/keylime. 
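+#
+# As a reminder of standard tmpfiles.d(5) semantics (not specific to
+# this package): "C" copies the source tree to the destination only if
+# the destination does not exist yet, "Z" recursively adjusts the mode
+# and ownership of a path and everything beneath it, and lowercase "z"
+# adjusts the named path only, without recursing. Hence the order
+# below: copy the store, tighten everything to 0400 recursively, then
+# re-open the directory itself to 0500.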
+C /var/lib/keylime/tpm_cert_store 0500 keylime keylime - /usr/share/keylime/tpm_cert_store +Z /var/lib/keylime/tpm_cert_store 0400 keylime keylime - +z /var/lib/keylime/tpm_cert_store 0500 keylime keylime - +# Finally, /var/lib/keylime itself has 0700 permission, +# and is owned by keylime/keylime. +z /var/lib/keylime 0700 keylime keylime - + +# Keylime configuration in /etc/keylime has permission 0400 +# owned by keylime/keylime, while snippet directories and +# the actual /etc/keylime directory have permission 0500, +# also owned by keylime/keylime. +Z /etc/keylime 0400 keylime keylime - +# Now fix the directories: +z /etc/keylime/ca.conf.d 0500 keylime keylime - +z /etc/keylime/logging.conf.d 0500 keylime keylime - +z /etc/keylime/verifier.conf.d 0500 keylime keylime - +z /etc/keylime/registrar.conf.d 0500 keylime keylime - +z /etc/keylime/tenant.conf.d 0500 keylime keylime - +z /etc/keylime/agent.conf.d 0500 keylime keylime - +# And finally, /etc/keylime itself. +z /etc/keylime 0500 keylime keylime - diff --git a/SPECS/keylime.spec b/SPECS/keylime.spec index 2ccec88..0ba90ce 100644 --- a/SPECS/keylime.spec +++ b/SPECS/keylime.spec @@ -1,5 +1,5 @@ %global srcname keylime -%global policy_version 1.2.0 +%global policy_version 42.1.2 %global with_selinux 1 %global selinuxtype targeted @@ -8,41 +8,60 @@ %global debug_package %{nil} Name: keylime -Version: 7.3.0 -Release: 13%{?dist} +Version: 7.12.1 +Release: 11%{?dist}.2 Summary: Open source TPM software for Bootstrapping and Maintaining Trust URL: https://github.com/keylime/keylime Source0: https://github.com/keylime/keylime/archive/refs/tags/v%{version}.tar.gz -Source1: %{srcname}.sysusers -Source2: https://github.com/RedHat-SP-Security/%{name}-selinux/archive/v%{policy_version}/keylime-selinux-%{policy_version}.tar.gz +Source1: https://github.com/RedHat-SP-Security/%{name}-selinux/archive/v%{policy_version}/keylime-selinux-%{policy_version}.tar.gz +Source2: %{srcname}.sysusers +Source3: %{srcname}.tmpfiles -Patch: 0001-Remove-usage-of-Required-NotRequired-typing_ext.patch -Patch: 0002-Allow-keylime_server_t-tcp-connect-to-several-domain.patch -Patch: 0003-Use-version-2.0-as-the-minimum-for-the-configuration.patch -Patch: 0004-Duplicate-str_to_version-for-the-upgrade-tool.patch -Patch: 0005-elchecking-example-add-ignores-for-EV_PLATFORM_CONFI.patch -Patch: 0006-Revert-mapping-changes.patch -Patch: 0007-Handle-session-close-using-a-session-manager.patch -Patch: 0008-verifier-should-read-parameters-from-verifier.conf-o.patch -Patch: 0009-CVE-2023-38201.patch -Patch: 0010-CVE-2023-38200.patch -Patch: 0011-Automatically-update-agent-API-version.patch -Patch: 0012-Restore-create-allowlist.patch -Patch: 0013-Set-generator-and-timestamp-in-create-policy.patch -Patch: 0014-tpm_util-Replace-a-logger.error-with-an-Exception-in.patch +Patch: 0001-Make-keylime-compatible-with-python-3.9.patch +Patch: 0002-tests-fix-rpm-repo-tests-from-create-runtime-policy.patch +Patch: 0003-tests-skip-measured-boot-related-tests-for-s390x-and.patch +Patch: 0004-templates-duplicate-str_to_version-in-the-adjust-scr.patch +# RHEL-9 ships a slightly modified version of create_allowlist.sh and +# also a "default" server_key_password for the registrar and verifier. +# DO NOT REMOVE THE FOLLOWING TWO PATCHES IN FOLLOWING RHEL-9.x REBASES. 
+Patch: 0005-Restore-RHEL-9-version-of-create_allowlist.sh.patch +Patch: 0006-Revert-default-server_key_password-for-verifier-regi.patch +# Backported from https://github.com/keylime/keylime/pull/1782 +Patch: 0007-fix_db_connection_leaks.patch + +# Backported from https://github.com/keylime/keylime/pull/1791 +Patch: 0008-mb-support-EV_EFI_HANDOFF_TABLES-events-on-PCR1.patch +Patch: 0009-mb-support-vendor_db-as-logged-by-newer-shim-version.patch + +# Backported from https://github.com/keylime/keylime/pull/1784 +# and https://github.com/keylime/keylime/pull/1785. +Patch: 0010-verifier-Gracefully-shutdown-on-signal.patch +Patch: 0011-revocations-Try-to-send-notifications-on-shutdown.patch +Patch: 0012-requests_client-close-the-session-at-the-end-of-the-.patch +Patch: 0013-fix-malformed-certs-workaround.patch License: ASL 2.0 and MIT BuildRequires: git-core -BuildRequires: swig BuildRequires: openssl-devel BuildRequires: python3-devel BuildRequires: python3-dbus BuildRequires: python3-jinja2 +BuildRequires: python3-cryptography +BuildRequires: python3-pyasn1 +BuildRequires: python3-pyasn1-modules +BuildRequires: python3-tornado +BuildRequires: python3-sqlalchemy +BuildRequires: python3-lark-parser +BuildRequires: python3-psutil +BuildRequires: python3-pyyaml +BuildRequires: python3-jsonschema BuildRequires: python3-setuptools BuildRequires: systemd-rpm-macros -BuildRequires: tpm2-abrmd-selinux +BuildRequires: rpm-sign +BuildRequires: createrepo_c +BuildRequires: tpm2-tools Requires: python3-%{srcname} = %{version}-%{release} Requires: %{srcname}-base = %{version}-%{release} @@ -67,8 +86,9 @@ License: MIT Requires(pre): python3-jinja2 Requires(pre): shadow-utils Requires(pre): util-linux +Requires(pre): tpm2-tss Requires: procps-ng -Requires: tpm2-tss +Requires: openssl %if 0%{?with_selinux} # This ensures that the *-selinux package and all it’s dependencies are not pulled @@ -77,6 +97,7 @@ Recommends: (%{srcname}-selinux if selinux-policy-%{selinuxtype}) %endif %ifarch %efi +BuildRequires: efivar-libs Requires: efivar-libs %endif @@ -103,6 +124,7 @@ Requires: python3-lark-parser Requires: python3-pyasn1 Requires: python3-pyasn1-modules Requires: python3-jsonschema +Requires: python3-psutil Requires: tpm2-tools Requires: openssl @@ -158,7 +180,7 @@ Requires: python3-%{srcname} = %{version}-%{release} The Keylime Tenant can be used to provision a Keylime Agent. %prep -%autosetup -S git -n %{srcname}-%{version} -a2 +%autosetup -S git -n %{srcname}-%{version} -a1 %if 0%{?with_selinux} # SELinux policy (originally from selinux-policy-contrib) @@ -176,7 +198,6 @@ bzip2 -9 %{srcname}.pp %py3_install mkdir -p %{buildroot}/%{_sharedstatedir}/%{srcname} mkdir -p --mode=0700 %{buildroot}/%{_rundir}/%{srcname} -mkdir -p --mode=0700 %{buildroot}/%{_localstatedir}/log/%{srcname} mkdir -p --mode=0700 %{buildroot}/%{_sysconfdir}/%{srcname}/ for comp in "verifier" "tenant" "registrar" "ca" "logging"; do @@ -216,22 +237,55 @@ install -Dpm 644 ./services/%{srcname}_verifier.service \ install -Dpm 644 ./services/%{srcname}_registrar.service \ %{buildroot}%{_unitdir}/%{srcname}_registrar.service -cp -r ./tpm_cert_store %{buildroot}%{_sharedstatedir}/%{srcname}/ -chmod 400 %{buildroot}%{_sharedstatedir}/%{srcname}/tpm_cert_store/*.pem +# TPM cert store is deployed to both /usr/share/keylime/tpm_cert_store +# and then /var/lib/keylime/tpm_cert_store. 
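+# (The copy under /usr/share/keylime serves as the pristine reference;
+# the tmpfiles.d configuration shipped in SOURCES/keylime.tmpfiles
+# recreates the /var/lib/keylime copy from it, via its "C" directive,
+# whenever that copy is missing.)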
+for cert_store_dir in %{_datadir} %{_sharedstatedir}; do
+    mkdir -p %{buildroot}/"${cert_store_dir}"/%{srcname}
+    cp -r ./tpm_cert_store %{buildroot}/"${cert_store_dir}"/%{srcname}/
+done
 
-install -p -d %{buildroot}/%{_tmpfilesdir}
-cat > %{buildroot}/%{_tmpfilesdir}/%{srcname}.conf << EOF
-d %{_rundir}/%{srcname} 0700 %{srcname} %{srcname} -
-EOF
+# Install the sysusers + tmpfiles.d configuration.
+install -p -D -m 0644 %{SOURCE2} %{buildroot}/%{_sysusersdir}/%{srcname}.conf
+install -p -D -m 0644 %{SOURCE3} %{buildroot}/%{_tmpfilesdir}/%{name}.conf
 
-install -p -D -m 0644 %{SOURCE1} %{buildroot}%{_sysusersdir}/%{srcname}.conf
+%check
+# Create the default configuration files to be used by the tests.
+# Also set the associated environment variables so that the tests
+# will actually use them.
+CONF_TEMP_DIR="$(mktemp -d)"
+
+%{python3} -m keylime.cmd.convert_config --out "${CONF_TEMP_DIR}" --templates templates/
+export KEYLIME_VERIFIER_CONFIG="${CONF_TEMP_DIR}/verifier.conf"
+export KEYLIME_TENANT_CONFIG="${CONF_TEMP_DIR}/tenant.conf"
+export KEYLIME_REGISTRAR_CONFIG="${CONF_TEMP_DIR}/registrar.conf"
+export KEYLIME_CA_CONFIG="${CONF_TEMP_DIR}/ca.conf"
+export KEYLIME_LOGGING_CONFIG="${CONF_TEMP_DIR}/logging.conf"
+
+# Run the tests.
+%{python3} -m unittest
+
+# Cleanup.
+[ "${CONF_TEMP_DIR}" ] && rm -rf "${CONF_TEMP_DIR}"
+for e in KEYLIME_VERIFIER_CONFIG \
+         KEYLIME_TENANT_CONFIG \
+         KEYLIME_REGISTRAR_CONFIG \
+         KEYLIME_CA_CONFIG \
+         KEYLIME_LOGGING_CONFIG; do
+    unset "${e}"
+done
+exit 0
 
 %pre base
-%sysusers_create_compat %{SOURCE1}
+%sysusers_create_compat %{SOURCE2}
 exit 0
 
 %post base
-/usr/bin/keylime_upgrade_config --component ca --component logging >/dev/null
+for c in ca logging; do
+    [ -e /etc/keylime/"${c}.conf" ] || continue
+    /usr/bin/keylime_upgrade_config --component "${c}" \
+        --input /etc/keylime/"${c}.conf" \
+        >/dev/null
+done
 exit 0
 
 %posttrans base
@@ -251,23 +305,29 @@ fi
 [ -d %{_sharedstatedir}/%{srcname}/tpm_cert_store ] && \
     chmod 400 %{_sharedstatedir}/%{srcname}/tpm_cert_store/*.pem && \
     chmod 500 %{_sharedstatedir}/%{srcname}/tpm_cert_store/
-
-[ -d %{_localstatedir}/log/%{srcname} ] && \
-    chown -R %{srcname} %{_localstatedir}/log/%{srcname}/
 exit 0
 
 %post verifier
-/usr/bin/keylime_upgrade_config --component verifier >/dev/null
+[ -e /etc/keylime/verifier.conf ] && \
+    /usr/bin/keylime_upgrade_config --component verifier \
+        --input /etc/keylime/verifier.conf \
+        >/dev/null
 %systemd_post %{srcname}_verifier.service
 exit 0
 
 %post registrar
-/usr/bin/keylime_upgrade_config --component registrar >/dev/null
+[ -e /etc/keylime/registrar.conf ] && \
+    /usr/bin/keylime_upgrade_config --component registrar \
+        --input /etc/keylime/registrar.conf \
+        >/dev/null
 %systemd_post %{srcname}_registrar.service
 exit 0
 
 %post tenant
-/usr/bin/keylime_upgrade_config --component tenant >/dev/null
+[ -e /etc/keylime/tenant.conf ] && \
+    /usr/bin/keylime_upgrade_config --component tenant \
+        --input /etc/keylime/tenant.conf \
+        >/dev/null
 exit 0
 
 %preun verifier
@@ -348,16 +408,19 @@ fi
 %{_bindir}/keylime_create_policy
 %{_bindir}/keylime_sign_runtime_policy
 %{_bindir}/keylime_userdata_encrypt
+%{_bindir}/keylime-policy
 
 %files base
 %license LICENSE
 %doc README.md
+%attr(500,%{srcname},%{srcname}) %dir %{_sysconfdir}/%{srcname}
 %attr(500,%{srcname},%{srcname}) %dir %{_sysconfdir}/%{srcname}/{ca,logging}.conf.d
 %config(noreplace) %verify(not md5 size mode mtime) %attr(400,%{srcname},%{srcname}) %{_sysconfdir}/%{srcname}/ca.conf
 %config(noreplace) %verify(not md5 size mode mtime) %attr(400,%{srcname},%{srcname}) %{_sysconfdir}/%{srcname}/logging.conf
 %attr(700,%{srcname},%{srcname}) %dir %{_rundir}/%{srcname}
-%attr(700,%{srcname},%{srcname}) %dir %{_localstatedir}/log/%{srcname}
 %attr(700,%{srcname},%{srcname}) %dir %{_sharedstatedir}/%{srcname}
+%attr(500,%{srcname},%{srcname}) %dir %{_datadir}/%{srcname}/tpm_cert_store
+%attr(400,%{srcname},%{srcname}) %{_datadir}/%{srcname}/tpm_cert_store/*.pem
 %attr(500,%{srcname},%{srcname}) %dir %{_sharedstatedir}/%{srcname}/tpm_cert_store
 %attr(400,%{srcname},%{srcname}) %{_sharedstatedir}/%{srcname}/tpm_cert_store/*.pem
 %{_tmpfilesdir}/%{srcname}.conf
@@ -371,6 +434,68 @@ fi
 %license LICENSE
 %changelog
+* Mon Sep 15 2025 Anderson Toshiyuki Sasaki <ansasaki@redhat.com> - 7.12.1-11.2
+- Properly fix the malformed certificate workaround
+  Resolves: RHEL-111244
+
+* Mon Aug 18 2025 Sergio Correia <scorreia@redhat.com> - 7.12.1-11
+- Fix for revocation notifier not closing TLS session correctly
+  Resolves: RHEL-109656
+
+* Wed Aug 13 2025 Sergio Correia <scorreia@redhat.com> - 7.12.1-10
+- Support vendor_db: follow-up fix
+  Related: RHEL-80455
+
+* Tue Aug 12 2025 Sergio Correia <scorreia@redhat.com> - 7.12.1-9
+- Support vendor_db as logged by newer shim versions
+  Resolves: RHEL-80455
+
+* Fri Aug 08 2025 Anderson Toshiyuki Sasaki <ansasaki@redhat.com> - 7.12.1-8
+- Fix DB connection leaks
+  Resolves: RHEL-108263
+
+* Tue Jul 22 2025 Sergio Correia <scorreia@redhat.com> - 7.12.1-7
+- Fix tmpfiles.d configuration related to the cert store
+  Resolves: RHEL-104572
+
+* Thu Jul 10 2025 Sergio Correia <scorreia@redhat.com> - 7.12.1-6
+- Populate cert_store_dir with tmpfiles.d
+  Resolves: RHEL-76926
+
+* Thu Jul 10 2025 Sergio Correia <scorreia@redhat.com> - 7.12.1-5
+- Use tmpfiles.d for permissions in /var/lib/keylime and /etc/keylime
+  Resolves: RHEL-77144
+
+* Tue Jul 08 2025 Patrik Koncity <pkoncity@redhat.com> - 7.12.1-4
+- Add new keylime-selinux release - removing keylime_var_log_t label
+  Resolves: RHEL-388
+
+* Fri Jun 20 2025 Anderson Toshiyuki Sasaki <ansasaki@redhat.com> - 7.12.1-3
+- Avoid changing ownership of /var/log/keylime
  Resolves: RHEL-388
+
+* Tue May 27 2025 Sergio Correia <scorreia@redhat.com> - 7.12.1-2
+- Revert changes to default server_key_password for verifier/registrar
+  Resolves: RHEL-93678
+
+* Thu May 22 2025 Sergio Correia <scorreia@redhat.com> - 7.12.1-1
+- Update to 7.12.1
+  Resolves: RHEL-78418
+
+* Wed Feb 05 2025 Sergio Correia <scorreia@redhat.com> - 7.3.0-15
+- Use TLS on revocation notification webhook
+- Include system installed CA certificates when verifying webhook
+  server certificate
+- Include the CA certificates added via configuration file option
+  'trusted_server_ca'
+  Resolves: RHEL-78057
+  Resolves: RHEL-78313
+  Resolves: RHEL-78316
+
+* Fri Jan 10 2025 Sergio Correia <scorreia@redhat.com> - 7.3.0-14
+- Backport keylime-policy tool
+  Resolves: RHEL-75797
+
 * Fri Jan 05 2024 Sergio Correia <scorreia@redhat.com> - 7.3.0-13
 - Backport fix for CVE-2023-3674
   Resolves: RHEL-21013