Updating for Keylime release v7.14.1

Resolves: RHEL-140896

Signed-off-by: Sergio Correia <scorreia@redhat.com>
This commit is contained in:
Sergio Correia 2026-02-13 12:57:58 +00:00
parent b0869a59e9
commit 85f1915f1c
No known key found for this signature in database
GPG Key ID: D0D219ED1F7E762C
23 changed files with 93 additions and 5252 deletions

2
.gitignore vendored
View File

@ -35,3 +35,5 @@
/v7.9.0.tar.gz
/v7.12.1.tar.gz
/keylime-selinux-42.1.2.tar.gz
/v7.14.1.tar.gz
/keylime-selinux-43.1.1.tar.gz

View File

@ -0,0 +1,40 @@
From 7cf07986522fda7691d9135ad4f8d31d030e8b59 Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Fri, 13 Feb 2026 04:46:20 -0500
Subject: [PATCH 1/2] Fix timestamp conversion to use UTC timezone
Ensure Unix timestamps are converted to UTC datetimes by passing
tz=timezone.utc to datetime.fromtimestamp(). Previously, timestamps
were converted using the local timezone, causing test failures when
epoch (0) was incorrectly converted to 1969 instead of 1970.
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/models/base/types/timestamp.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/keylime/models/base/types/timestamp.py b/keylime/models/base/types/timestamp.py
index 8f6782f..22c1fcb 100644
--- a/keylime/models/base/types/timestamp.py
+++ b/keylime/models/base/types/timestamp.py
@@ -36,7 +36,7 @@ class Timestamp(ModelType):
if not ts:
try:
- ts = datetime.fromtimestamp(float(value))
+ ts = datetime.fromtimestamp(float(value), tz=timezone.utc)
except ValueError:
pass
@@ -49,7 +49,7 @@ class Timestamp(ModelType):
return self._load_datetime(ts)
def _load_float(self, value: float) -> datetime:
- ts = datetime.fromtimestamp(value)
+ ts = datetime.fromtimestamp(value, tz=timezone.utc)
return self._load_datetime(ts)
def _load_int(self, value: int) -> datetime:
--
2.53.0

View File

@ -0,0 +1,36 @@
From be3243b5f4f3423b8e8e29245a2401e52dd52baf Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Fri, 13 Feb 2026 07:22:46 -0500
Subject: [PATCH 2/2] Fix efivar availability check in test_create_mb_policy
Import tpm_bootlog_enrich instead of the elparsing package so the
CDLL("libefivar.so.1") load is actually triggered, allowing tests
to skip gracefully when the library is absent.
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
test/test_create_mb_policy.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/test/test_create_mb_policy.py b/test/test_create_mb_policy.py
index 04ed779..fc79c3b 100644
--- a/test/test_create_mb_policy.py
+++ b/test/test_create_mb_policy.py
@@ -10,9 +10,12 @@ import unittest
from keylime.policy import create_mb_policy
-# Check if efivarlibs is available for measured boot parsing
+# Check if efivarlibs is available for measured boot parsing.
+# We need to import tpm_bootlog_enrich (not just the elparsing package)
+# because the CDLL("libefivar.so.1") load happens at module level in
+# tpm_bootlog_enrich, and importing just the package won't trigger it.
try:
- from keylime.mba import elparsing # pylint: disable=unused-import
+ from keylime.mba.elparsing import tpm_bootlog_enrich # pylint: disable=unused-import
EFIVAR_AVAILABLE = True
except Exception:
--
2.53.0

View File

@ -1,29 +0,0 @@
From 52944972182639a625599e29ebe65b91714a3a41 Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Fri, 8 Aug 2025 16:40:01 +0100
Subject: [PATCH 2/3] mb: support EV_EFI_HANDOFF_TABLES events on PCR1
Allow EV_EFI_HANDOFF_TABLES events on PCR1 alongside the existing
EV_EFI_HANDOFF_TABLES2 support to handle different firmware
implementations, in the example policy.
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/mba/elchecking/example.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/keylime/mba/elchecking/example.py b/keylime/mba/elchecking/example.py
index 2c6f699..a3d918a 100644
--- a/keylime/mba/elchecking/example.py
+++ b/keylime/mba/elchecking/example.py
@@ -185,6 +185,7 @@ class Example(policies.Policy):
# We only expect one EV_NO_ACTION event at the start.
dispatcher.set((0, "EV_NO_ACTION"), tests.OnceTest(tests.AcceptAll()))
dispatcher.set((1, "EV_CPU_MICROCODE"), tests.OnceTest(tests.AcceptAll()))
+ dispatcher.set((1, "EV_EFI_HANDOFF_TABLES"), tests.OnceTest(tests.AcceptAll()))
dispatcher.set((1, "EV_EFI_HANDOFF_TABLES2"), tests.OnceTest(tests.AcceptAll()))
dispatcher.set((0, "EV_S_CRTM_VERSION"), events_final.get("s_crtms"))
dispatcher.set((0, "EV_EFI_PLATFORM_FIRMWARE_BLOB"), events_final.get("platform_firmware_blobs"))
--
2.47.3

View File

@ -1,356 +0,0 @@
From 34bd283113f13c251114507315c647975beede2f Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Fri, 8 Aug 2025 16:41:54 +0100
Subject: [PATCH 3/3] mb: support vendor_db as logged by newer shim versions
- Updated example policy to properly handle different event structures
for vendor_db validation:
- KeySubsetMulti for EV_EFI_VARIABLE_DRIVER_CONFIG (has SignatureType field)
- SignatureSetMember for EV_EFI_VARIABLE_AUTHORITY (direct signature format)
- Added method to extract vendor_db from EV_EFI_VARIABLE_AUTHORITY events
in reference state generation (keylime-policy create measured-boot and
the legacy create_mb_refstate script)
- Made vendor_db optional for backward compatibility
This fixes attestation failures when vendor_db variables are present but
missing from reference states or validated with incorrect test types.
See: https://github.com/rhboot/shim/pull/728
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/mba/elchecking/example.py | 45 +++++++++
keylime/policy/create_mb_policy.py | 30 ++++++
scripts/create_mb_refstate | 30 ++++++
test/test_create_mb_policy.py | 142 +++++++++++++++++++++++++++++
4 files changed, 247 insertions(+)
diff --git a/keylime/mba/elchecking/example.py b/keylime/mba/elchecking/example.py
index a3d918a..5a933ac 100644
--- a/keylime/mba/elchecking/example.py
+++ b/keylime/mba/elchecking/example.py
@@ -21,6 +21,7 @@ from . import policies, tests
# kek - list of allowed KEK keys
# db - list of allowed db keys
# dbx - list of required dbx keys
+# vendor_db - list of allowed vendor_db keys (optional, for newer shim versions)
# mokdig - list of allowed digests of MoKList (PCR 14 EV_IPL)
# mokxdig - list of allowed digests of MoKListX (PCR 14 EV_IPL)
# kernels - list of allowed {
@@ -121,6 +122,10 @@ class Example(policies.Policy):
if req not in refstate:
raise Exception(f"refstate lacks {req}")
+ # vendor_db is optional for backward compatibility
+ if "vendor_db" not in refstate:
+ refstate["vendor_db"] = []
+
dispatcher = tests.Dispatcher(("PCRIndex", "EventType"))
vd_driver_config = tests.VariableDispatch()
vd_authority = tests.VariableDispatch()
@@ -268,6 +273,34 @@ class Example(policies.Policy):
"db",
db_test,
)
+ # Support vendor_db as logged by newer shim versions
+ # See: https://github.com/rhboot/shim/pull/728
+ if not has_secureboot and not refstate["vendor_db"]:
+ vendor_db_test = tests.OnceTest(tests.AcceptAll())
+ else:
+ vendor_db_test = tests.OnceTest(
+ tests.Or(
+ tests.KeySubsetMulti(
+ ["a159c0a5-e494-a74a-87b5-ab155c2bf072", "2616c4c1-4c50-9240-aca9-41f936934328"],
+ sigs_strip0x(refstate["vendor_db"]),
+ ),
+ tests.KeySubsetMulti(
+ ["a5c059a1-94e4-4aa7-87b5-ab155c2bf072", "c1c41626-504c-4092-aca9-41f936934328"],
+ sigs_strip0x(refstate["vendor_db"]),
+ ),
+ )
+ )
+
+ vd_driver_config.set(
+ "cbb219d7-3a3d-9645-a3bc-dad00e67656f",
+ "vendor_db",
+ vendor_db_test,
+ )
+ vd_driver_config.set(
+ "d719b2cb-3d3a-4596-a3bc-dad00e67656f",
+ "vendor_db",
+ vendor_db_test,
+ )
if not has_secureboot and not refstate["dbx"]:
dbx_test = tests.OnceTest(tests.AcceptAll())
@@ -295,6 +328,18 @@ class Example(policies.Policy):
vd_db_test = tests.OnceTest(tests.AcceptAll())
vd_authority.set("cbb219d7-3a3d-9645-a3bc-dad00e67656f", "db", vd_db_test)
vd_authority.set("d719b2cb-3d3a-4596-a3bc-dad00e67656f", "db", vd_db_test)
+ # Support vendor_db as logged by newer shim versions in EV_EFI_VARIABLE_AUTHORITY events
+ # See: https://github.com/rhboot/shim/pull/728
+ # EV_EFI_VARIABLE_AUTHORITY events have different structure than EV_EFI_VARIABLE_DRIVER_CONFIG
+ # They contain direct signature data without SignatureType field
+ if not has_secureboot and not refstate["vendor_db"]:
+ vendor_db_authority_test = tests.OnceTest(tests.AcceptAll())
+ else:
+ vendor_db_authority_test = tests.OnceTest(
+ tests.IterateTest(tests.SignatureSetMember(sigs_strip0x(refstate["vendor_db"])))
+ )
+ vd_authority.set("cbb219d7-3a3d-9645-a3bc-dad00e67656f", "vendor_db", vendor_db_authority_test)
+ vd_authority.set("d719b2cb-3d3a-4596-a3bc-dad00e67656f", "vendor_db", vendor_db_authority_test)
# Accept all SbatLevels of the Shim, because we already checked the hash of the Shim itself.
vd_sbat_level_test = tests.OnceTest(tests.AcceptAll())
vd_authority.set("50ab5d60-46e0-0043-abb6-3dd810dd8b23", "SbatLevel", vd_sbat_level_test)
diff --git a/keylime/policy/create_mb_policy.py b/keylime/policy/create_mb_policy.py
index 859e652..b2b48f7 100644
--- a/keylime/policy/create_mb_policy.py
+++ b/keylime/policy/create_mb_policy.py
@@ -93,6 +93,35 @@ def get_keys(events: List[Dict[str, Any]]) -> Dict[str, List[Any]]:
return out
+def get_vendor_db(events: List[Dict[str, Any]]) -> Dict[str, List[Any]]:
+ """Get vendor_db signatures from EV_EFI_VARIABLE_AUTHORITY events."""
+ out: Dict[str, List[Any]] = {"vendor_db": []}
+
+ for event in events:
+ if "EventType" not in event:
+ continue
+ if event["EventType"] != "EV_EFI_VARIABLE_AUTHORITY":
+ continue
+ if "Event" not in event or "UnicodeName" not in event["Event"]:
+ continue
+
+ event_name = event["Event"]["UnicodeName"].lower()
+ if event_name == "vendor_db":
+ data = None
+ if "VariableData" in event["Event"]:
+ data = event["Event"]["VariableData"]
+
+ if data is not None:
+ # VariableData for EV_EFI_VARIABLE_AUTHORITY is a list of signatures
+ for entry in data:
+ if "SignatureOwner" in entry and "SignatureData" in entry:
+ out["vendor_db"].append(
+ {"SignatureOwner": entry["SignatureOwner"], "SignatureData": f"0x{entry['SignatureData']}"}
+ )
+
+ return out
+
+
def get_kernel(events: List[Dict[str, Any]], secure_boot: bool) -> Dict[str, List[Dict[str, Any]]]:
"""Extract digest for Shim, Grub, Linux Kernel and initrd."""
out = []
@@ -259,6 +288,7 @@ def create_mb_refstate(args: argparse.Namespace) -> Optional[Dict[str, object]]:
}
],
**get_keys(events),
+ **get_vendor_db(events),
**get_mok(events),
**get_kernel(events, has_secureboot),
}
diff --git a/scripts/create_mb_refstate b/scripts/create_mb_refstate
index 23cafb9..c98e61d 100755
--- a/scripts/create_mb_refstate
+++ b/scripts/create_mb_refstate
@@ -78,6 +78,35 @@ def get_keys(events):
return out
+def get_vendor_db(events):
+ """Get vendor_db signatures from EV_EFI_VARIABLE_AUTHORITY events."""
+ out = {"vendor_db": []}
+
+ for event in events:
+ if "EventType" not in event:
+ continue
+ if event["EventType"] != "EV_EFI_VARIABLE_AUTHORITY":
+ continue
+ if "Event" not in event or "UnicodeName" not in event["Event"]:
+ continue
+
+ event_name = event["Event"]["UnicodeName"].lower()
+ if event_name == "vendor_db":
+ data = None
+ if "VariableData" in event["Event"]:
+ data = event["Event"]["VariableData"]
+
+ if data is not None:
+ # VariableData for EV_EFI_VARIABLE_AUTHORITY is a list of signatures
+ for entry in data:
+ if "SignatureOwner" in entry and "SignatureData" in entry:
+ out["vendor_db"].append(
+ {"SignatureOwner": entry["SignatureOwner"], "SignatureData": f"0x{entry['SignatureData']}"}
+ )
+
+ return out
+
+
def get_kernel(events, secure_boot):
"""
Extract digest for Shim, Grub, Linux Kernel and initrd.
@@ -197,6 +226,7 @@ def main():
}
],
**get_keys(events),
+ **get_vendor_db(events),
**get_mok(events),
**get_kernel(events, has_secureboot),
}
diff --git a/test/test_create_mb_policy.py b/test/test_create_mb_policy.py
index eaed0e3..aa7a4b9 100644
--- a/test/test_create_mb_policy.py
+++ b/test/test_create_mb_policy.py
@@ -362,6 +362,148 @@ class CreateMeasuredBootPolicy_Test(unittest.TestCase):
for c in test_cases:
self.assertDictEqual(create_mb_policy.get_mok(c["events"]), c["expected"])
+ def test_get_vendor_db(self):
+ test_cases = [
+ {"events": [], "expected": {"vendor_db": []}},
+ # No EV_EFI_VARIABLE_AUTHORITY events.
+ {
+ "events": [
+ {
+ "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
+ "Event": {"UnicodeName": "vendor_db", "VariableData": []},
+ }
+ ],
+ "expected": {"vendor_db": []},
+ },
+ # Good vendor_db event with EV_EFI_VARIABLE_AUTHORITY.
+ {
+ "events": [
+ {
+ "EventType": "EV_EFI_VARIABLE_AUTHORITY",
+ "Event": {
+ "UnicodeName": "vendor_db",
+ "VariableData": [
+ {
+ "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
+ "SignatureData": "sig-data-1",
+ }
+ ],
+ },
+ }
+ ],
+ "expected": {
+ "vendor_db": [
+ {"SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b", "SignatureData": "0xsig-data-1"}
+ ]
+ },
+ },
+ # Multiple vendor_db signatures.
+ {
+ "events": [
+ {
+ "EventType": "EV_EFI_VARIABLE_AUTHORITY",
+ "Event": {
+ "UnicodeName": "vendor_db",
+ "VariableData": [
+ {
+ "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
+ "SignatureData": "sig-data-1",
+ },
+ {
+ "SignatureOwner": "77fa9abd-0359-4d32-bd60-28f4e78f784b",
+ "SignatureData": "sig-data-2",
+ },
+ ],
+ },
+ }
+ ],
+ "expected": {
+ "vendor_db": [
+ {"SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b", "SignatureData": "0xsig-data-1"},
+ {"SignatureOwner": "77fa9abd-0359-4d32-bd60-28f4e78f784b", "SignatureData": "0xsig-data-2"},
+ ]
+ },
+ },
+ # Missing EventType.
+ {
+ "events": [
+ {
+ "Event": {
+ "UnicodeName": "vendor_db",
+ "VariableData": [
+ {
+ "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
+ "SignatureData": "sig-data-1",
+ }
+ ],
+ }
+ }
+ ],
+ "expected": {"vendor_db": []},
+ },
+ # Wrong EventType.
+ {
+ "events": [
+ {
+ "EventType": "EV_EFI_VARIABLE_DRIVER_CONFIG",
+ "Event": {
+ "UnicodeName": "vendor_db",
+ "VariableData": [
+ {
+ "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
+ "SignatureData": "sig-data-1",
+ }
+ ],
+ },
+ }
+ ],
+ "expected": {"vendor_db": []},
+ },
+ # Missing Event.
+ {
+ "events": [{"EventType": "EV_EFI_VARIABLE_AUTHORITY"}],
+ "expected": {"vendor_db": []},
+ },
+ # Missing UnicodeName.
+ {
+ "events": [
+ {
+ "EventType": "EV_EFI_VARIABLE_AUTHORITY",
+ "Event": {
+ "VariableData": [
+ {
+ "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
+ "SignatureData": "sig-data-1",
+ }
+ ]
+ },
+ }
+ ],
+ "expected": {"vendor_db": []},
+ },
+ # Wrong UnicodeName.
+ {
+ "events": [
+ {
+ "EventType": "EV_EFI_VARIABLE_AUTHORITY",
+ "Event": {
+ "UnicodeName": "db",
+ "VariableData": [
+ {
+ "SignatureOwner": "0223eddb-9079-4388-af77-2d65b1c35d3b",
+ "SignatureData": "sig-data-1",
+ }
+ ],
+ },
+ }
+ ],
+ "expected": {"vendor_db": []},
+ },
+ ]
+
+ for c in test_cases:
+ self.assertDictEqual(create_mb_policy.get_vendor_db(c["events"]), c["expected"])
+
def test_get_kernel(self):
test_cases = [
{"events": [], "secureboot": False, "expected": {}},
--
2.47.3

View File

@ -1,42 +0,0 @@
From c530c332321c1daffa5bfcd08754179012dd21cc Mon Sep 17 00:00:00 2001
From: Anderson Toshiyuki Sasaki <ansasaki@redhat.com>
Date: Mon, 18 Aug 2025 12:12:16 +0000
Subject: [PATCH 4/7] verifier: Gracefully shutdown on signal
Wait for the processes to finish when interrupted by a signal. Do not
call exit(0) in the signal handler.
Assisted-by: Claude 4 Sonnet
Signed-off-by: Anderson Toshiyuki Sasaki <ansasaki@redhat.com>
---
keylime/cloud_verifier_tornado.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/keylime/cloud_verifier_tornado.py b/keylime/cloud_verifier_tornado.py
index 7553ac8..7065661 100644
--- a/keylime/cloud_verifier_tornado.py
+++ b/keylime/cloud_verifier_tornado.py
@@ -2138,7 +2138,7 @@ def main() -> None:
revocation_notifier.stop_broker()
for p in processes:
p.join()
- sys.exit(0)
+ # Do not call sys.exit(0) here as it interferes with multiprocessing cleanup
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
@@ -2159,3 +2159,11 @@ def main() -> None:
process = Process(target=server_process, args=(task_id, active_agents))
process.start()
processes.append(process)
+
+ # Wait for all worker processes to complete
+ try:
+ for p in processes:
+ p.join()
+ except KeyboardInterrupt:
+ # Signal handler will take care of cleanup
+ pass
--
2.47.3

View File

@ -1,308 +0,0 @@
From 565889ab6c90823a5096e39a58e9599fa49072f6 Mon Sep 17 00:00:00 2001
From: Anderson Toshiyuki Sasaki <ansasaki@redhat.com>
Date: Wed, 23 Jul 2025 15:39:49 +0200
Subject: [PATCH 5/7] revocations: Try to send notifications on shutdown
During verifier shutdown, try to send any pending revocation
notification in a best-effort manner. In future, the pending revocation
notifications should be persisted to be processed during next startup.
Assisted-by: Claude 4 Sonnet
Signed-off-by: Anderson Toshiyuki Sasaki <ansasaki@redhat.com>
---
keylime/cloud_verifier_tornado.py | 7 +
keylime/revocation_notifier.py | 239 ++++++++++++++++++++++--------
2 files changed, 184 insertions(+), 62 deletions(-)
diff --git a/keylime/cloud_verifier_tornado.py b/keylime/cloud_verifier_tornado.py
index 7065661..89aa703 100644
--- a/keylime/cloud_verifier_tornado.py
+++ b/keylime/cloud_verifier_tornado.py
@@ -2109,6 +2109,10 @@ def main() -> None:
# Stop server to not accept new incoming connections
server.stop()
+ # Gracefully shutdown webhook workers to prevent connection errors
+ if "webhook" in revocation_notifier.get_notifiers():
+ revocation_notifier.shutdown_webhook_workers()
+
# Wait for all connections to be closed and then stop ioloop
async def stop() -> None:
await server.close_all_connections()
@@ -2136,6 +2140,9 @@ def main() -> None:
def sig_handler(*_: Any) -> None:
if run_revocation_notifier:
revocation_notifier.stop_broker()
+ # Gracefully shutdown webhook workers to prevent connection errors
+ if "webhook" in revocation_notifier.get_notifiers():
+ revocation_notifier.shutdown_webhook_workers()
for p in processes:
p.join()
# Do not call sys.exit(0) here as it interferes with multiprocessing cleanup
diff --git a/keylime/revocation_notifier.py b/keylime/revocation_notifier.py
index 5a7cc4b..c154028 100644
--- a/keylime/revocation_notifier.py
+++ b/keylime/revocation_notifier.py
@@ -18,6 +18,174 @@ broker_proc: Optional[Process] = None
_SOCKET_PATH = "/var/run/keylime/keylime.verifier.ipc"
+# Global webhook manager instance (initialized when needed)
+_webhook_manager: Optional["WebhookNotificationManager"] = None
+
+
+class WebhookNotificationManager:
+ """Manages webhook worker threads and graceful shutdown for revocation notifications."""
+
+ def __init__(self) -> None:
+ self._shutdown_event = threading.Event()
+ self._workers: Set[threading.Thread] = set()
+ self._workers_lock = threading.Lock()
+
+ def notify_webhook(self, tosend: Dict[str, Any]) -> None:
+ """Send webhook notification with worker thread management."""
+ url = config.get("verifier", "webhook_url", section="revocations", fallback="")
+ # Check if a url was specified
+ if url == "":
+ return
+
+ # Similarly to notify(), let's convert `tosend' to str to prevent
+ # possible issues with json handling by python-requests.
+ tosend = json.bytes_to_str(tosend)
+
+ def worker_webhook(tosend: Dict[str, Any], url: str) -> None:
+ is_shutdown_mode = False
+ try:
+ interval = config.getfloat("verifier", "retry_interval")
+ exponential_backoff = config.getboolean("verifier", "exponential_backoff")
+
+ max_retries = config.getint("verifier", "max_retries")
+ if max_retries <= 0:
+ logger.info("Invalid value found in 'max_retries' option for verifier, using default value")
+ max_retries = 5
+
+ # During shutdown, use fewer retries but still make best effort
+ if self._shutdown_event.is_set():
+ is_shutdown_mode = True
+ max_retries = min(max_retries, 3) # Reduce retries during shutdown but still try
+ logger.info(
+ "Shutdown mode: attempting to send critical revocation notification with %d retries",
+ max_retries,
+ )
+
+ # Get TLS options from the configuration
+ (cert, key, trusted_ca, key_password), verify_server_cert = web_util.get_tls_options(
+ "verifier", is_client=True, logger=logger
+ )
+
+ # Generate the TLS context using the obtained options
+ tls_context = web_util.generate_tls_context(
+ cert, key, trusted_ca, key_password, is_client=True, logger=logger
+ )
+
+ logger.info("Sending revocation event via webhook to %s ...", url)
+ for i in range(max_retries):
+ next_retry = retry.retry_time(exponential_backoff, interval, i, logger)
+
+ with RequestsClient(
+ url,
+ verify_server_cert,
+ tls_context,
+ ) as client:
+ try:
+ res = client.post("", json=tosend, timeout=5)
+ except requests.exceptions.SSLError as ssl_error:
+ if "TLSV1_ALERT_UNKNOWN_CA" in str(ssl_error):
+ logger.warning(
+ "Keylime does not recognize certificate from peer. Check if verifier 'trusted_server_ca' is configured correctly"
+ )
+
+ raise ssl_error from ssl_error
+ except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
+ # During shutdown, only suppress errors on the final attempt after all retries exhausted
+ if is_shutdown_mode and i == max_retries - 1:
+ logger.warning(
+ "Final attempt to send revocation notification failed during shutdown: %s", e
+ )
+ return
+ # Otherwise, let the retry logic handle it
+ raise e
+
+ if res and res.status_code in [200, 202]:
+ if is_shutdown_mode:
+ logger.info("Successfully sent revocation notification during shutdown")
+ break
+
+ logger.debug(
+ "Unable to publish revocation message %d times via webhook, "
+ "trying again in %d seconds. "
+ "Server returned status code: %s",
+ i + 1,
+ next_retry,
+ res.status_code,
+ )
+
+ # During shutdown, use shorter retry intervals to complete faster
+ if is_shutdown_mode:
+ next_retry = min(next_retry, 2.0) # Cap retry interval during shutdown
+
+ time.sleep(next_retry)
+
+ except Exception as e:
+ # Only suppress errors during final shutdown phase and log appropriately
+ if is_shutdown_mode:
+ logger.warning("Failed to send revocation notification during shutdown: %s", e)
+ else:
+ logger.error("Error in webhook worker: %s", e)
+ finally:
+ # Remove this worker from the active set
+ current_thread = threading.current_thread()
+ with self._workers_lock:
+ self._workers.discard(current_thread)
+
+ w = functools.partial(worker_webhook, tosend, url)
+ t = threading.Thread(target=w, daemon=True)
+
+ # Add this worker to the active set
+ with self._workers_lock:
+ self._workers.add(t)
+
+ t.start()
+
+ def shutdown_workers(self) -> None:
+ """Signal webhook workers to shut down gracefully and wait for them to complete.
+
+ This gives workers time to complete their critical revocation notifications
+ before the service shuts down completely.
+ """
+ logger.info("Shutting down webhook workers gracefully...")
+ self._shutdown_event.set()
+
+ # Give workers generous time to complete critical revocation notifications
+ timeout = 30.0 # Increased timeout for critical security notifications
+ end_time = time.time() + timeout
+
+ with self._workers_lock:
+ workers_to_wait = list(self._workers)
+
+ if workers_to_wait:
+ logger.info("Waiting for %d webhook workers to complete revocation notifications...", len(workers_to_wait))
+
+ for worker in workers_to_wait:
+ remaining_time = max(0, end_time - time.time())
+ if remaining_time > 0:
+ logger.debug(
+ "Waiting for webhook worker %s to complete (timeout: %.1f seconds)", worker.name, remaining_time
+ )
+ worker.join(timeout=remaining_time)
+ if worker.is_alive():
+ logger.warning("Webhook worker %s did not complete within timeout", worker.name)
+ else:
+ logger.warning("Timeout exceeded while waiting for webhook workers")
+ break
+
+ # Clean up completed workers
+ with self._workers_lock:
+ self._workers.clear()
+
+ logger.info("Webhook workers shutdown complete")
+
+
+def _get_webhook_manager() -> WebhookNotificationManager:
+ """Get the global webhook manager instance, creating it if needed."""
+ global _webhook_manager
+ if _webhook_manager is None:
+ _webhook_manager = WebhookNotificationManager()
+ return _webhook_manager
+
# return the revocation notification methods for cloud verifier
def get_notifiers() -> Set[str]:
@@ -83,6 +251,12 @@ def stop_broker() -> None:
broker_proc.kill() # pylint: disable=E1101
+def shutdown_webhook_workers() -> None:
+ """Convenience function to shutdown webhook workers using the global manager."""
+ manager = _get_webhook_manager()
+ manager.shutdown_workers()
+
+
def notify(tosend: Dict[str, Any]) -> None:
assert "zeromq" in get_notifiers()
try:
@@ -127,68 +301,9 @@ def notify(tosend: Dict[str, Any]) -> None:
def notify_webhook(tosend: Dict[str, Any]) -> None:
- url = config.get("verifier", "webhook_url", section="revocations", fallback="")
- # Check if a url was specified
- if url == "":
- return
-
- # Similarly to notify(), let's convert `tosend' to str to prevent
- # possible issues with json handling by python-requests.
- tosend = json.bytes_to_str(tosend)
-
- def worker_webhook(tosend: Dict[str, Any], url: str) -> None:
- interval = config.getfloat("verifier", "retry_interval")
- exponential_backoff = config.getboolean("verifier", "exponential_backoff")
-
- max_retries = config.getint("verifier", "max_retries")
- if max_retries <= 0:
- logger.info("Invalid value found in 'max_retries' option for verifier, using default value")
- max_retries = 5
-
- # Get TLS options from the configuration
- (cert, key, trusted_ca, key_password), verify_server_cert = web_util.get_tls_options(
- "verifier", is_client=True, logger=logger
- )
-
- # Generate the TLS context using the obtained options
- tls_context = web_util.generate_tls_context(cert, key, trusted_ca, key_password, is_client=True, logger=logger)
-
- logger.info("Sending revocation event via webhook to %s ...", url)
- for i in range(max_retries):
- next_retry = retry.retry_time(exponential_backoff, interval, i, logger)
-
- with RequestsClient(
- url,
- verify_server_cert,
- tls_context,
- ) as client:
- try:
- res = client.post("", json=tosend, timeout=5)
- except requests.exceptions.SSLError as ssl_error:
- if "TLSV1_ALERT_UNKNOWN_CA" in str(ssl_error):
- logger.warning(
- "Keylime does not recognize certificate from peer. Check if verifier 'trusted_server_ca' is configured correctly"
- )
-
- raise ssl_error from ssl_error
-
- if res and res.status_code in [200, 202]:
- break
-
- logger.debug(
- "Unable to publish revocation message %d times via webhook, "
- "trying again in %d seconds. "
- "Server returned status code: %s",
- i + 1,
- next_retry,
- res.status_code,
- )
-
- time.sleep(next_retry)
-
- w = functools.partial(worker_webhook, tosend, url)
- t = threading.Thread(target=w, daemon=True)
- t.start()
+ """Send webhook notification using the global webhook manager."""
+ manager = _get_webhook_manager()
+ manager.notify_webhook(tosend)
cert_key = None
--
2.47.3

View File

@ -1,45 +0,0 @@
From e6fb5090df3e35c7d44bc8f7f37d420d7ee8a05c Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Wed, 4 Jun 2025 19:52:37 +0100
Subject: [PATCH 6/7] requests_client: close the session at the end of the
resource manager
We had an issue in the past in which the webhook worker would not
properly close the opened session. This was fixed in #1456 (Close
session in worker_webhook function).
At some later point, in #1566 (revocation_notifier: Take into account CA
certificates added via configuration), some refactoring around the
webhook_worker() in revocation_notifier happened and it started using
the RequestsClient resource manager.
However, the RequestsClient does not close the session at its end, which
in turn means that the old issue of not properly closing the session
in webhook_worker() came back.
We now issue a session.close() at the end of the RequestsClient.
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/requests_client.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/keylime/requests_client.py b/keylime/requests_client.py
index 16615f7..b7da484 100644
--- a/keylime/requests_client.py
+++ b/keylime/requests_client.py
@@ -40,7 +40,10 @@ class RequestsClient:
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
- pass
+ try:
+ self.session.close()
+ except Exception:
+ pass
def request(self, method: str, url: str, **kwargs: Any) -> requests.Response:
return self.session.request(method, self.base_url + url, **kwargs)
--
2.47.3

View File

@ -1,91 +0,0 @@
From 39ea2efb72b383f729474a1583d4b8c097cf848a Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Thu, 6 Feb 2025 21:29:56 +0000
Subject: [PATCH 07/10] tests: change test_mba_parsing to not need keylime
installed
This test needs the verifier configuration file available, and on
systems that do not have keylime installed (hence, no config file),
it would fail.
This commit changes the test so that it creates a verifier conf file
in a temporary directory with default values, so that it can use it.
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
test/test_mba_parsing.py | 52 +++++++++++++++++++++++++++++-----------
1 file changed, 38 insertions(+), 14 deletions(-)
diff --git a/test/test_mba_parsing.py b/test/test_mba_parsing.py
index 670a602..4ee4e3b 100644
--- a/test/test_mba_parsing.py
+++ b/test/test_mba_parsing.py
@@ -1,27 +1,51 @@
import os
+import tempfile
import unittest
+from configparser import RawConfigParser
+from keylime import config
+from keylime.cmd import convert_config
from keylime.common.algorithms import Hash
from keylime.mba import mba
+TEMPLATES_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "templates"))
+
class TestMBAParsing(unittest.TestCase):
def test_parse_bootlog(self):
"""Test parsing binary measured boot event log"""
- mba.load_imports()
- # Use the file that triggered https://github.com/keylime/keylime/issues/1153
- mb_log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/mb_log.b64"))
- with open(mb_log_path, encoding="utf-8") as f:
- # Read the base64 input and remove the newlines
- b64 = "".join(f.read().splitlines())
- pcr_hashes, boot_aggregates, measurement_data, failure = mba.bootlog_parse(b64, Hash.SHA256)
-
- self.assertFalse(
- failure, f"Parsing of measured boot log failed with: {list(map(lambda x: x.context, failure.events))}"
- )
- self.assertTrue(isinstance(pcr_hashes, dict))
- self.assertTrue(isinstance(boot_aggregates, dict))
- self.assertTrue(isinstance(measurement_data, dict))
+ # This test requires the verifier configuration file, so let's create
+ # one with the default values to use, so that we do not depend on the
+ # configuration files existing in the test system.
+ with tempfile.TemporaryDirectory() as config_dir:
+ # Let's write the config file for the verifier.
+ verifier_config = convert_config.process_versions(["verifier"], TEMPLATES_DIR, RawConfigParser(), True)
+ convert_config.output(["verifier"], verifier_config, TEMPLATES_DIR, config_dir)
+
+ # As we want to use a config file from a different location, the
+ # proper way would be to define an environment variable for the
+ # module of interest, e.g. in our case it would be the
+ # KEYLIME_VERIFIER_CONFIG variable. However, the config module
+ # reads such env vars at first load, and there is no clean way
+ # to have it re-read them, so for this test we will override it
+ # manually.
+ config.CONFIG_ENV["verifier"] = os.path.abspath(os.path.join(config_dir, "verifier.conf"))
+
+ mba.load_imports()
+ # Use the file that triggered https://github.com/keylime/keylime/issues/1153
+ mb_log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/mb_log.b64"))
+ with open(mb_log_path, encoding="utf-8") as f:
+ # Read the base64 input and remove the newlines
+ b64 = "".join(f.read().splitlines())
+ pcr_hashes, boot_aggregates, measurement_data, failure = mba.bootlog_parse(b64, Hash.SHA256)
+
+ self.assertFalse(
+ failure,
+ f"Parsing of measured boot log failed with: {list(map(lambda x: x.context, failure.events))}",
+ )
+ self.assertTrue(isinstance(pcr_hashes, dict))
+ self.assertTrue(isinstance(boot_aggregates, dict))
+ self.assertTrue(isinstance(measurement_data, dict))
if __name__ == "__main__":
--
2.47.3

View File

@ -1,53 +0,0 @@
From 1496567e4b06f7a8eff9f758ea2e4e00ffa89f9b Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Wed, 4 Jun 2025 07:28:54 +0100
Subject: [PATCH 08/10] tests: skip measured-boot related tests for s390x and
ppc64le
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
test/test_create_mb_policy.py | 2 ++
test/test_mba_parsing.py | 2 ++
2 files changed, 4 insertions(+)
diff --git a/test/test_create_mb_policy.py b/test/test_create_mb_policy.py
index aa7a4b9..cd32bda 100644
--- a/test/test_create_mb_policy.py
+++ b/test/test_create_mb_policy.py
@@ -5,6 +5,7 @@ Copyright 2024 Red Hat, Inc.
import argparse
import os
+import platform
import unittest
from keylime.policy import create_mb_policy
@@ -12,6 +13,7 @@ from keylime.policy import create_mb_policy
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "data", "create-mb-policy"))
+@unittest.skipIf(platform.machine() in ["ppc64le", "s390x"], "ppc64le and s390x are not supported")
class CreateMeasuredBootPolicy_Test(unittest.TestCase):
def test_event_to_sha256(self):
test_cases = [
diff --git a/test/test_mba_parsing.py b/test/test_mba_parsing.py
index 4ee4e3b..82e6086 100644
--- a/test/test_mba_parsing.py
+++ b/test/test_mba_parsing.py
@@ -1,4 +1,5 @@
import os
+import platform
import tempfile
import unittest
from configparser import RawConfigParser
@@ -11,6 +12,7 @@ from keylime.mba import mba
TEMPLATES_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "templates"))
+@unittest.skipIf(platform.machine() in ["ppc64le", "s390x"], "ppc64le and s390x are not supported")
class TestMBAParsing(unittest.TestCase):
def test_parse_bootlog(self):
"""Test parsing binary measured boot event log"""
--
2.47.3

View File

@ -1,58 +0,0 @@
From be968fd54198042d2014ad63368b78e9d4609169 Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Thu, 22 May 2025 11:25:15 -0400
Subject: [PATCH 09/10] tests: fix rpm repo tests from create-runtime-policy
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
.../create-runtime-policy/setup-rpm-tests | 28 +++++++++++++------
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/test/data/create-runtime-policy/setup-rpm-tests b/test/data/create-runtime-policy/setup-rpm-tests
index 708438c..b62729b 100755
--- a/test/data/create-runtime-policy/setup-rpm-tests
+++ b/test/data/create-runtime-policy/setup-rpm-tests
@@ -217,20 +217,32 @@ create_rpm() {
# https://github.com/rpm-software-management/rpm/commit/96467dce18f264b278e17ffe1859c88d9b5aa4b6
_pkgname="DUMMY-${_name}-${_version}-${_rel}.noarch.rpm"
- _expected_pkg="${RPMSDIR}/noarch/${_pkgname}"
- [ -e "${_expected_pkg}" ] && return 0
+ # For some reason, it may not store the built package within the
+ # noarch directory, but directly in RPMS, so let's check both
+ # locations.
+ _expected_pkg="${RPMSDIR}/noarch/${_pkgname} ${RPMSDIR}/${_pkgname}"
+ for _expected in ${_expected_pkg}; do
+ if [ -e "${_expected}" ]; then
+ echo "(create_rpm) CREATED RPM: ${_expected}" >&2
+ return 0
+ fi
+ done
# OK, the package was not built where it should. Let us see if
# it was built in ~/rpmbuild instead, and if that is the case,
# copy it to the expected location.
- _bad_location_pkg="${HOME}/rpmbuild/RPMS/noarch/${_pkgname}"
- if [ -e "${_bad_location_pkg}" ]; then
- echo "WARNING: the package ${_pkgname} was built into ~/rpmbuild despite rpmbuild being instructed to build it at a different location. Probably a fallout from https://github.com/rpm-software-management/rpm/commit/96467dce" >&2
- install -D -m644 "${_bad_location_pkg}" "${_expected_pkg}"
- return 0
- fi
+ _bad_location_pkg="${HOME}/rpmbuild/RPMS/noarch/${_pkgname} ${HOME}/rpmbuild/RPMS/${_pkgname}"
+ for _bad_l in ${_bad_location_pkg}; do
+ if [ -e "${_bad_l}" ]; then
+ echo "WARNING: the package ${_pkgname} was built into ~/rpmbuild despite rpmbuild being instructed to build it at a different location. Probably a fallout from https://github.com/rpm-software-management/rpm/commit/96467dce" >&2
+ install -D -m644 "${_bad_l}" "${RPMSDIR}/noarch/${_pkgname}"
+ echo "(create_rpm) CREATED RPM: ${RPMSDIR}/noarch/${_pkgname}" >&2
+ return 0
+ fi
+ done
# Should not be here.
+ echo "create_rpm() ended with error; probably an issue with the location where the RPMs were built" >&2
return 1
}
--
2.47.3

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

View File

@ -1,37 +0,0 @@
From e9a6615ea3ab60b9248377071ea2f5cc7b45dfda Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Thu, 28 Aug 2025 14:33:59 +0100
Subject: [PATCH] policy/sign: use print() when writing to /dev/stdout
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/policy/sign_runtime_policy.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/keylime/policy/sign_runtime_policy.py b/keylime/policy/sign_runtime_policy.py
index 87529065d..316ee15aa 100644
--- a/keylime/policy/sign_runtime_policy.py
+++ b/keylime/policy/sign_runtime_policy.py
@@ -2,6 +2,7 @@
import argparse
import json
+import sys
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Optional
@@ -191,8 +192,12 @@ def sign_runtime_policy(args: argparse.Namespace) -> Optional[str]:
return None
try:
- with open(args.output_file, "wb") as f:
- f.write(signed_policy.encode("UTF-8"))
+ if args.output_file == "/dev/stdout":
+ # Let's simply print to stdout the regular way.
+ print(signed_policy, file=sys.stdout)
+ else:
+ with open(args.output_file, "wb") as f:
+ f.write(signed_policy.encode("UTF-8"))
except Exception as exc:
logger.error("Unable to write signed policy to destination file '%s': %s", args.output_file, exc)
return None

View File

@ -1,382 +0,0 @@
From 7a723f0938edf9ccc597507a4230922e9235cf18 Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Wed, 24 Sep 2025 07:20:53 +0100
Subject: [PATCH 13/18] algorithms: add support for specific ECC curve
algorithms
Extended the Encrypt enum to support specific ECC curves including:
- ecc192 (P-192)
- ecc224 (P-224)
- ecc256 (P-256)
- ecc384 (P-384)
- ecc521 (P-521)
This enables Keylime to accept and validate different ECC curves
for TPM attestation operations.
Also, when agent reports specific algorithm like 'ecc256' but tenant
configuration uses generic 'ecc', the is_accepted function now uses
bidirectional normalization to properly match algorithms.
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/common/algorithms.py | 28 +++-
test/test_algorithms.py | 275 ++++++++++++++++++++++++++++++++++-
2 files changed, 301 insertions(+), 2 deletions(-)
diff --git a/keylime/common/algorithms.py b/keylime/common/algorithms.py
index db12c26..bb22fb6 100644
--- a/keylime/common/algorithms.py
+++ b/keylime/common/algorithms.py
@@ -9,7 +9,18 @@ def is_accepted(algorithm: str, accepted: List[Any]) -> bool:
@param algorithm: algorithm to be checked
@param accepted: a list of acceptable algorithms
"""
- return algorithm in accepted
+ # Check direct match first.
+ if algorithm in accepted:
+ return True
+
+ # Check if any accepted algorithm normalizes to the same value as our algorithm
+ # This handles backwards compatibility cases like "ecc" accepting "ecc256".
+ normalized_algorithm = Encrypt.normalize(algorithm)
+ for accepted_alg in accepted:
+ if Encrypt.normalize(str(accepted_alg)) == normalized_algorithm:
+ return True
+
+ return False
class Hash(str, enum.Enum):
@@ -74,11 +85,26 @@ class Hash(str, enum.Enum):
class Encrypt(str, enum.Enum):
RSA = "rsa"
ECC = "ecc"
+ ECC192 = "ecc192"
+ ECC224 = "ecc224"
+ ECC256 = "ecc256"
+ ECC384 = "ecc384"
+ ECC521 = "ecc521"
@staticmethod
def is_recognized(algorithm: str) -> bool:
+ # Handle aliases to match agent behavior
+ if algorithm == "ecc":
+ algorithm = "ecc256" # Default ECC alias maps to P-256, same as the agent.
return algorithm in list(Encrypt)
+ @staticmethod
+ def normalize(algorithm: str) -> str:
+ """Normalize algorithm string to handle aliases, matching the agent behavior"""
+ if algorithm == "ecc":
+ return "ecc256" # Default ECC alias maps to P-256.
+ return algorithm
+
class Sign(str, enum.Enum):
RSASSA = "rsassa"
diff --git a/test/test_algorithms.py b/test/test_algorithms.py
index b5a29c7..8a31fa9 100644
--- a/test/test_algorithms.py
+++ b/test/test_algorithms.py
@@ -2,7 +2,7 @@ import os
import tempfile
import unittest
-from keylime.common.algorithms import Encrypt, Hash, Sign
+from keylime.common.algorithms import Encrypt, Hash, Sign, is_accepted
class TestHash(unittest.TestCase):
@@ -117,11 +117,88 @@ class TestEncrypt(unittest.TestCase):
"enc": "ecc",
"valid": True,
},
+ {
+ "enc": "ecc192",
+ "valid": True,
+ },
+ {
+ "enc": "ecc224",
+ "valid": True,
+ },
+ {
+ "enc": "ecc256",
+ "valid": True,
+ },
+ {
+ "enc": "ecc384",
+ "valid": True,
+ },
+ {
+ "enc": "ecc521",
+ "valid": True,
+ },
]
for c in test_cases:
self.assertEqual(Encrypt.is_recognized(c["enc"]), c["valid"], msg=f"enc = {c['enc']}")
+ def test_enum_membership(self):
+ """Test that all ECC curve algorithms are members of the Encrypt enum"""
+ self.assertTrue(Encrypt.RSA in Encrypt)
+ self.assertTrue(Encrypt.ECC in Encrypt)
+ self.assertTrue(Encrypt.ECC192 in Encrypt)
+ self.assertTrue(Encrypt.ECC224 in Encrypt)
+ self.assertTrue(Encrypt.ECC256 in Encrypt)
+ self.assertTrue(Encrypt.ECC384 in Encrypt)
+ self.assertTrue(Encrypt.ECC521 in Encrypt)
+
+ def test_normalize(self):
+ """Test the normalize method for handling ECC aliases"""
+ test_cases = [
+ {
+ "input": "ecc",
+ "expected": "ecc256",
+ },
+ {
+ "input": "ecc192",
+ "expected": "ecc192",
+ },
+ {
+ "input": "ecc224",
+ "expected": "ecc224",
+ },
+ {
+ "input": "ecc256",
+ "expected": "ecc256",
+ },
+ {
+ "input": "ecc384",
+ "expected": "ecc384",
+ },
+ {
+ "input": "ecc521",
+ "expected": "ecc521",
+ },
+ {
+ "input": "rsa",
+ "expected": "rsa",
+ },
+ ]
+
+ for c in test_cases:
+ self.assertEqual(Encrypt.normalize(c["input"]), c["expected"], msg=f"input = {c['input']}")
+
+ def test_normalize_ecc_alias_behavior(self):
+ """Test that ECC alias normalization matches agent behavior"""
+ # Test that "ecc" is recognized through alias handling
+ self.assertTrue(Encrypt.is_recognized("ecc"))
+
+ # Test that normalize converts "ecc" to "ecc256" (P-256)
+ self.assertEqual(Encrypt.normalize("ecc"), "ecc256")
+
+ # Test that direct ecc256 works
+ self.assertTrue(Encrypt.is_recognized("ecc256"))
+
class TestSign(unittest.TestCase):
def test_is_recognized(self):
@@ -158,3 +235,199 @@ class TestSign(unittest.TestCase):
for c in test_cases:
self.assertEqual(Sign.is_recognized(c["sign"]), c["valid"], msg=f"sign = {c['sign']}")
+
+
+class TestIsAccepted(unittest.TestCase):
+ def test_direct_algorithm_matching(self):
+ """Test that direct algorithm matches work correctly"""
+ test_cases = [
+ {
+ "algorithm": "ecc256",
+ "accepted": ["ecc256"],
+ "expected": True,
+ },
+ {
+ "algorithm": "rsa",
+ "accepted": ["rsa"],
+ "expected": True,
+ },
+ {
+ "algorithm": "ecc384",
+ "accepted": ["ecc256", "ecc384"],
+ "expected": True,
+ },
+ {
+ "algorithm": "ecc521",
+ "accepted": ["ecc256"],
+ "expected": False,
+ },
+ {
+ "algorithm": "unknown",
+ "accepted": ["rsa", "ecc256"],
+ "expected": False,
+ },
+ ]
+
+ for c in test_cases:
+ result = is_accepted(c["algorithm"], c["accepted"])
+ self.assertEqual(result, c["expected"], msg=f"algorithm='{c['algorithm']}', accepted={c['accepted']}")
+
+ def test_backwards_compatibility_ecc_normalization(self):
+ """Test backwards compatibility: 'ecc' in accepted list should accept specific ECC algorithms"""
+ test_cases = [
+ {
+ "algorithm": "ecc256",
+ "accepted": ["ecc"],
+ "expected": True,
+ "desc": "ecc256 should be accepted when 'ecc' is in accepted list",
+ },
+ {
+ "algorithm": "ecc384",
+ "accepted": ["ecc"],
+ "expected": False,
+ "desc": "ecc384 should NOT be accepted when only 'ecc' is in accepted list (ecc maps to ecc256)",
+ },
+ {
+ "algorithm": "ecc521",
+ "accepted": ["ecc"],
+ "expected": False,
+ "desc": "ecc521 should NOT be accepted when only 'ecc' is in accepted list",
+ },
+ {
+ "algorithm": "ecc192",
+ "accepted": ["ecc"],
+ "expected": False,
+ "desc": "ecc192 should NOT be accepted when only 'ecc' is in accepted list",
+ },
+ ]
+
+ for c in test_cases:
+ result = is_accepted(c["algorithm"], c["accepted"])
+ self.assertEqual(
+ result, c["expected"], msg=f"{c['desc']} - algorithm='{c['algorithm']}', accepted={c['accepted']}"
+ )
+
+ def test_forward_compatibility_ecc_normalization(self):
+ """Test forward compatibility: specific ECC in accepted list should accept 'ecc' algorithm"""
+ test_cases = [
+ {
+ "algorithm": "ecc",
+ "accepted": ["ecc256"],
+ "expected": True,
+ "desc": "ecc should be accepted when 'ecc256' is in accepted list (both normalize to ecc256)",
+ },
+ {
+ "algorithm": "ecc",
+ "accepted": ["ecc384"],
+ "expected": False,
+ "desc": "ecc should NOT be accepted when only 'ecc384' is in accepted list",
+ },
+ {
+ "algorithm": "ecc",
+ "accepted": ["ecc521"],
+ "expected": False,
+ "desc": "ecc should NOT be accepted when only 'ecc521' is in accepted list",
+ },
+ ]
+
+ for c in test_cases:
+ result = is_accepted(c["algorithm"], c["accepted"])
+ self.assertEqual(
+ result, c["expected"], msg=f"{c['desc']} - algorithm='{c['algorithm']}', accepted={c['accepted']}"
+ )
+
+ def test_bidirectional_algorithm_matching(self):
+ """Test bidirectional matching scenarios that happen in real usage"""
+ test_cases = [
+ {
+ "algorithm": "ecc256",
+ "accepted": ["rsa", "ecc"],
+ "expected": True,
+ "desc": "Agent reports ecc256, tenant config has generic 'ecc'",
+ },
+ {
+ "algorithm": "ecc",
+ "accepted": ["rsa", "ecc256"],
+ "expected": True,
+ "desc": "Agent reports generic 'ecc', tenant config has specific 'ecc256'",
+ },
+ {
+ "algorithm": "ecc384",
+ "accepted": ["rsa", "ecc"],
+ "expected": False,
+ "desc": "Agent reports ecc384, tenant has generic 'ecc' (should not match)",
+ },
+ {
+ "algorithm": "ecc",
+ "accepted": ["rsa", "ecc384"],
+ "expected": False,
+ "desc": "Agent reports generic 'ecc', tenant has ecc384 (should not match)",
+ },
+ ]
+
+ for c in test_cases:
+ result = is_accepted(c["algorithm"], c["accepted"])
+ self.assertEqual(
+ result, c["expected"], msg=f"{c['desc']} - algorithm='{c['algorithm']}', accepted={c['accepted']}"
+ )
+
+ def test_mixed_algorithm_types(self):
+ """Test mixing different algorithm types in accepted list"""
+ test_cases = [
+ {
+ "algorithm": "rsa",
+ "accepted": ["ecc", "rsa"],
+ "expected": True,
+ },
+ {
+ "algorithm": "ecc256",
+ "accepted": ["rsa", "ecc"],
+ "expected": True,
+ },
+ {
+ "algorithm": "ecc384",
+ "accepted": ["rsa", "ecc256", "ecc384"],
+ "expected": True,
+ },
+ {
+ "algorithm": "unknown",
+ "accepted": ["rsa", "ecc", "ecc384"],
+ "expected": False,
+ },
+ ]
+
+ for c in test_cases:
+ result = is_accepted(c["algorithm"], c["accepted"])
+ self.assertEqual(result, c["expected"], msg=f"algorithm='{c['algorithm']}', accepted={c['accepted']}")
+
+ def test_edge_cases(self):
+ """Test edge cases and boundary conditions"""
+ test_cases = [
+ {"algorithm": "", "accepted": ["ecc"], "expected": False, "desc": "Empty algorithm string"},
+ {"algorithm": "ecc256", "accepted": [], "expected": False, "desc": "Empty accepted list"},
+ {"algorithm": "ecc256", "accepted": [""], "expected": False, "desc": "Accepted list with empty string"},
+ {
+ "algorithm": "ECC256",
+ "accepted": ["ecc256"],
+ "expected": False,
+ "desc": "Case sensitivity - uppercase should not match",
+ },
+ {
+ "algorithm": "ecc256",
+ "accepted": ["ecc"],
+ "expected": True,
+ "desc": "ecc256 algorithm should match ecc in accepted list",
+ },
+ {
+ "algorithm": "ecc",
+ "accepted": ["ecc256"],
+ "expected": True,
+ "desc": "ecc algorithm should match ecc256 in accepted list",
+ },
+ ]
+
+ for c in test_cases:
+ result = is_accepted(c["algorithm"], c["accepted"])
+ self.assertEqual(
+ result, c["expected"], msg=f"{c['desc']} - algorithm='{c['algorithm']}', accepted={c['accepted']}"
+ )
--
2.47.3

View File

@ -1,87 +0,0 @@
From eecd2f73642f784b19cb1bb9c78c6d0b1e486dda Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Fri, 26 Sep 2025 00:03:49 +0100
Subject: [PATCH 14/18] algorithms: add support for specific RSA algorithms
Similar to the previous change for ECC, now we extend the Encrypt enum
to support the following specific RSA algorithms:
- RSA1024
- RSA2048
- RSA3072
- RSA4096
Map also 'rsa' to 'rsa2048' for backwards compatibility.
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/common/algorithms.py | 8 ++++++++
test/test_algorithms.py | 13 ++++++++++++-
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/keylime/common/algorithms.py b/keylime/common/algorithms.py
index bb22fb6..32a1ec1 100644
--- a/keylime/common/algorithms.py
+++ b/keylime/common/algorithms.py
@@ -84,6 +84,10 @@ class Hash(str, enum.Enum):
class Encrypt(str, enum.Enum):
RSA = "rsa"
+ RSA1024 = "rsa1024"
+ RSA2048 = "rsa2048"
+ RSA3072 = "rsa3072"
+ RSA4096 = "rsa4096"
ECC = "ecc"
ECC192 = "ecc192"
ECC224 = "ecc224"
@@ -96,6 +100,8 @@ class Encrypt(str, enum.Enum):
# Handle aliases to match agent behavior
if algorithm == "ecc":
algorithm = "ecc256" # Default ECC alias maps to P-256, same as the agent.
+ if algorithm == "rsa":
+ algorithm = "rsa2048" # Default RSA alias maps to RSA-2048, same as the agent.
return algorithm in list(Encrypt)
@staticmethod
@@ -103,6 +109,8 @@ class Encrypt(str, enum.Enum):
"""Normalize algorithm string to handle aliases, matching the agent behavior"""
if algorithm == "ecc":
return "ecc256" # Default ECC alias maps to P-256.
+ if algorithm == "rsa":
+ return "rsa2048" # Default RSA alias maps to RSA-2048.
return algorithm
diff --git a/test/test_algorithms.py b/test/test_algorithms.py
index 8a31fa9..5542c0f 100644
--- a/test/test_algorithms.py
+++ b/test/test_algorithms.py
@@ -181,7 +181,7 @@ class TestEncrypt(unittest.TestCase):
},
{
"input": "rsa",
- "expected": "rsa",
+ "expected": "rsa2048",
},
]
@@ -199,6 +199,17 @@ class TestEncrypt(unittest.TestCase):
# Test that direct ecc256 works
self.assertTrue(Encrypt.is_recognized("ecc256"))
+ def test_normalize_rsa_alias_behavior(self):
+ """Test that RSA alias normalization matches agent behavior"""
+ # Test that "rsa" is recognized through alias handling
+ self.assertTrue(Encrypt.is_recognized("rsa"))
+
+ # Test that normalize converts "rsa" to "rsa2048"
+ self.assertEqual(Encrypt.normalize("rsa"), "rsa2048")
+
+ # Test that direct rsa2048 works
+ self.assertTrue(Encrypt.is_recognized("rsa2048"))
+
class TestSign(unittest.TestCase):
def test_is_recognized(self):
--
2.47.3

View File

@ -1,43 +0,0 @@
From 690a2059be01993f5e7f65a01d994e53b82211e4 Mon Sep 17 00:00:00 2001
From: Thore Sommer <mail@thson.de>
Date: Mon, 3 Mar 2025 15:44:37 +0100
Subject: [PATCH 15/18] tpm_util: fix quote signature extraction for ECDSA
Signed-off-by: Thore Sommer <mail@thson.de>
---
keylime/tpm/tpm_util.py | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/keylime/tpm/tpm_util.py b/keylime/tpm/tpm_util.py
index cdecd32..25c40e0 100644
--- a/keylime/tpm/tpm_util.py
+++ b/keylime/tpm/tpm_util.py
@@ -223,9 +223,7 @@ def checkquote(
pcrblob: The state of the PCRs that were quoted; Intel tpm2-tools specific format
exp_hash_alg: The hash that was expected to have been used for quoting
"""
- sig_alg, hash_alg, sig_size = struct.unpack_from(">HHH", sigblob, 0)
-
- (signature,) = struct.unpack_from(f"{sig_size}s", sigblob, 6)
+ sig_alg, hash_alg = struct.unpack_from(">HH", sigblob, 0)
pubkey = serialization.load_pem_public_key(aikblob, backend=backends.default_backend())
if not isinstance(pubkey, (RSAPublicKey, EllipticCurvePublicKey)):
@@ -236,6 +234,14 @@ def checkquote(
if isinstance(pubkey, EllipticCurvePublicKey) and sig_alg not in [tpm2_objects.TPM_ALG_ECDSA]:
raise ValueError(f"Unsupported quote signature algorithm '{sig_alg:#x}' for EC keys")
+ if sig_alg in [tpm2_objects.TPM_ALG_RSASSA]:
+ (sig_size,) = struct.unpack_from(">H", sigblob, 4)
+ (signature,) = struct.unpack_from(f"{sig_size}s", sigblob, 6)
+ elif sig_alg in [tpm2_objects.TPM_ALG_ECDSA]:
+ signature = ecdsa_der_from_tpm(sigblob)
+ else:
+ raise ValueError(f"Unsupported quote signature algorithm '{sig_alg:#x}'")
+
hashfunc = tpm2_objects.HASH_FUNCS.get(hash_alg)
if not hashfunc:
raise ValueError(f"Unsupported hash with id {hash_alg:#x} in signature blob")
--
2.47.3

View File

@ -1,515 +0,0 @@
From 7c3d81879dba00dcfe917c73b10ca47e8ca7028a Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Sun, 21 Sep 2025 17:49:56 +0100
Subject: [PATCH 16/18] tpm: fix ECC P-521 coordinate validation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
P-521 coordinates can vary from 65-66 bytes due to TPM implementations
padding 521-bit values to byte boundaries or stripping leading zeros.
The previous validation was too strict, rejecting valid coordinates.
Enhanced validation:
- Accepts P-521 coordinate range 65-66 bytes (520-528 bits)
- Validates against actual NIST prime moduli per SEC1 §2.3.5 and
FIPS 186-4 App D (coordinates must be < field prime p)
- Strict rejection of unknown curves for security
The enhanced approach prevents false validation of coordinates that are
the correct byte length but exceed the curve's field prime.
Assisted-by: Claude 4 Sonnet
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/tpm/tpm2_objects.py | 42 +++-
test/test_tpm2_objects.py | 417 ++++++++++++++++++++++++++++++++++++
2 files changed, 455 insertions(+), 4 deletions(-)
create mode 100644 test/test_tpm2_objects.py
diff --git a/keylime/tpm/tpm2_objects.py b/keylime/tpm/tpm2_objects.py
index fcc5bb5..9170628 100644
--- a/keylime/tpm/tpm2_objects.py
+++ b/keylime/tpm/tpm2_objects.py
@@ -31,6 +31,16 @@ TPM_ECC_NIST_P256 = 0x0003
TPM_ECC_NIST_P384 = 0x0004
TPM_ECC_NIST_P521 = 0x0005
+# ECC curve prime moduli lookup table (coordinates must be < p)
+# This structure supports NIST curves and can be extended for other curves.
+ECC_CURVE_PRIMES = {
+ TPM_ECC_NIST_P192: 2**192 - 2**64 - 1, # P-192 prime
+ TPM_ECC_NIST_P224: 2**224 - 2**96 + 1, # P-224 prime
+ TPM_ECC_NIST_P256: 2**256 - 2**224 + 2**192 + 2**96 - 1, # P-256 prime
+ TPM_ECC_NIST_P384: 2**384 - 2**128 - 2**96 + 2**32 - 1, # P-384 prime
+ TPM_ECC_NIST_P521: 2**521 - 1, # P-521 prime
+}
+
TPM_ALG_RSA = 0x0001
TPM_ALG_ECC = 0x0023
@@ -318,10 +328,34 @@ def pubkey_parms_from_tpm2b_public(
if len(rest) != 0:
raise ValueError("Misparsed: more contents after X and Y")
- if (len(x) * 8) != curve.key_size:
- raise ValueError(f"Misparsed either X or curve: {len(x)}*8 != {curve.key_size}")
- if (len(y) * 8) != curve.key_size:
- raise ValueError(f"Misparsed either Y or curve curve: {len(y)}*8 != {curve.key_size}")
+ # ECC coordinates can vary in byte length due to:
+ # 1. Padding to byte boundaries (most common)
+ # 2. Leading zero stripping in some encodings
+ # Validate both byte length and actual coordinate value.
+ max_bytes = (curve.key_size + 7) // 8
+ min_bytes = max_bytes - 1 if curve.key_size % 8 != 0 else max_bytes
+
+ # Get the actual prime modulus for the curve
+ prime_p = ECC_CURVE_PRIMES.get(curve_id)
+ if prime_p is None:
+ raise ValueError(f"Unsupported curve ID {curve_id:#x}: prime modulus not known")
+
+ for label, coord in (("X", x), ("Y", y)):
+ coord_len = len(coord)
+ if coord_len < min_bytes or coord_len > max_bytes:
+ raise ValueError(
+ f"Misparsed {label} coordinate: got {coord_len} bytes, "
+ f"expected {min_bytes}-{max_bytes} for {curve.key_size}-bit curve"
+ )
+
+ coord_int = int.from_bytes(coord, "big")
+ # Coordinates must be reduced modulo the field prime p
+ # (SEC1 §2.3.5, FIPS 186-4 App D). Reject values >= p.
+ if coord_int >= prime_p:
+ raise ValueError(
+ f"{label} coordinate too large: {coord_int.bit_length()} bits, "
+ f"must be < {prime_p.bit_length()}-bit prime modulus"
+ )
bx = int.from_bytes(x, byteorder="big")
by = int.from_bytes(y, byteorder="big")
diff --git a/test/test_tpm2_objects.py b/test/test_tpm2_objects.py
new file mode 100644
index 0000000..c880770
--- /dev/null
+++ b/test/test_tpm2_objects.py
@@ -0,0 +1,417 @@
+import struct
+import unittest
+
+from cryptography.hazmat.primitives.asymmetric import ec
+
+from keylime.tpm.tpm2_objects import (
+ ECC_CURVE_PRIMES,
+ TPM_ECC_NIST_P192,
+ TPM_ECC_NIST_P224,
+ TPM_ECC_NIST_P256,
+ TPM_ECC_NIST_P384,
+ TPM_ECC_NIST_P521,
+ _curve_from_curve_id,
+ _pack_in_tpm2b,
+ pubkey_parms_from_tpm2b_public,
+)
+
+
+class TestTpm2Objects(unittest.TestCase):
+ def test_p521_coordinate_validation_logic(self):
+ """Test the specific coordinate validation logic for P-521"""
+ curve = _curve_from_curve_id(TPM_ECC_NIST_P521)
+
+ # Test the updated validation logic
+ max_bytes = (curve.key_size + 7) // 8 # Should be 66 bytes for P-521
+ min_bytes = max_bytes - 1 if curve.key_size % 8 != 0 else max_bytes # Should be 65 bytes for P-521
+
+ self.assertEqual(max_bytes, 66)
+ self.assertEqual(min_bytes, 65) # P-521 is not byte-aligned, so allows 65-66 bytes
+
+ # Test coordinate sizes that should be accepted (65-66 bytes for P-521)
+ valid_sizes = [65, 66]
+
+ for size in valid_sizes:
+ # Check that the validation logic would accept this size
+ should_pass = min_bytes <= size <= max_bytes
+ self.assertTrue(should_pass, f"Size {size} bytes should be valid for P-521")
+
+ # Test coordinate sizes that should be rejected
+ invalid_sizes = [64, 67, 32, 68]
+
+ for size in invalid_sizes:
+ # This should fail: not in the valid range
+ should_fail = size < min_bytes or size > max_bytes
+ self.assertTrue(should_fail, f"Size {size} bytes should be invalid for P-521")
+
+ def test_p256_coordinate_validation_logic(self):
+ """Test the coordinate validation logic for P-256 to ensure no regression"""
+ curve = _curve_from_curve_id(TPM_ECC_NIST_P256)
+
+ max_bytes = (curve.key_size + 7) // 8 # Should be 32 bytes for P-256
+ min_bytes = (
+ max_bytes - 1 if curve.key_size % 8 != 0 else max_bytes
+ ) # Should be 32 bytes for P-256 (byte-aligned)
+
+ self.assertEqual(max_bytes, 32)
+ self.assertEqual(min_bytes, 32) # P-256 is byte-aligned, so only accepts 32 bytes
+
+ # 32 bytes should be accepted
+ size = 32
+ should_pass = min_bytes <= size <= max_bytes
+ self.assertTrue(should_pass, f"P-256 should accept {size} bytes")
+
+ # Other sizes should be rejected
+ invalid_sizes = [31, 33, 64]
+ for size in invalid_sizes:
+ should_fail = size < min_bytes or size > max_bytes
+ self.assertTrue(should_fail, f"P-256 should reject {size} bytes")
+
+ def test_p384_coordinate_validation_logic(self):
+ """Test the coordinate validation logic for P-384 to ensure no regression"""
+ curve = _curve_from_curve_id(TPM_ECC_NIST_P384)
+
+ max_bytes = (curve.key_size + 7) // 8 # Should be 48 bytes for P-384
+ min_bytes = (
+ max_bytes - 1 if curve.key_size % 8 != 0 else max_bytes
+ ) # Should be 48 bytes for P-384 (byte-aligned)
+
+ self.assertEqual(max_bytes, 48)
+ self.assertEqual(min_bytes, 48) # P-384 is byte-aligned, so only accepts 48 bytes
+
+ # 48 bytes should be accepted
+ size = 48
+ should_pass = min_bytes <= size <= max_bytes
+ self.assertTrue(should_pass, f"P-384 should accept {size} bytes")
+
+ def test_coordinate_size_calculation(self):
+ """Test that coordinate size calculations are correct for different curves"""
+ # P-256: 256 bits -> (256 + 7) // 8 = 32 bytes
+ curve_p256 = _curve_from_curve_id(TPM_ECC_NIST_P256)
+ expected_p256 = (curve_p256.key_size + 7) // 8
+ self.assertEqual(expected_p256, 32)
+ self.assertEqual(curve_p256.key_size, 256)
+
+ # P-384: 384 bits -> (384 + 7) // 8 = 48 bytes
+ curve_p384 = _curve_from_curve_id(TPM_ECC_NIST_P384)
+ expected_p384 = (curve_p384.key_size + 7) // 8
+ self.assertEqual(expected_p384, 48)
+ self.assertEqual(curve_p384.key_size, 384)
+
+ # P-521: 521 bits -> (521 + 7) // 8 = 66 bytes
+ curve_p521 = _curve_from_curve_id(TPM_ECC_NIST_P521)
+ expected_p521 = (curve_p521.key_size + 7) // 8
+ self.assertEqual(expected_p521, 66)
+ self.assertEqual(curve_p521.key_size, 521)
+
+ def test_p521_specific_fix(self):
+ """Test the specific scenario that was fixed: P-521 with 66-byte coordinates"""
+ curve = _curve_from_curve_id(TPM_ECC_NIST_P521)
+
+ # The key issue: P-521 has 521 bits
+ self.assertEqual(curve.key_size, 521)
+
+ # TPMs pad to 66 bytes (528 bits)
+ tpm_padded_size = 66
+ tpm_padded_bits = tpm_padded_size * 8
+ self.assertEqual(tpm_padded_bits, 528)
+
+ # The old validation would reject: (66 * 8) != 521
+ old_validation_fails = tpm_padded_bits != curve.key_size
+ self.assertTrue(old_validation_fails, "Old validation would incorrectly reject 66-byte coordinates")
+
+ # The new validation should accept: len(x) == expected_bytes OR (len(x) * 8) == curve.key_size
+ expected_bytes = (curve.key_size + 7) // 8
+ new_validation_passes = (tpm_padded_size == expected_bytes) or (tpm_padded_bits == curve.key_size)
+ self.assertTrue(new_validation_passes, "New validation should accept 66-byte coordinates")
+
+ def test_validation_before_and_after_fix(self):
+ """Test that demonstrates the fix by comparing old vs new validation logic"""
+ curve = _curve_from_curve_id(TPM_ECC_NIST_P521)
+
+ # Test multiple coordinate sizes that P-521 can have
+ test_sizes = [65, 66] # 65 bytes (leading zero stripped), 66 bytes (padded)
+
+ max_bytes = (curve.key_size + 7) // 8 # 66 bytes
+ min_bytes = max_bytes - 1 if curve.key_size % 8 != 0 else max_bytes # 65 bytes for P-521
+
+ for coordinate_size in test_sizes:
+ # Old validation logic (strict bit size match) - would require exactly 65.125 bytes
+ # which is impossible since we can't have fractional bytes
+
+ # New validation logic (accept range for non-byte-aligned curves)
+ new_logic_passes = min_bytes <= coordinate_size <= max_bytes
+ self.assertTrue(new_logic_passes, f"New logic should accept {coordinate_size}-byte coordinates for P-521")
+
+ # Verify the calculations
+ self.assertEqual(max_bytes, 66)
+ self.assertEqual(min_bytes, 65)
+
+ def test_p521_coordinate_range_validation(self):
+ """Test that P-521 accepts coordinates in the range 65-66 bytes (520-528 bits)"""
+ curve = _curve_from_curve_id(TPM_ECC_NIST_P521)
+
+ # P-521: 521 bits, padded to 66 bytes (528 bits), or 65 bytes with leading zero stripped
+ max_bytes = (curve.key_size + 7) // 8 # 66 bytes
+ min_bytes = max_bytes - 1 # 65 bytes (since 521 % 8 != 0)
+
+ # Test all valid sizes
+ valid_sizes = [65, 66]
+ for size in valid_sizes:
+ is_valid = min_bytes <= size <= max_bytes
+ self.assertTrue(is_valid, f"P-521 should accept {size} bytes ({size * 8} bits)")
+
+ # Test invalid sizes
+ invalid_sizes = [64, 67, 68, 32]
+ for size in invalid_sizes:
+ is_invalid = size < min_bytes or size > max_bytes
+ self.assertTrue(is_invalid, f"P-521 should reject {size} bytes ({size * 8} bits)")
+
+ def test_coordinate_value_validation(self):
+ """Test that coordinate values are validated against actual prime moduli"""
+ # Test P-521 with actual prime
+ # curve_p521 = _curve_from_curve_id(TPM_ECC_NIST_P521) # Not needed for this test
+ p521_prime = ECC_CURVE_PRIMES[TPM_ECC_NIST_P521]
+
+ # Test valid coordinate value (within range)
+ valid_coord_int = p521_prime - 1 # Largest valid value
+ valid_coord_bytes = valid_coord_int.to_bytes(66, "big") # 66 bytes, padded
+
+ # Test the validation logic
+ coord_int = int.from_bytes(valid_coord_bytes, "big")
+ is_valid_value = coord_int < p521_prime
+ self.assertTrue(is_valid_value, "Coordinate value should be valid for P-521")
+
+ # Test invalid coordinate value (>= prime)
+ invalid_coord_int = p521_prime # Equal to prime (invalid)
+ invalid_coord_bytes = invalid_coord_int.to_bytes(66, "big") # 66 bytes, but value too large
+
+ coord_int = int.from_bytes(invalid_coord_bytes, "big")
+ is_invalid_value = coord_int >= p521_prime
+ self.assertTrue(is_invalid_value, "Coordinate value >= prime should be invalid for P-521")
+
+ def test_prime_constants_accuracy(self):
+ """Test that our hardcoded prime constants are correct"""
+ # Verify the NIST prime values
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P192], 2**192 - 2**64 - 1)
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P224], 2**224 - 2**96 + 1)
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P256], 2**256 - 2**224 + 2**192 + 2**96 - 1)
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P384], 2**384 - 2**128 - 2**96 + 2**32 - 1)
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P521], 2**521 - 1)
+
+ # Verify they are actually less than 2^m for all curves except P-521
+ self.assertLess(ECC_CURVE_PRIMES[TPM_ECC_NIST_P192], 2**192)
+ self.assertLess(ECC_CURVE_PRIMES[TPM_ECC_NIST_P224], 2**224)
+ self.assertLess(ECC_CURVE_PRIMES[TPM_ECC_NIST_P256], 2**256)
+ self.assertLess(ECC_CURVE_PRIMES[TPM_ECC_NIST_P384], 2**384)
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P521], 2**521 - 1) # P-521 is special case
+
+ def test_prime_lookup_table(self):
+ """Test that the prime lookup table works correctly"""
+ # Test known curves
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P192], 2**192 - 2**64 - 1)
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P224], 2**224 - 2**96 + 1)
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P256], 2**256 - 2**224 + 2**192 + 2**96 - 1)
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P384], 2**384 - 2**128 - 2**96 + 2**32 - 1)
+ self.assertEqual(ECC_CURVE_PRIMES[TPM_ECC_NIST_P521], 2**521 - 1)
+
+ # Test rejection of unknown curve
+ unknown_curve_id = 0x9999
+ unknown_prime = ECC_CURVE_PRIMES.get(unknown_curve_id)
+ self.assertIsNone(unknown_prime, "Unknown curves should not be in ECC_CURVE_PRIMES")
+
+ def test_error_message_formatting(self):
+ """Test that error messages use bit_length() instead of full integers"""
+ # Create a large coordinate value
+ large_value = ECC_CURVE_PRIMES[TPM_ECC_NIST_P521] # This would be hundreds of digits
+
+ # Verify bit_length() is much more reasonable than the full number
+ bit_length = large_value.bit_length()
+ self.assertEqual(bit_length, 521) # Much more readable than 150+ digit number
+
+ # The error message should use bit lengths, not full integers
+ expected_msg_pattern = f"coordinate too large: {bit_length} bits"
+ self.assertIn("521 bits", expected_msg_pattern)
+
+ def test_unknown_curve_rejection(self):
+ """Test that unknown curves are strictly rejected"""
+ # This tests the design decision to be strict rather than use fallbacks
+ unknown_curve_id = 0x9999
+
+ # The strict approach: unknown curves should not have fallback behavior
+ # This ensures we only validate curves we explicitly understand
+ result = ECC_CURVE_PRIMES.get(unknown_curve_id)
+ self.assertIsNone(result, "Unknown curves should be explicitly rejected, not given fallback primes")
+
+
+class TestEccPublicKeySecurityValidation(unittest.TestCase):
+ """Test that ECC public key validation includes all required security checks:
+ 1. Point is on the curve
+ 2. Point is not zero or infinity
+ 3. Point is not in a small subgroup (not applicable to NIST curves with cofactor=1)
+ """
+
+ def create_ecc_tpm2b_public(self, x: int, y: int, curve_id: int = TPM_ECC_NIST_P256) -> bytes:
+ """Helper to create a TPM2B_PUBLIC structure for ECC key with given coordinates"""
+ # Get coordinate size based on curve
+ curve = _curve_from_curve_id(curve_id)
+ coord_bytes = (curve.key_size + 7) // 8
+
+ # Convert coordinates to bytes
+ x_bytes = x.to_bytes(coord_bytes, "big")
+ y_bytes = y.to_bytes(coord_bytes, "big")
+
+ # Build TPMT_PUBLIC structure
+ # alg_type (TPM_ALG_ECC = 0x0023)
+ tpmt = struct.pack(">H", 0x0023)
+ # name_alg (TPM_ALG_SHA256 = 0x000B)
+ tpmt += struct.pack(">H", 0x000B)
+ # object_attributes (4 bytes)
+ tpmt += struct.pack(">I", 0x00040072)
+ # auth_policy (empty TPM2B)
+ tpmt += struct.pack(">H", 0)
+ # symmetric (TPM_ALG_NULL)
+ tpmt += struct.pack(">H", 0x0010)
+ # scheme (TPM_ALG_NULL)
+ tpmt += struct.pack(">H", 0x0010)
+ # curve_id
+ tpmt += struct.pack(">H", curve_id)
+ # kdf_scheme (TPM_ALG_NULL)
+ tpmt += struct.pack(">H", 0x0010)
+ # x coordinate (TPM2B)
+ tpmt += _pack_in_tpm2b(x_bytes)
+ # y coordinate (TPM2B)
+ tpmt += _pack_in_tpm2b(y_bytes)
+
+ # Wrap in TPM2B_PUBLIC
+ return _pack_in_tpm2b(tpmt)
+
+ def test_point_on_curve_validation(self):
+ """Test that points not on the curve are rejected (Security Check #1)"""
+ # For P-256, the curve equation is: y² = x³ - 3x + b (mod p)
+ # Choose coordinates that don't satisfy this equation
+ x = 1
+ y = 1 # (1, 1) is not on the P-256 curve
+
+ tpm2b_public = self.create_ecc_tpm2b_public(x, y, TPM_ECC_NIST_P256)
+
+ # The cryptography library should reject this point as not being on the curve
+ with self.assertRaises(ValueError) as cm:
+ pubkey_parms_from_tpm2b_public(tpm2b_public)
+ self.assertIn("not on the curve", str(cm.exception).lower())
+
+ def test_point_at_infinity_validation(self):
+ """Test that the point at infinity (0, 0) is rejected (Security Check #2)"""
+ # The point at infinity should be rejected
+ x = 0
+ y = 0
+
+ tpm2b_public = self.create_ecc_tpm2b_public(x, y, TPM_ECC_NIST_P256)
+
+ # The cryptography library should reject the point at infinity
+ with self.assertRaises(ValueError) as cm:
+ pubkey_parms_from_tpm2b_public(tpm2b_public)
+ self.assertIn("not on the curve", str(cm.exception).lower())
+
+ def test_valid_point_accepted(self):
+ """Test that a valid point on the curve is accepted"""
+ # Generate a valid key and extract its coordinates
+ private_key = ec.generate_private_key(ec.SECP256R1())
+ public_key = private_key.public_key()
+ numbers = public_key.public_numbers()
+
+ # Create TPM2B_PUBLIC with valid coordinates
+ tpm2b_public = self.create_ecc_tpm2b_public(numbers.x, numbers.y, TPM_ECC_NIST_P256)
+
+ # Should parse successfully
+ parsed_key, _ = pubkey_parms_from_tpm2b_public(tpm2b_public)
+ self.assertIsInstance(parsed_key, ec.EllipticCurvePublicKey)
+
+ # Verify the coordinates match
+ assert isinstance(parsed_key, ec.EllipticCurvePublicKey) # Type narrowing for pyright
+ parsed_numbers = parsed_key.public_numbers()
+ self.assertEqual(parsed_numbers.x, numbers.x)
+ self.assertEqual(parsed_numbers.y, numbers.y)
+
+ def test_small_subgroup_not_applicable_to_nist_curves(self):
+ """Test documenting that small subgroup checks are not needed for NIST curves (Security Check #3)
+
+ NIST P-curves (P-192, P-224, P-256, P-384, P-521) all have cofactor h=1,
+ meaning the entire curve has prime order. There are no small subgroups to check.
+
+ Curves with cofactor > 1 (like Curve25519 with h=8) require additional validation
+ to ensure the point is not in a small subgroup, but this is not applicable to
+ the NIST curves used by TPMs.
+ """
+ # This test documents the cofactor=1 property for all supported NIST curves
+ # The cryptography library's point validation is sufficient for these curves
+
+ test_curves = [
+ (TPM_ECC_NIST_P192, ec.SECP192R1()),
+ (TPM_ECC_NIST_P224, ec.SECP224R1()),
+ (TPM_ECC_NIST_P256, ec.SECP256R1()),
+ (TPM_ECC_NIST_P384, ec.SECP384R1()),
+ (TPM_ECC_NIST_P521, ec.SECP521R1()),
+ ]
+
+ for curve_id, curve_obj in test_curves:
+ with self.subTest(curve=curve_obj.name):
+ try:
+ # Generate a valid key for this curve
+ # Note: P-192 may not be supported in newer OpenSSL versions
+ private_key = ec.generate_private_key(curve_obj)
+ except Exception: # pylint: disable=broad-except
+ # Skip this specific curve if not supported by OpenSSL (e.g., P-192)
+ self.skipTest(f"Curve {curve_obj.name} not supported by OpenSSL")
+
+ public_key = private_key.public_key()
+ numbers = public_key.public_numbers()
+
+ # Create TPM2B_PUBLIC and verify it parses successfully
+ tpm2b_public = self.create_ecc_tpm2b_public(numbers.x, numbers.y, curve_id)
+ parsed_key, _ = pubkey_parms_from_tpm2b_public(tpm2b_public)
+
+ # All NIST curves have cofactor = 1, so no small subgroup attacks possible
+ # The point validation by the cryptography library is sufficient
+ self.assertIsInstance(parsed_key, ec.EllipticCurvePublicKey)
+
+ def test_coordinate_exceeds_field_prime_rejected(self):
+ """Test that coordinates >= field prime are rejected"""
+ # Use a coordinate value that's >= the field prime for P-256
+ p256_prime = ECC_CURVE_PRIMES[TPM_ECC_NIST_P256]
+
+ # x coordinate exceeds the field prime
+ x = p256_prime + 1
+ y = 1
+
+ tpm2b_public = self.create_ecc_tpm2b_public(x, y, TPM_ECC_NIST_P256)
+
+ # Should be rejected during coordinate validation
+ with self.assertRaises(ValueError) as cm:
+ pubkey_parms_from_tpm2b_public(tpm2b_public)
+ # Will fail either at coordinate validation or curve validation
+ self.assertTrue(
+ "coordinate too large" in str(cm.exception).lower() or "not on the curve" in str(cm.exception).lower()
+ )
+
+ def test_p521_point_validation(self):
+ """Test point validation works correctly for P-521 (non-byte-aligned curve)"""
+ # Generate a valid P-521 key
+ private_key = ec.generate_private_key(ec.SECP521R1())
+ public_key = private_key.public_key()
+ numbers = public_key.public_numbers()
+
+ # Valid point should be accepted
+ tpm2b_public = self.create_ecc_tpm2b_public(numbers.x, numbers.y, TPM_ECC_NIST_P521)
+ parsed_key, _ = pubkey_parms_from_tpm2b_public(tpm2b_public)
+ self.assertIsInstance(parsed_key, ec.EllipticCurvePublicKey)
+
+ # Invalid point should be rejected
+ tpm2b_public_invalid = self.create_ecc_tpm2b_public(1, 1, TPM_ECC_NIST_P521)
+ with self.assertRaises(ValueError) as cm:
+ pubkey_parms_from_tpm2b_public(tpm2b_public_invalid)
+ self.assertIn("not on the curve", str(cm.exception).lower())
+
+
+if __name__ == "__main__":
+ unittest.main()
--
2.47.3

View File

@ -1,221 +0,0 @@
From 2acb35cb5b203f08aa281c571d341406ff1602c2 Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Thu, 25 Sep 2025 14:25:18 +0100
Subject: [PATCH 17/18] tpm: fix ECC P-521 credential activation with
consistent marshaling
The TPM credential activation was failing for P-521 curves due to
inconsistent ECC point marshaling in tpms_ecc_point_marshal().
The function used bit_length() which varies for P-521 coordinates
(520-521 bits), producing different blob sizes and causing TPM
integrity check failures during ActivateCredential operations.
Assisted-by: Claude 4 Sonnet
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/tpm/tpm2_objects.py | 11 +--
keylime/tpm/tpm_util.py | 7 +-
test/test_tpm2_objects.py | 130 ++++++++++++++++++++++++++++++++++++
3 files changed, 142 insertions(+), 6 deletions(-)
diff --git a/keylime/tpm/tpm2_objects.py b/keylime/tpm/tpm2_objects.py
index 9170628..d33ebaa 100644
--- a/keylime/tpm/tpm2_objects.py
+++ b/keylime/tpm/tpm2_objects.py
@@ -597,9 +597,12 @@ def unmarshal_tpml_pcr_selection(tpml_pcr_selection: bytes) -> Tuple[Dict[int, i
def tpms_ecc_point_marshal(public_key: EllipticCurvePublicKey) -> bytes:
pn = public_key.public_numbers()
+ curve = public_key.curve
- sz = (pn.x.bit_length() + 7) // 8
- secret = struct.pack(">H", sz) + pn.x.to_bytes(sz, "big")
+ # Use fixed coordinate size based on curve to ensure consistent marshaling
+ # This is critical for P-521 where bit_length() can vary (520-521 bits)
+ # leading to credential activation failures due to inconsistent blob sizes
+ coord_size = (curve.key_size + 7) // 8
- sz = (pn.y.bit_length() + 7) // 8
- return secret + struct.pack(">H", sz) + pn.y.to_bytes(sz, "big")
+ secret = struct.pack(">H", coord_size) + pn.x.to_bytes(coord_size, "big")
+ return secret + struct.pack(">H", coord_size) + pn.y.to_bytes(coord_size, "big")
diff --git a/keylime/tpm/tpm_util.py b/keylime/tpm/tpm_util.py
index 25c40e0..fbbe557 100644
--- a/keylime/tpm/tpm_util.py
+++ b/keylime/tpm/tpm_util.py
@@ -318,11 +318,14 @@ def crypt_secret_encrypt_ecc(public_key: EllipticCurvePublicKey, hashfunc: hashe
digest_size = hashfunc.digest_size
+ # Use fixed coordinate size for consistent marshaling
+ coord_size = (public_key.curve.key_size + 7) // 8
+
x = my_public_key.public_numbers().x
- party_x = x.to_bytes((x.bit_length() + 7) >> 3, "big")
+ party_x = x.to_bytes(coord_size, "big")
x = public_key.public_numbers().x
- party_y = x.to_bytes((x.bit_length() + 7) >> 3, "big")
+ party_y = x.to_bytes(coord_size, "big")
data = crypt_kdfe(hashfunc, ecc_secret_x, "IDENTITY", party_x, party_y, digest_size << 3)
diff --git a/test/test_tpm2_objects.py b/test/test_tpm2_objects.py
index c880770..48d6a43 100644
--- a/test/test_tpm2_objects.py
+++ b/test/test_tpm2_objects.py
@@ -1,6 +1,7 @@
import struct
import unittest
+from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from keylime.tpm.tpm2_objects import (
@@ -13,7 +14,9 @@ from keylime.tpm.tpm2_objects import (
_curve_from_curve_id,
_pack_in_tpm2b,
pubkey_parms_from_tpm2b_public,
+ tpms_ecc_point_marshal,
)
+from keylime.tpm.tpm_util import crypt_secret_encrypt_ecc
class TestTpm2Objects(unittest.TestCase):
@@ -413,5 +416,132 @@ class TestEccPublicKeySecurityValidation(unittest.TestCase):
self.assertIn("not on the curve", str(cm.exception).lower())
+class TestEccMarshaling(unittest.TestCase):
+ """Test ECC point marshaling consistency fixes"""
+
+ def test_p521_marshaling_consistency(self):
+ """Test that P-521 marshaling produces consistent blob sizes regardless of coordinate values"""
+ # Generate multiple P-521 keys to test with different coordinate values
+ keys = []
+ for _ in range(10):
+ private_key = ec.generate_private_key(ec.SECP521R1())
+ keys.append(private_key.public_key())
+
+ # Marshal all keys and check that blob sizes are consistent
+ blob_sizes = []
+ for key in keys:
+ blob = tpms_ecc_point_marshal(key)
+ blob_sizes.append(len(blob))
+
+ # All blobs should be the same size for P-521
+ self.assertEqual(len(set(blob_sizes)), 1, "All P-521 marshaled blobs should have the same size")
+
+ # Expected size: 2 bytes (x size) + 66 bytes (x coord) + 2 bytes (y size) + 66 bytes (y coord) = 136 bytes
+ expected_size = 2 + 66 + 2 + 66
+ self.assertEqual(blob_sizes[0], expected_size, f"P-521 marshaled blob should be {expected_size} bytes")
+
+ def test_marshaling_coordinate_sizes(self):
+ """Test that marshaled coordinates use fixed sizes based on curve key size"""
+ # Test P-521: 521 bits -> (521 + 7) // 8 = 66 bytes per coordinate
+ p521_key = ec.generate_private_key(ec.SECP521R1()).public_key()
+ p521_blob = tpms_ecc_point_marshal(p521_key)
+
+ # Parse the blob to check coordinate sizes
+ x_size = struct.unpack(">H", p521_blob[:2])[0]
+ y_size = struct.unpack(">H", p521_blob[2 + x_size : 2 + x_size + 2])[0]
+
+ self.assertEqual(x_size, 66, "P-521 X coordinate should be 66 bytes")
+ self.assertEqual(y_size, 66, "P-521 Y coordinate should be 66 bytes")
+
+ # Test P-256: 256 bits -> (256 + 7) // 8 = 32 bytes per coordinate
+ p256_key = ec.generate_private_key(ec.SECP256R1()).public_key()
+ p256_blob = tpms_ecc_point_marshal(p256_key)
+
+ x_size = struct.unpack(">H", p256_blob[:2])[0]
+ y_size = struct.unpack(">H", p256_blob[2 + x_size : 2 + x_size + 2])[0]
+
+ self.assertEqual(x_size, 32, "P-256 X coordinate should be 32 bytes")
+ self.assertEqual(y_size, 32, "P-256 Y coordinate should be 32 bytes")
+
+ def test_p521_credential_activation_consistency(self):
+ """Test the specific issue: P-521 credential activation with consistent marshaling"""
+ # This test verifies the fix for credential activation failures
+ # Generate two P-521 keys with potentially different bit lengths for coordinates
+ key1 = ec.generate_private_key(ec.SECP521R1()).public_key()
+ key2 = ec.generate_private_key(ec.SECP521R1()).public_key()
+
+ # Marshal both keys
+ blob1 = tpms_ecc_point_marshal(key1)
+ blob2 = tpms_ecc_point_marshal(key2)
+
+ # The critical fix: both blobs should be the same size regardless of coordinate bit lengths
+ self.assertEqual(
+ len(blob1), len(blob2), "P-521 marshaled blobs must be same size regardless of coordinate bit lengths"
+ )
+
+ # Both should use the fixed coordinate size (66 bytes)
+ expected_total_size = 2 + 66 + 2 + 66 # size_x + x + size_y + y
+ self.assertEqual(len(blob1), expected_total_size)
+ self.assertEqual(len(blob2), expected_total_size)
+
+ def test_marshaling_format_correctness(self):
+ """Test that marshaling follows the correct TPM format: size(2) + coord(n) + size(2) + coord(n)"""
+ key = ec.generate_private_key(ec.SECP521R1()).public_key()
+ blob = tpms_ecc_point_marshal(key)
+
+ # Parse the blob structure
+ if len(blob) < 4:
+ self.fail("Marshaled blob too short")
+
+ x_size = struct.unpack(">H", blob[:2])[0]
+ self.assertEqual(x_size, 66, "X coordinate size should be 66 for P-521")
+
+ if len(blob) < 2 + x_size + 2:
+ self.fail("Marshaled blob missing Y coordinate size")
+
+ y_size = struct.unpack(">H", blob[2 + x_size : 2 + x_size + 2])[0]
+ self.assertEqual(y_size, 66, "Y coordinate size should be 66 for P-521")
+
+ # Total size should be: 2 + 66 + 2 + 66 = 136
+ expected_total = 2 + x_size + 2 + y_size
+ self.assertEqual(len(blob), expected_total, "Total marshaled blob size incorrect")
+
+ def test_crypt_secret_encrypt_ecc_consistency(self):
+ """Test that crypt_secret_encrypt_ecc produces consistent results with fixed coordinate sizes"""
+ # Generate a P-521 key to test with
+ public_key = ec.generate_private_key(ec.SECP521R1()).public_key()
+ hashfunc = hashes.SHA256()
+
+ # Call the function multiple times and check consistency
+ results = []
+ for _ in range(5):
+ data, point = crypt_secret_encrypt_ecc(public_key, hashfunc)
+ results.append((data, point))
+
+ # Check that all returned points have consistent marshaling
+ # (the data will be different due to random key generation, but point marshaling should be consistent)
+ point_sizes = [len(point) for _, point in results]
+ self.assertEqual(len(set(point_sizes)), 1, "All marshaled points should have the same size")
+
+ # For P-521, the marshaled point should be 136 bytes (2+66+2+66)
+ expected_point_size = 2 + 66 + 2 + 66
+ self.assertEqual(
+ point_sizes[0], expected_point_size, f"P-521 marshaled point should be {expected_point_size} bytes"
+ )
+
+ # All data results should be different (due to random ephemeral keys)
+ data_results = [data for data, _ in results]
+ self.assertEqual(
+ len(set(data_results)),
+ len(data_results),
+ "All data results should be different due to random ephemeral keys",
+ )
+
+ # All data results should have the same length (SHA256 digest size)
+ data_sizes = [len(data) for data, _ in results]
+ self.assertEqual(len(set(data_sizes)), 1, "All data results should have the same size")
+ self.assertEqual(data_sizes[0], hashfunc.digest_size, "Data size should match hash digest size")
+
+
if __name__ == "__main__":
unittest.main()
--
2.47.3

View File

@ -1,372 +0,0 @@
From ee4192df70384fa6b23f359a287e042103ba4ea9 Mon Sep 17 00:00:00 2001
From: Sergio Correia <scorreia@redhat.com>
Date: Thu, 25 Sep 2025 14:37:10 +0100
Subject: [PATCH 18/18] tpm: fix ECC signature parsing to support
variable-length coordinates
The previous ECC signature validation implementation incorrectly assumed
fixed-length coordinate encoding, causing failures with mathematically
correct variable-length coordinates (especially P-521 curves where
coordinates are typically 65-66 bytes).
This commit fixes the ecdsa_der_from_tpm() function to properly handle
TSS ESAPI signature format where the sig_size field contains only the
r component size, followed by the s component with its own size header.
This enables proper ECC attestation for the supported NIST curves.
Assisted-by: Claude 4 Sonnet
Signed-off-by: Sergio Correia <scorreia@redhat.com>
---
keylime/tpm/tpm_main.py | 2 +-
keylime/tpm/tpm_util.py | 105 +++++++++++++++++++---
test/test_tpm2_objects.py | 184 +++++++++++++++++++++++++++++++++++++-
3 files changed, 277 insertions(+), 14 deletions(-)
diff --git a/keylime/tpm/tpm_main.py b/keylime/tpm/tpm_main.py
index 6f2e89f..ecbacbe 100644
--- a/keylime/tpm/tpm_main.py
+++ b/keylime/tpm/tpm_main.py
@@ -91,7 +91,7 @@ class Tpm:
if isinstance(iak_pub, EllipticCurvePublicKey):
if sig_alg in [tpm2_objects.TPM_ALG_ECDSA]:
try:
- der_sig = tpm_util.ecdsa_der_from_tpm(iak_sign)
+ der_sig = tpm_util.ecdsa_der_from_tpm(iak_sign, iak_pub)
tpm_util.verify(iak_pub, der_sig, digest, hashfunc)
logger.info("Agent %s AIK verified with IAK", uuid)
return True
diff --git a/keylime/tpm/tpm_util.py b/keylime/tpm/tpm_util.py
index fbbe557..f554f94 100644
--- a/keylime/tpm/tpm_util.py
+++ b/keylime/tpm/tpm_util.py
@@ -59,6 +59,43 @@ logger = keylime_logging.init_logging("tpm_util")
SupportedKeyTypes = Union[RSAPublicKey, EllipticCurvePublicKey]
+# ECC signature parsing constants.
+# Raw signature sizes for different ECC curves (r||s concatenated format).
+# r and s are the two mathematical components of an ECDSA signature.
+ECC_SECP192R1_SIGNATURE_SIZE = 48 # 24 bytes each for r,s.
+ECC_SECP224R1_SIGNATURE_SIZE = 56 # 28 bytes each for r,s.
+ECC_SECP256R1_SIGNATURE_SIZE = 64 # 32 bytes each for r,s.
+ECC_SECP384R1_SIGNATURE_SIZE = 96 # 48 bytes each for r,s.
+ECC_SECP521R1_SIGNATURE_SIZE = 132 # 66 bytes each for r,s.
+
+# TPM2B_ECDSA_SIGNATURE format constants.
+TPM2B_SIZE_FIELD_LENGTH = 2 # 2 bytes for size field.
+TPM2B_MIN_HEADER_SIZE = 4 # Minimum: 2 bytes r_size + 2 bytes s_size.
+
+# DER encoding constants.
+DER_SEQUENCE_TAG = 0x30
+
+# Signature blob header offset (skip alg, hash_alg, sig_size headers).
+SIGNATURE_BLOB_HEADER_SIZE = 6
+
+# ECC curve to signature size mapping for raw r||s format.
+ECC_RAW_SIGNATURE_SIZES = {
+ "secp192r1": ECC_SECP192R1_SIGNATURE_SIZE,
+ "secp224r1": ECC_SECP224R1_SIGNATURE_SIZE,
+ "secp256r1": ECC_SECP256R1_SIGNATURE_SIZE,
+ "secp384r1": ECC_SECP384R1_SIGNATURE_SIZE,
+ "secp521r1": ECC_SECP521R1_SIGNATURE_SIZE,
+}
+
+# ECC curve coordinate size ranges (min, max) for validation.
+ECC_COORDINATE_SIZE_RANGES = {
+ "secp192r1": (1, 24), # 192 bits = 24 bytes max
+ "secp224r1": (1, 28), # 224 bits = 28 bytes max
+ "secp256r1": (1, 32), # 256 bits = 32 bytes max
+ "secp384r1": (1, 48), # 384 bits = 48 bytes max
+ "secp521r1": (1, 66), # 521 bits = 66 bytes max (521 bits, not 512)
+}
+
def verify(
pubkey: SupportedKeyTypes,
@@ -106,17 +143,59 @@ def der_len(encoded_int_len: int) -> bytes:
return bytes((0x80 | len(bin_str),)) + bin_str
-def ecdsa_der_from_tpm(sigblob: bytes) -> bytes:
- _, _, sig_size_r = struct.unpack_from(">HHH", sigblob, 0)
- sig_r = sigblob[6 : 6 + sig_size_r]
- encoded_sig_r = der_int(sig_r)
- sigblob = sigblob[6 + sig_size_r :]
- sig_size_s = struct.unpack_from(">H", sigblob, 0)[0]
- sig_s = sigblob[2 : 2 + sig_size_s]
- encoded_sig_s = der_int(sig_s)
- total_size = len(encoded_sig_r) + len(encoded_sig_s)
- der_sig = bytes.fromhex(f"30{total_size:x}") + encoded_sig_r + encoded_sig_s
- return der_sig
+def ecdsa_der_from_tpm(sigblob: bytes, pubkey: EllipticCurvePublicKey) -> bytes:
+ """Convert ECC signature from TPM format to DER format for cryptographic verification.
+
+ This function handles TSS ESAPI signature format where the signature header's
+ sig_size field contains the size of the r component, followed by the s component
+ with its own size header.
+
+ Parameters
+ ----------
+ sigblob: TPM signature blob containing signature headers and signature data
+ pubkey: ECC public key to determine expected signature size
+
+ Returns
+ -------
+ DER-encoded ECDSA signature suitable for cryptographic library verification
+
+ Raises
+ ------
+ ValueError: If signature format cannot be parsed or is invalid
+ """
+ # Extract signature header information.
+ _sig_alg, _hash_alg, sig_size_r = struct.unpack_from(">HHH", sigblob, 0)
+
+ # Extract the r component (size is in sig_size_r field).
+ sig_r = sigblob[SIGNATURE_BLOB_HEADER_SIZE : SIGNATURE_BLOB_HEADER_SIZE + sig_size_r]
+
+ # The s component follows immediately after r, with its own size header.
+ s_offset = SIGNATURE_BLOB_HEADER_SIZE + sig_size_r
+ if s_offset + 2 <= len(sigblob):
+ sig_size_s = struct.unpack_from(">H", sigblob, s_offset)[0]
+ s_start = s_offset + 2
+ if s_start + sig_size_s <= len(sigblob):
+ sig_s = sigblob[s_start : s_start + sig_size_s]
+
+ # Validate coordinate sizes against curve requirements.
+ curve_name = pubkey.curve.name
+ coordinate_range = ECC_COORDINATE_SIZE_RANGES.get(curve_name)
+ if coordinate_range:
+ min_size, max_size = coordinate_range
+ if not min_size <= len(sig_r) <= max_size:
+ raise ValueError(f"Invalid r coordinate size {len(sig_r)} for curve {curve_name}")
+ if not min_size <= len(sig_s) <= max_size:
+ raise ValueError(f"Invalid s coordinate size {len(sig_s)} for curve {curve_name}")
+
+ # Convert to DER format.
+ encoded_sig_r = der_int(sig_r)
+ encoded_sig_s = der_int(sig_s)
+ total_size = len(encoded_sig_r) + len(encoded_sig_s)
+ der_length = der_len(total_size)
+ der_sig = bytes([DER_SEQUENCE_TAG]) + der_length + encoded_sig_r + encoded_sig_s
+ return der_sig
+
+ raise ValueError("Unable to parse ECC signature from TPM format")
def __get_pcrs_from_blob(pcrblob: bytes) -> Tuple[int, Dict[int, int], List[bytes]]:
@@ -238,7 +317,9 @@ def checkquote(
(sig_size,) = struct.unpack_from(">H", sigblob, 4)
(signature,) = struct.unpack_from(f"{sig_size}s", sigblob, 6)
elif sig_alg in [tpm2_objects.TPM_ALG_ECDSA]:
- signature = ecdsa_der_from_tpm(sigblob)
+ if not isinstance(pubkey, EllipticCurvePublicKey):
+ raise ValueError(f"ECDSA signature algorithm requires EllipticCurvePublicKey, got {type(pubkey)}")
+ signature = ecdsa_der_from_tpm(sigblob, pubkey)
else:
raise ValueError(f"Unsupported quote signature algorithm '{sig_alg:#x}'")
diff --git a/test/test_tpm2_objects.py b/test/test_tpm2_objects.py
index 48d6a43..c0e4c0a 100644
--- a/test/test_tpm2_objects.py
+++ b/test/test_tpm2_objects.py
@@ -16,7 +16,7 @@ from keylime.tpm.tpm2_objects import (
pubkey_parms_from_tpm2b_public,
tpms_ecc_point_marshal,
)
-from keylime.tpm.tpm_util import crypt_secret_encrypt_ecc
+from keylime.tpm.tpm_util import crypt_secret_encrypt_ecc, der_int, der_len, ecdsa_der_from_tpm
class TestTpm2Objects(unittest.TestCase):
@@ -543,5 +543,187 @@ class TestEccMarshaling(unittest.TestCase):
self.assertEqual(data_sizes[0], hashfunc.digest_size, "Data size should match hash digest size")
+class TestEccSignatureParsing(unittest.TestCase):
+ """Test ECC signature parsing improvements for variable-length coordinates"""
+
+ def create_test_signature_blob(self, sig_r: bytes, sig_s: bytes) -> bytes:
+ """Create a test TPM signature blob with given r and s components"""
+ # TPM signature format: sig_alg(2) + hash_alg(2) + sig_size_r(2) + r_data + sig_size_s(2) + s_data
+ sig_alg = 0x0018 # TPM_ALG_ECDSA
+ hash_alg = 0x000B # TPM_ALG_SHA256
+
+ blob = struct.pack(">HHH", sig_alg, hash_alg, len(sig_r))
+ blob += sig_r
+ blob += struct.pack(">H", len(sig_s))
+ blob += sig_s
+
+ return blob
+
+ def test_p521_variable_length_coordinates(self):
+ """Test that P-521 signatures with variable-length coordinates are parsed correctly"""
+ # Generate a P-521 key for testing
+ private_key = ec.generate_private_key(ec.SECP521R1())
+ public_key = private_key.public_key()
+
+ # Test with 65-byte coordinates (leading zero stripped)
+ sig_r_65 = b"\x00" * 1 + b"\x01" * 64 # 65 bytes
+ sig_s_65 = b"\x00" * 1 + b"\x02" * 64 # 65 bytes
+
+ blob_65 = self.create_test_signature_blob(sig_r_65, sig_s_65)
+
+ # Should parse successfully
+ der_sig_65 = ecdsa_der_from_tpm(blob_65, public_key)
+ self.assertIsInstance(der_sig_65, bytes)
+ self.assertTrue(len(der_sig_65) > 0)
+
+ # Test with 66-byte coordinates (full padding)
+ sig_r_66 = b"\x00" * 2 + b"\x01" * 64 # 66 bytes
+ sig_s_66 = b"\x00" * 2 + b"\x02" * 64 # 66 bytes
+
+ blob_66 = self.create_test_signature_blob(sig_r_66, sig_s_66)
+
+ # Should parse successfully
+ der_sig_66 = ecdsa_der_from_tpm(blob_66, public_key)
+ self.assertIsInstance(der_sig_66, bytes)
+ self.assertTrue(len(der_sig_66) > 0)
+
+ def test_coordinate_size_validation(self):
+ """Test that coordinate size validation works for different curves"""
+ # Test P-256 with valid coordinates
+ p256_key = ec.generate_private_key(ec.SECP256R1()).public_key()
+
+ # Valid P-256 coordinates (32 bytes each)
+ sig_r_32 = b"\x01" * 32
+ sig_s_32 = b"\x02" * 32
+ blob_p256_valid = self.create_test_signature_blob(sig_r_32, sig_s_32)
+
+ # Should parse successfully
+ der_sig = ecdsa_der_from_tpm(blob_p256_valid, p256_key)
+ self.assertIsInstance(der_sig, bytes)
+
+ # Test P-256 with invalid coordinates (too large)
+ sig_r_invalid = b"\x01" * 50 # Too large for P-256
+ sig_s_invalid = b"\x02" * 50 # Too large for P-256
+ blob_p256_invalid = self.create_test_signature_blob(sig_r_invalid, sig_s_invalid)
+
+ # Should raise ValueError
+ with self.assertRaises(ValueError) as cm:
+ ecdsa_der_from_tpm(blob_p256_invalid, p256_key)
+ self.assertIn("Invalid r coordinate size", str(cm.exception))
+
+ def test_signature_parsing_edge_cases(self):
+ """Test edge cases in signature parsing"""
+ p256_key = ec.generate_private_key(ec.SECP256R1()).public_key()
+
+ # Test with truncated blob (missing s component)
+ truncated_blob = struct.pack(">HHH", 0x0018, 0x000B, 32) + b"\x01" * 32
+ # Missing s component
+
+ with self.assertRaises(ValueError) as cm:
+ ecdsa_der_from_tpm(truncated_blob, p256_key)
+ self.assertIn("Unable to parse ECC signature", str(cm.exception))
+
+ # Test with blob too short for s size header
+ short_blob = struct.pack(">HHH", 0x0018, 0x000B, 32) + b"\x01" * 32 + b"\x00" # Only 1 byte for s size
+
+ with self.assertRaises(ValueError) as cm:
+ ecdsa_der_from_tpm(short_blob, p256_key)
+ self.assertIn("Unable to parse ECC signature", str(cm.exception))
+
+ def test_der_encoding_correctness(self):
+ """Test that DER encoding produces correctly formatted output"""
+ p256_key = ec.generate_private_key(ec.SECP256R1()).public_key()
+
+ # Create test coordinates
+ sig_r = b"\x01" * 32
+ sig_s = b"\x02" * 32
+ blob = self.create_test_signature_blob(sig_r, sig_s)
+
+ der_sig = ecdsa_der_from_tpm(blob, p256_key)
+
+ # DER signature should start with SEQUENCE tag (0x30)
+ self.assertEqual(der_sig[0], 0x30, "DER signature should start with SEQUENCE tag")
+
+ # Should be parseable as DER format
+ # The structure should be: 0x30 + length + INTEGER(r) + INTEGER(s)
+ self.assertTrue(len(der_sig) >= 6, "DER signature should have minimum length")
+
+ def test_multiple_curve_support(self):
+ """Test that signature parsing works for multiple curve types"""
+ test_cases = [
+ (ec.SECP256R1(), 32),
+ (ec.SECP384R1(), 48),
+ (ec.SECP521R1(), 66),
+ ]
+
+ for curve, coord_size in test_cases:
+ with self.subTest(curve=curve.name):
+ private_key = ec.generate_private_key(curve)
+ public_key = private_key.public_key()
+
+ # Create test signature with appropriate coordinate size
+ sig_r = b"\x01" * coord_size
+ sig_s = b"\x02" * coord_size
+ blob = self.create_test_signature_blob(sig_r, sig_s)
+
+ # Should parse successfully
+ der_sig = ecdsa_der_from_tpm(blob, public_key)
+ self.assertIsInstance(der_sig, bytes)
+ self.assertTrue(len(der_sig) > 0)
+ self.assertEqual(der_sig[0], 0x30) # DER SEQUENCE tag
+
+ def test_der_int_encoding(self):
+ """Test DER integer encoding helper function"""
+ # Test positive number that doesn't need padding
+ test_bytes = b"\x7F" # 127, no padding needed
+ der_encoded = der_int(test_bytes)
+ expected = b"\x02\x01\x7F" # INTEGER tag + length + value
+ self.assertEqual(der_encoded, expected)
+
+ # Test positive number that needs zero padding (high bit set)
+ test_bytes = b"\xFF" # 255, needs zero padding
+ der_encoded = der_int(test_bytes)
+ expected = b"\x02\x02\x00\xFF" # INTEGER tag + length + zero padding + value
+ self.assertEqual(der_encoded, expected)
+
+ def test_der_len_encoding(self):
+ """Test DER length encoding helper function"""
+ # Test short form (< 128)
+ short_len = der_len(50)
+ self.assertEqual(short_len, b"\x32") # 50 in hex
+
+ # Test long form (>= 128)
+ long_len = der_len(300) # 0x012C
+ expected = b"\x82\x01\x2C" # Long form: 0x80 | 2 bytes, then 0x012C
+ self.assertEqual(long_len, expected)
+
+ def test_signature_format_validation_comprehensive(self):
+ """Comprehensive test of signature format validation"""
+ p521_key = ec.generate_private_key(ec.SECP521R1()).public_key()
+
+ # Test minimum valid coordinate sizes for P-521
+ valid_sizes = [65, 66]
+ for size in valid_sizes:
+ sig_r = b"\x01" * size
+ sig_s = b"\x02" * size
+ blob = self.create_test_signature_blob(sig_r, sig_s)
+
+ # Should not raise exception
+ der_sig = ecdsa_der_from_tpm(blob, p521_key)
+ self.assertIsInstance(der_sig, bytes)
+
+ # Test invalid coordinate sizes for P-521 (outside the 1-66 range)
+ invalid_sizes = [0, 67, 100] # 0 is too small, 67+ is too large
+ for size in invalid_sizes:
+ with self.subTest(size=size):
+ sig_r = b"\x01" * size if size > 0 else b""
+ sig_s = b"\x02" * size if size > 0 else b""
+ blob = self.create_test_signature_blob(sig_r, sig_s)
+
+ with self.assertRaises(ValueError) as cm:
+ ecdsa_der_from_tpm(blob, p521_key)
+ self.assertIn("coordinate size", str(cm.exception))
+
+
if __name__ == "__main__":
unittest.main()
--
2.47.3

View File

@ -1,974 +0,0 @@
From af74d817bd7c8c2a2e014e1728b46f41049ac066 Mon Sep 17 00:00:00 2001
From: Marek Safarik <msafarik@redhat.com>
Date: Tue, 14 Oct 2025 15:14:26 +0200
Subject: [PATCH] Added manpages for keylime services and the tenant
Here I am adding manpages for keylime tenant, verifier, registrar, policy.
The RST formatting has been fixed for rst2man compatibility.
Co-authored-by: Marek Safarik <msafarik@redhat.com>
Co-authored-by: Sergio Correia <scorreia@redhat.com>
---
docs/man/keylime-policy.1.rst | 179 +++++++++++++++++++++
docs/man/keylime_agent.8.rst | 218 +++++++++++++++++++++++++
docs/man/keylime_registrar.8.rst | 142 +++++++++++++++++
docs/man/keylime_tenant.1.rst | 263 +++++++++++++++++++++++++++++++
docs/man/keylime_verifier.8.rst | 116 ++++++++++++++
5 files changed, 918 insertions(+)
create mode 100644 docs/man/keylime-policy.1.rst
create mode 100644 docs/man/keylime_agent.8.rst
create mode 100644 docs/man/keylime_registrar.8.rst
create mode 100644 docs/man/keylime_tenant.1.rst
create mode 100644 docs/man/keylime_verifier.8.rst
diff --git a/docs/man/keylime-policy.1.rst b/docs/man/keylime-policy.1.rst
new file mode 100644
index 0000000..b24c372
--- /dev/null
+++ b/docs/man/keylime-policy.1.rst
@@ -0,0 +1,179 @@
+==============
+keylime-policy
+==============
+
+------------------------------------------
+Keylime policy creation and signing tool
+------------------------------------------
+
+:Manual section: 1
+:Author: Keylime Developers
+:Date: September 2025
+
+SYNOPSIS
+========
+
+**keylime-policy** {create,sign} [*OPTIONS*]
+
+(Requires root privileges, use with sudo)
+
+DESCRIPTION
+===========
+
+keylime-policy is a utility for creating and signing Keylime policies. It supports creating
+runtime policies (for IMA/filesystem attestation) and measured boot policies (for boot-time
+attestation), as well as signing runtime policies using DSSE (Dead Simple Signing Envelope).
+
+COMMANDS
+========
+
+**keylime-policy create runtime** [*OPTIONS*]
+
+ Create runtime policies from filesystem, allowlists, RPM repositories, or IMA measurement lists.
+
+ Options:
+
+ **-o, --output** *OUTPUT*
+ Output file (defaults to stdout)
+
+ **-p, --base-policy** *BASE_POLICY*
+ Merge new data into existing JSON runtime policy
+
+ **-k, --keyrings**
+ Create keyrings policy entries
+
+ **-b, --ima-buf**
+ Process ima-buf entries other than keyrings
+
+ **-a, --allowlist** *ALLOWLIST*
+ Read checksums from plain-text allowlist
+
+ **-e, --excludelist** *EXCLUDE_LIST_FILE*
+ Add IMA exclude list to policy
+
+ **-m, --ima-measurement-list** *[IMA_MEASUREMENT_LIST]*
+ Use IMA measurement list for hash/keyring extraction
+
+ **--ignored-keyrings** *IGNORED_KEYRINGS*
+ Ignore specified keyring (repeatable)
+
+ **--add-ima-signature-verification-key** *IMA_SIGNATURE_KEYS*
+ Add x509/key to tenant_keyring (repeatable)
+
+ **--show-legacy-allowlist**
+ Display digests in legacy allowlist format
+
+ **-v, --verbose**
+ Set log level to DEBUG
+
+ Filesystem scanning:
+
+ **--algo** *{sha1,sha256,sha384,sha512,sm3_256}*
+ Checksum algorithm
+
+ **--ramdisk-dir** *RAMDISK_DIR*
+ Path to initrds (e.g., /boot)
+
+ **--rootfs** *ROOTFS*
+ Path to root filesystem (e.g., /)
+
+ **-s, --skip-path** *SKIP_PATH*
+ Comma-separated directories to skip
+
+ Repository scanning:
+
+ **--local-rpm-repo** *LOCAL_RPM_REPO*
+ Local RPM repository directory
+
+ **--remote-rpm-repo** *REMOTE_RPM_REPO*
+ Remote RPM repository URL
+
+**keylime-policy create measured-boot** [*OPTIONS*]
+
+ Create measured boot reference state policies from UEFI event logs.
+
+ Options:
+
+ **-e, --eventlog-file** *EVENTLOG_FILE*
+ Binary UEFI eventlog (required)
+
+ **--without-secureboot, -i**
+ Create policy without SecureBoot (MeasuredBoot only)
+
+ **-o, --output** *OUTPUT*
+ Output path for generated measured boot policy
+
+**keylime-policy sign runtime** [*OPTIONS*]
+
+ Sign runtime policies using DSSE.
+
+ Options:
+
+ **-o, --output** *OUTPUT_FILE*
+ Output file for DSSE-signed policy
+
+ **-r, --runtime-policy** *POLICY*
+ Runtime policy file to sign (required)
+
+ **-k, --keyfile** *KEYFILE*
+ EC private key for signing
+
+ **-p, --keypath** *KEYPATH*
+ Output filename for created private key
+
+ **-b, --backend** *{ecdsa,x509}*
+ DSSE backend (ecdsa or x509)
+
+ **-c, --cert-outfile** *CERT_OUTFILE*
+ Output file for x509 certificate (x509 backend)
+
+EXAMPLES
+========
+
+**Create runtime policy from filesystem:**
+
+.. code-block:: bash
+
+ sudo keylime-policy create runtime --rootfs / --output my-policy.json
+
+**Create runtime policy from allowlist:**
+
+.. code-block:: bash
+
+ sudo keylime-policy create runtime --allowlist my-allowlist.txt --output policy.json
+
+**Create measured boot policy:**
+
+.. code-block:: bash
+
+ sudo keylime-policy create measured-boot -e /sys/kernel/security/tpm0/binary_bios_measurements -o mb-policy.json
+
+**Sign runtime policy:**
+
+.. code-block:: bash
+
+ sudo keylime-policy sign runtime -r policy.json -k signing-key.pem -o signed-policy.json
+
+ENVIRONMENT
+===========
+
+**KEYLIME_LOGGING_CONFIG**
+ Path to logging.conf
+
+NOTES
+=====
+
+- All operations require root privileges
+- Runtime policies use JSON format
+- Measured boot policies require binary UEFI event logs
+- DSSE signing supports both ECDSA and x509 backends
+
+SEE ALSO
+========
+
+**keylime_tenant**\(1), **keylime_verifier**\(8), **keylime_registrar**\(8)
+
+BUGS
+====
+
+Report bugs at https://github.com/keylime/keylime/issues
diff --git a/docs/man/keylime_agent.8.rst b/docs/man/keylime_agent.8.rst
new file mode 100644
index 0000000..dffcc09
--- /dev/null
+++ b/docs/man/keylime_agent.8.rst
@@ -0,0 +1,218 @@
+=============
+keylime_agent
+=============
+
+-----------------------------------------------
+Keylime agent service for TPM-based attestation
+-----------------------------------------------
+
+:Manual section: 8
+:Author: Keylime Developers
+:Date: September 2025
+
+SYNOPSIS
+========
+
+**keylime_agent**
+
+(Most operations require root privileges, use with sudo)
+
+DESCRIPTION
+===========
+
+The agent is a long-running service that runs on systems to be attested. It communicates with
+the TPM to generate quotes, collects IMA and measured boot event logs, and provides secure
+payload functionality. The service does not accept command-line options; behavior is configured
+via TOML configuration files.
+
+CONFIGURATION
+=============
+
+Primary configuration is read from ``/etc/keylime/agent.conf`` (or an override via env).
+Configuration uses TOML format. All options are under the ``[agent]`` section.
+
+Drop-in overrides: files in ``/etc/keylime/agent.conf.d/`` are applied in lexicographic order.
+
+Essential configuration options:
+
+**uuid**
+ Agent identifier (``generate``, ``hash_ek``, ``environment``, ``dmidecode``, ``hostname``, or explicit UUID)
+
+**ip**, **port**
+ Bind address and port (default: 9002)
+
+**contact_ip**, **contact_port**
+ External contact address (optional)
+
+**registrar_ip**, **registrar_port**
+ Registrar endpoint
+
+**enable_agent_mtls**
+ Enable mTLS communication
+
+**tls_dir**
+ TLS material location (``generate`` for auto-generate under ``$KEYLIME_DIR/cv_ca``, ``default`` for ``$KEYLIME_DIR/secure``)
+
+**server_key**, **server_key_password**, **server_cert**
+ TLS files (self-signed cert)
+
+**trusted_client_ca**
+ Trusted client CA list
+
+**enc_keyname**
+ Payload encryption key file name
+
+**dec_payload_file**
+ Decrypted payload file name
+
+**secure_size**
+ tmpfs partition size for secure storage
+
+**tpm_ownerpassword**
+ TPM owner password (``generate`` for random)
+
+**extract_payload_zip**
+ Auto-extract zip payloads (bool)
+
+**enable_revocation_notifications**
+ Listen for revocation via ZeroMQ (bool)
+
+**revocation_notification_ip**, **revocation_notification_port**
+ ZeroMQ endpoint
+
+**revocation_cert**
+ Certificate to verify revocation messages
+
+**revocation_actions**
+ Python scripts to run on revocation
+
+**payload_script**
+ Script to run after payload extraction
+
+**enable_insecure_payload**
+ Allow payloads without mTLS (insecure)
+
+**measure_payload_pcr**
+ PCR to extend with payload (-1 to disable)
+
+**exponential_backoff**, **retry_interval**, **max_retries**
+ TPM communication retry
+
+**tpm_hash_alg**, **tpm_encryption_alg**, **tpm_signing_alg**
+ TPM algorithms
+
+**ek_handle**
+ EK handle (``generate`` or explicit handle like ``0x81000000``)
+
+**enable_iak_idevid**
+ Enable IAK/IDevID usage (bool)
+
+**iak_idevid_template**, **iak_idevid_asymmetric_alg**, **iak_idevid_name_alg**
+ IAK/IDevID config
+
+**idevid_password**, **idevid_handle**, **iak_password**, **iak_handle**
+ Persistent key handles
+
+**iak_cert**, **idevid_cert**
+ Certificate file names
+
+**run_as**
+ User:group to drop privileges to
+
+**ima_ml_path**
+ IMA measurement log path (default: ``/sys/kernel/security/ima/ascii_runtime_measurements``)
+
+**measuredboot_ml_path**
+ Measured boot log path (default: ``/sys/kernel/security/tpm0/binary_bios_measurements``)
+
+ENVIRONMENT
+===========
+
+**KEYLIME_AGENT_CONFIG**
+ Path to agent.conf (highest priority)
+
+**KEYLIME_LOGGING_CONFIG**
+ Path to logging.conf
+
+**KEYLIME_DIR**
+ Working directory (default: ``/var/lib/keylime``)
+
+**KEYLIME_AGENT_UUID**
+ UUID when ``uuid = environment``
+
+**KEYLIME_AGENT_IAK_CERT**
+ Override iak_cert path
+
+**KEYLIME_AGENT_IDEVID_CERT**
+ Override idevid_cert path
+
+**KEYLIME_TEST**
+ ``on/true/1`` enables testing mode
+
+FILES
+=====
+
+``/etc/keylime/agent.conf``
+ TOML format configuration file
+
+``/etc/keylime/agent.conf.d/``
+ Drop-in snippets; read in lexicographic order
+
+``/etc/keylime/logging.conf``
+ Logging configuration
+
+``$KEYLIME_DIR/secure/``
+ Secure tmpfs mount for keys/payloads
+
+``$KEYLIME_DIR/cv_ca/``
+ TLS certificates when ``tls_dir = generate``
+
+``$KEYLIME_DIR/tpmdata.yml``
+ TPM state persistence
+
+RUNTIME
+=======
+
+Start from system install:
+
+.. code-block:: bash
+
+ sudo keylime_agent
+
+Start as a systemd service:
+
+.. code-block:: bash
+
+ sudo systemctl enable --now keylime_agent
+
+Open firewall port:
+
+.. code-block:: bash
+
+ sudo firewall-cmd --add-port=9002/tcp
+ sudo firewall-cmd --runtime-to-permanent
+
+PREREQUISITES
+=============
+
+- Root privileges (use sudo)
+- TPM 2.0 available (verify with ``tpm2_pcrread``)
+- IMA enabled in kernel
+- Network connectivity to registrar
+
+NOTES
+=====
+
+- Agent uses TOML configuration format (unlike other Keylime components).
+- The Rust agent is the current implementation; Python agent is deprecated.
+- Agent generates self-signed certificates for mTLS if not provided.
+
+SEE ALSO
+========
+
+**keylime_verifier**\(8), **keylime_registrar**\(8), **keylime_tenant**\(1)
+
+BUGS
+====
+
+Report bugs at https://github.com/keylime/rust-keylime/issues
diff --git a/docs/man/keylime_registrar.8.rst b/docs/man/keylime_registrar.8.rst
new file mode 100644
index 0000000..9841c34
--- /dev/null
+++ b/docs/man/keylime_registrar.8.rst
@@ -0,0 +1,142 @@
+=================
+keylime_registrar
+=================
+
+------------------------------------------------
+Keylime registrar service for agent registration
+------------------------------------------------
+
+:Manual section: 8
+:Author: Keylime Developers
+:Date: September 2025
+
+SYNOPSIS
+========
+
+**keylime_registrar**
+
+(Most operations require root privileges, use with sudo)
+
+DESCRIPTION
+===========
+
+The registrar is a long-running service used by agents. It maintains its own database where it stores data
+of registered agents. The service does not accept command-line options; behavior is
+configured via configuration files and environment variables, and is managed by keylime tenant.
+
+CONFIGURATION
+=============
+
+Primary configuration is read from ``/etc/keylime/registrar.conf`` (or an override via env).
+All options are under the ``[registrar]`` section.
+
+Essential configuration options:
+
+**ip**
+ Bind address
+
+**port**
+ HTTP port
+
+**tls_port**
+ HTTPS port
+
+**tls_dir**
+ TLS material location (``generate`` for auto-generate CA, keys, certs under ``$KEYLIME_DIR/reg_ca``, ``default`` for shared verifier CA under ``$KEYLIME_DIR/cv_ca``)
+
+**server_key**, **server_key_password**, **server_cert**, **trusted_client_ca**
+ TLS files
+
+**database_url**
+ SQLAlchemy URL; value ``sqlite`` maps to ``$KEYLIME_DIR/reg_data.sqlite``
+
+**database_pool_sz_ovfl**
+ Pool size, overflow (non-sqlite)
+
+**auto_migrate_db**
+ Apply DB migrations on startup
+
+**max_upload_size**
+ Request body limit (bytes)
+
+**tpm_identity**
+ Allowed identity (``default``, ``ek_cert_or_iak_idevid``, ``ek_cert``, ``iak_idevid``)
+
+**malformed_cert_action**
+ ``warn`` (default), ``reject``, or ``ignore``
+
+**durable_attestation_import** (optional)
+ Python import path to enable Durable Attestation
+
+ENVIRONMENT
+===========
+
+**KEYLIME_REGISTRAR_CONFIG**
+ Path to registrar.conf (highest priority)
+
+**KEYLIME_LOGGING_CONFIG**
+ Path to logging.conf
+
+**KEYLIME_DIR**
+ Working directory (default: ``/var/lib/keylime``)
+
+**KEYLIME_TEST**
+ ``on/true/1`` enables testing mode (looser checks; WORK_DIR becomes CWD)
+
+FILES
+=====
+
+``/etc/keylime/registrar.conf``
+ Registrar configuration file
+
+``/etc/keylime/logging.conf``
+ Logging configuration
+
+``$KEYLIME_DIR/reg_data.sqlite``
+ Database file when ``database_url = sqlite``
+
+``$KEYLIME_DIR/reg_ca``
+ TLS certificates when ``tls_dir = generate``
+
+``$KEYLIME_DIR/cv_ca``
+ Shared verifier certificates when ``tls_dir = default``
+
+RUNTIME
+=======
+
+Start from system install:
+
+.. code-block:: bash
+
+ sudo keylime_registrar
+
+Start as a systemd service:
+
+.. code-block:: bash
+
+ systemctl enable --now keylime_registrar
+
+Open firewall ports (adjust if you changed ports):
+
+.. code-block:: bash
+
+ firewall-cmd --add-port=8890/tcp --add-port=8891/tcp
+ firewall-cmd --runtime-to-permanent
+
+NOTES
+=====
+
+- HTTPS is required for routes unless the service explicitly allows insecure access.
+- With ``tls_dir = default``, start the verifier before the registrar so the shared CA/certs exist in ``$KEYLIME_DIR/cv_ca``.
+- The service forks worker processes (default: CPU count).
+- Registrar and verifier may run on the same host or on separate hosts.
+
+SEE ALSO
+========
+
+**keylime_verifier**\(8), **keylime_tenant**\(1), **keylime_agent**\(8)
+
+BUGS
+====
+
+Report bugs at https://github.com/keylime/keylime/issues
diff --git a/docs/man/keylime_tenant.1.rst b/docs/man/keylime_tenant.1.rst
new file mode 100644
index 0000000..7be1319
--- /dev/null
+++ b/docs/man/keylime_tenant.1.rst
@@ -0,0 +1,263 @@
+==============
+keylime_tenant
+==============
+
+---------------------------------------------------------------------------
+Keylime tenant management tool for agent provisioning and policy management
+---------------------------------------------------------------------------
+
+:Manual section: 1
+:Author: Keylime Developers
+:Date: July 2025
+
+SYNOPSIS
+========
+
+**keylime_tenant** [*OPTIONS*] [*COMMAND*]
+
+(Most operations require root privileges, use with sudo)
+
+DESCRIPTION
+===========
+
+keylime_tenant is the primary command-line interface for managing Keylime agents and policies.
+It allows users to provision agents with TPM-based attestation, manage runtime policies,
+measured boot policies, and interact with Keylime registrar and verifier services.
+
+The tenant can add, delete, update, and monitor agents, as well as manage various types of
+policies including runtime policies (for IMA/EVM attestation) and measured boot policies
+(for boot-time attestation). It supports both push and pull models for agent communication.
+
+You can run keylime_tenant on the same system as the Keylime registrar or verifier, or on a separate system.
+
+COMMANDS
+========
+
+**-c, --command** *COMMAND*
+ Specify the command to execute. Valid commands are:
+
+ - **add**: Add a new agent to the system (default)
+ - **delete**: Remove an agent from the system
+ - **update**: Update an existing agent's configuration
+ - **regstatus**: Show agent status from registrar
+ - **cvstatus**: Show agent status from cloud verifier
+ - **status**: Show combined agent status
+ - **reglist**: List all agents in registrar
+ - **cvlist**: List all agents in cloud verifier
+ - **reactivate**: Reactivate a failed agent
+ - **regdelete**: Delete agent from registrar only
+ - **bulkinfo**: Get bulk information about agents
+ - **addruntimepolicy**: Add a runtime policy (requires --runtime-policy or --allowlist)
+ - **showruntimepolicy**: Display a runtime policy (requires --runtime-policy-name)
+ - **deleteruntimepolicy**: Remove a runtime policy (requires --runtime-policy-name)
+ - **updateruntimepolicy**: Update a runtime policy (requires --runtime-policy-name)
+ - **listruntimepolicy**: List all runtime policies
+ - **addmbpolicy**: Add a measured boot policy (requires --mb-policy-name)
+ - **showmbpolicy**: Display a measured boot policy (requires --mb-policy-name)
+ - **deletembpolicy**: Remove a measured boot policy (requires --mb-policy-name)
+ - **updatembpolicy**: Update a measured boot policy (requires --mb-policy-name)
+ - **listmbpolicy**: List all measured boot policies
+
+OPTIONS
+=======
+
+**-h, --help**
+ Show help message and exit
+
+**--push-model**
+ Enable push model (avoid requests to keylime-agent)
+
+**-t, --targethost** *AGENT_IP*
+ The IP address of the host to provision
+
+**-tp, --targetport** *AGENT_PORT*
+ The port of the host to provision
+
+**-r, --registrarhost** *REGISTRAR_IP*
+ The IP address of the registrar where to retrieve the agents data from
+
+**-rp, --registrarport** *REGISTRAR_PORT*
+ The port of the registrar
+
+**--cv_targethost** *CV_AGENT_IP*
+ The IP address of the host to provision that the verifier will use (optional).
+  Use only if different from the argument to option -t/--targethost
+
+**-v, --cv** *VERIFIER_IP*
+ The IP address of the cloud verifier
+
+**-vp, --cvport** *VERIFIER_PORT*
+ The port of the cloud verifier
+
+**-vi, --cvid** *VERIFIER_ID*
+ The unique identifier of a cloud verifier
+
+**-nvc, --no-verifier-check**
+  Disable the check to confirm whether the agent is being processed by the specified verifier.
+ Use only with -c/--command delete or reactivate
+
+**-u, --uuid** *AGENT_UUID*
+ UUID for the agent to provision
+
+**-f, --file** *FILE*
+ Deliver the specified plaintext file to the provisioned agent
+
+**--cert** *CA_DIR*
+ Create and deliver a certificate using a CA created by ca-util.
+ Pass in the CA directory or use "default" to use the standard directory
+
+**-k, --key** *KEYFILE*
+ An intermediate key file produced by user_data_encrypt
+
+**-p, --payload** *PAYLOAD*
+ Specify the encrypted payload to deliver with encrypted keys specified by -k
+
+**--include** *INCL_DIR*
+ Include additional files in provided directory in certificate zip file.
+ Must be specified with --cert
+
+**--runtime-policy** *RUNTIME_POLICY*
+ Specify the file path of a runtime policy
+
+**--runtime-policy-checksum** *RUNTIME_POLICY_CHECKSUM*
+ Specify the SHA-256 checksum of a runtime policy
+
+**--runtime-policy-sig-key** *RUNTIME_POLICY_SIG_KEY*
+ Specify the public key file used to validate the runtime policy signature
+
+**--runtime-policy-url** *RUNTIME_POLICY_URL*
+ Specify the URL of a remote runtime policy
+
+**--runtime-policy-name** *RUNTIME_POLICY_NAME*
+ The name of the runtime policy to operate with
+
+**--mb-policy** *MB_POLICY*
+ The measured boot policy to operate with
+
+**--mb-policy-name** *MB_POLICY_NAME*
+ The name of the measured boot policy to operate with
+
+**--tpm_policy** *TPM_POLICY*
+ Specify a TPM policy in JSON format.
+ Example: {"15":"0000000000000000000000000000000000000000"}
+
+**--verify**
+ Block on cryptographically checked key derivation confirmation from the agent
+ once it has been provisioned
+
+**--supported-version** *SUPPORTED_VERSION*
+ API version that is supported by the agent. Detected automatically by default
+
+DEPRECATED OPTIONS
+==================
+
+The following options are deprecated and may be removed in future versions:
+
+**--allowlist** *ALLOWLIST*
+ **DEPRECATED**: Migrate to runtime policies for continued functionality.
+ Specify the file path of an allowlist
+
+**--allowlist-url** *ALLOWLIST_URL*
+ **DEPRECATED**: Migrate to runtime policies for continued functionality.
+ Specify the URL of a remote allowlist
+
+**--allowlist-name** *ALLOWLIST_NAME*
+ **DEPRECATED**: Migrate to runtime policies for continued functionality.
+ The name of allowlist to operate with
+
+**--exclude** *IMA_EXCLUDE*
+ **DEPRECATED**: Migrate to runtime policies for continued functionality.
+ Specify the location of an IMA exclude list
+
+**--mb_refstate** *MB_POLICY*
+ **DEPRECATED**: Use --mb-policy instead.
+ Specify the location of a measured boot reference state
+
+**--signature-verification-key** *IMA_SIGN_VERIFICATION_KEYS*
+ **DEPRECATED**: Provide verification keys as part of a runtime policy for continued functionality.
+ Specify an IMA file signature verification key
+
+EXAMPLES
+========
+
+**Add a new agent:**
+
+.. code-block:: bash
+
+ sudo keylime_tenant -c add -t 192.168.1.100 -u agent-001
+
+**Add an agent with runtime policy:**
+
+.. code-block:: bash
+
+ sudo keylime_tenant -c add -t 192.168.1.100 -u agent-001 --runtime-policy /path/to/policy.json
+
+**Check agent status:**
+
+.. code-block:: bash
+
+ sudo keylime_tenant -c status -u agent-001
+
+**Delete an agent:**
+
+.. code-block:: bash
+
+ sudo keylime_tenant -c delete -u agent-001
+
+**List all agents:**
+
+.. code-block:: bash
+
+ sudo keylime_tenant -c cvlist
+
+**Add a runtime policy:**
+
+.. code-block:: bash
+
+ sudo keylime_tenant -c addruntimepolicy --runtime-policy-name my-policy --runtime-policy /path/to/policy.json
+
+**Add a measured boot policy:**
+
+.. code-block:: bash
+
+ sudo keylime_tenant -c addmbpolicy --mb-policy-name my-mb-policy --mb-policy /path/to/mb-policy.json
+
+**Provision agent with certificate delivery:**
+
+.. code-block:: bash
+
+ sudo keylime_tenant -c add -t 192.168.1.100 -u agent-001 --cert default
+
+**Provision agent with custom verifier:**
+
+.. code-block:: bash
+
+ sudo keylime_tenant -c add -t 192.168.1.100 -u agent-001 -v 192.168.1.200 -vp 8881
+
+FILES
+=====
+
+/etc/keylime/tenant.conf
+ Default configuration file for keylime_tenant. Contains all tenant related settings.
+
+
+PREREQUISITES
+=============
+
+- Keylime verifier service running (default: 127.0.0.1:8881)
+- Keylime registrar service running (default: 127.0.0.1:8891)
+- Root privileges (use sudo)
+- Network connectivity to registrar and verifier services
+- Valid TLS configuration in /etc/keylime/tenant.conf
+
+SEE ALSO
+========
+
+**keylime_verifier**\(8), **keylime_registrar**\(8), **keylime_agent**\(8)
+
+For more information about Keylime, visit: https://keylime.dev
+
+BUGS
+====
+
+Report bugs to the Keylime project at: https://github.com/keylime/keylime/issues
diff --git a/docs/man/keylime_verifier.8.rst b/docs/man/keylime_verifier.8.rst
new file mode 100644
index 0000000..fd7cfb9
--- /dev/null
+++ b/docs/man/keylime_verifier.8.rst
@@ -0,0 +1,116 @@
+================
+keylime_verifier
+================
+
+----------------------------------------------
+Keylime verifier service for agent attestation
+----------------------------------------------
+
+:Manual section: 8
+:Author: Keylime Developers
+:Date: September 2025
+
+SYNOPSIS
+========
+
+**keylime_verifier**
+
+(Most operations require root privileges, use with sudo)
+
+DESCRIPTION
+===========
+
+The verifier is a long-running service that attests registered agents. It accesses
+the registrar database to obtain agent data, and optionally performs measured boot evaluation and durable
+attestation. The service does not accept command-line options; its behavior is configured via
+configuration files and environment variables, and it is managed by keylime tenant.
+
+CONFIGURATION
+=============
+
+Primary configuration is read from ``/etc/keylime/verifier.conf`` (or an override via env).
+All options are under the ``[verifier]`` section.
+
+Essentials:
+- **uuid**: Unique identifier for this verifier instance
+- **ip**, **port**: Bind address and HTTP port
+- **registrar_ip**, **registrar_port**: Registrar endpoint
+- **enable_agent_mtls**: Enable mTLS with agents and tenant
+- **tls_dir**: TLS material location
+
+ - ``generate``: auto-generate CA, client and server keys/certs under ``$KEYLIME_DIR/cv_ca``
+ - ``default``: use existing materials under ``$KEYLIME_DIR/cv_ca``
+
+- **server_key**, **server_key_password**, **server_cert**: Server TLS files
+- **client_key**, **client_key_password**, **client_cert**: Client TLS files
+- **trusted_client_ca**, **trusted_server_ca**: CA lists
+- **database_url**: SQLAlchemy URL; value ``sqlite`` maps to ``$KEYLIME_DIR/cv_data.sqlite``
+- **database_pool_sz_ovfl**: Pool size, overflow (non-sqlite)
+- **auto_migrate_db**: Apply DB migrations on startup
+- **num_workers**: Number of worker processes (``0`` = CPU count)
+- **exponential_backoff**, **retry_interval**, **max_retries**: Retry behavior for agent comm
+- **quote_interval**: Time between integrity checks (seconds)
+- **max_upload_size**: Upload size limit (bytes)
+- **request_timeout**: Agent request timeout (seconds)
+- **measured_boot_policy_name**, **measured_boot_imports**, **measured_boot_evaluate**: measured boot policy settings
+- **severity_labels**, **severity_policy**: revocation severity config
+- **ignore_tomtou_errors**: handle ToMToU IMA entries (bool)
+- **durable_attestation_import** and related **persistent_store_url**, **transparency_log_url**,
+ **time_stamp_authority_url**, **time_stamp_authority_certs_path**, **persistent_store_format**,
+ **persistent_store_encoding**, **transparency_log_sign_algo**, **signed_attributes**: durable attestation
+- **require_allow_list_signatures**: require signed allowlists (bool)
+
+ENVIRONMENT
+===========
+
+- **KEYLIME_VERIFIER_CONFIG**: Path to verifier.conf (highest priority)
+- **KEYLIME_LOGGING_CONFIG**: Path to logging.conf
+- **KEYLIME_DIR**: Working directory (default: ``/var/lib/keylime``)
+- **KEYLIME_TEST**: ``on/true/1`` enables testing mode (looser checks; WORK_DIR becomes CWD)
+
+FILES
+=====
+
+- ``/etc/keylime/verifier.conf``
+- ``/etc/keylime/logging.conf``
+- ``$KEYLIME_DIR/cv_data.sqlite`` (when ``database_url = sqlite``)
+- ``$KEYLIME_DIR/cv_ca`` (when ``tls_dir = default`` or ``generate``)
+- systemd unit: ``keylime_verifier.service``
+
+RUNTIME
+=======
+
+Start from system install:
+
+.. code-block:: bash
+
+ sudo keylime_verifier
+
+Start as a systemd service:
+
+.. code-block:: bash
+
+ systemctl enable --now keylime_verifier
+
+Open firewall ports (adjust if you changed ports):
+
+.. code-block:: bash
+
+ firewall-cmd --add-port 8881/tcp
+ firewall-cmd --runtime-to-permanent
+
+NOTES
+=====
+
+- Verifier initializes measured boot components on startup.
+- With ``tls_dir = generate``, the verifier creates CA/keys/certs in ``$KEYLIME_DIR/cv_ca`` used by other components.
+
+SEE ALSO
+========
+
+**keylime_registrar**\(8), **keylime_tenant**\(1), **keylime_agent**\(8)
+
+BUGS
+====
+
+Report bugs at https://github.com/keylime/keylime/issues
--
2.51.0

View File

@ -1,5 +1,5 @@
%global srcname keylime
%global policy_version 42.1.2
%global policy_version 43.1.1
# Package is actually noarch, but it has an optional dependency that is
# arch-specific.
@ -8,8 +8,8 @@
%global selinuxtype targeted
Name: keylime
Version: 7.12.1
Release: 14%{?dist}
Version: 7.14.1
Release: 1%{?dist}
Summary: Open source TPM software for Bootstrapping and Maintaining Trust
URL: https://github.com/keylime/keylime
@ -19,51 +19,8 @@ Source1: https://github.com/RedHat-SP-Security/%{name}-selinux/archive/v%
Source2: %{srcname}.sysusers
Source3: %{srcname}.tmpfiles
# Backported from https://github.com/keylime/keylime/pull/1782
# Fixes DB connections leaks (https://issues.redhat.com/browse/RHEL-102995)
Patch: keylime-fix-db-connection-leaks.patch
# Backported from https://github.com/keylime/keylime/pull/1791
Patch: 0002-mb-support-EV_EFI_HANDOFF_TABLES-events-on-PCR1.patch
Patch: 0003-mb-support-vendor_db-as-logged-by-newer-shim-version.patch
# Backported from https://github.com/keylime/keylime/pull/1784
# and https://github.com/keylime/keylime/pull/1785
Patch: 0004-verifier-Gracefully-shutdown-on-signal.patch
Patch: 0005-revocations-Try-to-send-notifications-on-shutdown.patch
Patch: 0006-requests_client-close-the-session-at-the-end-of-the-.patch
# Backported from https://github.com/keylime/keylime/pull/1736,
# https://github.com/keylime/keylime/commit/11c6b7f and
# https://github.com/keylime/keylime/commit/dd63459
Patch: 0007-tests-change-test_mba_parsing-to-not-need-keylime-in.patch
Patch: 0008-tests-skip-measured-boot-related-tests-for-s390x-and.patch
Patch: 0009-tests-fix-rpm-repo-tests-from-create-runtime-policy.patch
# Backported from https://github.com/keylime/keylime/pull/1793
Patch: 0010-mba-normalize-vendor_db-in-EV_EFI_VARIABLE_AUTHORITY.patch
# Backported from https://github.com/keylime/keylime/pull/1794
Patch: 0011-fix-malformed-certs-workaround.patch
# Backported from https://github.com/keylime/keylime/pull/1795
Patch: 0012-keylime-policy-avoid-opening-dev-stdout.patch
# Backported from:
# - https://github.com/keylime/keylime/pull/1746
# - https://github.com/keylime/keylime/pull/1803
# - https://github.com/keylime/keylime/pull/1808
# ECC attestation support.
Patch: 0013-algorithms-add-support-for-specific-ECC-curve-algori.patch
Patch: 0014-algorithms-add-support-for-specific-RSA-algorithms.patch
Patch: 0015-tpm_util-fix-quote-signature-extraction-for-ECDSA.patch
Patch: 0016-tpm-fix-ECC-P-521-coordinate-validation.patch
Patch: 0017-tpm-fix-ECC-P-521-credential-activation-with-consist.patch
Patch: 0018-tpm-fix-ECC-signature-parsing-to-support-variable-le.patch
# Backported from:
# - https://github.com/keylime/keylime/pull/1802
# - https://github.com/keylime/keylime/pull/1813
Patch: 0019-Added-manpages-for-keylime-services-and-the-tenant.patch
Patch: 0001-Fix-timestamp-conversion-to-use-UTC-timezone.patch
Patch: 0002-Fix-efivar-availability-check-in-test_create_mb_poli.patch
# Main program: Apache-2.0
# Icons: MIT
@ -76,9 +33,11 @@ BuildRequires: python3-devel
BuildRequires: python3-dbus
BuildRequires: python3-jinja2
BuildRequires: python3-cryptography
BuildRequires: python3-docutils
BuildRequires: python3-gpg
BuildRequires: python3-pyasn1
BuildRequires: python3-pyasn1-modules
BuildRequires: python3-requests
BuildRequires: python3-tornado
BuildRequires: python3-sqlalchemy
BuildRequires: python3-lark
@ -90,7 +49,6 @@ BuildRequires: systemd-rpm-macros
BuildRequires: rpm-sign
BuildRequires: createrepo_c
BuildRequires: tpm2-tools
BuildRequires: python3-docutils
Requires: python3-%{srcname} = %{version}-%{release}
Requires: %{srcname}-base = %{version}-%{release}
@ -261,7 +219,7 @@ bzip2 -9 %{srcname}.pp
mkdir -p manpages
rst2man --syntax-highlight=none docs/man/keylime_tenant.1.rst manpages/keylime_tenant.1
rst2man --syntax-highlight=none docs/man/keylime-policy.1.rst manpages/keylime-policy.1
rst2man --syntax-highlight=none docs/man/keylime-policy.1.rst manpages/keylime-policy.1
rst2man --syntax-highlight=none docs/man/keylime_registrar.8.rst manpages/keylime_registrar.8
rst2man --syntax-highlight=none docs/man/keylime_verifier.8.rst manpages/keylime_verifier.8
@ -286,8 +244,10 @@ done
# Ship the ek-openssl-verify script.
mkdir -p %{buildroot}/%{_datadir}/%{srcname}/scripts
install -Dpm 755 scripts/ek-openssl-verify \
%{buildroot}/%{_datadir}/%{srcname}/scripts/ek-openssl-verify
for s in ek-openssl-verify keylime_oneshot_attestation; do
install -Dpm 755 scripts/"${s}" \
%{buildroot}/%{_datadir}/%{srcname}/scripts/"${s}"
done
# Ship configuration templates.
cp -r ./templates %{buildroot}%{_datadir}/%{srcname}/templates/
@ -493,6 +453,7 @@ fi
%{_tmpfilesdir}/%{srcname}.conf
%{_sysusersdir}/%{srcname}.conf
%{_datadir}/%{srcname}/scripts/ek-openssl-verify
%{_datadir}/%{srcname}/scripts/keylime_oneshot_attestation
%{_datadir}/%{srcname}/templates
%{_bindir}/keylime_upgrade_config

View File

@ -1,2 +1,2 @@
SHA512 (v7.12.1.tar.gz) = c1297ebfc659102d73283255cfda4a977dfbff9bdd3748e05de405dadb70f752ad39aa5848edda9143d8ec620d07c21f1551fa4a914c99397620ab1682e58458
SHA512 (keylime-selinux-42.1.2.tar.gz) = cb7b7b10d1d81af628a7ffdadc1be5af6d75851a44f58cff04edc575cbba1613447e56bfa1fb86660ec7c15e5fcf16ba51f2984094550ba3e08f8095b800b741
SHA512 (v7.14.1.tar.gz) = d94cd1e25ec31e43fea05d0c404dd25c05b6b28435db2f8ca34546f6ff8bfd5da12d2dcd3b5cf4772c44688ae8968468dc2470da23596714e7615dbf6dfbe841
SHA512 (keylime-selinux-43.1.1.tar.gz) = 1b0a850f68321e4872bb01eb99f5b000f1b5cbe3f1882e781bff519868ba5f4ca50f25b328b3662895969833add5c30d00e2a2361d2d626e7cffd95c0243ec39