import python3-3.6.8-45.el8

Authored by CentOS Sources on 2022-05-10 03:09:40 -04:00; committed by Stepan Oksanichenko
parent 3a450dae9d
commit a62df8a19c
6 changed files with 638 additions and 1 deletion


@@ -0,0 +1,40 @@
diff --git a/Lib/threading.py b/Lib/threading.py
index 7ab9ad8..dcedd3b 100644
--- a/Lib/threading.py
+++ b/Lib/threading.py
@@ -3,7 +3,7 @@
import sys as _sys
import _thread
-from time import monotonic as _time
+from time import monotonic as _time, sleep as _sleep
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
from itertools import islice as _islice, count as _count
@@ -296,7 +296,25 @@ class Condition:
gotit = True
else:
if timeout > 0:
- gotit = waiter.acquire(True, timeout)
+ # rhbz#2003758: Avoid waiter.acquire(True, timeout) since
+ # it uses the system clock internally.
+ #
+ # Balancing act: We can't afford a pure busy loop, so we
+ # have to sleep; but if we sleep the whole timeout time,
+ # we'll be unresponsive. The scheme here sleeps very
+ # little at first, longer as time goes on, but never longer
+ # than 20 times per second (or the timeout time remaining).
+ endtime = _time() + timeout
+ delay = 0.0005 # 500 us -> initial delay of 1 ms
+ while True:
+ gotit = waiter.acquire(0)
+ if gotit:
+ break
+ remaining = min(endtime - _time(), timeout)
+ if remaining <= 0:
+ break
+ delay = min(delay * 2, remaining, .05)
+ _sleep(delay)
else:
gotit = waiter.acquire(False)
return gotit
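
The comment above describes the scheme in prose; the following standalone sketch (not part of the patch) shows the same backoff loop in isolation. Here try_acquire is a stand-in for the non-blocking waiter.acquire(0) call: the delay doubles from 1 ms upward but is capped at 50 ms and at the time remaining, so the loop polls at most about 20 times per second yet stays responsive near the deadline.

from time import monotonic as _time, sleep as _sleep

def wait_with_backoff(try_acquire, timeout):
    """Poll try_acquire() until it succeeds or `timeout` seconds pass,
    using only the monotonic clock so system clock changes have no effect."""
    endtime = _time() + timeout
    delay = 0.0005                      # 500 us -> first sleep of 1 ms
    while True:
        if try_acquire():               # corresponds to waiter.acquire(0)
            return True
        remaining = min(endtime - _time(), timeout)
        if remaining <= 0:              # deadline reached without acquiring
            return False
        delay = min(delay * 2, remaining, .05)   # cap: 50 ms and time left
        _sleep(delay)

# e.g. wait_with_backoff(lambda: some_lock.acquire(False), 2.0)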


@@ -0,0 +1,119 @@
From f7fb35b563a9182c22fbdd03c72ec3724dafe918 Mon Sep 17 00:00:00 2001
From: Gen Xu <xgbarry@gmail.com>
Date: Wed, 5 May 2021 15:42:41 -0700
Subject: [PATCH] bpo-44022: Fix http client infinite line reading (DoS) after
a HTTP 100 Continue (GH-25916)
Fixes http.client potential denial of service where it could get stuck reading lines from a malicious server after a 100 Continue response.
Co-authored-by: Gregory P. Smith <greg@krypto.org>
(cherry picked from commit 47895e31b6f626bc6ce47d175fe9d43c1098909d)
Co-authored-by: Gen Xu <xgbarry@gmail.com>
---
Lib/http/client.py | 38 ++++++++++---------
Lib/test/test_httplib.py | 10 ++++-
.../2021-05-05-17-37-04.bpo-44022.bS3XJ9.rst | 2 +
3 files changed, 32 insertions(+), 18 deletions(-)
create mode 100644 Misc/NEWS.d/next/Security/2021-05-05-17-37-04.bpo-44022.bS3XJ9.rst
diff --git a/Lib/http/client.py b/Lib/http/client.py
index 53581eca20587..07e675fac5981 100644
--- a/Lib/http/client.py
+++ b/Lib/http/client.py
@@ -205,15 +205,11 @@ def getallmatchingheaders(self, name):
lst.append(line)
return lst
-def parse_headers(fp, _class=HTTPMessage):
- """Parses only RFC2822 headers from a file pointer.
-
- email Parser wants to see strings rather than bytes.
- But a TextIOWrapper around self.rfile would buffer too many bytes
- from the stream, bytes which we later need to read as bytes.
- So we read the correct bytes here, as bytes, for email Parser
- to parse.
+def _read_headers(fp):
+ """Reads potential header lines into a list from a file pointer.
+ Length of line is limited by _MAXLINE, and number of
+ headers is limited by _MAXHEADERS.
"""
headers = []
while True:
@@ -225,6 +221,19 @@ def parse_headers(fp, _class=HTTPMessage):
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if line in (b'\r\n', b'\n', b''):
break
+ return headers
+
+def parse_headers(fp, _class=HTTPMessage):
+ """Parses only RFC2822 headers from a file pointer.
+
+ email Parser wants to see strings rather than bytes.
+ But a TextIOWrapper around self.rfile would buffer too many bytes
+ from the stream, bytes which we later need to read as bytes.
+ So we read the correct bytes here, as bytes, for email Parser
+ to parse.
+
+ """
+ headers = _read_headers(fp)
hstring = b''.join(headers).decode('iso-8859-1')
return email.parser.Parser(_class=_class).parsestr(hstring)
@@ -312,15 +321,10 @@ def begin(self):
if status != CONTINUE:
break
# skip the header from the 100 response
- while True:
- skip = self.fp.readline(_MAXLINE + 1)
- if len(skip) > _MAXLINE:
- raise LineTooLong("header line")
- skip = skip.strip()
- if not skip:
- break
- if self.debuglevel > 0:
- print("header:", skip)
+ skipped_headers = _read_headers(self.fp)
+ if self.debuglevel > 0:
+ print("headers:", skipped_headers)
+ del skipped_headers
self.code = self.status = status
self.reason = reason.strip()
diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py
index 03e049b13fd21..0db287507c7bf 100644
--- a/Lib/test/test_httplib.py
+++ b/Lib/test/test_httplib.py
@@ -971,6 +971,14 @@ def test_overflowing_header_line(self):
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
+ def test_overflowing_header_limit_after_100(self):
+ body = (
+ 'HTTP/1.1 100 OK\r\n'
+ 'r\n' * 32768
+ )
+ resp = client.HTTPResponse(FakeSocket(body))
+ self.assertRaises(client.HTTPException, resp.begin)
+
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
@@ -1377,7 +1385,7 @@ def readline(self, limit):
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
- expected = {"responses"} # White-list documented dict() object
+ expected = {"responses"} # Allowlist documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
blacklist = {"HTTPMessage", "parse_headers"}
diff --git a/Misc/NEWS.d/next/Security/2021-05-05-17-37-04.bpo-44022.bS3XJ9.rst b/Misc/NEWS.d/next/Security/2021-05-05-17-37-04.bpo-44022.bS3XJ9.rst
new file mode 100644
index 0000000000000..cf6b63e396155
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2021-05-05-17-37-04.bpo-44022.bS3XJ9.rst
@@ -0,0 +1,2 @@
+mod:`http.client` now avoids infinitely reading potential HTTP headers after a
+``100 Continue`` status response from the server.
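
The refactoring above exists so that both parse_headers() and the 100 Continue path in begin() go through one bounded header-reading loop. As a rough illustration, here is a self-contained sketch of such a loop; the limits and the exception are local stand-ins for http.client's _MAXLINE, _MAXHEADERS, LineTooLong and HTTPException, not the module's actual objects.

import io

_MAXLINE = 65536
_MAXHEADERS = 100

class HeaderLimitError(Exception):
    pass

def read_headers(fp):
    """Read raw header lines from fp until a blank line, enforcing limits."""
    headers = []
    while True:
        line = fp.readline(_MAXLINE + 1)
        if len(line) > _MAXLINE:
            raise HeaderLimitError("header line too long")
        headers.append(line)
        if len(headers) > _MAXHEADERS:
            raise HeaderLimitError("got more than %d headers" % _MAXHEADERS)
        if line in (b'\r\n', b'\n', b''):
            break
    return headers

print(read_headers(io.BytesIO(b"Host: example\r\n\r\n")))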


@@ -0,0 +1,74 @@
diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py
index 11ebcf1..ee3d960 100644
--- a/Lib/logging/handlers.py
+++ b/Lib/logging/handlers.py
@@ -181,14 +181,17 @@ class RotatingFileHandler(BaseRotatingHandler):
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
+ # See bpo-45401: Never rollover anything other than regular files
+ if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
+ return False
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
- return 1
- return 0
+ return True
+ return False
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
@@ -335,10 +338,13 @@ class TimedRotatingFileHandler(BaseRotatingHandler):
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
+ # See bpo-45401: Never rollover anything other than regular files
+ if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
+ return False
t = int(time.time())
if t >= self.rolloverAt:
- return 1
- return 0
+ return True
+ return False
def getFilesToDelete(self):
"""
diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py
index 45b72e3..055b8e3 100644
--- a/Lib/test/test_logging.py
+++ b/Lib/test/test_logging.py
@@ -4219,6 +4219,13 @@ class RotatingFileHandlerTest(BaseFileTest):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
+ # bpo-45401 - test with special file
+ # We set maxBytes to 1 so that rollover would normally happen, except
+ # for the check for regular files
+ rh = logging.handlers.RotatingFileHandler(
+ os.devnull, encoding="utf-8", maxBytes=1)
+ self.assertFalse(rh.shouldRollover(self.next_rec()))
+ rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
@@ -4294,6 +4301,15 @@ class RotatingFileHandlerTest(BaseFileTest):
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
+ def test_should_not_rollover(self):
+ # See bpo-45401. Should only ever rollover regular files
+ fh = logging.handlers.TimedRotatingFileHandler(
+ os.devnull, 'S', encoding="utf-8", backupCount=1)
+ time.sleep(1.1) # a little over a second ...
+ r = logging.makeLogRecord({'msg': 'testing - device file'})
+ self.assertFalse(fh.shouldRollover(r))
+ fh.close()
+
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
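
A quick way to see the bpo-45401 guard in action on a patched build (illustrative, not part of the patch): point a RotatingFileHandler at a non-regular file such as os.devnull and check that shouldRollover() now declines even though maxBytes would otherwise force a rollover.

import logging, logging.handlers, os

rh = logging.handlers.RotatingFileHandler(os.devnull, maxBytes=1)
record = logging.makeLogRecord({'msg': 'x' * 128})
print(rh.shouldRollover(record))   # expected: False once the patch is applied
rh.close()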


@@ -0,0 +1,267 @@
diff --git a/Makefile.pre.in b/Makefile.pre.in
index 8da1965..9864fe2 100644
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -884,7 +884,8 @@ regen-opcode-targets:
$(srcdir)/Python/opcode_targets.h.new
$(UPDATE_FILE) $(srcdir)/Python/opcode_targets.h $(srcdir)/Python/opcode_targets.h.new
-Python/ceval.o: $(srcdir)/Python/opcode_targets.h $(srcdir)/Python/ceval_gil.h
+Python/ceval.o: $(srcdir)/Python/opcode_targets.h $(srcdir)/Python/ceval_gil.h \
+ $(srcdir)/Python/condvar.h
Python/frozen.o: $(srcdir)/Python/importlib.h $(srcdir)/Python/importlib_external.h
@@ -1706,7 +1707,7 @@ patchcheck: @DEF_MAKE_RULE@
# Dependencies
-Python/thread.o: @THREADHEADERS@
+Python/thread.o: @THREADHEADERS@ $(srcdir)/Python/condvar.h
# Declare targets that aren't real files
.PHONY: all build_all sharedmods check-clean-src oldsharedmods test quicktest
diff --git a/Python/ceval.c b/Python/ceval.c
index 0b30cc1..3f1300c 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -232,6 +232,7 @@ PyEval_InitThreads(void)
{
if (gil_created())
return;
+ PyThread_init_thread();
create_gil();
take_gil(PyThreadState_GET());
main_thread = PyThread_get_thread_ident();
diff --git a/Python/condvar.h b/Python/condvar.h
index 9a71b17..39a420f 100644
--- a/Python/condvar.h
+++ b/Python/condvar.h
@@ -59,20 +59,6 @@
#include <pthread.h>
-#define PyCOND_ADD_MICROSECONDS(tv, interval) \
-do { /* TODO: add overflow and truncation checks */ \
- tv.tv_usec += (long) interval; \
- tv.tv_sec += tv.tv_usec / 1000000; \
- tv.tv_usec %= 1000000; \
-} while (0)
-
-/* We assume all modern POSIX systems have gettimeofday() */
-#ifdef GETTIMEOFDAY_NO_TZ
-#define PyCOND_GETTIMEOFDAY(ptv) gettimeofday(ptv)
-#else
-#define PyCOND_GETTIMEOFDAY(ptv) gettimeofday(ptv, (struct timezone *)NULL)
-#endif
-
/* The following functions return 0 on success, nonzero on error */
#define PyMUTEX_T pthread_mutex_t
#define PyMUTEX_INIT(mut) pthread_mutex_init((mut), NULL)
@@ -81,32 +67,30 @@ do { /* TODO: add overflow and truncation checks */ \
#define PyMUTEX_UNLOCK(mut) pthread_mutex_unlock(mut)
#define PyCOND_T pthread_cond_t
-#define PyCOND_INIT(cond) pthread_cond_init((cond), NULL)
+#define PyCOND_INIT(cond) _PyThread_cond_init(cond)
#define PyCOND_FINI(cond) pthread_cond_destroy(cond)
#define PyCOND_SIGNAL(cond) pthread_cond_signal(cond)
#define PyCOND_BROADCAST(cond) pthread_cond_broadcast(cond)
#define PyCOND_WAIT(cond, mut) pthread_cond_wait((cond), (mut))
+/* These private functions are implemented in Python/thread_pthread.h */
+int _PyThread_cond_init(PyCOND_T *cond);
+void _PyThread_cond_after(long long us, struct timespec *abs);
+
/* return 0 for success, 1 on timeout, -1 on error */
Py_LOCAL_INLINE(int)
PyCOND_TIMEDWAIT(PyCOND_T *cond, PyMUTEX_T *mut, long long us)
{
- int r;
- struct timespec ts;
- struct timeval deadline;
-
- PyCOND_GETTIMEOFDAY(&deadline);
- PyCOND_ADD_MICROSECONDS(deadline, us);
- ts.tv_sec = deadline.tv_sec;
- ts.tv_nsec = deadline.tv_usec * 1000;
-
- r = pthread_cond_timedwait((cond), (mut), &ts);
- if (r == ETIMEDOUT)
+ struct timespec abs;
+ _PyThread_cond_after(us, &abs);
+ int ret = pthread_cond_timedwait(cond, mut, &abs);
+ if (ret == ETIMEDOUT) {
return 1;
- else if (r)
+ }
+ if (ret) {
return -1;
- else
- return 0;
+ }
+ return 0;
}
#elif defined(NT_THREADS)
diff --git a/Python/thread.c b/Python/thread.c
index 63eeb1e..c5d0e59 100644
--- a/Python/thread.c
+++ b/Python/thread.c
@@ -6,6 +6,7 @@
Stuff shared by all thread_*.h files is collected here. */
#include "Python.h"
+#include "condvar.h"
#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in libc headers, hence the macro
diff --git a/Python/thread_pthread.h b/Python/thread_pthread.h
index baea71f..7dc295e 100644
--- a/Python/thread_pthread.h
+++ b/Python/thread_pthread.h
@@ -66,16 +66,6 @@
#endif
#endif
-#if !defined(pthread_attr_default)
-# define pthread_attr_default ((pthread_attr_t *)NULL)
-#endif
-#if !defined(pthread_mutexattr_default)
-# define pthread_mutexattr_default ((pthread_mutexattr_t *)NULL)
-#endif
-#if !defined(pthread_condattr_default)
-# define pthread_condattr_default ((pthread_condattr_t *)NULL)
-#endif
-
/* Whether or not to use semaphores directly rather than emulating them with
* mutexes and condition variables:
@@ -120,6 +110,56 @@ do { \
} while(0)
+/*
+ * pthread_cond support
+ */
+
+#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
+// monotonic is supported statically. It doesn't mean it works on runtime.
+#define CONDATTR_MONOTONIC
+#endif
+
+// NULL when pthread_condattr_setclock(CLOCK_MONOTONIC) is not supported.
+static pthread_condattr_t *condattr_monotonic = NULL;
+
+static void
+init_condattr()
+{
+#ifdef CONDATTR_MONOTONIC
+ static pthread_condattr_t ca;
+ pthread_condattr_init(&ca);
+ if (pthread_condattr_setclock(&ca, CLOCK_MONOTONIC) == 0) {
+ condattr_monotonic = &ca; // Use monotonic clock
+ }
+#endif
+}
+
+int
+_PyThread_cond_init(PyCOND_T *cond)
+{
+ return pthread_cond_init(cond, condattr_monotonic);
+}
+
+void
+_PyThread_cond_after(long long us, struct timespec *abs)
+{
+#ifdef CONDATTR_MONOTONIC
+ if (condattr_monotonic) {
+ clock_gettime(CLOCK_MONOTONIC, abs);
+ abs->tv_sec += us / 1000000;
+ abs->tv_nsec += (us % 1000000) * 1000;
+ abs->tv_sec += abs->tv_nsec / 1000000000;
+ abs->tv_nsec %= 1000000000;
+ return;
+ }
+#endif
+
+ struct timespec ts;
+ MICROSECONDS_TO_TIMESPEC(us, ts);
+ *abs = ts;
+}
+
+
/* A pthread mutex isn't sufficient to model the Python lock type
* because, according to Draft 5 of the docs (P1003.4a/D5), both of the
* following are undefined:
@@ -175,6 +215,7 @@ PyThread__init_thread(void)
extern void pthread_init(void);
pthread_init();
#endif
+ init_condattr();
}
#endif /* !_HAVE_BSDI */
@@ -449,8 +490,7 @@ PyThread_allocate_lock(void)
memset((void *)lock, '\0', sizeof(pthread_lock));
lock->locked = 0;
- status = pthread_mutex_init(&lock->mut,
- pthread_mutexattr_default);
+ status = pthread_mutex_init(&lock->mut, NULL);
CHECK_STATUS_PTHREAD("pthread_mutex_init");
/* Mark the pthread mutex underlying a Python mutex as
pure happens-before. We can't simply mark the
@@ -459,8 +499,7 @@ PyThread_allocate_lock(void)
will cause errors. */
_Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut);
- status = pthread_cond_init(&lock->lock_released,
- pthread_condattr_default);
+ status = _PyThread_cond_init(&lock->lock_released);
CHECK_STATUS_PTHREAD("pthread_cond_init");
if (error) {
@@ -519,9 +558,10 @@ PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
success = PY_LOCK_ACQUIRED;
}
else if (microseconds != 0) {
- struct timespec ts;
- if (microseconds > 0)
- MICROSECONDS_TO_TIMESPEC(microseconds, ts);
+ struct timespec abs;
+ if (microseconds > 0) {
+ _PyThread_cond_after(microseconds, &abs);
+ }
/* continue trying until we get the lock */
/* mut must be locked by me -- part of the condition
@@ -530,10 +570,13 @@ PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
if (microseconds > 0) {
status = pthread_cond_timedwait(
&thelock->lock_released,
- &thelock->mut, &ts);
+ &thelock->mut, &abs);
+ if (status == 1) {
+ break;
+ }
if (status == ETIMEDOUT)
break;
- CHECK_STATUS_PTHREAD("pthread_cond_timed_wait");
+ CHECK_STATUS_PTHREAD("pthread_cond_timedwait");
}
else {
status = pthread_cond_wait(
diff --git a/configure.ac b/configure.ac
index a0e3613..8a17559 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3582,7 +3582,7 @@ AC_CHECK_FUNCS(alarm accept4 setitimer getitimer bind_textdomain_codeset chown \
memrchr mbrtowc mkdirat mkfifo \
mkfifoat mknod mknodat mktime mremap nice openat pathconf pause pipe2 plock poll \
posix_fallocate posix_fadvise pread \
- pthread_init pthread_kill putenv pwrite readlink readlinkat readv realpath renameat \
+ pthread_condattr_setclock pthread_init pthread_kill putenv pwrite readlink readlinkat readv realpath renameat \
select sem_open sem_timedwait sem_getvalue sem_unlink sendfile setegid seteuid \
setgid sethostname \
setlocale setregid setreuid setresuid setresgid setsid setpgid setpgrp setpriority setuid setvbuf \
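
The heart of _PyThread_cond_after() above is converting a relative timeout in microseconds into an absolute deadline on CLOCK_MONOTONIC, carrying nanosecond overflow into the seconds field. A rough Python rendering of that arithmetic (illustrative only; the authoritative version is the C code above):

import time

def cond_after(us):
    now = time.monotonic()                   # monotonic seconds as a float
    sec = int(now)
    nsec = int((now - sec) * 1_000_000_000)
    sec += us // 1_000_000
    nsec += (us % 1_000_000) * 1_000
    sec += nsec // 1_000_000_000             # carry nanosecond overflow
    nsec %= 1_000_000_000
    return sec, nsec

print(cond_after(1_500_000))                 # absolute deadline ~1.5 s ahead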


@@ -0,0 +1,80 @@
diff --git a/Lib/ftplib.py b/Lib/ftplib.py
index 2ff251a..385e432 100644
--- a/Lib/ftplib.py
+++ b/Lib/ftplib.py
@@ -104,6 +104,8 @@ class FTP:
welcome = None
passiveserver = 1
encoding = "latin-1"
+ # Disables https://bugs.python.org/issue43285 security if set to True.
+ trust_server_pasv_ipv4_address = False
# Initialization method (called by class instantiation).
# Initialize host to localhost, port to standard ftp port
@@ -333,8 +335,13 @@ class FTP:
return sock
def makepasv(self):
+ """Internal: Does the PASV or EPSV handshake -> (address, port)"""
if self.af == socket.AF_INET:
- host, port = parse227(self.sendcmd('PASV'))
+ untrusted_host, port = parse227(self.sendcmd('PASV'))
+ if self.trust_server_pasv_ipv4_address:
+ host = untrusted_host
+ else:
+ host = self.sock.getpeername()[0]
else:
host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
return host, port
diff --git a/Lib/test/test_ftplib.py b/Lib/test/test_ftplib.py
index 4ff2f71..3ca7cc1 100644
--- a/Lib/test/test_ftplib.py
+++ b/Lib/test/test_ftplib.py
@@ -94,6 +94,10 @@ class DummyFTPHandler(asynchat.async_chat):
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
+ # We use this as the string IPv4 address to direct the client
+ # to in response to a PASV command. To test security behavior.
+ # https://bugs.python.org/issue43285/.
+ self.fake_pasv_server_ip = '252.253.254.255'
def collect_incoming_data(self, data):
self.in_buffer.append(data)
@@ -136,7 +140,8 @@ class DummyFTPHandler(asynchat.async_chat):
sock.bind((self.socket.getsockname()[0], 0))
sock.listen()
sock.settimeout(TIMEOUT)
- ip, port = sock.getsockname()[:2]
+ port = sock.getsockname()[1]
+ ip = self.fake_pasv_server_ip
ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
@@ -694,6 +699,26 @@ class TestFTPClass(TestCase):
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
+ def test_makepasv_issue43285_security_disabled(self):
+ """Test the opt-in to the old vulnerable behavior."""
+ self.client.trust_server_pasv_ipv4_address = True
+ bad_host, port = self.client.makepasv()
+ self.assertEqual(
+ bad_host, self.server.handler_instance.fake_pasv_server_ip)
+ # Opening and closing a connection keeps the dummy server happy
+ # instead of timing out on accept.
+ socket.create_connection((self.client.sock.getpeername()[0], port),
+ timeout=TIMEOUT).close()
+
+ def test_makepasv_issue43285_security_enabled_default(self):
+ self.assertFalse(self.client.trust_server_pasv_ipv4_address)
+ trusted_host, port = self.client.makepasv()
+ self.assertNotEqual(
+ trusted_host, self.server.handler_instance.fake_pasv_server_ip)
+ # Opening and closing a connection keeps the dummy server happy
+ # instead of timing out on accept.
+ socket.create_connection((trusted_host, port), timeout=TIMEOUT).close()
+
def test_with_statement(self):
self.client.quit()
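
With this patch applied, makepasv() ignores the IPv4 address the server advertises in its PASV reply and reuses the peer address of the control connection; the pre-patch behaviour stays available through trust_server_pasv_ipv4_address. A small usage sketch against a patched ftplib, assuming an IPv4 control connection; the host name and anonymous login are placeholders, not part of the patch.

from ftplib import FTP

with FTP('ftp.example.com') as ftp:          # placeholder host
    ftp.login()                              # anonymous login
    # Default (hardened): the data connection targets the control
    # connection's peer address, not whatever the server advertised.
    host, port = ftp.makepasv()
    assert host == ftp.sock.getpeername()[0]

    # Explicit opt-in to the old behaviour, e.g. for unusual NAT setups:
    ftp.trust_server_pasv_ipv4_address = True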


@@ -14,7 +14,7 @@ URL: https://www.python.org/
# WARNING When rebasing to a new Python version,
# remember to update the python3-docs package as well
Version: %{pybasever}.8
Release: 41%{?dist}
Release: 45%{?dist}
License: Python
@@ -344,6 +344,16 @@ Patch189: 00189-use-rpm-wheels.patch
# Fedora Change: https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe
Patch251: 00251-change-user-install-location.patch
# 00257 #
# Use the monotonic clock for threading.Condition.wait() so that it is not affected
# by system clock changes.
# This patch works around the issue.
# Implemented by backporting the respective python2 code:
# https://github.com/python/cpython/blob/v2.7.18/Lib/threading.py#L331
# along with our downstream patch for python2 fixing the same issue.
# Downstream only.
Patch257: 00257-threading-condition-wait.patch
# 00262 #
# Backport of PEP 538: Coercing the legacy C locale to a UTF-8 based locale
# https://www.python.org/dev/peps/pep-0538/
@@ -616,6 +626,31 @@ Patch364: 00364-thread-exit.patch
# Tracking bug: https://bugzilla.redhat.com/show_bug.cgi?id=1995234
Patch366: 00366-CVE-2021-3733.patch
# 00368 #
# CVE-2021-3737: client can enter an infinite loop on a 100 Continue response from the server
# Upstream: https://bugs.python.org/issue44022
# Tracking bug: https://bugzilla.redhat.com/show_bug.cgi?id=1995162
Patch368: 00368-CVE-2021-3737.patch
# 00369 #
# Change shouldRollover() methods of logging.handlers to only rollover regular files and not devices
# Upstream: https://bugs.python.org/issue45401
# Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2009200
Patch369: 00369-rollover-only-regular-files-in-logging-handlers.patch
# 00370 #
# Utilize the monotonic clock for the global interpreter lock instead of the real-time clock
# to avoid issues when the system time changes
# Upstream: https://bugs.python.org/issue12822
# Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2003758
Patch370: 00370-GIL-monotonic-clock.patch
# 00372 #
# CVE-2021-4189: ftplib should not use the host from the PASV response
# Upstream: https://bugs.python.org/issue43285
# Tracking bug: https://bugzilla.redhat.com/show_bug.cgi?id=2036020
Patch372: 00372-CVE-2021-4189.patch
# (New patches go here ^^^)
#
# When adding new patches to "python" and "python3" in Fedora, EL, etc.,
@@ -916,6 +951,7 @@ rm Lib/ensurepip/_bundled/*.whl
%endif
%patch251 -p1
%patch257 -p1
%patch262 -p1
%patch294 -p1
%patch316 -p1
@@ -950,6 +986,10 @@ git apply %{PATCH351}
%patch362 -p1
%patch364 -p1
%patch366 -p1
%patch368 -p1
%patch369 -p1
%patch370 -p1
%patch372 -p1
# Remove files that should be generated by the build
# (This is after patching, so that we can use patches directly from upstream)
@@ -1875,6 +1915,23 @@ fi
# ======================================================
%changelog
* Fri Jan 07 2022 Charalampos Stratakis <cstratak@redhat.com> - 3.6.8-45
- Security fix for CVE-2021-4189: ftplib should not use the host from the PASV response
Resolves: rhbz#2036020
* Tue Oct 12 2021 Charalampos Stratakis <cstratak@redhat.com> - 3.6.8-44
- Use the monotonic clock for threading.Condition
- Use the monotonic clock for the global interpreter lock
Resolves: rhbz#2003758
* Mon Oct 11 2021 Charalampos Stratakis <cstratak@redhat.com> - 3.6.8-43
- Change shouldRollover() methods of logging.handlers to only rollover regular files
Resolves: rhbz#2009200
* Fri Sep 17 2021 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-42
- Security fix for CVE-2021-3737
Resolves: rhbz#1995162
* Thu Sep 09 2021 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-41
- Security fix for CVE-2021-3733: Denial of service when identifying crafted invalid RFCs
Resolves: rhbz#1995234