Compare commits

...

No commits in common. "c8" and "stream-python27-2.7-rhel-8.10.0" have entirely different histories.

84 changed files with 5953 additions and 1263 deletions

.gitignore vendored

@@ -1 +1 @@
-SOURCES/Python-2.7.17-noexe.tar.xz
+/*.tar.*


@@ -1 +0,0 @@
-e63124a9a86b4b52c09384915a0842adf00b9d45 SOURCES/Python-2.7.17-noexe.tar.xz

00146-hashlib-fips.patch Normal file

@@ -0,0 +1,844 @@
From ece76465680b0df5b3fce7bf8ff1ff0253933889 Mon Sep 17 00:00:00 2001
From: Petr Viktorin <pviktori@redhat.com>
Date: Mon, 2 Sep 2019 17:33:29 +0200
Subject: [PATCH 01/11] Remove HASH_OBJ_CONSTRUCTOR
See https://github.com/python/cpython/commit/c7e219132aff1e21cb9ccb0a9b570dc6c750039b
---
Modules/_hashopenssl.c | 59 ------------------------------------------
1 file changed, 59 deletions(-)
diff --git a/Modules/_hashopenssl.c b/Modules/_hashopenssl.c
index 78445ebabdd3..cb81e9765251 100644
--- a/Modules/_hashopenssl.c
+++ b/Modules/_hashopenssl.c
@@ -48,10 +48,6 @@
* to allow the user to optimize based on the platform they're using. */
#define HASHLIB_GIL_MINSIZE 2048
-#ifndef HASH_OBJ_CONSTRUCTOR
-#define HASH_OBJ_CONSTRUCTOR 0
-#endif
-
#if defined(OPENSSL_VERSION_NUMBER) && (OPENSSL_VERSION_NUMBER >= 0x00908000)
#define _OPENSSL_SUPPORTS_SHA2
#endif
@@ -384,53 +380,6 @@ EVP_repr(PyObject *self)
return PyString_FromString(buf);
}
-#if HASH_OBJ_CONSTRUCTOR
-static int
-EVP_tp_init(EVPobject *self, PyObject *args, PyObject *kwds)
-{
- static char *kwlist[] = {"name", "string", NULL};
- PyObject *name_obj = NULL;
- Py_buffer view = { 0 };
- char *nameStr;
- const EVP_MD *digest;
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|s*:HASH", kwlist,
- &name_obj, &view)) {
- return -1;
- }
-
- if (!PyArg_Parse(name_obj, "s", &nameStr)) {
- PyErr_SetString(PyExc_TypeError, "name must be a string");
- PyBuffer_Release(&view);
- return -1;
- }
-
- digest = EVP_get_digestbyname(nameStr);
- if (!digest) {
- PyErr_SetString(PyExc_ValueError, "unknown hash function");
- PyBuffer_Release(&view);
- return -1;
- }
- EVP_DigestInit(self->ctx, digest);
-
- self->name = name_obj;
- Py_INCREF(self->name);
-
- if (view.obj) {
- if (view.len >= HASHLIB_GIL_MINSIZE) {
- Py_BEGIN_ALLOW_THREADS
- EVP_hash(self, view.buf, view.len);
- Py_END_ALLOW_THREADS
- } else {
- EVP_hash(self, view.buf, view.len);
- }
- PyBuffer_Release(&view);
- }
-
- return 0;
-}
-#endif
-
PyDoc_STRVAR(hashtype_doc,
"A hash represents the object used to calculate a checksum of a\n\
@@ -487,9 +436,6 @@ static PyTypeObject EVPtype = {
0, /* tp_descr_set */
0, /* tp_dictoffset */
#endif
-#if HASH_OBJ_CONSTRUCTOR
- (initproc)EVP_tp_init, /* tp_init */
-#endif
};
static PyObject *
@@ -928,11 +874,6 @@ init_hashlib(void)
return;
}
-#if HASH_OBJ_CONSTRUCTOR
- Py_INCREF(&EVPtype);
- PyModule_AddObject(m, "HASH", (PyObject *)&EVPtype);
-#endif
-
/* these constants are used by the convenience constructors */
INIT_CONSTRUCTOR_CONSTANTS(md5);
INIT_CONSTRUCTOR_CONSTANTS(sha1);
From d7339af75678c760f6d6c0eb455b0eb889c22574 Mon Sep 17 00:00:00 2001
From: Petr Viktorin <pviktori@redhat.com>
Date: Mon, 2 Sep 2019 18:02:25 +0200
Subject: [PATCH 02/11] Add the usedforsecurity argument to _hashopenssl
---
Modules/_hashopenssl.c | 63 ++++++++++++++++++++++++++++++++----------
1 file changed, 48 insertions(+), 15 deletions(-)
diff --git a/Modules/_hashopenssl.c b/Modules/_hashopenssl.c
index cb81e9765251..f2dbc095cc66 100644
--- a/Modules/_hashopenssl.c
+++ b/Modules/_hashopenssl.c
@@ -441,7 +441,7 @@ static PyTypeObject EVPtype = {
static PyObject *
EVPnew(PyObject *name_obj,
const EVP_MD *digest, const EVP_MD_CTX *initial_ctx,
- const unsigned char *cp, Py_ssize_t len)
+ const unsigned char *cp, Py_ssize_t len, int usedforsecurity)
{
EVPobject *self;
@@ -456,7 +456,23 @@ EVPnew(PyObject *name_obj,
if (initial_ctx) {
EVP_MD_CTX_copy(self->ctx, initial_ctx);
} else {
- EVP_DigestInit(self->ctx, digest);
+ EVP_MD_CTX_init(self->ctx);
+
+ /*
+ If the user has declared that this digest is being used in a
+ non-security role (e.g. indexing into a data structure), set
+ the exception flag for openssl to allow it
+ */
+ if (!usedforsecurity) {
+#ifdef EVP_MD_CTX_FLAG_NON_FIPS_ALLOW
+ EVP_MD_CTX_set_flags(self->ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+#endif
+ }
+ if (!EVP_DigestInit_ex(self->ctx, digest, NULL)) {
+ _setException(PyExc_ValueError);
+ Py_DECREF(self);
+ return NULL;
+ }
}
if (cp && len) {
@@ -485,15 +501,16 @@ The MD5 and SHA1 algorithms are always supported.\n");
static PyObject *
EVP_new(PyObject *self, PyObject *args, PyObject *kwdict)
{
- static char *kwlist[] = {"name", "string", NULL};
+ static char *kwlist[] = {"name", "string", "usedforsecurity", NULL};
PyObject *name_obj = NULL;
Py_buffer view = { 0 };
PyObject *ret_obj;
char *name;
const EVP_MD *digest;
+ int usedforsecurity = 1;
- if (!PyArg_ParseTupleAndKeywords(args, kwdict, "O|s*:new", kwlist,
- &name_obj, &view)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwdict, "O|s*i:new", kwlist,
+ &name_obj, &view, &usedforsecurity)) {
return NULL;
}
@@ -506,7 +523,7 @@ EVP_new(PyObject *self, PyObject *args, PyObject *kwdict)
digest = EVP_get_digestbyname(name);
ret_obj = EVPnew(name_obj, digest, NULL, (unsigned char*)view.buf,
- view.len);
+ view.len, usedforsecurity);
PyBuffer_Release(&view);
return ret_obj;
@@ -771,30 +788,46 @@ generate_hash_name_list(void)
* the generic one passing it a python string and are noticeably
* faster than calling a python new() wrapper. Thats important for
* code that wants to make hashes of a bunch of small strings.
+ *
+ * For usedforsecurity=False, the optimization is not used.
*/
#define GEN_CONSTRUCTOR(NAME) \
static PyObject * \
- EVP_new_ ## NAME (PyObject *self, PyObject *args) \
+ EVP_new_ ## NAME (PyObject *self, PyObject *args, PyObject *kwdict) \
{ \
+ static char *kwlist[] = {"string", "usedforsecurity", NULL}; \
Py_buffer view = { 0 }; \
PyObject *ret_obj; \
+ int usedforsecurity=1; \
\
- if (!PyArg_ParseTuple(args, "|s*:" #NAME , &view)) { \
+ if (!PyArg_ParseTupleAndKeywords( \
+ args, kwdict, "|s*i:" #NAME, kwlist, \
+ &view, &usedforsecurity \
+ )) { \
return NULL; \
} \
- \
- ret_obj = EVPnew( \
- CONST_ ## NAME ## _name_obj, \
- NULL, \
- CONST_new_ ## NAME ## _ctx_p, \
- (unsigned char*)view.buf, view.len); \
+ if (usedforsecurity == 0) { \
+ ret_obj = EVPnew( \
+ CONST_ ## NAME ## _name_obj, \
+ EVP_get_digestbyname(#NAME), \
+ NULL, \
+ (unsigned char*)view.buf, view.len, \
+ usedforsecurity); \
+ } else { \
+ ret_obj = EVPnew( \
+ CONST_ ## NAME ## _name_obj, \
+ NULL, \
+ CONST_new_ ## NAME ## _ctx_p, \
+ (unsigned char*)view.buf, view.len, \
+ usedforsecurity); \
+ } \
PyBuffer_Release(&view); \
return ret_obj; \
}
/* a PyMethodDef structure for the constructor */
#define CONSTRUCTOR_METH_DEF(NAME) \
- {"openssl_" #NAME, (PyCFunction)EVP_new_ ## NAME, METH_VARARGS, \
+ {"openssl_" #NAME, (PyCFunction)EVP_new_ ## NAME, METH_VARARGS|METH_KEYWORDS, \
PyDoc_STR("Returns a " #NAME \
" hash object; optionally initialized with a string") \
}
From c8102e61fb3ade364d4bb7f2fe3f3452e2018ecd Mon Sep 17 00:00:00 2001
From: David Malcolm <dmalcolm@redhat.com>
Date: Mon, 2 Sep 2019 17:59:53 +0200
Subject: [PATCH 03/11] hashlib.py: Avoid the builtin constructor
---
Lib/hashlib.py | 58 +++++++++++++-------------------------------------
1 file changed, 15 insertions(+), 43 deletions(-)
diff --git a/Lib/hashlib.py b/Lib/hashlib.py
index bbd06b9996ee..404ed6891fb9 100644
--- a/Lib/hashlib.py
+++ b/Lib/hashlib.py
@@ -69,65 +69,37 @@
'pbkdf2_hmac')
-def __get_builtin_constructor(name):
- try:
- if name in ('SHA1', 'sha1'):
- import _sha
- return _sha.new
- elif name in ('MD5', 'md5'):
- import _md5
- return _md5.new
- elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
- import _sha256
- bs = name[3:]
- if bs == '256':
- return _sha256.sha256
- elif bs == '224':
- return _sha256.sha224
- elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
- import _sha512
- bs = name[3:]
- if bs == '512':
- return _sha512.sha512
- elif bs == '384':
- return _sha512.sha384
- except ImportError:
- pass # no extension module, this hash is unsupported.
-
- raise ValueError('unsupported hash type ' + name)
-
-
def __get_openssl_constructor(name):
try:
f = getattr(_hashlib, 'openssl_' + name)
# Allow the C module to raise ValueError. The function will be
# defined but the hash not actually available thanks to OpenSSL.
- f()
+ #
+ # We pass "usedforsecurity=False" to disable FIPS-based restrictions:
+ # at this stage we're merely seeing if the function is callable,
+ # rather than using it for actual work.
+ f(usedforsecurity=False)
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
- return __get_builtin_constructor(name)
-
-
-def __py_new(name, string=''):
- """new(name, string='') - Return a new hashing object using the named algorithm;
- optionally initialized with a string.
- """
- return __get_builtin_constructor(name)(string)
+ raise
-def __hash_new(name, string=''):
- """new(name, string='') - Return a new hashing object using the named algorithm;
- optionally initialized with a string.
+def __hash_new(name, string='', usedforsecurity=True):
+ """new(name, string='', usedforsecurity=True) - Return a new hashing object
+ using the named algorithm; optionally initialized with a string.
+
+ Override 'usedforsecurity' to False when using for non-security purposes in
+ a FIPS environment
"""
try:
- return _hashlib.new(name, string)
+ return _hashlib.new(name, string, usedforsecurity)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
- return __get_builtin_constructor(name)(string)
+ raise
try:
@@ -218,4 +190,4 @@ def prf(msg, inner=inner, outer=outer):
# Cleanup locals()
del __always_supported, __func_name, __get_hash
-del __py_new, __hash_new, __get_openssl_constructor
+del __hash_new, __get_openssl_constructor
From 2ade3e5a6c5732c0692c4cc2235a2bbe0948f50b Mon Sep 17 00:00:00 2001
From: David Malcolm <dmalcolm@redhat.com>
Date: Mon, 2 Sep 2019 17:56:46 +0200
Subject: [PATCH 04/11] Adjust docstrings & comments
---
Lib/hashlib.py | 29 ++++++++++++++++++++++-------
Modules/_hashopenssl.c | 9 ++++++++-
2 files changed, 30 insertions(+), 8 deletions(-)
diff --git a/Lib/hashlib.py b/Lib/hashlib.py
index 404ed6891fb9..46d0b470ab4a 100644
--- a/Lib/hashlib.py
+++ b/Lib/hashlib.py
@@ -6,9 +6,12 @@
__doc__ = """hashlib module - A common interface to many hash functions.
-new(name, string='') - returns a new hash object implementing the
- given hash function; initializing the hash
- using the given string data.
+new(name, string='', usedforsecurity=True)
+ - returns a new hash object implementing the given hash function;
+ initializing the hash using the given string data.
+
+ "usedforsecurity" is a non-standard extension for better supporting
+ FIPS-compliant environments (see below)
Named constructor functions are also available, these are much faster
than using new():
@@ -25,6 +28,20 @@
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
+Our implementation of hashlib uses OpenSSL.
+
+OpenSSL has a "FIPS mode", which, if enabled, may restrict the available hashes
+to only those that are compliant with FIPS regulations. For example, it may
+deny the use of MD5, on the grounds that this is not secure for uses such as
+authentication, system integrity checking, or digital signatures.
+
+If you need to use such a hash for non-security purposes (such as indexing into
+a data structure for speed), you can override the keyword argument
+"usedforsecurity" from True to False to signify that your code is not relying
+on the hash for security purposes, and this will allow the hash to be usable
+even in FIPS mode. This is not a standard feature of Python 2.7's hashlib, and
+is included here to better support FIPS mode.
+
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
@@ -82,6 +99,7 @@ def __get_openssl_constructor(name):
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
+ # RHEL only: Fallbacks removed; we always use OpenSSL for hashes.
raise
@@ -95,10 +113,7 @@ def __hash_new(name, string='', usedforsecurity=True):
try:
return _hashlib.new(name, string, usedforsecurity)
except ValueError:
- # If the _hashlib module (OpenSSL) doesn't support the named
- # hash, try using our builtin implementations.
- # This allows for SHA224/256 and SHA384/512 support even though
- # the OpenSSL library prior to 0.9.8 doesn't provide them.
+ # RHEL only: Fallbacks removed; we always use OpenSSL for hashes.
raise
diff --git a/Modules/_hashopenssl.c b/Modules/_hashopenssl.c
index f2dbc095cc66..d24432e048bf 100644
--- a/Modules/_hashopenssl.c
+++ b/Modules/_hashopenssl.c
@@ -496,7 +496,14 @@ PyDoc_STRVAR(EVP_new__doc__,
An optional string argument may be provided and will be\n\
automatically hashed.\n\
\n\
-The MD5 and SHA1 algorithms are always supported.\n");
+The MD5 and SHA1 algorithms are always supported.\n \
+\n\
+An optional \"usedforsecurity=True\" keyword argument is provided for use in\n\
+environments that enforce FIPS-based restrictions. Some implementations of\n\
+OpenSSL can be configured to prevent the usage of non-secure algorithms (such\n\
+as MD5). If you have a non-security use for these algorithms (e.g. a hash\n\
+table), you can override this argument by marking the callsite as\n\
+\"usedforsecurity=False\".");
static PyObject *
EVP_new(PyObject *self, PyObject *args, PyObject *kwdict)
From 6698e1d84c3f19bbb4438b2b2c78a5ef8bd5ad42 Mon Sep 17 00:00:00 2001
From: Petr Viktorin <pviktori@redhat.com>
Date: Thu, 29 Aug 2019 10:25:28 +0200
Subject: [PATCH 05/11] Expose OpenSSL FIPS_mode as _hashlib.get_fips_mode
---
Modules/_hashopenssl.c | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/Modules/_hashopenssl.c b/Modules/_hashopenssl.c
index d24432e048bf..74f9ab9ec150 100644
--- a/Modules/_hashopenssl.c
+++ b/Modules/_hashopenssl.c
@@ -860,10 +860,32 @@ GEN_CONSTRUCTOR(sha384)
GEN_CONSTRUCTOR(sha512)
#endif
+static PyObject *
+_hashlib_get_fips_mode(PyObject *module, PyObject *unused)
+{
+ // XXX: This function skips error checking.
+ // This is only appropriate for RHEL.
+
+ // From the OpenSSL docs:
+ // "If the library was built without support of the FIPS Object Module,
+ // then the function will return 0 with an error code of
+ // CRYPTO_R_FIPS_MODE_NOT_SUPPORTED (0x0f06d065)."
+ // In RHEL:
+ // * we do build with FIPS, so the function always succeeds
+ // * even if it didn't, people seem used to errors being left on the
+ // OpenSSL error stack.
+
+ // For more info, see:
+ // https://bugzilla.redhat.com/show_bug.cgi?id=1745499
+
+ return PyInt_FromLong(FIPS_mode());
+}
+
/* List of functions exported by this module */
static struct PyMethodDef EVP_functions[] = {
{"new", (PyCFunction)EVP_new, METH_VARARGS|METH_KEYWORDS, EVP_new__doc__},
+ {"get_fips_mode", (PyCFunction)_hashlib_get_fips_mode, METH_NOARGS, NULL},
CONSTRUCTOR_METH_DEF(md5),
CONSTRUCTOR_METH_DEF(sha1),
#ifdef _OPENSSL_SUPPORTS_SHA2
From 9a8833619658c6be5ca72c60189a64da05536d85 Mon Sep 17 00:00:00 2001
From: David Malcolm <dmalcolm@redhat.com>
Date: Mon, 2 Sep 2019 18:00:26 +0200
Subject: [PATCH 06/11] Adjust tests
---
Lib/test/test_hashlib.py | 118 ++++++++++++++++++++++++---------------
1 file changed, 74 insertions(+), 44 deletions(-)
diff --git a/Lib/test/test_hashlib.py b/Lib/test/test_hashlib.py
index b8d6388feaf9..b03fc84f82b4 100644
--- a/Lib/test/test_hashlib.py
+++ b/Lib/test/test_hashlib.py
@@ -34,6 +34,8 @@ def hexstr(s):
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
+from _hashlib import get_fips_mode
+
class HashLibTestCase(unittest.TestCase):
supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
@@ -63,10 +65,10 @@ def __init__(self, *args, **kwargs):
# of hashlib.new given the algorithm name.
for algorithm, constructors in self.constructors_to_test.items():
constructors.add(getattr(hashlib, algorithm))
- def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm):
+ def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm, usedforsecurity=True):
if data is None:
- return hashlib.new(_alg)
- return hashlib.new(_alg, data)
+ return hashlib.new(_alg, usedforsecurity=usedforsecurity)
+ return hashlib.new(_alg, data, usedforsecurity=usedforsecurity)
constructors.add(_test_algorithm_via_hashlib_new)
_hashlib = self._conditional_import_module('_hashlib')
@@ -80,28 +82,13 @@ def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm):
if constructor:
constructors.add(constructor)
- _md5 = self._conditional_import_module('_md5')
- if _md5:
- self.constructors_to_test['md5'].add(_md5.new)
- _sha = self._conditional_import_module('_sha')
- if _sha:
- self.constructors_to_test['sha1'].add(_sha.new)
- _sha256 = self._conditional_import_module('_sha256')
- if _sha256:
- self.constructors_to_test['sha224'].add(_sha256.sha224)
- self.constructors_to_test['sha256'].add(_sha256.sha256)
- _sha512 = self._conditional_import_module('_sha512')
- if _sha512:
- self.constructors_to_test['sha384'].add(_sha512.sha384)
- self.constructors_to_test['sha512'].add(_sha512.sha512)
-
super(HashLibTestCase, self).__init__(*args, **kwargs)
def test_hash_array(self):
a = array.array("b", range(10))
constructors = self.constructors_to_test.itervalues()
for cons in itertools.chain.from_iterable(constructors):
- c = cons(a)
+ c = cons(a, usedforsecurity=False)
c.hexdigest()
def test_algorithms_attribute(self):
@@ -122,28 +109,9 @@ def test_unknown_hash(self):
self.assertRaises(ValueError, hashlib.new, 'spam spam spam spam spam')
self.assertRaises(TypeError, hashlib.new, 1)
- def test_get_builtin_constructor(self):
- get_builtin_constructor = hashlib.__dict__[
- '__get_builtin_constructor']
- self.assertRaises(ValueError, get_builtin_constructor, 'test')
- try:
- import _md5
- except ImportError:
- pass
- # This forces an ImportError for "import _md5" statements
- sys.modules['_md5'] = None
- try:
- self.assertRaises(ValueError, get_builtin_constructor, 'md5')
- finally:
- if '_md5' in locals():
- sys.modules['_md5'] = _md5
- else:
- del sys.modules['_md5']
- self.assertRaises(TypeError, get_builtin_constructor, 3)
-
def test_hexdigest(self):
for name in self.supported_hash_names:
- h = hashlib.new(name)
+ h = hashlib.new(name, usedforsecurity=False)
self.assertTrue(hexstr(h.digest()) == h.hexdigest())
def test_large_update(self):
@@ -153,16 +121,16 @@ def test_large_update(self):
abcs = aas + bees + cees
for name in self.supported_hash_names:
- m1 = hashlib.new(name)
+ m1 = hashlib.new(name, usedforsecurity=False)
m1.update(aas)
m1.update(bees)
m1.update(cees)
- m2 = hashlib.new(name)
+ m2 = hashlib.new(name, usedforsecurity=False)
m2.update(abcs)
self.assertEqual(m1.digest(), m2.digest(), name+' update problem.')
- m3 = hashlib.new(name, abcs)
+ m3 = hashlib.new(name, abcs, usedforsecurity=False)
self.assertEqual(m1.digest(), m3.digest(), name+' new problem.')
def check(self, name, data, digest):
@@ -170,7 +138,7 @@ def check(self, name, data, digest):
# 2 is for hashlib.name(...) and hashlib.new(name, ...)
self.assertGreaterEqual(len(constructors), 2)
for hash_object_constructor in constructors:
- computed = hash_object_constructor(data).hexdigest()
+ computed = hash_object_constructor(data, usedforsecurity=False).hexdigest()
self.assertEqual(
computed, digest,
"Hash algorithm %s constructed using %s returned hexdigest"
@@ -195,7 +163,7 @@ def check_update(self, name, data, digest):
def check_unicode(self, algorithm_name):
# Unicode objects are not allowed as input.
- expected = hashlib.new(algorithm_name, str(u'spam')).hexdigest()
+ expected = hashlib.new(algorithm_name, str(u'spam'), usedforsecurity=False).hexdigest()
self.check(algorithm_name, u'spam', expected)
def test_unicode(self):
@@ -393,6 +361,68 @@ def hash_in_chunks(chunk_size):
self.assertEqual(expected_hash, hasher.hexdigest())
+ def test_issue9146(self):
+ # Ensure that various ways to use "MD5" from "hashlib" don't segfault:
+ m = hashlib.md5(usedforsecurity=False)
+ m.update(b'abc\n')
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = hashlib.new('md5', usedforsecurity=False)
+ m.update(b'abc\n')
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = hashlib.md5(b'abc\n', usedforsecurity=False)
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = hashlib.new('md5', b'abc\n', usedforsecurity=False)
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ def assertRaisesDisabledForFIPS(self, callable_obj=None, *args, **kwargs):
+ try:
+ callable_obj(*args, **kwargs)
+ except ValueError, e:
+ if not e.args[0].endswith('disabled for FIPS'):
+ self.fail('Incorrect exception raised')
+ else:
+ self.fail('Exception was not raised')
+
+ @unittest.skipUnless(get_fips_mode(),
+ 'FIPS enforcement required for this test.')
+ def test_hashlib_fips_mode(self):
+ # Ensure that we raise a ValueError on vanilla attempts to use MD5
+ # in hashlib in a FIPS-enforced setting:
+ self.assertRaisesDisabledForFIPS(hashlib.md5)
+ self.assertRaisesDisabledForFIPS(hashlib.new, 'md5')
+
+ @unittest.skipUnless(get_fips_mode(),
+ 'FIPS enforcement required for this test.')
+ def test_hashopenssl_fips_mode(self):
+ # Verify the _hashlib module's handling of md5:
+ import _hashlib
+
+ assert hasattr(_hashlib, 'openssl_md5')
+
+ # Ensure that _hashlib raises a ValueError on vanilla attempts to
+ # use MD5 in a FIPS-enforced setting:
+ self.assertRaisesDisabledForFIPS(_hashlib.openssl_md5)
+ self.assertRaisesDisabledForFIPS(_hashlib.new, 'md5')
+
+ # Ensure that in such a setting we can whitelist a callsite with
+ # usedforsecurity=False and have it succeed:
+ m = _hashlib.openssl_md5(usedforsecurity=False)
+ m.update('abc\n')
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = _hashlib.new('md5', usedforsecurity=False)
+ m.update('abc\n')
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = _hashlib.openssl_md5('abc\n', usedforsecurity=False)
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = _hashlib.new('md5', 'abc\n', usedforsecurity=False)
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
class KDFTests(unittest.TestCase):
pbkdf2_test_vectors = [
From 31e527aa4f57845dfb0c3dd4f0e9192af5a5b4e2 Mon Sep 17 00:00:00 2001
From: David Malcolm <dmalcolm@redhat.com>
Date: Mon, 2 Sep 2019 18:00:47 +0200
Subject: [PATCH 07/11] Don't build non-OpenSSL hash implementations
---
setup.py | 15 ---------------
1 file changed, 15 deletions(-)
diff --git a/setup.py b/setup.py
index 33cecc687573..272d2f1b5bb8 100644
--- a/setup.py
+++ b/setup.py
@@ -874,21 +874,6 @@ def detect_modules(self):
print ("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
- if COMPILED_WITH_PYDEBUG or not have_usable_openssl:
- # The _sha module implements the SHA1 hash algorithm.
- exts.append( Extension('_sha', ['shamodule.c']) )
- # The _md5 module implements the RSA Data Security, Inc. MD5
- # Message-Digest Algorithm, described in RFC 1321. The
- # necessary files md5.c and md5.h are included here.
- exts.append( Extension('_md5',
- sources = ['md5module.c', 'md5.c'],
- depends = ['md5.h']) )
-
- min_sha2_openssl_ver = 0x00908000
- if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver:
- # OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
- exts.append( Extension('_sha256', ['sha256module.c']) )
- exts.append( Extension('_sha512', ['sha512module.c']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
From e9cd6a63ce17a0120b1d017bf08f05f3ed223bb1 Mon Sep 17 00:00:00 2001
From: Petr Viktorin <pviktori@redhat.com>
Date: Mon, 2 Sep 2019 18:33:22 +0200
Subject: [PATCH 08/11] Allow for errors in pre-created context creation
---
Modules/_hashopenssl.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/Modules/_hashopenssl.c b/Modules/_hashopenssl.c
index 74f9ab9ec150..7609e9e490f0 100644
--- a/Modules/_hashopenssl.c
+++ b/Modules/_hashopenssl.c
@@ -813,7 +813,7 @@ generate_hash_name_list(void)
)) { \
return NULL; \
} \
- if (usedforsecurity == 0) { \
+ if (usedforsecurity == 0 || CONST_new_ ## NAME ## _ctx_p == NULL) { \
ret_obj = EVPnew( \
CONST_ ## NAME ## _name_obj, \
EVP_get_digestbyname(#NAME), \
@@ -846,7 +846,9 @@ generate_hash_name_list(void)
CONST_ ## NAME ## _name_obj = PyString_FromString(#NAME); \
if (EVP_get_digestbyname(#NAME)) { \
CONST_new_ ## NAME ## _ctx_p = EVP_MD_CTX_new(); \
- EVP_DigestInit(CONST_new_ ## NAME ## _ctx_p, EVP_get_digestbyname(#NAME)); \
+ if (!EVP_DigestInit(CONST_new_ ## NAME ## _ctx_p, EVP_get_digestbyname(#NAME))) { \
+ CONST_new_ ## NAME ## _ctx_p = NULL; \
+ } \
} \
} \
} while (0);
From d0465ea1c07f24067b4d6f60f73a29c82f2ad03f Mon Sep 17 00:00:00 2001
From: David Malcolm <dmalcolm@redhat.com>
Date: Mon, 2 Sep 2019 18:40:08 +0200
Subject: [PATCH 09/11] use SHA-256 rather than MD5 in
multiprocessing.connection (patch 169; rhbz#879695)
---
Lib/multiprocessing/connection.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py
index 645a26f069ea..d4dc6ac19d53 100644
--- a/Lib/multiprocessing/connection.py
+++ b/Lib/multiprocessing/connection.py
@@ -56,6 +56,10 @@
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.
+# The hmac module implicitly defaults to using MD5.
+# Support using a stronger algorithm for the challenge/response code:
+HMAC_DIGEST_NAME='sha256'
+
_mmap_counter = itertools.count()
default_family = 'AF_INET'
@@ -413,12 +417,16 @@ def PipeClient(address):
WELCOME = b'#WELCOME#'
FAILURE = b'#FAILURE#'
+def get_digestmod_for_hmac():
+ import hashlib
+ return getattr(hashlib, HMAC_DIGEST_NAME)
+
def deliver_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = os.urandom(MESSAGE_LENGTH)
connection.send_bytes(CHALLENGE + message)
- digest = hmac.new(authkey, message).digest()
+ digest = hmac.new(authkey, message, get_digestmod_for_hmac()).digest()
response = connection.recv_bytes(256) # reject large message
if response == digest:
connection.send_bytes(WELCOME)
@@ -432,7 +440,7 @@ def answer_challenge(connection, authkey):
message = connection.recv_bytes(256) # reject large message
assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
message = message[len(CHALLENGE):]
- digest = hmac.new(authkey, message).digest()
+ digest = hmac.new(authkey, message, get_digestmod_for_hmac()).digest()
connection.send_bytes(digest)
response = connection.recv_bytes(256) # reject large message
if response != WELCOME:
From 82b181a2c55be0f0766fdf1f0a3e950d22fe0602 Mon Sep 17 00:00:00 2001
From: Petr Viktorin <pviktori@redhat.com>
Date: Mon, 19 Aug 2019 13:59:40 +0200
Subject: [PATCH 10/11] Make uuid.uuid3 work (using libuuid via ctypes)
---
Lib/uuid.py | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/Lib/uuid.py b/Lib/uuid.py
index 80d33c0bd83f..bfb7477b5f58 100644
--- a/Lib/uuid.py
+++ b/Lib/uuid.py
@@ -455,6 +455,7 @@ def _netbios_getnode():
# If ctypes is available, use it to find system routines for UUID generation.
_uuid_generate_time = _UuidCreate = None
+_uuid_generate_md5 = None
try:
import ctypes, ctypes.util
import sys
@@ -471,6 +472,8 @@ def _netbios_getnode():
continue
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
+ # The library that has uuid_generate_time should have md5 too.
+ _uuid_generate_md5 = getattr(lib, 'uuid_generate_md5')
break
del _libnames
@@ -595,6 +598,11 @@ def uuid1(node=None, clock_seq=None):
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
+ if _uuid_generate_md5:
+ _buffer = ctypes.create_string_buffer(16)
+ _uuid_generate_md5(_buffer, namespace.bytes, name, len(name))
+ return UUID(bytes=_buffer.raw)
+
from hashlib import md5
hash = md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3)
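
Taken together, these patches leave RHEL's Python 2.7 with an OpenSSL-only hashlib and a non-standard usedforsecurity keyword. A minimal usage sketch, assuming an interpreter built with this patch series (neither _hashlib.get_fips_mode nor usedforsecurity exists in upstream Python 2.7):

# Sketch against a patched interpreter; usedforsecurity and
# _hashlib.get_fips_mode are RHEL-only additions from this patch series.
import hashlib
import _hashlib

if _hashlib.get_fips_mode():
    # In FIPS mode a plain MD5 constructor raises ValueError ...
    try:
        hashlib.md5()
    except ValueError:
        # ... but a declared non-security use is allowed through.
        pass
h = hashlib.md5(usedforsecurity=False)
h.update('abc\n')
print(h.hexdigest())  # 0bee89b07a248e27c83fc3d5951213c1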


@@ -0,0 +1,70 @@
diff --git a/Lib/ensurepip/__init__.py b/Lib/ensurepip/__init__.py
index 5021ebf..29a7d1b 100644
--- a/Lib/ensurepip/__init__.py
+++ b/Lib/ensurepip/__init__.py
@@ -1,9 +1,10 @@
#!/usr/bin/env python2
from __future__ import print_function
+import distutils.version
+import glob
import os
import os.path
-import pkgutil
import shutil
import sys
import tempfile
@@ -12,9 +13,19 @@ import tempfile
__all__ = ["version", "bootstrap"]
-_SETUPTOOLS_VERSION = "41.2.0"
+_WHEEL_DIR = "/usr/share/python{}-wheels/".format(sys.version_info[0])
-_PIP_VERSION = "19.2.3"
+def _get_most_recent_wheel_version(pkg):
+ prefix = os.path.join(_WHEEL_DIR, "{}-".format(pkg))
+ suffix = "-py2.py3-none-any.whl"
+ pattern = "{}*{}".format(prefix, suffix)
+ versions = (p[len(prefix):-len(suffix)] for p in glob.glob(pattern))
+ return str(max(versions, key=distutils.version.LooseVersion))
+
+
+_SETUPTOOLS_VERSION = _get_most_recent_wheel_version("setuptools")
+
+_PIP_VERSION = _get_most_recent_wheel_version("pip")
_PROJECTS = [
("setuptools", _SETUPTOOLS_VERSION),
@@ -28,8 +39,13 @@ def _run_pip(args, additional_paths=None):
sys.path = additional_paths + sys.path
# Install the bundled software
- import pip._internal
- return pip._internal.main(args)
+ try:
+ # pip 10
+ from pip._internal import main
+ except ImportError:
+ # pip 9
+ from pip import main
+ return main(args)
def version():
@@ -100,12 +116,9 @@ def _bootstrap(root=None, upgrade=False, user=False,
additional_paths = []
for project, version in _PROJECTS:
wheel_name = "{}-{}-py2.py3-none-any.whl".format(project, version)
- whl = pkgutil.get_data(
- "ensurepip",
- "_bundled/{}".format(wheel_name),
- )
- with open(os.path.join(tmpdir, wheel_name), "wb") as fp:
- fp.write(whl)
+ with open(os.path.join(_WHEEL_DIR, wheel_name), "rb") as sfp:
+ with open(os.path.join(tmpdir, wheel_name), "wb") as fp:
+ fp.write(sfp.read())
additional_paths.append(os.path.join(tmpdir, wheel_name))
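
Instead of pinned versions, the patched ensurepip takes whatever wheel in the shared wheel directory sorts highest. A standalone sketch of that lookup, mirroring _get_most_recent_wheel_version above (the wheel directory is assumed to be populated by separate distribution packages, which this diff does not show):

# Standalone sketch of the version discovery the patch adds to ensurepip.
import glob
import os.path
import sys
import distutils.version

_WHEEL_DIR = "/usr/share/python{}-wheels/".format(sys.version_info[0])

def newest_wheel_version(pkg):
    prefix = os.path.join(_WHEEL_DIR, "{}-".format(pkg))
    suffix = "-py2.py3-none-any.whl"
    versions = (p[len(prefix):-len(suffix)]
                for p in glob.glob("{}*{}".format(prefix, suffix)))
    # LooseVersion orders "19.2.3" above "9.0.1"; plain string max() would not.
    return str(max(versions, key=distutils.version.LooseVersion))

# e.g. with pip-19.2.3-py2.py3-none-any.whl installed:
#   newest_wheel_version("pip") -> "19.2.3"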


@@ -0,0 +1,53 @@
diff -U3 -r Python-2.7.14.orig/Lib/site.py Python-2.7.14/Lib/site.py
--- Python-2.7.14.orig/Lib/site.py 2018-01-29 15:05:04.517599815 +0100
+++ Python-2.7.14/Lib/site.py 2018-01-30 09:13:17.305270500 +0100
@@ -515,6 +515,41 @@
"'import usercustomize' failed; use -v for traceback"
+def handle_ambiguous_python_version():
+ """Warn or fail if /usr/bin/python is used
+
+ Behavior depends on the value of PYTHON_DISALLOW_AMBIGUOUS_VERSION:
+ - "warn" - print warning to stderr
+ - "1" - print error and exit with positive exit code
+ - otherwise: do nothing
+
+ This is a Fedora modification, see the Change page for details:
+ See https://fedoraproject.org/wiki/Changes/Avoid_usr_bin_python_in_RPM_Build
+ """
+ if sys.executable == "/usr/bin/python":
+ setting = os.environ.get("PYTHON_DISALLOW_AMBIGUOUS_VERSION")
+ if setting == 'warn':
+ print>>sys.stderr, (
+ "DEPRECATION WARNING: python2 invoked with /usr/bin/python.\n"
+ " Use /usr/bin/python3 or /usr/bin/python2\n"
+ " /usr/bin/python will be removed or switched to Python 3"
+ " in the future.\n"
+ " If you cannot make the switch now, please follow"
+ " instructions at"
+ " https://fedoraproject.org/wiki/Changes/"
+ "Avoid_usr_bin_python_in_RPM_Build#Quick_Opt-Out")
+ elif setting == '1':
+ print>>sys.stderr, (
+ "ERROR: python2 invoked with /usr/bin/python.\n"
+ " Use /usr/bin/python3 or /usr/bin/python2\n"
+ " /usr/bin/python will be switched to Python 3"
+ " in the future.\n"
+ " More details are at"
+ " https://fedoraproject.org/wiki/Changes/"
+ "Avoid_usr_bin_python_in_RPM_Build#Quick_Opt-Out")
+ exit(1)
+
+
def main():
global ENABLE_USER_SITE
@@ -543,6 +578,7 @@
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
+ handle_ambiguous_python_version()
main()
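
Because the hook runs from site.py at interpreter startup, the policy can be observed without touching any code. A small sketch of exercising it, assuming a patched /usr/bin/python from a Fedora/RHEL build:

# Sketch: observe the patched site.py check from a child interpreter.
import os
import subprocess

env = dict(os.environ, PYTHON_DISALLOW_AMBIGUOUS_VERSION="warn")
# Prints the DEPRECATION WARNING to stderr; the child still runs.
subprocess.call(["/usr/bin/python", "-c", "pass"], env=env)

env["PYTHON_DISALLOW_AMBIGUOUS_VERSION"] = "1"
# Prints the ERROR to stderr and exits non-zero before running the script.
print(subprocess.call(["/usr/bin/python", "-c", "pass"], env=env))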


@@ -0,0 +1,70 @@
From b099ce737f6e6cc9f3a1bf756af78eaa1c1480cd Mon Sep 17 00:00:00 2001
From: Rishi <rishi_devan@mail.com>
Date: Wed, 15 Jul 2020 13:51:00 +0200
Subject: [PATCH] 00351-cve-2019-20907-fix-infinite-loop-in-tarfile.patch
00351 #
Avoid infinite loop when reading specially crafted TAR files using the tarfile module
(CVE-2019-20907).
See: https://bugs.python.org/issue39017
---
Lib/tarfile.py | 2 ++
Lib/test/recursion.tar | Bin 0 -> 516 bytes
Lib/test/test_tarfile.py | 7 +++++++
.../2020-07-12-22-16-58.bpo-39017.x3Cg-9.rst | 1 +
4 files changed, 10 insertions(+)
create mode 100644 Lib/test/recursion.tar
create mode 100644 Misc/NEWS.d/next/Library/2020-07-12-22-16-58.bpo-39017.x3Cg-9.rst
diff --git a/Lib/tarfile.py b/Lib/tarfile.py
index adf91d5..574a6bb 100644
--- a/Lib/tarfile.py
+++ b/Lib/tarfile.py
@@ -1400,6 +1400,8 @@ class TarInfo(object):
length, keyword = match.groups()
length = int(length)
+ if length == 0:
+ raise InvalidHeaderError("invalid header")
value = buf[match.end(2) + 1:match.start(1) + length - 1]
keyword = keyword.decode("utf8")
diff --git a/Lib/test/recursion.tar b/Lib/test/recursion.tar
new file mode 100644
index 0000000000000000000000000000000000000000..b8237251964983f54ed1966297e887636cd0c5f4
GIT binary patch
literal 516
zcmYdFPRz+kEn=W0Fn}74P8%Xw3X=l~85kIuo0>8xq$A1Gm}!7)KUsFc41m#O8A5+e
I1_}|j06>QaCIA2c
literal 0
HcmV?d00001
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index 89bd738..4592156 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -325,6 +325,13 @@ class CommonReadTest(ReadTest):
class MiscReadTest(CommonReadTest):
taropen = tarfile.TarFile.taropen
+ def test_length_zero_header(self):
+ # bpo-39017 (CVE-2019-20907): reading a zero-length header should fail
+ # with an exception
+ with self.assertRaisesRegexp(tarfile.ReadError, "file could not be opened successfully"):
+ with tarfile.open(support.findfile('recursion.tar')) as tar:
+ pass
+
def test_no_name_argument(self):
with open(self.tarname, "rb") as fobj:
tar = tarfile.open(fileobj=fobj, mode=self.mode)
diff --git a/Misc/NEWS.d/next/Library/2020-07-12-22-16-58.bpo-39017.x3Cg-9.rst b/Misc/NEWS.d/next/Library/2020-07-12-22-16-58.bpo-39017.x3Cg-9.rst
new file mode 100644
index 0000000..ad26676
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2020-07-12-22-16-58.bpo-39017.x3Cg-9.rst
@@ -0,0 +1 @@
+Avoid infinite loop when reading specially crafted TAR files using the tarfile module (CVE-2019-20907).
--
2.25.4
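
With the check in place, a crafted archive fails fast at open() instead of looping forever. A hedged sketch of the caller-visible behaviour ('recursion.tar' stands in for any TAR whose pax header declares a length of 0):

# Sketch: a zero-length pax header record now raises InvalidHeaderError
# inside TarInfo, which tarfile.open() reports as ReadError
# ("file could not be opened successfully") rather than looping forever.
import tarfile

try:
    tar = tarfile.open("recursion.tar")  # any such crafted archive
except tarfile.ReadError as exc:
    print(exc)
else:
    tar.close()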


@@ -0,0 +1,88 @@
diff --git a/Lib/httplib.py b/Lib/httplib.py
index fcc4152..a636774 100644
--- a/Lib/httplib.py
+++ b/Lib/httplib.py
@@ -257,6 +257,10 @@ _contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f-\xff]')
# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
# We are more lenient for assumed real world compatibility purposes.
+# These characters are not allowed within HTTP method names
+# to prevent http header injection.
+_contains_disallowed_method_pchar_re = re.compile('[\x00-\x1f]')
+
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
@@ -935,6 +939,8 @@ class HTTPConnection:
else:
raise CannotSendRequest()
+ self._validate_method(method)
+
# Save the method for use later in the response phase
self._method = method
@@ -1020,6 +1026,16 @@ class HTTPConnection:
# On Python 2, request is already encoded (default)
return request
+ def _validate_method(self, method):
+ """Validate a method name for putrequest."""
+ # prevent http header injection
+ match = _contains_disallowed_method_pchar_re.search(method)
+ if match:
+ raise ValueError(
+ "method can't contain control characters. %r "
+ "(found at least %r)"
+ % (method, match.group()))
+
def _validate_path(self, url):
"""Validate a url for putrequest."""
# Prevent CVE-2019-9740.
diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py
index d8a57f7..96a61dd 100644
--- a/Lib/test/test_httplib.py
+++ b/Lib/test/test_httplib.py
@@ -385,6 +385,29 @@ class HeaderTests(TestCase):
conn.putheader(name, value)
+class HttpMethodTests(TestCase):
+ def test_invalid_method_names(self):
+ methods = (
+ 'GET\r',
+ 'POST\n',
+ 'PUT\n\r',
+ 'POST\nValue',
+ 'POST\nHOST:abc',
+ 'GET\nrHost:abc\n',
+ 'POST\rRemainder:\r',
+ 'GET\rHOST:\n',
+ '\nPUT'
+ )
+
+ for method in methods:
+ with self.assertRaisesRegexp(
+ ValueError, "method can't contain control characters"):
+ conn = httplib.HTTPConnection('example.com')
+ conn.sock = FakeSocket(None)
+ conn.request(method=method, url="/")
+
+
+
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
@@ -1009,9 +1032,9 @@ class TunnelTests(TestCase):
@test_support.reap_threads
def test_main(verbose=None):
- test_support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
- HTTPTest, HTTPSTest, SourceAddressTest,
- TunnelTests)
+ test_support.run_unittest(HeaderTests, OfflineTest, HttpMethodTests,
+ BasicTest, TimeoutTest, HTTPTest, HTTPSTest,
+ SourceAddressTest, TunnelTests)
if __name__ == '__main__':
test_main()
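
After this patch, putrequest() rejects any method name carrying a control character before a request line is ever assembled, closing the header-injection vector. A short sketch (example.com is a placeholder host; the ValueError fires before any connection is made):

# Sketch: the patched _validate_method runs inside putrequest(),
# so the ValueError is raised before any network I/O happens.
import httplib

conn = httplib.HTTPConnection("example.com")
try:
    conn.request("GET\r\nHost: injected", "/")
except ValueError as exc:
    print(exc)  # method can't contain control characters. ...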


@@ -0,0 +1,42 @@
diff --git a/Lib/test/multibytecodec_support.py b/Lib/test/multibytecodec_support.py
index 5b2329b6d84..53b5d64d453 100644
--- a/Lib/test/multibytecodec_support.py
+++ b/Lib/test/multibytecodec_support.py
@@ -279,30 +279,22 @@ class TestBase_Mapping(unittest.TestCase):
self._test_mapping_file_plain()
def _test_mapping_file_plain(self):
- _unichr = lambda c: eval("u'\\U%08x'" % int(c, 16))
- unichrs = lambda s: u''.join(_unichr(c) for c in s.split('+'))
+ def unichrs(s):
+ return ''.join(unichr(int(x, 16)) for x in s.split('+'))
urt_wa = {}
with self.open_mapping_file() as f:
for line in f:
if not line:
break
- data = line.split('#')[0].strip().split()
+ data = line.split('#')[0].split()
if len(data) != 2:
continue
- csetval = eval(data[0])
- if csetval <= 0x7F:
- csetch = chr(csetval & 0xff)
- elif csetval >= 0x1000000:
- csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \
- chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
- elif csetval >= 0x10000:
- csetch = chr(csetval >> 16) + \
- chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
- elif csetval >= 0x100:
- csetch = chr(csetval >> 8) + chr(csetval & 0xff)
- else:
+ if data[0][:2] != '0x':
+ self.fail("Invalid line: {!r}".format(line))
+ csetch = bytes.fromhex(data[0][2:])
+ if len(csetch) == 1 and 0x80 <= csetch[0]:
continue
unich = unichrs(data[1])

00357-CVE-2021-3177.patch Normal file

@@ -0,0 +1,181 @@
commit 30e41798f40c684be57d7ccfebf5c6ad94c0ff97
Author: Petr Viktorin <pviktori@redhat.com>
Date: Wed Jan 20 15:21:43 2021 +0100
CVE-2021-3177: Replace snprintf with Python unicode formatting in ctypes param reprs
Backport of Python3 commit 916610ef90a0d0761f08747f7b0905541f0977c7:
https://bugs.python.org/issue42938
https://github.com/python/cpython/pull/24239
diff --git a/Lib/ctypes/test/test_parameters.py b/Lib/ctypes/test/test_parameters.py
index 23c1b6e2259..77300d71ae1 100644
--- a/Lib/ctypes/test/test_parameters.py
+++ b/Lib/ctypes/test/test_parameters.py
@@ -206,6 +206,49 @@ class SimpleTypesTestCase(unittest.TestCase):
with self.assertRaises(ZeroDivisionError):
WorseStruct().__setstate__({}, b'foo')
+ def test_parameter_repr(self):
+ from ctypes import (
+ c_bool,
+ c_char,
+ c_wchar,
+ c_byte,
+ c_ubyte,
+ c_short,
+ c_ushort,
+ c_int,
+ c_uint,
+ c_long,
+ c_ulong,
+ c_longlong,
+ c_ulonglong,
+ c_float,
+ c_double,
+ c_longdouble,
+ c_char_p,
+ c_wchar_p,
+ c_void_p,
+ )
+ self.assertRegexpMatches(repr(c_bool.from_param(True)), r"^<cparam '\?' at 0x[A-Fa-f0-9]+>$")
+ self.assertEqual(repr(c_char.from_param('a')), "<cparam 'c' ('a')>")
+ self.assertRegexpMatches(repr(c_wchar.from_param('a')), r"^<cparam 'u' at 0x[A-Fa-f0-9]+>$")
+ self.assertEqual(repr(c_byte.from_param(98)), "<cparam 'b' (98)>")
+ self.assertEqual(repr(c_ubyte.from_param(98)), "<cparam 'B' (98)>")
+ self.assertEqual(repr(c_short.from_param(511)), "<cparam 'h' (511)>")
+ self.assertEqual(repr(c_ushort.from_param(511)), "<cparam 'H' (511)>")
+ self.assertRegexpMatches(repr(c_int.from_param(20000)), r"^<cparam '[li]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_uint.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_long.from_param(20000)), r"^<cparam '[li]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_ulong.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_longlong.from_param(20000)), r"^<cparam '[liq]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_ulonglong.from_param(20000)), r"^<cparam '[LIQ]' \(20000\)>$")
+ self.assertEqual(repr(c_float.from_param(1.5)), "<cparam 'f' (1.5)>")
+ self.assertEqual(repr(c_double.from_param(1.5)), "<cparam 'd' (1.5)>")
+ self.assertEqual(repr(c_double.from_param(1e300)), "<cparam 'd' (1e+300)>")
+ self.assertRegexpMatches(repr(c_longdouble.from_param(1.5)), r"^<cparam ('d' \(1.5\)|'g' at 0x[A-Fa-f0-9]+)>$")
+ self.assertRegexpMatches(repr(c_char_p.from_param(b'hihi')), "^<cparam 'z' \(0x[A-Fa-f0-9]+\)>$")
+ self.assertRegexpMatches(repr(c_wchar_p.from_param('hihi')), "^<cparam 'Z' \(0x[A-Fa-f0-9]+\)>$")
+ self.assertRegexpMatches(repr(c_void_p.from_param(0x12)), r"^<cparam 'P' \(0x0*12\)>$")
+
################################################################
if __name__ == '__main__':
diff --git a/Misc/NEWS.d/next/Security/2021-01-18-09-27-31.bpo-42938.4Zn4Mp.rst b/Misc/NEWS.d/next/Security/2021-01-18-09-27-31.bpo-42938.4Zn4Mp.rst
new file mode 100644
index 00000000000..7df65a156fe
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2021-01-18-09-27-31.bpo-42938.4Zn4Mp.rst
@@ -0,0 +1,2 @@
+Avoid static buffers when computing the repr of :class:`ctypes.c_double` and
+:class:`ctypes.c_longdouble` values.
diff --git a/Modules/_ctypes/callproc.c b/Modules/_ctypes/callproc.c
index 066fefc0cca..5cc3c4cf685 100644
--- a/Modules/_ctypes/callproc.c
+++ b/Modules/_ctypes/callproc.c
@@ -460,50 +460,62 @@ PyCArg_dealloc(PyCArgObject *self)
static PyObject *
PyCArg_repr(PyCArgObject *self)
{
- char buffer[256];
switch(self->tag) {
case 'b':
case 'B':
- sprintf(buffer, "<cparam '%c' (%d)>",
+ return PyString_FromFormat("<cparam '%c' (%d)>",
self->tag, self->value.b);
- break;
case 'h':
case 'H':
- sprintf(buffer, "<cparam '%c' (%d)>",
+ return PyString_FromFormat("<cparam '%c' (%d)>",
self->tag, self->value.h);
- break;
case 'i':
case 'I':
- sprintf(buffer, "<cparam '%c' (%d)>",
+ return PyString_FromFormat("<cparam '%c' (%d)>",
self->tag, self->value.i);
- break;
case 'l':
case 'L':
- sprintf(buffer, "<cparam '%c' (%ld)>",
+ return PyString_FromFormat("<cparam '%c' (%ld)>",
self->tag, self->value.l);
- break;
#ifdef HAVE_LONG_LONG
case 'q':
case 'Q':
- sprintf(buffer,
- "<cparam '%c' (%" PY_FORMAT_LONG_LONG "d)>",
+ return PyString_FromFormat("<cparam '%c' (%lld)>",
self->tag, self->value.q);
- break;
#endif
case 'd':
- sprintf(buffer, "<cparam '%c' (%f)>",
- self->tag, self->value.d);
- break;
- case 'f':
- sprintf(buffer, "<cparam '%c' (%f)>",
- self->tag, self->value.f);
- break;
-
+ case 'f': {
+ PyObject *s = PyString_FromFormat("<cparam '%c' (", self->tag);
+ if (s == NULL) {
+ return NULL;
+ }
+ PyObject *f = PyFloat_FromDouble((self->tag == 'f') ? self->value.f : self->value.d);
+ if (f == NULL) {
+ Py_DECREF(s);
+ return NULL;
+ }
+ PyObject *r = PyObject_Repr(f);
+ Py_DECREF(f);
+ if (r == NULL) {
+ Py_DECREF(s);
+ return NULL;
+ }
+ PyString_ConcatAndDel(&s, r);
+ if (s == NULL) {
+ return NULL;
+ }
+ r = PyString_FromString(")>");
+ if (r == NULL) {
+ Py_DECREF(s);
+ return NULL;
+ }
+ PyString_ConcatAndDel(&s, r);
+ return s;
+ }
case 'c':
- sprintf(buffer, "<cparam '%c' (%c)>",
+ return PyString_FromFormat("<cparam '%c' ('%c')>",
self->tag, self->value.c);
- break;
/* Hm, are these 'z' and 'Z' codes useful at all?
Shouldn't they be replaced by the functionality of c_string
@@ -512,16 +524,13 @@ PyCArg_repr(PyCArgObject *self)
case 'z':
case 'Z':
case 'P':
- sprintf(buffer, "<cparam '%c' (%p)>",
+ return PyUnicode_FromFormat("<cparam '%c' (%p)>",
self->tag, self->value.p);
- break;
default:
- sprintf(buffer, "<cparam '%c' at %p>",
- self->tag, self);
- break;
+ return PyString_FromFormat("<cparam '%c' at %p>",
+ (unsigned char)self->tag, (void *)self);
}
- return PyString_FromString(buffer);
}
static PyMemberDef PyCArgType_members[] = {
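
The upshot is that parameter reprs are built with Python string formatting instead of sprintf into a fixed 256-byte stack buffer, so no value can overflow it. A quick sketch on a patched build:

# Sketch: float param reprs no longer go through a 256-byte stack buffer.
from ctypes import c_double, c_longdouble

# "%f" on 1e300 used to expand to ~300 characters and smash the buffer;
# the repr is now produced by Python's own float formatting.
print(repr(c_double.from_param(1e300)))  # <cparam 'd' (1e+300)>
print(repr(c_longdouble.from_param(1.5)))  # 'd' or 'g' form, per platform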

00359-CVE-2021-23336.patch Normal file

@@ -0,0 +1,707 @@
From 976a4010aa4e450855dce5fa4c865bcbdc86cccd Mon Sep 17 00:00:00 2001
From: Charalampos Stratakis <cstratak@redhat.com>
Date: Fri, 16 Apr 2021 18:02:00 +0200
Subject: [PATCH] CVE-2021-23336: Add `separator` argument to parse_qs; warn
with default
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Partially backports https://bugs.python.org/issue42967 : [security] Address a web cache-poisoning issue reported in urllib.parse.parse_qsl().
Backported from the python3 branch.
However, this solution is different than the upstream solution in Python 3.
Based on the downstream solution for python 3.6.13 by Petr Viktorin.
An optional argument separator is added to specify the separator.
It is recommended to set it to '&' or ';' to match the application or proxy in use.
The default can be set with an env variable or a config file.
If neither the argument, env var nor config file specifies a separator, "&" is used,
but a warning is raised if parse_qs is used on input that contains ';'.
Co-authors of the downstream change:
Co-authored-by: Petr Viktorin <pviktori@redhat.com>
Co-authors of the upstream change (who do not necessarily agree with this):
Co-authored-by: Adam Goldschmidt <adamgold7@gmail.com>
Co-authored-by: Ken Jin <28750310+Fidget-Spinner@users.noreply.github.com>
Co-authored-by: Éric Araujo <merwok@netwok.org>
---
Doc/library/cgi.rst | 5 +-
Doc/library/urlparse.rst | 15 ++-
Lib/cgi.py | 34 +++---
Lib/test/test_cgi.py | 59 ++++++++++-
Lib/test/test_urlparse.py | 210 +++++++++++++++++++++++++++++++++++++-
Lib/urlparse.py | 78 +++++++++++++-
6 files changed, 369 insertions(+), 32 deletions(-)
diff --git a/Doc/library/cgi.rst b/Doc/library/cgi.rst
index ecd62c8c019..a96cd38717b 100644
--- a/Doc/library/cgi.rst
+++ b/Doc/library/cgi.rst
@@ -285,10 +285,10 @@ These are useful if you want more control, or if you want to employ some of the
algorithms implemented in this module in other circumstances.
-.. function:: parse(fp[, environ[, keep_blank_values[, strict_parsing]]])
+.. function:: parse(fp[, environ[, keep_blank_values[, strict_parsing[, separator]]]])
Parse a query in the environment or from a file (the file defaults to
- ``sys.stdin`` and environment defaults to ``os.environ``). The *keep_blank_values* and *strict_parsing* parameters are
+ ``sys.stdin`` and environment defaults to ``os.environ``). The *keep_blank_values*, *strict_parsing* and *separator* parameters are
passed to :func:`urlparse.parse_qs` unchanged.
@@ -316,7 +316,6 @@ algorithms implemented in this module in other circumstances.
Note that this does not parse nested multipart parts --- use
:class:`FieldStorage` for that.
-
.. function:: parse_header(string)
Parse a MIME header (such as :mailheader:`Content-Type`) into a main value and a
diff --git a/Doc/library/urlparse.rst b/Doc/library/urlparse.rst
index 0989c88c302..97d1119257c 100644
--- a/Doc/library/urlparse.rst
+++ b/Doc/library/urlparse.rst
@@ -136,7 +136,7 @@ The :mod:`urlparse` module defines the following functions:
now raise :exc:`ValueError`.
-.. function:: parse_qs(qs[, keep_blank_values[, strict_parsing[, max_num_fields]]])
+.. function:: parse_qs(qs[, keep_blank_values[, strict_parsing[, max_num_fields[, separator]]]])
Parse a query string given as a string argument (data of type
:mimetype:`application/x-www-form-urlencoded`). Data are returned as a
@@ -157,6 +157,15 @@ The :mod:`urlparse` module defines the following functions:
read. If set, then throws a :exc:`ValueError` if there are more than
*max_num_fields* fields read.
+ The optional argument *separator* is the symbol to use for separating the
+ query arguments. It is recommended to set it to ``'&'`` or ``';'``.
+ It defaults to ``'&'``; a warning is raised if this default is used.
+ This default may be changed with the following environment variable settings:
+
+ - ``PYTHON_URLLIB_QS_SEPARATOR='&'``: use only ``&`` as separator, without warning (as in Python 3.6.13+ or 3.10)
+ - ``PYTHON_URLLIB_QS_SEPARATOR=';'``: use only ``;`` as separator
+ - ``PYTHON_URLLIB_QS_SEPARATOR=legacy``: use both ``&`` and ``;`` (as in previous versions of Python)
+
Use the :func:`urllib.urlencode` function to convert such dictionaries into
query strings.
@@ -186,6 +195,9 @@ The :mod:`urlparse` module defines the following functions:
read. If set, then throws a :exc:`ValueError` if there are more than
*max_num_fields* fields read.
+ The optional argument *separator* is the symbol to use for separating the
+ query arguments. It works as in :py:func:`parse_qs`.
+
Use the :func:`urllib.urlencode` function to convert such lists of pairs into
query strings.
@@ -195,6 +207,7 @@ The :mod:`urlparse` module defines the following functions:
.. versionchanged:: 2.7.16
Added *max_num_fields* parameter.
+
.. function:: urlunparse(parts)
Construct a URL from a tuple as returned by ``urlparse()``. The *parts* argument
diff --git a/Lib/cgi.py b/Lib/cgi.py
index 5b903e03477..1421f2d90e0 100755
--- a/Lib/cgi.py
+++ b/Lib/cgi.py
@@ -121,7 +121,8 @@ log = initlog # The current logging function
# 0 ==> unlimited input
maxlen = 0
-def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
+def parse(fp=None, environ=os.environ, keep_blank_values=0,
+ strict_parsing=0, separator=None):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
@@ -140,6 +141,8 @@ def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
+
+ separator: str. The symbol to use for separating the query arguments.
"""
if fp is None:
fp = sys.stdin
@@ -171,25 +174,26 @@ def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
- return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
+ return urlparse.parse_qs(qs, keep_blank_values, strict_parsing, separator=separator)
# parse query string function called from urlparse,
# this is done in order to maintain backward compatibility.
-def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
+def parse_qs(qs, keep_blank_values=0, strict_parsing=0, separator=None):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urlparse.parse_qs instead",
PendingDeprecationWarning, 2)
- return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
+ return urlparse.parse_qs(qs, keep_blank_values, strict_parsing,
+ separator=separator)
-def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
+def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None, separator=None):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urlparse.parse_qsl instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing,
- max_num_fields)
+ max_num_fields, separator=separator)
def parse_multipart(fp, pdict):
"""Parse multipart input.
@@ -288,7 +292,6 @@ def parse_multipart(fp, pdict):
return partdict
-
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
@@ -395,7 +398,7 @@ class FieldStorage:
def __init__(self, fp=None, headers=None, outerboundary="",
environ=os.environ, keep_blank_values=0, strict_parsing=0,
- max_num_fields=None):
+ max_num_fields=None, separator=None):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
@@ -430,6 +433,7 @@ class FieldStorage:
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
self.max_num_fields = max_num_fields
+ self.separator = separator
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
@@ -613,7 +617,8 @@ class FieldStorage:
if self.qs_on_post:
qs += '&' + self.qs_on_post
query = urlparse.parse_qsl(qs, self.keep_blank_values,
- self.strict_parsing, self.max_num_fields)
+ self.strict_parsing, self.max_num_fields,
+ self.separator)
self.list = [MiniFieldStorage(key, value) for key, value in query]
self.skip_lines()
@@ -629,7 +634,8 @@ class FieldStorage:
query = urlparse.parse_qsl(self.qs_on_post,
self.keep_blank_values,
self.strict_parsing,
- self.max_num_fields)
+ self.max_num_fields,
+ self.separator)
self.list.extend(MiniFieldStorage(key, value)
for key, value in query)
FieldStorageClass = None
@@ -649,7 +655,8 @@ class FieldStorage:
headers = rfc822.Message(self.fp)
part = klass(self.fp, headers, ib,
environ, keep_blank_values, strict_parsing,
- max_num_fields)
+ max_num_fields,
+ separator=self.separator)
if max_num_fields is not None:
max_num_fields -= 1
@@ -817,10 +824,11 @@ class FormContentDict(UserDict.UserDict):
form.dict == {key: [val, val, ...], ...}
"""
- def __init__(self, environ=os.environ, keep_blank_values=0, strict_parsing=0):
+ def __init__(self, environ=os.environ, keep_blank_values=0, strict_parsing=0, separator=None):
self.dict = self.data = parse(environ=environ,
keep_blank_values=keep_blank_values,
- strict_parsing=strict_parsing)
+ strict_parsing=strict_parsing,
+ separator=separator)
self.query_string = environ['QUERY_STRING']
diff --git a/Lib/test/test_cgi.py b/Lib/test/test_cgi.py
index 743c2afbd4c..9956ea9d4e8 100644
--- a/Lib/test/test_cgi.py
+++ b/Lib/test/test_cgi.py
@@ -61,12 +61,9 @@ parse_strict_test_cases = [
("", ValueError("bad query field: ''")),
("&", ValueError("bad query field: ''")),
("&&", ValueError("bad query field: ''")),
- (";", ValueError("bad query field: ''")),
- (";&;", ValueError("bad query field: ''")),
# Should the next few really be valid?
("=", {}),
("=&=", {}),
- ("=;=", {}),
# This rest seem to make sense
("=a", {'': ['a']}),
("&=a", ValueError("bad query field: ''")),
@@ -81,8 +78,6 @@ parse_strict_test_cases = [
("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
- ("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
- ("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
{'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
'cuyer': ['r'],
@@ -177,6 +172,60 @@ class CgiTests(unittest.TestCase):
self.assertItemsEqual(sd.items(),
first_second_elts(expect.items()))
+ def test_separator(self):
+ parse_semicolon = [
+ ("x=1;y=2.0", {'x': ['1'], 'y': ['2.0']}),
+ ("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
+ (";", ValueError("bad query field: ''")),
+ (";;", ValueError("bad query field: ''")),
+ ("=;a", ValueError("bad query field: 'a'")),
+ (";b=a", ValueError("bad query field: ''")),
+ ("b;=a", ValueError("bad query field: 'b'")),
+ ("a=a+b;b=b+c", {'a': ['a b'], 'b': ['b c']}),
+ ("a=a+b;a=b+a", {'a': ['a b', 'b a']}),
+ ]
+ for orig, expect in parse_semicolon:
+ env = {'QUERY_STRING': orig}
+ fcd = cgi.FormContentDict(env, separator=';')
+ sd = cgi.SvFormContentDict(env, separator=';')
+ fs = cgi.FieldStorage(environ=env, separator=';')
+ if isinstance(expect, dict):
+ # test dict interface
+ self.assertEqual(len(expect), len(fcd))
+ self.assertItemsEqual(expect.keys(), fcd.keys())
+ self.assertItemsEqual(expect.values(), fcd.values())
+ self.assertItemsEqual(expect.items(), fcd.items())
+ self.assertEqual(fcd.get("nonexistent field", "default"), "default")
+ self.assertEqual(len(sd), len(fs))
+ self.assertItemsEqual(sd.keys(), fs.keys())
+ self.assertEqual(fs.getvalue("nonexistent field", "default"), "default")
+ # test individual fields
+ for key in expect.keys():
+ expect_val = expect[key]
+ self.assertTrue(fcd.has_key(key))
+ self.assertItemsEqual(fcd[key], expect[key])
+ self.assertEqual(fcd.get(key, "default"), fcd[key])
+ self.assertTrue(fs.has_key(key))
+ if len(expect_val) > 1:
+ single_value = 0
+ else:
+ single_value = 1
+ try:
+ val = sd[key]
+ except IndexError:
+ self.assertFalse(single_value)
+ self.assertEqual(fs.getvalue(key), expect_val)
+ else:
+ self.assertTrue(single_value)
+ self.assertEqual(val, expect_val[0])
+ self.assertEqual(fs.getvalue(key), expect_val[0])
+ self.assertItemsEqual(sd.getlist(key), expect_val)
+ if single_value:
+ self.assertItemsEqual(sd.values(),
+ first_elts(expect.values()))
+ self.assertItemsEqual(sd.items(),
+ first_second_elts(expect.items()))
+
def test_weird_formcontentdict(self):
# Test the weird FormContentDict classes
env = {'QUERY_STRING': "x=1&y=2.0&z=2-3.%2b0&1=1abc"}
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 86c4a0595c4..21875bb2991 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -3,6 +3,12 @@ import sys
import unicodedata
import unittest
import urlparse
+from test.support import EnvironmentVarGuard
+from warnings import catch_warnings, filterwarnings
+import tempfile
+import contextlib
+import os.path
+import shutil
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
@@ -24,16 +30,29 @@ parse_qsl_test_cases = [
("&a=b", [('a', 'b')]),
("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
("a=1&a=2", [('a', '1'), ('a', '2')]),
+]
+
+parse_qsl_test_cases_semicolon = [
(";", []),
(";;", []),
(";a=b", [('a', 'b')]),
("a=a+b;b=b+c", [('a', 'a b'), ('b', 'b c')]),
("a=1;a=2", [('a', '1'), ('a', '2')]),
- (b";", []),
- (b";;", []),
- (b";a=b", [(b'a', b'b')]),
- (b"a=a+b;b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
- (b"a=1;a=2", [(b'a', b'1'), (b'a', b'2')]),
+]
+
+parse_qsl_test_cases_legacy = [
+ ("a=1;a=2&a=3", [('a', '1'), ('a', '2'), ('a', '3')]),
+ ("a=1;b=2&c=3", [('a', '1'), ('b', '2'), ('c', '3')]),
+ ("a=1&b=2&c=3;", [('a', '1'), ('b', '2'), ('c', '3')]),
+]
+
+parse_qsl_test_cases_warn = [
+ (";a=b", [(';a', 'b')]),
+ ("a=a+b;b=b+c", [('a', 'a b;b=b c')]),
+ (b";a=b", [(b';a', b'b')]),
+ (b"a=a+b;b=b+c", [(b'a', b'a b;b=b c')]),
+ ("a=1;a=2&a=3", [('a', '1;a=2'), ('a', '3')]),
+ (b"a=1;a=2&a=3", [(b'a', b'1;a=2'), (b'a', b'3')]),
]
parse_qs_test_cases = [
@@ -57,6 +76,9 @@ parse_qs_test_cases = [
(b"&a=b", {b'a': [b'b']}),
(b"a=a+b&b=b+c", {b'a': [b'a b'], b'b': [b'b c']}),
(b"a=1&a=2", {b'a': [b'1', b'2']}),
+]
+
+parse_qs_test_cases_semicolon = [
(";", {}),
(";;", {}),
(";a=b", {'a': ['b']}),
@@ -69,6 +91,24 @@ parse_qs_test_cases = [
(b"a=1;a=2", {b'a': [b'1', b'2']}),
]
+parse_qs_test_cases_legacy = [
+ ("a=1;a=2&a=3", {'a': ['1', '2', '3']}),
+ ("a=1;b=2&c=3", {'a': ['1'], 'b': ['2'], 'c': ['3']}),
+ ("a=1&b=2&c=3;", {'a': ['1'], 'b': ['2'], 'c': ['3']}),
+ (b"a=1;a=2&a=3", {b'a': [b'1', b'2', b'3']}),
+ (b"a=1;b=2&c=3", {b'a': [b'1'], b'b': [b'2'], b'c': [b'3']}),
+ (b"a=1&b=2&c=3;", {b'a': [b'1'], b'b': [b'2'], b'c': [b'3']}),
+]
+
+parse_qs_test_cases_warn = [
+ (";a=b", {';a': ['b']}),
+ ("a=a+b;b=b+c", {'a': ['a b;b=b c']}),
+ (b";a=b", {b';a': [b'b']}),
+ (b"a=a+b;b=b+c", {b'a':[ b'a b;b=b c']}),
+ ("a=1;a=2&a=3", {'a': ['1;a=2', '3']}),
+ (b"a=1;a=2&a=3", {b'a': [b'1;a=2', b'3']}),
+]
+
class UrlParseTestCase(unittest.TestCase):
def checkRoundtrips(self, url, parsed, split):
@@ -141,6 +181,40 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(result, expect_without_blanks,
"Error parsing %r" % orig)
+ def test_qs_default_warn(self):
+ for orig, expect in parse_qs_test_cases_warn:
+ with catch_warnings(record=True) as w:
+ filterwarnings(action='always',
+ category=urlparse._QueryStringSeparatorWarning)
+ result = urlparse.parse_qs(orig, keep_blank_values=True)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, urlparse._QueryStringSeparatorWarning)
+
+ def test_qsl_default_warn(self):
+ for orig, expect in parse_qsl_test_cases_warn:
+ with catch_warnings(record=True) as w:
+ filterwarnings(action='always',
+ category=urlparse._QueryStringSeparatorWarning)
+ result = urlparse.parse_qsl(orig, keep_blank_values=True)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, urlparse._QueryStringSeparatorWarning)
+
+ def test_default_qs_no_warnings(self):
+ for orig, expect in parse_qs_test_cases:
+ with catch_warnings(record=True) as w:
+ result = urlparse.parse_qs(orig, keep_blank_values=True)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_default_qsl_no_warnings(self):
+ for orig, expect in parse_qsl_test_cases:
+ with catch_warnings(record=True) as w:
+ result = urlparse.parse_qsl(orig, keep_blank_values=True)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
def test_roundtrips(self):
testcases = [
('file:///tmp/junk.txt',
@@ -626,6 +700,132 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
+ def test_parse_qs_separator_bytes(self):
+ expected = {b'a': [b'1'], b'b': [b'2']}
+
+ result = urlparse.parse_qs(b'a=1;b=2', separator=b';')
+ self.assertEqual(result, expected)
+ result = urlparse.parse_qs(b'a=1;b=2', separator=';')
+ self.assertEqual(result, expected)
+ result = urlparse.parse_qs('a=1;b=2', separator=';')
+ self.assertEqual(result, {'a': ['1'], 'b': ['2']})
+
+ @contextlib.contextmanager
+ def _qsl_sep_config(self, sep):
+ """Context for the given parse_qsl default separator configured in config file"""
+ old_filename = urlparse._QS_SEPARATOR_CONFIG_FILENAME
+ urlparse._default_qs_separator = None
+ try:
+ tmpdirname = tempfile.mkdtemp()
+ filename = os.path.join(tmpdirname, 'conf.cfg')
+ with open(filename, 'w') as file:
+ file.write('[parse_qs]\n')
+ file.write('PYTHON_URLLIB_QS_SEPARATOR = {}'.format(sep))
+ urlparse._QS_SEPARATOR_CONFIG_FILENAME = filename
+ yield
+ finally:
+ urlparse._QS_SEPARATOR_CONFIG_FILENAME = old_filename
+ urlparse._default_qs_separator = None
+ shutil.rmtree(tmpdirname)
+
+ def test_parse_qs_separator_semicolon(self):
+ for orig, expect in parse_qs_test_cases_semicolon:
+ result = urlparse.parse_qs(orig, separator=';')
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = ';'
+ result = urlparse.parse_qs(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+ with self._qsl_sep_config(';'), catch_warnings(record=True) as w:
+ result = urlparse.parse_qs(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_parse_qsl_separator_semicolon(self):
+ for orig, expect in parse_qsl_test_cases_semicolon:
+ result = urlparse.parse_qsl(orig, separator=';')
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = ';'
+ result = urlparse.parse_qsl(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+ with self._qsl_sep_config(';'), catch_warnings(record=True) as w:
+ result = urlparse.parse_qsl(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_parse_qs_separator_legacy(self):
+ for orig, expect in parse_qs_test_cases_legacy:
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = 'legacy'
+ result = urlparse.parse_qs(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+ with self._qsl_sep_config('legacy'), catch_warnings(record=True) as w:
+ result = urlparse.parse_qs(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_parse_qsl_separator_legacy(self):
+ for orig, expect in parse_qsl_test_cases_legacy:
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = 'legacy'
+ result = urlparse.parse_qsl(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+ with self._qsl_sep_config('legacy'), catch_warnings(record=True) as w:
+ result = urlparse.parse_qsl(orig)
+ self.assertEqual(result, expect, "Error parsing %r" % orig)
+ self.assertEqual(len(w), 0)
+
+ def test_parse_qs_separator_bad_value_env_or_config(self):
+ for bad_sep in '', 'abc', 'safe', '&;', 'SEP':
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = bad_sep
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl('a=1;b=2')
+ with self._qsl_sep_config('bad_sep'), catch_warnings(record=True) as w:
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl('a=1;b=2')
+
+ def test_parse_qs_separator_bad_value_arg(self):
+ for bad_sep in True, {}, '':
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl('a=1;b=2', separator=bad_sep)
+
+ def test_parse_qs_separator_num_fields(self):
+ for qs, sep in (
+ ('a&b&c', '&'),
+ ('a;b;c', ';'),
+ ('a&b;c', 'legacy'),
+ ):
+ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
+ if sep != 'legacy':
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl(qs, separator=sep, max_num_fields=2)
+ if sep:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = sep
+ with self.assertRaises(ValueError):
+ urlparse.parse_qsl(qs, max_num_fields=2)
+
+ def test_parse_qs_separator_priority(self):
+ # env variable trumps config file
+ with self._qsl_sep_config('~'), EnvironmentVarGuard() as environ:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = '!'
+ result = urlparse.parse_qs('a=1!b=2~c=3')
+ self.assertEqual(result, {'a': ['1'], 'b': ['2~c=3']})
+ # argument trumps config file
+ with self._qsl_sep_config('~'):
+ result = urlparse.parse_qs('a=1$b=2~c=3', separator='$')
+ self.assertEqual(result, {'a': ['1'], 'b': ['2~c=3']})
+ # argument trumps env variable
+ with EnvironmentVarGuard() as environ:
+ environ['PYTHON_URLLIB_QS_SEPARATOR'] = '~'
+ result = urlparse.parse_qs('a=1$b=2~c=3', separator='$')
+ self.assertEqual(result, {'a': ['1'], 'b': ['2~c=3']})
+
def test_urlsplit_normalization(self):
# Certain characters should never occur in the netloc,
# including under normalization.
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index 798b467b605..69504d8fd93 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -29,6 +29,7 @@ test_urlparse.py provides a good indicator of parsing behavior.
"""
import re
+import os
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
@@ -382,7 +383,8 @@ def unquote(s):
append(item)
return ''.join(res)
-def parse_qs(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
+def parse_qs(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None,
+ separator=None):
"""Parse a query given as a string argument.
Arguments:
@@ -405,14 +407,23 @@ def parse_qs(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
"""
dict = {}
for name, value in parse_qsl(qs, keep_blank_values, strict_parsing,
- max_num_fields):
+ max_num_fields, separator):
if name in dict:
dict[name].append(value)
else:
dict[name] = [value]
return dict
-def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
+class _QueryStringSeparatorWarning(RuntimeWarning):
+ """Warning for using default `separator` in parse_qs or parse_qsl"""
+
+# The default "separator" for parse_qsl can be specified in a config file.
+# It's cached after first read.
+_QS_SEPARATOR_CONFIG_FILENAME = '/etc/python/urllib.cfg'
+_default_qs_separator = None
+
+def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None,
+ separator=None):
"""Parse a query given as a string argument.
Arguments:
@@ -434,15 +445,72 @@ def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
Returns a list, as G-d intended.
"""
+
+ if (not separator or (not isinstance(separator, (str, bytes)))) and separator is not None:
+ raise ValueError("Separator must be of type string or bytes.")
+
+ # Used when both "&" and ";" act as separators. (Need a non-string value.)
+ _legacy = object()
+
+ if separator is None:
+ global _default_qs_separator
+ separator = _default_qs_separator
+ envvar_name = 'PYTHON_URLLIB_QS_SEPARATOR'
+ if separator is None:
+ # Set default separator from environment variable
+ separator = os.environ.get(envvar_name)
+ config_source = 'environment variable'
+ if separator is None:
+ # Set default separator from the configuration file
+ try:
+ file = open(_QS_SEPARATOR_CONFIG_FILENAME)
+ except EnvironmentError:
+ pass
+ else:
+ with file:
+ import ConfigParser
+ config = ConfigParser.ConfigParser()
+ config.readfp(file)
+ separator = config.get('parse_qs', envvar_name)
+ _default_qs_separator = separator
+ config_source = _QS_SEPARATOR_CONFIG_FILENAME
+ if separator is None:
+ # The default is '&', but warn if not specified explicitly
+ if ';' in qs:
+ from warnings import warn
+ warn("The default separator of urlparse.parse_qsl and "
+ + "parse_qs was changed to '&' to avoid a web cache "
+ + "poisoning issue (CVE-2021-23336). "
+ + "By default, semicolons no longer act as query field "
+ + "separators. "
+ + "See https://access.redhat.com/articles/5860431 for "
+ + "more details.",
+ _QueryStringSeparatorWarning, stacklevel=2)
+ separator = '&'
+ elif separator == 'legacy':
+ separator = _legacy
+ elif len(separator) != 1:
+ raise ValueError(
+ '{} (from {}) must contain '.format(envvar_name, config_source)
+ + '1 character, or "legacy". See '
+ + 'https://access.redhat.com/articles/5860431 for more details.'
+ )
+
# If max_num_fields is defined then check that the number of fields
# is less than max_num_fields. This prevents a memory exhaustion DOS
# attack via post bodies with many fields.
if max_num_fields is not None:
- num_fields = 1 + qs.count('&') + qs.count(';')
+ if separator is _legacy:
+ num_fields = 1 + qs.count('&') + qs.count(';')
+ else:
+ num_fields = 1 + qs.count(separator)
if max_num_fields < num_fields:
raise ValueError('Max number of fields exceeded')
- pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+ if separator is _legacy:
+ pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+ else:
+ pairs = [s1 for s1 in qs.split(separator)]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
--
2.30.2
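
A minimal sketch of the patched separator behavior, assuming the patched urlparse module is importable (illustrative, not part of the patch set):

    import urlparse

    # '&' is the only default separator after the change:
    urlparse.parse_qs('a=1&b=2')                  # {'a': ['1'], 'b': ['2']}
    # Semicolons must now be requested explicitly:
    urlparse.parse_qs('a=1;b=2', separator=';')   # {'a': ['1'], 'b': ['2']}
    # With the default separator, ';' stays literal data, and a
    # _QueryStringSeparatorWarning is emitted because the string contains ';':
    urlparse.parse_qs('a=1;b=2')                  # {'a': ['1;b=2']}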

35
00366-CVE-2021-3733.patch Normal file
View File

@ -0,0 +1,35 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Lumir Balhar <lbalhar@redhat.com>
Date: Tue, 14 Sep 2021 11:34:43 +0200
Subject: [PATCH] 00366-CVE-2021-3733.patch
00366 #
CVE-2021-3733: Fix ReDoS in urllib AbstractBasicAuthHandler
Fix Regular Expression Denial of Service (ReDoS) vulnerability in
urllib2.AbstractBasicAuthHandler. The ReDoS-vulnerable regex
has quadratic worst-case complexity and it allows cause a denial of
service when identifying crafted invalid RFCs. This ReDoS issue is on
the client side and needs remote attackers to control the HTTP server.
Backported from Python 3 together with another backward-compatible
improvement of the regex from fix for CVE-2020-8492.
Co-authored-by: Yeting Li <liyt@ios.ac.cn>
---
Lib/urllib2.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Lib/urllib2.py b/Lib/urllib2.py
index fd19e1ae943..e286583ecba 100644
--- a/Lib/urllib2.py
+++ b/Lib/urllib2.py
@@ -858,7 +858,7 @@ class AbstractBasicAuthHandler:
# allow for double- and single-quoted realm values
# (single quotes are a violation of the RFC, but appear in the wild)
- rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
+ rx = re.compile('(?:[^,]*,)*[ \t]*([^ \t,]+)[ \t]+'
'realm=(["\']?)([^"\']*)\\2', re.I)
# XXX could pre-emptively send auth info already accepted (RFC 2617,
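
For context, the fixed realm-matching regex in isolation (a sketch; the pattern is copied from the hunk above):

    import re

    rx = re.compile('(?:[^,]*,)*[ \t]*([^ \t,]+)[ \t]+'
                    'realm=(["\']?)([^"\']*)\\2', re.I)
    m = rx.search('Basic realm="example"')
    m.group(1, 3)  # ('Basic', 'example')
    # Unlike the old '(?:.*,)*' prefix, '(?:[^,]*,)*' cannot backtrack
    # quadratically over long, comma-free garbage before the match fails.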

89
00368-CVE-2021-3737.patch Normal file
View File

@ -0,0 +1,89 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Lumir Balhar <lbalhar@redhat.com>
Date: Fri, 17 Sep 2021 07:56:50 +0200
Subject: [PATCH] 00368-CVE-2021-3737.patch
00368 #
CVE-2021-3737: http client infinite line reading (DoS) after a HTTP 100 Continue
Fixes http.client potential denial of service where it could get stuck reading
lines from a malicious server after a 100 Continue response.
Backported from Python 3.
Co-authored-by: Gregory P. Smith <greg@krypto.org>
Co-authored-by: Gen Xu <xgbarry@gmail.com>
---
Lib/httplib.py | 32 +++++++++++++++++++++++---------
Lib/test/test_httplib.py | 8 ++++++++
2 files changed, 31 insertions(+), 9 deletions(-)
diff --git a/Lib/httplib.py b/Lib/httplib.py
index a63677477d5..f9a27619e62 100644
--- a/Lib/httplib.py
+++ b/Lib/httplib.py
@@ -365,6 +365,25 @@ class HTTPMessage(mimetools.Message):
# It's not a header line; skip it and try the next line.
self.status = 'Non-header line where header expected'
+
+def _read_headers(fp):
+ """Reads potential header lines into a list from a file pointer.
+ Length of line is limited by _MAXLINE, and number of
+ headers is limited by _MAXHEADERS.
+ """
+ headers = []
+ while True:
+ line = fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("header line")
+ headers.append(line)
+ if len(headers) > _MAXHEADERS:
+ raise HTTPException("got more than %d headers" % _MAXHEADERS)
+ if line in (b'\r\n', b'\n', b''):
+ break
+ return headers
+
+
class HTTPResponse:
# strict: If true, raise BadStatusLine if the status line can't be
@@ -453,15 +472,10 @@ class HTTPResponse:
if status != CONTINUE:
break
# skip the header from the 100 response
- while True:
- skip = self.fp.readline(_MAXLINE + 1)
- if len(skip) > _MAXLINE:
- raise LineTooLong("header line")
- skip = skip.strip()
- if not skip:
- break
- if self.debuglevel > 0:
- print "header:", skip
+ skipped_headers = _read_headers(self.fp)
+ if self.debuglevel > 0:
+ print("headers:", skipped_headers)
+ del skipped_headers
self.status = status
self.reason = reason.strip()
diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py
index b5fec9aa1ec..d05c0fc28d2 100644
--- a/Lib/test/test_httplib.py
+++ b/Lib/test/test_httplib.py
@@ -700,6 +700,14 @@ class BasicTest(TestCase):
resp = httplib.HTTPResponse(FakeSocket(body))
self.assertRaises(httplib.LineTooLong, resp.begin)
+ def test_overflowing_header_limit_after_100(self):
+ body = (
+ 'HTTP/1.1 100 OK\r\n'
+ 'r\n' * 32768
+ )
+ resp = httplib.HTTPResponse(FakeSocket(body))
+ self.assertRaises(httplib.HTTPException, resp.begin)
+
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
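
A sketch of the new helper on an in-memory stream (illustrative; assumes the patched httplib and its private _read_headers helper):

    import io
    import httplib

    fp = io.BytesIO(b'X-One: 1\r\nX-Two: 2\r\n\r\n')
    httplib._read_headers(fp)
    # ['X-One: 1\r\n', 'X-Two: 2\r\n', '\r\n'] -- reading stops at the blank
    # line, and more than _MAXHEADERS lines raises HTTPException instead of
    # looping forever.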

80
00372-CVE-2021-4189.patch Normal file
View File

@ -0,0 +1,80 @@
diff --git a/Lib/ftplib.py b/Lib/ftplib.py
index 6644554..0550f0a 100644
--- a/Lib/ftplib.py
+++ b/Lib/ftplib.py
@@ -108,6 +108,8 @@ class FTP:
file = None
welcome = None
passiveserver = 1
+ # Disables https://bugs.python.org/issue43285 security if set to True.
+ trust_server_pasv_ipv4_address = False
# Initialization method (called by class instantiation).
# Initialize host to localhost, port to standard ftp port
@@ -310,8 +312,13 @@ class FTP:
return sock
def makepasv(self):
+ """Internal: Does the PASV or EPSV handshake -> (address, port)"""
if self.af == socket.AF_INET:
- host, port = parse227(self.sendcmd('PASV'))
+ untrusted_host, port = parse227(self.sendcmd('PASV'))
+ if self.trust_server_pasv_ipv4_address:
+ host = untrusted_host
+ else:
+ host = self.sock.getpeername()[0]
else:
host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
return host, port
diff --git a/Lib/test/test_ftplib.py b/Lib/test/test_ftplib.py
index 8a3eb06..62a3f5e 100644
--- a/Lib/test/test_ftplib.py
+++ b/Lib/test/test_ftplib.py
@@ -67,6 +67,10 @@ class DummyFTPHandler(asynchat.async_chat):
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
+ # We use this as the string IPv4 address to direct the client
+ # to in response to a PASV command. To test security behavior.
+ # https://bugs.python.org/issue43285/.
+ self.fake_pasv_server_ip = '252.253.254.255'
def collect_incoming_data(self, data):
self.in_buffer.append(data)
@@ -109,7 +113,8 @@ class DummyFTPHandler(asynchat.async_chat):
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
- ip, port = sock.getsockname()[:2]
+ port = sock.getsockname()[1]
+ ip = self.fake_pasv_server_ip
ip = ip.replace('.', ',')
p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
@@ -577,6 +582,26 @@ class TestFTPClass(TestCase):
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
+ def test_makepasv_issue43285_security_disabled(self):
+ """Test the opt-in to the old vulnerable behavior."""
+ self.client.trust_server_pasv_ipv4_address = True
+ bad_host, port = self.client.makepasv()
+ self.assertEqual(
+ bad_host, self.server.handler_instance.fake_pasv_server_ip)
+ # Opening and closing a connection keeps the dummy server happy
+ # instead of timing out on accept.
+ socket.create_connection((self.client.sock.getpeername()[0], port),
+ timeout=TIMEOUT).close()
+
+ def test_makepasv_issue43285_security_enabled_default(self):
+ self.assertFalse(self.client.trust_server_pasv_ipv4_address)
+ trusted_host, port = self.client.makepasv()
+ self.assertNotEqual(
+ trusted_host, self.server.handler_instance.fake_pasv_server_ip)
+ # Opening and closing a connection keeps the dummy server happy
+ # instead of timing out on accept.
+ socket.create_connection((trusted_host, port), timeout=TIMEOUT).close()
+
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
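
The new attribute is strictly opt-in; a hedged sketch against a hypothetical server:

    import ftplib

    ftp = ftplib.FTP('ftp.example.com')  # hypothetical host
    # Default (secure): the IPv4 address in the PASV reply is ignored and the
    # address of the control connection is reused instead.
    host, port = ftp.makepasv()
    # Explicit opt-in to the old, vulnerable behavior:
    ftp.trust_server_pasv_ipv4_address = True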

127
00377-CVE-2022-0391.patch Normal file
View File

@ -0,0 +1,127 @@
diff --git a/Doc/library/urlparse.rst b/Doc/library/urlparse.rst
index 97d1119257c..c08c3dc8e8f 100644
--- a/Doc/library/urlparse.rst
+++ b/Doc/library/urlparse.rst
@@ -125,6 +125,9 @@ The :mod:`urlparse` module defines the following functions:
decomposed before parsing, or is not a Unicode string, no error will be
raised.
+ Following the `WHATWG spec`_ that updates RFC 3986, ASCII newline
+ ``\n``, ``\r`` and tab ``\t`` characters are stripped from the URL.
+
.. versionchanged:: 2.5
Added attributes to return value.
@@ -321,6 +324,10 @@ The :mod:`urlparse` module defines the following functions:
.. seealso::
+ `WHATWG`_ - URL Living standard
+ Working Group for the URL Standard that defines URLs, domains, IP addresses, the
+ application/x-www-form-urlencoded format, and their API.
+
:rfc:`3986` - Uniform Resource Identifiers
This is the current standard (STD66). Any changes to urlparse module
should conform to this. Certain deviations could be observed, which are
@@ -345,6 +352,7 @@ The :mod:`urlparse` module defines the following functions:
:rfc:`1738` - Uniform Resource Locators (URL)
This specifies the formal syntax and semantics of absolute URLs.
+.. _WHATWG: https://url.spec.whatwg.org/
.. _urlparse-result-object:
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 21875bb2991..16eefed56f6 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -618,6 +618,55 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(p1.path, '863-1234')
self.assertEqual(p1.params, 'phone-context=+1-914-555')
+ def test_urlsplit_remove_unsafe_bytes(self):
+ # Remove ASCII tabs and newlines from input, for http common case scenario.
+ url = "h\nttp://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, "http")
+ self.assertEqual(p.netloc, "www.python.org")
+ self.assertEqual(p.path, "/javascript:alert('msg')/")
+ self.assertEqual(p.query, "query=something")
+ self.assertEqual(p.fragment, "fragment")
+ self.assertEqual(p.username, None)
+ self.assertEqual(p.password, None)
+ self.assertEqual(p.hostname, "www.python.org")
+ self.assertEqual(p.port, None)
+ self.assertEqual(p.geturl(), "http://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+ # Remove ASCII tabs and newlines from input as bytes, for http common case scenario.
+ url = b"h\nttp://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, b"http")
+ self.assertEqual(p.netloc, b"www.python.org")
+ self.assertEqual(p.path, b"/javascript:alert('msg')/")
+ self.assertEqual(p.query, b"query=something")
+ self.assertEqual(p.fragment, b"fragment")
+ self.assertEqual(p.username, None)
+ self.assertEqual(p.password, None)
+ self.assertEqual(p.hostname, b"www.python.org")
+ self.assertEqual(p.port, None)
+ self.assertEqual(p.geturl(), b"http://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+ # any scheme
+ url = "x-new-scheme\t://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.geturl(), "x-new-scheme://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+ # Remove ASCII tabs and newlines from input as bytes, any scheme.
+ url = b"x-new-scheme\t://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.geturl(), b"x-new-scheme://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+ # Unsafe bytes is not returned from urlparse cache.
+ # scheme is stored after parsing, sending an scheme with unsafe bytes *will not* return an unsafe scheme
+ url = "https://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+ scheme = "htt\nps"
+ for _ in range(2):
+ p = urlparse.urlsplit(url, scheme=scheme)
+ self.assertEqual(p.scheme, "https")
+ self.assertEqual(p.geturl(), "https://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+
def test_attributes_bad_port(self):
"""Check handling of non-integer ports."""
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index 69504d8fd93..6cc40a8d2fb 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -63,6 +63,9 @@ scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'0123456789'
'+-.')
+# Unsafe bytes to be removed per WHATWG spec
+_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n']
+
MAX_CACHE_SIZE = 20
_parse_cache = {}
@@ -185,12 +188,19 @@ def _checknetloc(netloc):
"under NFKC normalization"
% netloc)
+def _remove_unsafe_bytes_from_url(url):
+ for b in _UNSAFE_URL_BYTES_TO_REMOVE:
+ url = url.replace(b, "")
+ return url
+
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
+ url = _remove_unsafe_bytes_from_url(url)
+ scheme = _remove_unsafe_bytes_from_url(scheme)
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
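
A quick sketch of the stripping (illustrative, assuming the patched urlparse):

    import urlparse

    # ASCII tabs and newlines anywhere in the URL are removed before parsing:
    p = urlparse.urlsplit("h\nttp://www.python\n.org\t/pa\tth")
    (p.scheme, p.netloc, p.path)  # ('http', 'www.python.org', '/path')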

94
00378-support-expat-2-4-5.patch Normal file
View File

@ -0,0 +1,94 @@
From 35f5707b555d3bca92858de16760918e76463a1e Mon Sep 17 00:00:00 2001
From: Sebastian Pipping <sebastian@pipping.org>
Date: Mon, 21 Feb 2022 15:48:32 +0100
Subject: [PATCH] 00378-support-expat-2-4-5.patch
00378 #
Support expat 2.4.5
Curly brackets were never allowed in namespace URIs
according to RFC 3986, and so-called namespace-validating
XML parsers have the right to reject them as invalid URIs.
libexpat >=2.4.5 has become stricter in that regard due to
related security issues; with ET.XML instantiating a
namespace-aware parser under the hood, this test has no
future in CPython.
References:
- https://datatracker.ietf.org/doc/html/rfc3986
- https://www.w3.org/TR/xml-names/
Also, test_minidom.py: Support Expat >=2.4.5
Upstream: https://bugs.python.org/issue46811
Backported from Python 3.
Co-authored-by: Sebastian Pipping <sebastian@pipping.org>
---
Lib/test/test_minidom.py | 8 ++++++--
Lib/test/test_xml_etree.py | 6 ------
.../next/Library/2022-02-20-21-03-31.bpo-46811.8BxgdQ.rst | 1 +
3 files changed, 7 insertions(+), 8 deletions(-)
create mode 100644 Misc/NEWS.d/next/Library/2022-02-20-21-03-31.bpo-46811.8BxgdQ.rst
diff --git a/Lib/test/test_minidom.py b/Lib/test/test_minidom.py
index 2eb6423..2c9a7a3 100644
--- a/Lib/test/test_minidom.py
+++ b/Lib/test/test_minidom.py
@@ -6,12 +6,14 @@ from StringIO import StringIO
from test import support
import unittest
+import pyexpat
import xml.dom
import xml.dom.minidom
import xml.parsers.expat
from xml.dom.minidom import parse, Node, Document, parseString
from xml.dom.minidom import getDOMImplementation
+from xml.parsers.expat import ExpatError
tstfile = support.findfile("test.xml", subdir="xmltestdata")
@@ -1051,8 +1053,10 @@ class MinidomTest(unittest.TestCase):
# Verify that character decoding errors raise exceptions instead
# of crashing
- self.assertRaises(UnicodeDecodeError, parseString,
- '<fran\xe7ais>Comment \xe7a va ? Tr\xe8s bien ?</fran\xe7ais>')
+ self.assertRaises(ExpatError, parseString,
+ '<fran\xe7ais></fran\xe7ais>')
+ self.assertRaises(ExpatError, parseString,
+ '<franais>Comment \xe7a va ? Tr\xe8s bien ?</franais>')
doc.unlink()
diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py
index c75d55f..0855bc0 100644
--- a/Lib/test/test_xml_etree.py
+++ b/Lib/test/test_xml_etree.py
@@ -1482,12 +1482,6 @@ class BugsTest(unittest.TestCase):
b"<?xml version='1.0' encoding='ascii'?>\n"
b'<body>t&#227;g</body>')
- def test_issue3151(self):
- e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
- self.assertEqual(e.tag, '{${stuff}}localname')
- t = ET.ElementTree(e)
- self.assertEqual(ET.tostring(e), b'<ns0:localname xmlns:ns0="${stuff}" />')
-
def test_issue6565(self):
elem = ET.XML("<body><tag/></body>")
self.assertEqual(summarize_list(elem), ['tag'])
diff --git a/Misc/NEWS.d/next/Library/2022-02-20-21-03-31.bpo-46811.8BxgdQ.rst b/Misc/NEWS.d/next/Library/2022-02-20-21-03-31.bpo-46811.8BxgdQ.rst
new file mode 100644
index 0000000..6969bd1
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2022-02-20-21-03-31.bpo-46811.8BxgdQ.rst
@@ -0,0 +1 @@
+Make test suite support Expat >=2.4.5
--
2.35.1
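
With Expat >= 2.4.5 the adjusted minidom expectation can be observed directly (a sketch; the exact exception depends on the installed Expat version):

    from xml.dom.minidom import parseString
    from xml.parsers.expat import ExpatError

    try:
        parseString('<fran\xe7ais></fran\xe7ais>')  # stray latin-1 byte
    except ExpatError as e:
        print(e)  # rejected by the parser instead of raising UnicodeDecodeError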

440
00382-cve-2015-20107.patch Normal file
View File

@ -0,0 +1,440 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Petr Viktorin <encukou@gmail.com>
Date: Fri, 3 Jun 2022 11:43:35 +0200
Subject: [PATCH] 00382-cve-2015-20107.patch
00382 #
Make mailcap refuse to match unsafe filenames/types/params (GH-91993)
Upstream: https://github.com/python/cpython/issues/68966
Tracker bug: https://bugzilla.redhat.com/show_bug.cgi?id=2075390
Backported from python3.
---
Doc/library/mailcap.rst | 12 +
Lib/mailcap.py | 29 +-
Lib/test/mailcap.txt | 39 +++
Lib/test/test_mailcap.py | 259 ++++++++++++++++++
...2-04-27-18-25-30.gh-issue-68966.gjS8zs.rst | 4 +
5 files changed, 341 insertions(+), 2 deletions(-)
create mode 100644 Lib/test/mailcap.txt
create mode 100644 Lib/test/test_mailcap.py
create mode 100644 Misc/NEWS.d/next/Security/2022-04-27-18-25-30.gh-issue-68966.gjS8zs.rst
diff --git a/Doc/library/mailcap.rst b/Doc/library/mailcap.rst
index 750d085796f..5f75ee6086e 100644
--- a/Doc/library/mailcap.rst
+++ b/Doc/library/mailcap.rst
@@ -54,6 +54,18 @@ standard. However, mailcap files are supported on most Unix systems.
use) to determine whether or not the mailcap line applies. :func:`findmatch`
will automatically check such conditions and skip the entry if the check fails.
+ .. versionchanged:: 3.11
+
+ To prevent security issues with shell metacharacters (symbols that have
+ special effects in a shell command line), ``findmatch`` will refuse
+ to inject ASCII characters other than alphanumerics and ``@+=:,./-_``
+ into the returned command line.
+
+ If a disallowed character appears in *filename*, ``findmatch`` will always
+ return ``(None, None)`` as if no entry was found.
+ If such a character appears elsewhere (a value in *plist* or in *MIMEtype*),
+ ``findmatch`` will ignore all mailcap entries which use that value.
+ A :mod:`warning <warnings>` will be raised in either case.
.. function:: getcaps()
diff --git a/Lib/mailcap.py b/Lib/mailcap.py
index 04077ba0db2..1108b447b1d 100644
--- a/Lib/mailcap.py
+++ b/Lib/mailcap.py
@@ -1,9 +1,18 @@
"""Mailcap file handling. See RFC 1524."""
import os
+import warnings
+import re
__all__ = ["getcaps","findmatch"]
+
+_find_unsafe = re.compile(r'[^\xa1-\xff\w@+=:,./-]').search
+
+class UnsafeMailcapInput(Warning):
+ """Warning raised when refusing unsafe input"""
+
+
# Part 1: top-level interface.
def getcaps():
@@ -144,15 +153,22 @@ def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
entry to use.
"""
+ if _find_unsafe(filename):
+ msg = "Refusing to use mailcap with filename %r. Use a safe temporary filename." % (filename,)
+ warnings.warn(msg, UnsafeMailcapInput)
+ return None, None
entries = lookup(caps, MIMEtype, key)
# XXX This code should somehow check for the needsterminal flag.
for e in entries:
if 'test' in e:
test = subst(e['test'], filename, plist)
+ if test is None:
+ continue
if test and os.system(test) != 0:
continue
command = subst(e[key], MIMEtype, filename, plist)
- return command, e
+ if command is not None:
+ return command, e
return None, None
def lookup(caps, MIMEtype, key=None):
@@ -184,6 +200,10 @@ def subst(field, MIMEtype, filename, plist=[]):
elif c == 's':
res = res + filename
elif c == 't':
+ if _find_unsafe(MIMEtype):
+ msg = "Refusing to substitute MIME type %r into a shell command." % (MIMEtype,)
+ warnings.warn(msg, UnsafeMailcapInput)
+ return None
res = res + MIMEtype
elif c == '{':
start = i
@@ -191,7 +211,12 @@ def subst(field, MIMEtype, filename, plist=[]):
i = i+1
name = field[start:i]
i = i+1
- res = res + findparam(name, plist)
+ param = findparam(name, plist)
+ if _find_unsafe(param):
+ msg = "Refusing to substitute parameter %r (%s) into a shell command" % (param, name)
+ warnings.warn(msg, UnsafeMailcapInput)
+ return None
+ res = res + param
# XXX To do:
# %n == number of parts if type is multipart/*
# %F == list of alternating type and filename for parts
diff --git a/Lib/test/mailcap.txt b/Lib/test/mailcap.txt
new file mode 100644
index 00000000000..08a76e65941
--- /dev/null
+++ b/Lib/test/mailcap.txt
@@ -0,0 +1,39 @@
+# Mailcap file for test_mailcap; based on RFC 1524
+# Referred to by test_mailcap.py
+
+#
+# This is a comment.
+#
+
+application/frame; showframe %s; print="cat %s | lp"
+application/postscript; ps-to-terminal %s;\
+ needsterminal
+application/postscript; ps-to-terminal %s; \
+ compose=idraw %s
+application/x-dvi; xdvi %s
+application/x-movie; movieplayer %s; compose=moviemaker %s; \
+ description="Movie"; \
+ x11-bitmap="/usr/lib/Zmail/bitmaps/movie.xbm"
+application/*; echo "This is \"%t\" but \
+ is 50 \% Greek to me" \; cat %s; copiousoutput
+
+audio/basic; showaudio %s; compose=audiocompose %s; edit=audiocompose %s;\
+description="An audio fragment"
+audio/* ; /usr/local/bin/showaudio %t
+
+image/rgb; display %s
+#image/gif; display %s
+image/x-xwindowdump; display %s
+
+# The continuation char shouldn't \
+# make a difference in a comment.
+
+message/external-body; showexternal %s %{access-type} %{name} %{site} \
+ %{directory} %{mode} %{server}; needsterminal; composetyped = extcompose %s; \
+ description="A reference to data stored in an external location"
+
+text/richtext; shownonascii iso-8859-8 -e richtext -p %s; test=test "`echo \
+ %{charset} | tr '[A-Z]' '[a-z]'`" = iso-8859-8; copiousoutput
+
+video/*; animate %s
+video/mpeg; mpeg_play %s
\ No newline at end of file
diff --git a/Lib/test/test_mailcap.py b/Lib/test/test_mailcap.py
new file mode 100644
index 00000000000..35da7fb0741
--- /dev/null
+++ b/Lib/test/test_mailcap.py
@@ -0,0 +1,259 @@
+import copy
+import os
+import sys
+import test.support
+import unittest
+from test import support as os_helper
+from test import support as warnings_helper
+from collections import OrderedDict
+
+import mailcap
+
+
+# Location of mailcap file
+MAILCAPFILE = test.support.findfile("mailcap.txt")
+
+# Dict to act as mock mailcap entry for this test
+# The keys and values should match the contents of MAILCAPFILE
+
+MAILCAPDICT = {
+ 'application/x-movie':
+ [{'compose': 'moviemaker %s',
+ 'x11-bitmap': '"/usr/lib/Zmail/bitmaps/movie.xbm"',
+ 'description': '"Movie"',
+ 'view': 'movieplayer %s',
+ 'lineno': 4}],
+ 'application/*':
+ [{'copiousoutput': '',
+ 'view': 'echo "This is \\"%t\\" but is 50 \\% Greek to me" \\; cat %s',
+ 'lineno': 5}],
+ 'audio/basic':
+ [{'edit': 'audiocompose %s',
+ 'compose': 'audiocompose %s',
+ 'description': '"An audio fragment"',
+ 'view': 'showaudio %s',
+ 'lineno': 6}],
+ 'video/mpeg':
+ [{'view': 'mpeg_play %s', 'lineno': 13}],
+ 'application/postscript':
+ [{'needsterminal': '', 'view': 'ps-to-terminal %s', 'lineno': 1},
+ {'compose': 'idraw %s', 'view': 'ps-to-terminal %s', 'lineno': 2}],
+ 'application/x-dvi':
+ [{'view': 'xdvi %s', 'lineno': 3}],
+ 'message/external-body':
+ [{'composetyped': 'extcompose %s',
+ 'description': '"A reference to data stored in an external location"',
+ 'needsterminal': '',
+ 'view': 'showexternal %s %{access-type} %{name} %{site} %{directory} %{mode} %{server}',
+ 'lineno': 10}],
+ 'text/richtext':
+ [{'test': 'test "`echo %{charset} | tr \'[A-Z]\' \'[a-z]\'`" = iso-8859-8',
+ 'copiousoutput': '',
+ 'view': 'shownonascii iso-8859-8 -e richtext -p %s',
+ 'lineno': 11}],
+ 'image/x-xwindowdump':
+ [{'view': 'display %s', 'lineno': 9}],
+ 'audio/*':
+ [{'view': '/usr/local/bin/showaudio %t', 'lineno': 7}],
+ 'video/*':
+ [{'view': 'animate %s', 'lineno': 12}],
+ 'application/frame':
+ [{'print': '"cat %s | lp"', 'view': 'showframe %s', 'lineno': 0}],
+ 'image/rgb':
+ [{'view': 'display %s', 'lineno': 8}]
+}
+
+# In Python 2, mailcap doesn't return line numbers.
+# This test suite is copied from Python 3.11; for easier backporting we keep
+# data from there and remove the lineno.
+# So, for Python 2, MAILCAPDICT_DEPRECATED is the same as MAILCAPDICT
+MAILCAPDICT_DEPRECATED = MAILCAPDICT
+for entry_list in MAILCAPDICT_DEPRECATED.values():
+ for entry in entry_list:
+ entry.pop('lineno')
+
+
+class HelperFunctionTest(unittest.TestCase):
+
+ def test_listmailcapfiles(self):
+ # The return value for listmailcapfiles() will vary by system.
+ # So verify that listmailcapfiles() returns a list of strings that is of
+ # non-zero length.
+ mcfiles = mailcap.listmailcapfiles()
+ self.assertIsInstance(mcfiles, list)
+ for m in mcfiles:
+ self.assertIsInstance(m, str)
+ with os_helper.EnvironmentVarGuard() as env:
+ # According to RFC 1524, if MAILCAPS env variable exists, use that
+ # and only that.
+ if "MAILCAPS" in env:
+ env_mailcaps = env["MAILCAPS"].split(os.pathsep)
+ else:
+ env_mailcaps = ["/testdir1/.mailcap", "/testdir2/mailcap"]
+ env["MAILCAPS"] = os.pathsep.join(env_mailcaps)
+ mcfiles = mailcap.listmailcapfiles()
+ self.assertEqual(env_mailcaps, mcfiles)
+
+ def test_readmailcapfile(self):
+ # Test readmailcapfile() using test file. It should match MAILCAPDICT.
+ with open(MAILCAPFILE, 'r') as mcf:
+ d = mailcap.readmailcapfile(mcf)
+ self.assertDictEqual(d, MAILCAPDICT_DEPRECATED)
+
+ def test_lookup(self):
+ # Test without key
+
+ # In Python 2, 'video/mpeg' is tried before 'video/*'
+ # (unfixed bug: https://github.com/python/cpython/issues/59182 )
+ # So, these are in reverse order:
+ expected = [{'view': 'mpeg_play %s', },
+ {'view': 'animate %s', }]
+ actual = mailcap.lookup(MAILCAPDICT, 'video/mpeg')
+ self.assertListEqual(expected, actual)
+
+ # Test with key
+ key = 'compose'
+ expected = [{'edit': 'audiocompose %s',
+ 'compose': 'audiocompose %s',
+ 'description': '"An audio fragment"',
+ 'view': 'showaudio %s',
+ }]
+ actual = mailcap.lookup(MAILCAPDICT, 'audio/basic', key)
+ self.assertListEqual(expected, actual)
+
+ # Test on user-defined dicts without line numbers
+ expected = [{'view': 'mpeg_play %s'}, {'view': 'animate %s'}]
+ actual = mailcap.lookup(MAILCAPDICT_DEPRECATED, 'video/mpeg')
+ self.assertListEqual(expected, actual)
+
+ def test_subst(self):
+ plist = ['id=1', 'number=2', 'total=3']
+ # test case: ([field, MIMEtype, filename, plist=[]], <expected string>)
+ test_cases = [
+ (["", "audio/*", "foo.txt"], ""),
+ (["echo foo", "audio/*", "foo.txt"], "echo foo"),
+ (["echo %s", "audio/*", "foo.txt"], "echo foo.txt"),
+ (["echo %t", "audio/*", "foo.txt"], None),
+ (["echo %t", "audio/wav", "foo.txt"], "echo audio/wav"),
+ (["echo \\%t", "audio/*", "foo.txt"], "echo %t"),
+ (["echo foo", "audio/*", "foo.txt", plist], "echo foo"),
+ (["echo %{total}", "audio/*", "foo.txt", plist], "echo 3")
+ ]
+ for tc in test_cases:
+ self.assertEqual(mailcap.subst(*tc[0]), tc[1])
+
+class GetcapsTest(unittest.TestCase):
+
+ def test_mock_getcaps(self):
+ # Test mailcap.getcaps() using mock mailcap file in this dir.
+ # Temporarily override any existing system mailcap file by pointing the
+ # MAILCAPS environment variable to our mock file.
+ with os_helper.EnvironmentVarGuard() as env:
+ env["MAILCAPS"] = MAILCAPFILE
+ caps = mailcap.getcaps()
+ self.assertDictEqual(caps, MAILCAPDICT)
+
+ def test_system_mailcap(self):
+ # Test mailcap.getcaps() with mailcap file(s) on system, if any.
+ caps = mailcap.getcaps()
+ self.assertIsInstance(caps, dict)
+ mailcapfiles = mailcap.listmailcapfiles()
+ existingmcfiles = [mcf for mcf in mailcapfiles if os.path.exists(mcf)]
+ if existingmcfiles:
+ # At least 1 mailcap file exists, so test that.
+ for (k, v) in caps.items():
+ self.assertIsInstance(k, str)
+ self.assertIsInstance(v, list)
+ for e in v:
+ self.assertIsInstance(e, dict)
+ else:
+ # No mailcap files on system. getcaps() should return empty dict.
+ self.assertEqual({}, caps)
+
+
+class FindmatchTest(unittest.TestCase):
+
+ def test_findmatch(self):
+
+ # default findmatch arguments
+ c = MAILCAPDICT
+ fname = "foo.txt"
+ plist = ["access-type=default", "name=john", "site=python.org",
+ "directory=/tmp", "mode=foo", "server=bar"]
+ audio_basic_entry = {
+ 'edit': 'audiocompose %s',
+ 'compose': 'audiocompose %s',
+ 'description': '"An audio fragment"',
+ 'view': 'showaudio %s',
+ }
+ audio_entry = {"view": "/usr/local/bin/showaudio %t", }
+ video_entry = {'view': 'animate %s', }
+ mpeg_entry = {'view': 'mpeg_play %s', }
+ message_entry = {
+ 'composetyped': 'extcompose %s',
+ 'description': '"A reference to data stored in an external location"', 'needsterminal': '',
+ 'view': 'showexternal %s %{access-type} %{name} %{site} %{directory} %{mode} %{server}',
+ }
+
+ # test case: (findmatch args, findmatch keyword args, expected output)
+ # positional args: caps, MIMEtype
+ # keyword args: key="view", filename="/dev/null", plist=[]
+ # output: (command line, mailcap entry)
+ cases = [
+ ([{}, "video/mpeg"], {}, (None, None)),
+ ([c, "foo/bar"], {}, (None, None)),
+
+ # In Python 2, 'video/mpeg' is tried before 'video/*'
+ # (unfixed bug: https://github.com/python/cpython/issues/59182 )
+ #([c, "video/mpeg"], {}, ('animate /dev/null', video_entry)),
+ ([c, "video/mpeg"], {}, ('mpeg_play /dev/null', mpeg_entry)),
+
+ ([c, "audio/basic", "edit"], {}, ("audiocompose /dev/null", audio_basic_entry)),
+ ([c, "audio/basic", "compose"], {}, ("audiocompose /dev/null", audio_basic_entry)),
+ ([c, "audio/basic", "description"], {}, ('"An audio fragment"', audio_basic_entry)),
+ ([c, "audio/basic", "foobar"], {}, (None, None)),
+ ([c, "video/*"], {"filename": fname}, ("animate %s" % fname, video_entry)),
+ ([c, "audio/basic", "compose"],
+ {"filename": fname},
+ ("audiocompose %s" % fname, audio_basic_entry)),
+ ([c, "audio/basic"],
+ {"key": "description", "filename": fname},
+ ('"An audio fragment"', audio_basic_entry)),
+ ([c, "audio/*"],
+ {"filename": fname},
+ (None, None)),
+ ([c, "audio/wav"],
+ {"filename": fname},
+ ("/usr/local/bin/showaudio audio/wav", audio_entry)),
+ ([c, "message/external-body"],
+ {"plist": plist},
+ ("showexternal /dev/null default john python.org /tmp foo bar", message_entry))
+ ]
+ self._run_cases(cases)
+
+ @unittest.skipUnless(os.name == "posix", "Requires 'test' command on system")
+ @unittest.skipIf(sys.platform == "vxworks", "'test' command is not supported on VxWorks")
+ def test_test(self):
+ # findmatch() will automatically check any "test" conditions and skip
+ # the entry if the check fails.
+ caps = {"test/pass": [{"test": "test 1 -eq 1"}],
+ "test/fail": [{"test": "test 1 -eq 0"}]}
+ # test case: (findmatch args, findmatch keyword args, expected output)
+ # positional args: caps, MIMEtype, key ("test")
+ # keyword args: N/A
+ # output: (command line, mailcap entry)
+ cases = [
+ # findmatch will return the mailcap entry for test/pass because it evaluates to true
+ ([caps, "test/pass", "test"], {}, ("test 1 -eq 1", {"test": "test 1 -eq 1"})),
+ # findmatch will return None because test/fail evaluates to false
+ ([caps, "test/fail", "test"], {}, (None, None))
+ ]
+ self._run_cases(cases)
+
+ def _run_cases(self, cases):
+ for c in cases:
+ self.assertEqual(mailcap.findmatch(*c[0], **c[1]), c[2])
+
+
+def test_main():
+ test.support.run_unittest(HelperFunctionTest, GetcapsTest, FindmatchTest)
diff --git a/Misc/NEWS.d/next/Security/2022-04-27-18-25-30.gh-issue-68966.gjS8zs.rst b/Misc/NEWS.d/next/Security/2022-04-27-18-25-30.gh-issue-68966.gjS8zs.rst
new file mode 100644
index 00000000000..da81a1f6993
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2022-04-27-18-25-30.gh-issue-68966.gjS8zs.rst
@@ -0,0 +1,4 @@
+The deprecated mailcap module now refuses to inject unsafe text (filenames,
+MIME types, parameters) into shell commands. Instead of using such text, it
+will warn and act as if a match was not found (or for test commands, as if
+the test failed).
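
A sketch of the refusal behavior (illustrative; assumes the patched mailcap and a hand-built caps dict):

    import mailcap
    import warnings

    caps = {'video/mpeg': [{'view': 'mpeg_play %s'}]}
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cmd, entry = mailcap.findmatch(caps, 'video/mpeg',
                                       filename='foo;rm -rf ~')
    (cmd, entry)  # (None, None), with an UnsafeMailcapInput warning recorded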

118
00394-cve-2022-45061-cpu-denial-of-service-via-inefficient-idna-decoder.patch Normal file
View File

@ -0,0 +1,118 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: "Miss Islington (bot)"
<31488909+miss-islington@users.noreply.github.com>
Date: Mon, 7 Nov 2022 19:22:14 -0800
Subject: [PATCH]
00394-cve-2022-45061-cpu-denial-of-service-via-inefficient-idna-decoder.patch
00394 #
gh-98433: Fix quadratic time idna decoding.
There was an unnecessary quadratic loop in idna decoding. This restores
the behavior to linear.
Backported from python3.
(cherry picked from commit a6f6c3a3d6f2b580f2d87885c9b8a9350ad7bf15)
Co-authored-by: Miss Islington (bot) <31488909+miss-islington@users.noreply.github.com>
Co-authored-by: Gregory P. Smith <greg@krypto.org>
---
Lib/encodings/idna.py | 32 +++++++++----------
Lib/test/test_codecs.py | 6 ++++
...2-11-04-09-29-36.gh-issue-98433.l76c5G.rst | 6 ++++
3 files changed, 27 insertions(+), 17 deletions(-)
create mode 100644 Misc/NEWS.d/next/Security/2022-11-04-09-29-36.gh-issue-98433.l76c5G.rst
diff --git a/Lib/encodings/idna.py b/Lib/encodings/idna.py
index ea90d67142f..2ce798cf47e 100644
--- a/Lib/encodings/idna.py
+++ b/Lib/encodings/idna.py
@@ -39,23 +39,21 @@ def nameprep(label):
# Check bidi
RandAL = map(stringprep.in_table_d1, label)
- for c in RandAL:
- if c:
- # There is a RandAL char in the string. Must perform further
- # tests:
- # 1) The characters in section 5.8 MUST be prohibited.
- # This is table C.8, which was already checked
- # 2) If a string contains any RandALCat character, the string
- # MUST NOT contain any LCat character.
- if filter(stringprep.in_table_d2, label):
- raise UnicodeError("Violation of BIDI requirement 2")
-
- # 3) If a string contains any RandALCat character, a
- # RandALCat character MUST be the first character of the
- # string, and a RandALCat character MUST be the last
- # character of the string.
- if not RandAL[0] or not RandAL[-1]:
- raise UnicodeError("Violation of BIDI requirement 3")
+ if any(RandAL):
+ # There is a RandAL char in the string. Must perform further
+ # tests:
+ # 1) The characters in section 5.8 MUST be prohibited.
+ # This is table C.8, which was already checked
+ # 2) If a string contains any RandALCat character, the string
+ # MUST NOT contain any LCat character.
+ if any(stringprep.in_table_d2(x) for x in label):
+ raise UnicodeError("Violation of BIDI requirement 2")
+ # 3) If a string contains any RandALCat character, a
+ # RandALCat character MUST be the first character of the
+ # string, and a RandALCat character MUST be the last
+ # character of the string.
+ if not RandAL[0] or not RandAL[-1]:
+ raise UnicodeError("Violation of BIDI requirement 3")
return label
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index 0ec8bf5a4b4..76428e1794a 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -1318,6 +1318,12 @@ class IDNACodecTest(unittest.TestCase):
self.assertEqual(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
self.assertEqual(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
+ def test_builtin_decode_length_limit(self):
+ with self.assertRaisesRegexp(UnicodeError, "too long"):
+ (b"xn--016c"+b"a"*1100).decode("idna")
+ with self.assertRaisesRegexp(UnicodeError, "too long"):
+ (b"xn--016c"+b"a"*70).decode("idna")
+
def test_stream(self):
import StringIO
r = codecs.getreader("idna")(StringIO.StringIO("abc"))
diff --git a/Misc/NEWS.d/next/Security/2022-11-04-09-29-36.gh-issue-98433.l76c5G.rst b/Misc/NEWS.d/next/Security/2022-11-04-09-29-36.gh-issue-98433.l76c5G.rst
new file mode 100644
index 00000000000..5185fac2e29
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2022-11-04-09-29-36.gh-issue-98433.l76c5G.rst
@@ -0,0 +1,6 @@
+The IDNA codec decoder used on DNS hostnames by :mod:`socket` or :mod:`asyncio`
+related name resolution functions no longer involves a quadratic algorithm.
+This prevents a potential CPU denial of service if an out-of-spec excessive
+length hostname involving bidirectional characters were decoded. Some protocols
+such as :mod:`urllib` http ``3xx`` redirects potentially allow for an attacker
+to supply such a name.
diff -urNp a/Lib/encodings/idna.py b/Lib/encodings/idna.py
--- a/Lib/encodings/idna.py 2023-02-16 08:58:06.884171667 +0100
+++ b/Lib/encodings/idna.py 2023-02-16 08:59:31.931296399 +0100
@@ -101,6 +101,16 @@ def ToASCII(label):
raise UnicodeError("label empty or too long")
def ToUnicode(label):
+ if len(label) > 1024:
+ # Protection from https://github.com/python/cpython/issues/98433.
+ # https://datatracker.ietf.org/doc/html/rfc5894#section-6
+ # doesn't specify a label size limit prior to NAMEPREP. But having
+ # one makes practical sense.
+ # This leaves ample room for nameprep() to remove Nothing characters
+ # per https://www.rfc-editor.org/rfc/rfc3454#section-3.1 while still
+ # preventing us from wasting time decoding a big thing that'll just
+ # hit the actual <= 63 length limit in Step 6.
+ raise UnicodeError("label way too long")
# Step 1: Check for ASCII
if isinstance(label, str):
pure_ascii = True
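
The length cap is easy to exercise (illustrative sketch):

    # An over-long punycode label is now rejected before nameprep runs:
    try:
        (b"xn--016c" + b"a" * 1100).decode("idna")
    except UnicodeError as e:
        print(e)  # label way too long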

127
00399-cve-2023-24329.patch Normal file
View File

@ -0,0 +1,127 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Lumir Balhar <lbalhar@redhat.com>
Date: Thu, 25 May 2023 10:03:57 +0200
Subject: [PATCH] 00399-cve-2023-24329.patch
00399 #
* gh-102153: Start stripping C0 control and space chars in `urlsplit` (GH-102508)
`urllib.parse.urlsplit` has already been respecting the WHATWG spec a bit (GH-25595).
This adds more sanitizing to respect the "Remove any leading C0 control or space from input" [rule](https://url.spec.whatwg.org/#url-parsing:~:text=Remove%20any%20leading%20and%20trailing%20C0%20control%20or%20space%20from%20input.) in response to [CVE-2023-24329](https://nvd.nist.gov/vuln/detail/CVE-2023-24329).
Backported to Python 2 from Python 3.12.
Co-authored-by: Illia Volochii <illia.volochii@gmail.com>
Co-authored-by: Gregory P. Smith [Google] <greg@krypto.org>
Co-authored-by: Lumir Balhar <lbalhar@redhat.com>
---
Lib/test/test_urlparse.py | 57 +++++++++++++++++++++++++++++++++++++++
Lib/urlparse.py | 10 +++++++
2 files changed, 67 insertions(+)
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 16eefed56f6..419e9c2bdcc 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -666,7 +666,64 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(p.scheme, "https")
self.assertEqual(p.geturl(), "https://www.python.org/javascript:alert('msg')/?query=something#fragment")
+ def test_urlsplit_strip_url(self):
+ noise = "".join([chr(i) for i in range(0, 0x20 + 1)])
+ base_url = "http://User:Pass@www.python.org:080/doc/?query=yes#frag"
+ url = noise.decode("utf-8") + base_url
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, "http")
+ self.assertEqual(p.netloc, "User:Pass@www.python.org:080")
+ self.assertEqual(p.path, "/doc/")
+ self.assertEqual(p.query, "query=yes")
+ self.assertEqual(p.fragment, "frag")
+ self.assertEqual(p.username, "User")
+ self.assertEqual(p.password, "Pass")
+ self.assertEqual(p.hostname, "www.python.org")
+ self.assertEqual(p.port, 80)
+ self.assertEqual(p.geturl(), base_url)
+
+ url = noise + base_url.encode("utf-8")
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.scheme, b"http")
+ self.assertEqual(p.netloc, b"User:Pass@www.python.org:080")
+ self.assertEqual(p.path, b"/doc/")
+ self.assertEqual(p.query, b"query=yes")
+ self.assertEqual(p.fragment, b"frag")
+ self.assertEqual(p.username, b"User")
+ self.assertEqual(p.password, b"Pass")
+ self.assertEqual(p.hostname, b"www.python.org")
+ self.assertEqual(p.port, 80)
+ self.assertEqual(p.geturl(), base_url.encode("utf-8"))
+
+ # Test that trailing space is preserved as some applications rely on
+ # this within query strings.
+ query_spaces_url = "https://www.python.org:88/doc/?query= "
+ p = urlparse.urlsplit(noise.decode("utf-8") + query_spaces_url)
+ self.assertEqual(p.scheme, "https")
+ self.assertEqual(p.netloc, "www.python.org:88")
+ self.assertEqual(p.path, "/doc/")
+ self.assertEqual(p.query, "query= ")
+ self.assertEqual(p.port, 88)
+ self.assertEqual(p.geturl(), query_spaces_url)
+
+ p = urlparse.urlsplit("www.pypi.org ")
+ # That "hostname" gets considered a "path" due to the
+ # trailing space and our existing logic... YUCK...
+ # and re-assembles via geturl aka unurlsplit into the original.
+ # django.core.validators.URLValidator (at least through v3.2) relies on
+ # this, for better or worse, to catch it in a ValidationError via its
+ # regular expressions.
+ # Here we test the basic round trip concept of such a trailing space.
+ self.assertEqual(urlparse.urlunsplit(p), "www.pypi.org ")
+
+ # with scheme as cache-key
+ url = "//www.python.org/"
+ scheme = noise.decode("utf-8") + "https" + noise.decode("utf-8")
+ for _ in range(2):
+ p = urlparse.urlsplit(url, scheme=scheme)
+ self.assertEqual(p.scheme, "https")
+ self.assertEqual(p.geturl(), "https://www.python.org/")
def test_attributes_bad_port(self):
"""Check handling of non-integer ports."""
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index 6cc40a8d2fb..0f03a7cc4a9 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -26,6 +26,10 @@ scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provides a good indicator of parsing behavior.
+The WHATWG URL Parser spec should also be considered. We are not compliant with
+it either due to existing user code API behavior expectations (Hyrum's Law).
+It serves as a useful guide when making changes.
+
"""
import re
@@ -63,6 +67,10 @@ scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'0123456789'
'+-.')
+# Leading and trailing C0 control and space to be stripped per WHATWG spec.
+# == "".join([chr(i) for i in range(0, 0x20 + 1)])
+_WHATWG_C0_CONTROL_OR_SPACE = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f '
+
# Unsafe bytes to be removed per WHATWG spec
_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n']
@@ -201,6 +209,8 @@ def urlsplit(url, scheme='', allow_fragments=True):
(e.g. netloc is a single string) and we don't expand % escapes."""
url = _remove_unsafe_bytes_from_url(url)
scheme = _remove_unsafe_bytes_from_url(scheme)
+ url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE)
+ scheme = scheme.strip(_WHATWG_C0_CONTROL_OR_SPACE)
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
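
A sketch of the added stripping (illustrative, assuming the patched urlparse):

    import urlparse

    # Leading C0 control characters and spaces are now stripped:
    p = urlparse.urlsplit("\x00\x1f https://example.com/?q=1")
    (p.scheme, p.netloc, p.query)  # ('https', 'example.com', 'q=1')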

640
00404-cve-2023-40217.patch Normal file
View File

@ -0,0 +1,640 @@
From 27e9b8a48632696311bd8c4af93ec52a49ba5e09 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Langa?= <lukasz@langa.pl>
Date: Tue, 22 Aug 2023 19:53:15 +0200
Subject: [PATCH 1/3] gh-108310: Fix CVE-2023-40217: Check for & avoid the ssl
pre-close flaw (#108315)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Instances of `ssl.SSLSocket` were vulnerable to a bypass of the TLS handshake
and its included protections (like certificate verification), treating sent
unencrypted data as if it were post-handshake TLS encrypted data.
The vulnerability is triggered when a socket is connected, data is sent by the
malicious peer and stored in a buffer, and then the malicious peer closes the
socket within a small timing window before the other peer's TLS handshake can
begin. After this sequence of events the closed socket will not immediately
attempt a TLS handshake due to not being connected, but will also allow the
buffered data to be read as if a successful TLS handshake had occurred.
Co-authored-by: Gregory P. Smith [Google LLC] <greg@krypto.org>
-----
Notable adjustments for Python 2.7:
Use an alternative to self.getblocking(), which was only added in Python 3.7
see: https://docs.python.org/3/library/socket.html#socket.socket.getblocking
Set self._sslobj early to avoid AttributeError
Use SSLError where necessary (it is not a subclass of OSError)
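
The heart of the fix can be sketched independently of ssl.py: when getpeername() reports ENOTCONN, probe the socket with a non-blocking recv(1); any byte found means a peer managed to send data and close before the handshake, so wrapping must fail instead of serving that data up as if it had been decrypted. A minimal sketch of the probe, including the Python 2 stand-in for getblocking() mentioned above (probe_pre_handshake_data is an illustrative name, not part of the patch):

    import errno
    import socket

    def probe_pre_handshake_data(sock):
        # Python 2 has no sock.getblocking(); a zero timeout means
        # non-blocking, anything else (None or > 0) means blocking.
        blocking = (sock.gettimeout() != 0)
        sock.setblocking(False)
        try:
            data = sock.recv(1)
        except socket.error as e:
            # ENOTCONN (and EINVAL on some platforms) just means no
            # data was ever buffered on the unconnected socket.
            if e.errno not in (errno.ENOTCONN, errno.EINVAL):
                raise
            data = b''
        finally:
            sock.setblocking(blocking)
        return data  # non-empty => refuse to wrap this socket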
---
Lib/ssl.py | 32 +++
Lib/test/test_ssl.py | 210 ++++++++++++++++++
...-08-22-17-39-12.gh-issue-108310.fVM3sg.rst | 7 +
3 files changed, 249 insertions(+)
create mode 100644 Misc/NEWS.d/next/Security/2023-08-22-17-39-12.gh-issue-108310.fVM3sg.rst
diff --git a/Lib/ssl.py b/Lib/ssl.py
index 0bb43a4a4de..de9ce6bc134 100644
--- a/Lib/ssl.py
+++ b/Lib/ssl.py
@@ -523,6 +523,7 @@ class SSLSocket(socket):
server_hostname=None,
_context=None):
+ self._sslobj = None
self._makefile_refs = 0
if _context:
self._context = _context
@@ -573,6 +574,8 @@ class SSLSocket(socket):
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
+ sock_timeout = sock.gettimeout()
+
# See if we are connected
try:
self.getpeername()
@@ -580,9 +583,38 @@ class SSLSocket(socket):
if e.errno != errno.ENOTCONN:
raise
connected = False
+ blocking = (sock.gettimeout() != 0)
+ self.setblocking(False)
+ try:
+ # We are not connected so this is not supposed to block, but
+ # testing revealed otherwise on macOS and Windows so we do
+ # the non-blocking dance regardless. Our raise when any data
+ # is found means consuming the data is harmless.
+ notconn_pre_handshake_data = self.recv(1)
+ except socket_error as e:
+ # EINVAL occurs for recv(1) on non-connected on unix sockets.
+ if e.errno not in (errno.ENOTCONN, errno.EINVAL):
+ raise
+ notconn_pre_handshake_data = b''
+ self.setblocking(blocking)
+ if notconn_pre_handshake_data:
+ # This prevents pending data sent to the socket before it was
+ # closed from escaping to the caller who could otherwise
+ # presume it came through a successful TLS connection.
+ reason = "Closed before TLS handshake with data in recv buffer"
+ notconn_pre_handshake_data_error = SSLError(e.errno, reason)
+ # Add the SSLError attributes that _ssl.c always adds.
+ notconn_pre_handshake_data_error.reason = reason
+ notconn_pre_handshake_data_error.library = None
+ try:
+ self.close()
+ except OSError:
+ pass
+ raise notconn_pre_handshake_data_error
else:
connected = True
+ self.settimeout(sock_timeout) # Must come after setblocking() calls.
self._closed = False
self._sslobj = None
self._connected = connected
diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py
index ef2e59c1d15..fd657761a05 100644
--- a/Lib/test/test_ssl.py
+++ b/Lib/test/test_ssl.py
@@ -8,9 +8,11 @@ from test.script_helper import assert_python_ok
import asyncore
import socket
import select
+import struct
import time
import datetime
import gc
+import httplib
import os
import errno
import pprint
@@ -3240,6 +3242,213 @@ else:
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
+def set_socket_so_linger_on_with_zero_timeout(sock):
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
+
+
+class TestPreHandshakeClose(unittest.TestCase):
+    """Verify behavior of sockets closed with data received before the handshake.
+ """
+
+ class SingleConnectionTestServerThread(threading.Thread):
+
+ def __init__(self, name, call_after_accept):
+ self.call_after_accept = call_after_accept
+ self.received_data = b'' # set by .run()
+ self.wrap_error = None # set by .run()
+ self.listener = None # set by .start()
+ self.port = None # set by .start()
+ threading.Thread.__init__(self, name=name)
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, *args):
+ try:
+ if self.listener:
+ self.listener.close()
+ except ssl.SSLError:
+ pass
+ self.join()
+ self.wrap_error = None # avoid dangling references
+
+ def start(self):
+ self.ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ self.ssl_ctx.verify_mode = ssl.CERT_REQUIRED
+ self.ssl_ctx.load_verify_locations(cafile=ONLYCERT)
+ self.ssl_ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
+ self.listener = socket.socket()
+ self.port = support.bind_port(self.listener)
+ self.listener.settimeout(2.0)
+ self.listener.listen(1)
+ threading.Thread.start(self)
+
+ def run(self):
+ conn, address = self.listener.accept()
+ self.listener.close()
+ with closing(conn):
+ if self.call_after_accept(conn):
+ return
+ try:
+ tls_socket = self.ssl_ctx.wrap_socket(conn, server_side=True)
+ except ssl.SSLError as err:
+ self.wrap_error = err
+ else:
+ try:
+ self.received_data = tls_socket.recv(400)
+ except OSError:
+ pass # closed, protocol error, etc.
+
+ def non_linux_skip_if_other_okay_error(self, err):
+ if sys.platform == "linux":
+ return # Expect the full test setup to always work on Linux.
+ if (isinstance(err, ConnectionResetError) or
+ (isinstance(err, OSError) and err.errno == errno.EINVAL) or
+ re.search('wrong.version.number', getattr(err, "reason", ""), re.I)):
+ # On Windows the TCP RST leads to a ConnectionResetError
+ # (ECONNRESET) which Linux doesn't appear to surface to userspace.
+ # If wrap_socket() winds up on the "if connected:" path and doing
+ # the actual wrapping... we get an SSLError from OpenSSL. Typically
+ # WRONG_VERSION_NUMBER. While appropriate, neither is the scenario
+ # we're specifically trying to test. The way this test is written
+ # is known to work on Linux. We'll skip it anywhere else that it
+ # does not present as doing so.
+ self.skipTest("Could not recreate conditions on {}: \
+ err={}".format(sys.platform,err))
+ # If maintaining this conditional winds up being a problem.
+ # just turn this into an unconditional skip anything but Linux.
+ # The important thing is that our CI has the logic covered.
+
+ def test_preauth_data_to_tls_server(self):
+ server_accept_called = threading.Event()
+ ready_for_server_wrap_socket = threading.Event()
+
+ def call_after_accept(unused):
+ server_accept_called.set()
+ if not ready_for_server_wrap_socket.wait(2.0):
+ raise RuntimeError("wrap_socket event never set, test may fail.")
+ return False # Tell the server thread to continue.
+
+ server = self.SingleConnectionTestServerThread(
+ call_after_accept=call_after_accept,
+ name="preauth_data_to_tls_server")
+ server.__enter__() # starts it
+ self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
+
+ with closing(socket.socket()) as client:
+ client.connect(server.listener.getsockname())
+ # This forces an immediate connection close via RST on .close().
+ set_socket_so_linger_on_with_zero_timeout(client)
+ client.setblocking(False)
+
+ server_accept_called.wait()
+ client.send(b"DELETE /data HTTP/1.0\r\n\r\n")
+ client.close() # RST
+
+ ready_for_server_wrap_socket.set()
+ server.join()
+ wrap_error = server.wrap_error
+ self.assertEqual(b"", server.received_data)
+ self.assertIsInstance(wrap_error, ssl.SSLError)
+ self.assertIn("before TLS handshake with data", wrap_error.args[1])
+ self.assertIn("before TLS handshake with data", wrap_error.reason)
+ self.assertNotEqual(0, wrap_error.args[0])
+ self.assertIsNone(wrap_error.library, msg="attr must exist")
+
+ def test_preauth_data_to_tls_client(self):
+ client_can_continue_with_wrap_socket = threading.Event()
+
+ def call_after_accept(conn_to_client):
+ # This forces an immediate connection close via RST on .close().
+ set_socket_so_linger_on_with_zero_timeout(conn_to_client)
+ conn_to_client.send(
+ b"HTTP/1.0 307 Temporary Redirect\r\n"
+ b"Location: https://example.com/someone-elses-server\r\n"
+ b"\r\n")
+ conn_to_client.close() # RST
+ client_can_continue_with_wrap_socket.set()
+ return True # Tell the server to stop.
+
+ server = self.SingleConnectionTestServerThread(
+ call_after_accept=call_after_accept,
+ name="preauth_data_to_tls_client")
+ server.__enter__() # starts it
+ self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
+
+ # Redundant; call_after_accept sets SO_LINGER on the accepted conn.
+ set_socket_so_linger_on_with_zero_timeout(server.listener)
+
+ with closing(socket.socket()) as client:
+ client.connect(server.listener.getsockname())
+ if not client_can_continue_with_wrap_socket.wait(2.0):
+ self.fail("test server took too long.")
+ ssl_ctx = ssl.create_default_context()
+ try:
+ tls_client = ssl_ctx.wrap_socket(
+ client, server_hostname="localhost")
+ except ssl.SSLError as err:
+ wrap_error = err
+ received_data = b""
+ else:
+ wrap_error = None
+ received_data = tls_client.recv(400)
+ tls_client.close()
+
+ server.join()
+ self.assertEqual(b"", received_data)
+ self.assertIsInstance(wrap_error, ssl.SSLError)
+ self.assertIn("before TLS handshake with data", wrap_error.args[1])
+ self.assertIn("before TLS handshake with data", wrap_error.reason)
+ self.assertNotEqual(0, wrap_error.args[0])
+ self.assertIsNone(wrap_error.library, msg="attr must exist")
+
+ def test_https_client_non_tls_response_ignored(self):
+
+ server_responding = threading.Event()
+
+ class SynchronizedHTTPSConnection(httplib.HTTPSConnection):
+ def connect(self):
+ httplib.HTTPConnection.connect(self)
+ # Wait for our fault injection server to have done its thing.
+ if not server_responding.wait(1.0) and support.verbose:
+ sys.stdout.write("server_responding event never set.")
+ self.sock = self._context.wrap_socket(
+ self.sock, server_hostname=self.host)
+
+ def call_after_accept(conn_to_client):
+ # This forces an immediate connection close via RST on .close().
+ set_socket_so_linger_on_with_zero_timeout(conn_to_client)
+ conn_to_client.send(
+ b"HTTP/1.0 402 Payment Required\r\n"
+ b"\r\n")
+ conn_to_client.close() # RST
+ server_responding.set()
+ return True # Tell the server to stop.
+
+ server = self.SingleConnectionTestServerThread(
+ call_after_accept=call_after_accept,
+ name="non_tls_http_RST_responder")
+ server.__enter__() # starts it
+ self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
+ # Redundant; call_after_accept sets SO_LINGER on the accepted conn.
+ set_socket_so_linger_on_with_zero_timeout(server.listener)
+
+ connection = SynchronizedHTTPSConnection(
+ "localhost",
+ port=server.port,
+ context=ssl.create_default_context(),
+ timeout=2.0,
+ )
+ # There are lots of reasons this raises as desired, long before this
+ # test was added. Sending the request requires a successful TLS wrapped
+ # socket; that fails if the connection is broken. It may seem pointless
+ # to test this. It serves as an illustration of something that we never
+ # want to happen... properly not happening.
+ with self.assertRaises(ssl.SSLError) as err_ctx:
+ connection.request("HEAD", "/test", headers={"Host": "localhost"})
+ response = connection.getresponse()
+
def test_main(verbose=False):
if support.verbose:
@@ -3274,6 +3483,7 @@ def test_main(verbose=False):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicTests, BasicSocketTests, SSLErrorTests]
+ tests += [TestPreHandshakeClose]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
diff --git a/Misc/NEWS.d/next/Security/2023-08-22-17-39-12.gh-issue-108310.fVM3sg.rst b/Misc/NEWS.d/next/Security/2023-08-22-17-39-12.gh-issue-108310.fVM3sg.rst
new file mode 100644
index 00000000000..403c77a9d48
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2023-08-22-17-39-12.gh-issue-108310.fVM3sg.rst
@@ -0,0 +1,7 @@
+Fixed an issue where instances of :class:`ssl.SSLSocket` were vulnerable to
+a bypass of the TLS handshake and included protections (like certificate
+verification) and treating sent unencrypted data as if it were
+post-handshake TLS encrypted data. Security issue reported as
+`CVE-2023-40217
+<https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-40217>`_ by
+Aapo Oksman. Patch by Gregory P. Smith.
--
2.41.0
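
These tests lean on a classic trick: enabling SO_LINGER with a zero timeout makes close() tear the connection down with a TCP RST instead of a graceful FIN, which is what leaves unread data behind on the peer. A self-contained sketch of the trick outside the test suite:

    import socket
    import struct

    def force_rst_on_close(sock):
        # onoff=1, linger=0: close() sends RST and discards the send queue.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                        struct.pack('ii', 1, 0))

    srv = socket.socket()
    srv.bind(('127.0.0.1', 0))
    srv.listen(1)

    cli = socket.socket()
    cli.connect(srv.getsockname())
    force_rst_on_close(cli)
    cli.send(b'pre-handshake bytes')
    cli.close()  # peer sees the bytes, then ECONNRESET instead of EOF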
From 8ea38387426d3d2dd4c38f5ab7c26ee0ab0fb242 Mon Sep 17 00:00:00 2001
From: "Miss Islington (bot)"
<31488909+miss-islington@users.noreply.github.com>
Date: Wed, 23 Aug 2023 03:10:56 -0700
Subject: [PATCH 2/3] gh-108342: Break ref cycle in SSLSocket._create() exc
(GH-108344) (#108352)
Explicitly break a reference cycle when SSLSocket._create() raises an
exception. Clear the variable storing the exception, since the
exception's traceback references the frame's local variables and so
creates a reference cycle.
This test leak was introduced by the test added for the fix of GH-108310.
(cherry picked from commit 64f99350351bc46e016b2286f36ba7cd669b79e3)
Co-authored-by: Victor Stinner <vstinner@python.org>
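
The idiom is worth isolating: a local variable holds the exception, the exception (via its traceback) keeps the frame alive, and the frame keeps the local alive, so the cycle must be broken by hand. A generic sketch of the pattern this backport applies:

    def raise_and_break_cycle(exc):
        try:
            raise exc
        finally:
            # Drop the local reference; otherwise
            # exc -> traceback -> frame -> exc keeps the whole frame
            # alive until the cyclic GC happens to run.
            exc = None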
---
Lib/ssl.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/Lib/ssl.py b/Lib/ssl.py
index de9ce6bc134..ac734b4f2ad 100644
--- a/Lib/ssl.py
+++ b/Lib/ssl.py
@@ -610,7 +610,11 @@ class SSLSocket(socket):
self.close()
except OSError:
pass
- raise notconn_pre_handshake_data_error
+ try:
+ raise notconn_pre_handshake_data_error
+ finally:
+ # Explicitly break the reference cycle.
+ notconn_pre_handshake_data_error = None
else:
connected = True
--
2.41.0
From 845491ee514d6489466664aeef377fe9155f8e83 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Langa?= <lukasz@langa.pl>
Date: Thu, 24 Aug 2023 12:09:30 +0200
Subject: [PATCH 3/3] [3.8] gh-108342: Make ssl TestPreHandshakeClose more
reliable (GH-108370) (#108408)
* In preauth tests of test_ssl, explicitly break reference cycles
involving SingleConnectionTestServerThread to make sure that the
thread is deleted. Otherwise, the test marks the environment as
altered because the threading module sees a "dangling thread"
(SingleConnectionTestServerThread). This test leak was introduced
by the test added for the fix of issue gh-108310.
* Use support.SHORT_TIMEOUT instead of hardcoded 1.0 or 2.0 seconds
timeout.
* SingleConnectionTestServerThread.run() catches TimeoutError
* Fix a race condition (missing synchronization) in
test_preauth_data_to_tls_client(): the server now waits until the
client's connect() has completed in call_after_accept() (see the
sketch after this list).
* test_https_client_non_tls_response_ignored() calls server.join()
explicitly.
* Replace "localhost" with server.listener.getsockname()[0].
(cherry picked from commit 592bacb6fc0833336c0453e818e9b95016e9fd47)
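
The synchronization fix boils down to pairing every cross-thread assumption with a threading.Event, so neither side proceeds on a timing guess. A stripped-down sketch of the handshake added in test_preauth_data_to_tls_client() (names are illustrative):

    import threading

    client_connected = threading.Event()

    def server_side():
        # The fault-injection response (and its RST) must not race the
        # client's connect(); block until the client confirms it.
        if not client_connected.wait(30.0):
            raise RuntimeError("test client took too long")

    t = threading.Thread(target=server_side)
    t.start()
    # ... client calls connect() here ...
    client_connected.set()
    t.join()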
---
Lib/test/support/__init__.py | 2 +
Lib/test/test_ssl.py | 105 ++++++++++++++++++++---------------
2 files changed, 62 insertions(+), 45 deletions(-)
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index ccc11c1b4b0..d6ee9934f04 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -47,6 +47,8 @@ __all__ = ["Error", "TestFailed", "TestDidNotRun", "ResourceDenied", "import_mod
"strip_python_stderr", "IPV6_ENABLED", "run_with_tz",
"SuppressCrashReport"]
+SHORT_TIMEOUT = 30.0 # Added to make backporting from 3.x easier
+
class Error(Exception):
"""Base class for regression test exceptions."""
diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py
index fd657761a05..0f5aa37a0c1 100644
--- a/Lib/test/test_ssl.py
+++ b/Lib/test/test_ssl.py
@@ -3252,12 +3252,16 @@ class TestPreHandshakeClose(unittest.TestCase):
class SingleConnectionTestServerThread(threading.Thread):
- def __init__(self, name, call_after_accept):
+ def __init__(self, name, call_after_accept, timeout=None):
self.call_after_accept = call_after_accept
self.received_data = b'' # set by .run()
self.wrap_error = None # set by .run()
self.listener = None # set by .start()
self.port = None # set by .start()
+ if timeout is None:
+ self.timeout = support.SHORT_TIMEOUT
+ else:
+ self.timeout = timeout
threading.Thread.__init__(self, name=name)
def __enter__(self):
@@ -3280,13 +3284,22 @@ class TestPreHandshakeClose(unittest.TestCase):
self.ssl_ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
self.listener = socket.socket()
self.port = support.bind_port(self.listener)
- self.listener.settimeout(2.0)
+ self.listener.settimeout(self.timeout)
self.listener.listen(1)
threading.Thread.start(self)
def run(self):
- conn, address = self.listener.accept()
- self.listener.close()
+ try:
+ conn, address = self.listener.accept()
+ except OSError as e:
+ if e.errno == errno.ETIMEDOUT:
+ # on timeout, just close the listener
+ return
+ else:
+ raise
+ finally:
+ self.listener.close()
+
with closing(conn):
if self.call_after_accept(conn):
return
@@ -3300,33 +3313,13 @@ class TestPreHandshakeClose(unittest.TestCase):
except OSError:
pass # closed, protocol error, etc.
- def non_linux_skip_if_other_okay_error(self, err):
- if sys.platform == "linux":
- return # Expect the full test setup to always work on Linux.
- if (isinstance(err, ConnectionResetError) or
- (isinstance(err, OSError) and err.errno == errno.EINVAL) or
- re.search('wrong.version.number', getattr(err, "reason", ""), re.I)):
- # On Windows the TCP RST leads to a ConnectionResetError
- # (ECONNRESET) which Linux doesn't appear to surface to userspace.
- # If wrap_socket() winds up on the "if connected:" path and doing
- # the actual wrapping... we get an SSLError from OpenSSL. Typically
- # WRONG_VERSION_NUMBER. While appropriate, neither is the scenario
- # we're specifically trying to test. The way this test is written
- # is known to work on Linux. We'll skip it anywhere else that it
- # does not present as doing so.
- self.skipTest("Could not recreate conditions on {}: \
- err={}".format(sys.platform,err))
- # If maintaining this conditional winds up being a problem.
- # just turn this into an unconditional skip anything but Linux.
- # The important thing is that our CI has the logic covered.
-
def test_preauth_data_to_tls_server(self):
server_accept_called = threading.Event()
ready_for_server_wrap_socket = threading.Event()
def call_after_accept(unused):
server_accept_called.set()
- if not ready_for_server_wrap_socket.wait(2.0):
+ if not ready_for_server_wrap_socket.wait(support.SHORT_TIMEOUT):
raise RuntimeError("wrap_socket event never set, test may fail.")
return False # Tell the server thread to continue.
@@ -3348,18 +3341,28 @@ class TestPreHandshakeClose(unittest.TestCase):
ready_for_server_wrap_socket.set()
server.join()
+
wrap_error = server.wrap_error
- self.assertEqual(b"", server.received_data)
- self.assertIsInstance(wrap_error, ssl.SSLError)
- self.assertIn("before TLS handshake with data", wrap_error.args[1])
- self.assertIn("before TLS handshake with data", wrap_error.reason)
- self.assertNotEqual(0, wrap_error.args[0])
- self.assertIsNone(wrap_error.library, msg="attr must exist")
+ try:
+ self.assertEqual(b"", server.received_data)
+ self.assertIsInstance(wrap_error, ssl.SSLError)
+ self.assertIn("before TLS handshake with data", wrap_error.args[1])
+ self.assertIn("before TLS handshake with data", wrap_error.reason)
+ self.assertNotEqual(0, wrap_error.args[0])
+ self.assertIsNone(wrap_error.library, msg="attr must exist")
+ finally:
+ # gh-108342: Explicitly break the reference cycle
+ wrap_error = None
+ server = None
def test_preauth_data_to_tls_client(self):
+ server_can_continue_with_wrap_socket = threading.Event()
client_can_continue_with_wrap_socket = threading.Event()
def call_after_accept(conn_to_client):
+ if not server_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT):
+ print("ERROR: test client took too long")
+
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(conn_to_client)
conn_to_client.send(
@@ -3381,8 +3384,10 @@ class TestPreHandshakeClose(unittest.TestCase):
with closing(socket.socket()) as client:
client.connect(server.listener.getsockname())
- if not client_can_continue_with_wrap_socket.wait(2.0):
- self.fail("test server took too long.")
+ server_can_continue_with_wrap_socket.set()
+
+ if not client_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT):
+ self.fail("test server took too long")
ssl_ctx = ssl.create_default_context()
try:
tls_client = ssl_ctx.wrap_socket(
@@ -3396,22 +3401,29 @@ class TestPreHandshakeClose(unittest.TestCase):
tls_client.close()
server.join()
- self.assertEqual(b"", received_data)
- self.assertIsInstance(wrap_error, ssl.SSLError)
- self.assertIn("before TLS handshake with data", wrap_error.args[1])
- self.assertIn("before TLS handshake with data", wrap_error.reason)
- self.assertNotEqual(0, wrap_error.args[0])
- self.assertIsNone(wrap_error.library, msg="attr must exist")
+ try:
+ self.assertEqual(b"", received_data)
+ self.assertIsInstance(wrap_error, ssl.SSLError)
+ self.assertIn("before TLS handshake with data", wrap_error.args[1])
+ self.assertIn("before TLS handshake with data", wrap_error.reason)
+ self.assertNotEqual(0, wrap_error.args[0])
+ self.assertIsNone(wrap_error.library, msg="attr must exist")
+ finally:
+ # gh-108342: Explicitly break the reference cycle
+ wrap_error = None
+ server = None
def test_https_client_non_tls_response_ignored(self):
-
server_responding = threading.Event()
class SynchronizedHTTPSConnection(httplib.HTTPSConnection):
def connect(self):
+ # Call clear text HTTP connect(), not the encrypted HTTPS (TLS)
+ # connect(): wrap_socket() is called manually below.
httplib.HTTPConnection.connect(self)
+
# Wait for our fault injection server to have done its thing.
- if not server_responding.wait(1.0) and support.verbose:
+ if not server_responding.wait(support.SHORT_TIMEOUT) and support.verbose:
sys.stdout.write("server_responding event never set.")
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
@@ -3426,29 +3438,32 @@ class TestPreHandshakeClose(unittest.TestCase):
server_responding.set()
return True # Tell the server to stop.
+ timeout = 2.0
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
- name="non_tls_http_RST_responder")
+ name="non_tls_http_RST_responder",
+ timeout=timeout)
server.__enter__() # starts it
self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
# Redundant; call_after_accept sets SO_LINGER on the accepted conn.
set_socket_so_linger_on_with_zero_timeout(server.listener)
connection = SynchronizedHTTPSConnection(
- "localhost",
+ server.listener.getsockname()[0],
port=server.port,
context=ssl.create_default_context(),
- timeout=2.0,
+ timeout=timeout,
)
# There are lots of reasons this raises as desired, long before this
# test was added. Sending the request requires a successful TLS wrapped
# socket; that fails if the connection is broken. It may seem pointless
# to test this. It serves as an illustration of something that we never
# want to happen... properly not happening.
- with self.assertRaises(ssl.SSLError) as err_ctx:
+ with self.assertRaises(ssl.SSLError):
connection.request("HEAD", "/test", headers={"Host": "localhost"})
response = connection.getresponse()
+ server.join()
def test_main(verbose=False):
if support.verbose:
--
2.41.0

98
00406-cve-2022-48565.patch Normal file
View File

@ -0,0 +1,98 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Ned Deily <nad@python.org>
Date: Fri, 6 Oct 2023 12:23:43 +0200
Subject: [PATCH] 00406-cve-2022-48565.patch
Reject XML entity declarations in plist files
CVE-2022-48565: XML External Entity in XML processing plistlib module
Backported from https://github.com/python/cpython/commit/a158fb9c5138
Tracking bug: https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2022-48565
Co-authored-by: Ronald Oussoren <ronaldoussoren@mac.com>
Co-authored-by: Lumir Balhar <lbalhar@redhat.com>
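
The mechanism is plain expat usage: installing an EntityDeclHandler that raises aborts parsing the moment any entity declaration is seen, before anything can be expanded. The same idea, stripped out of plistlib:

    import xml.parsers.expat

    def reject_entity_decl(name, is_parameter_entity, value, base,
                           system_id, public_id, notation_name):
        raise ValueError("XML entity declarations are not supported")

    parser = xml.parsers.expat.ParserCreate()
    parser.EntityDeclHandler = reject_entity_decl
    evil = '<!DOCTYPE d [<!ENTITY e "boom">]><d>&e;</d>'
    try:
        parser.Parse(evil, True)
    except ValueError as exc:
        print(exc)  # raised before &e; is ever expanded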
---
Lib/plistlib.py | 12 ++++++++++++
Lib/test/test_plistlib.py | 18 ++++++++++++++++++
.../2020-10-19-10-56-27.bpo-42051.EU_B7u.rst | 3 +++
3 files changed, 33 insertions(+)
create mode 100644 Misc/NEWS.d/next/Security/2020-10-19-10-56-27.bpo-42051.EU_B7u.rst
diff --git a/Lib/plistlib.py b/Lib/plistlib.py
index 42897b8da8b..73fca1d4714 100644
--- a/Lib/plistlib.py
+++ b/Lib/plistlib.py
@@ -390,6 +390,11 @@ class Data:
return "%s(%s)" % (self.__class__.__name__, repr(self.data))
+class InvalidFileException (ValueError):
+ def __init__(self, message="Invalid file"):
+ ValueError.__init__(self, message)
+
+
class PlistParser:
def __init__(self):
@@ -403,9 +408,16 @@ class PlistParser:
parser.StartElementHandler = self.handleBeginElement
parser.EndElementHandler = self.handleEndElement
parser.CharacterDataHandler = self.handleData
+ parser.EntityDeclHandler = self.handle_entity_decl
parser.ParseFile(fileobj)
return self.root
+ def handle_entity_decl(self, entity_name, is_parameter_entity, value, base, system_id, public_id, notation_name):
+        # Reject plist files with entity declarations to avoid XML vulnerabilities in expat.
+        # Regular plist files don't contain those declarations, and Apple's plutil tool does not
+ # accept them either.
+ raise InvalidFileException("XML entity declarations are not supported in plist files")
+
def handleBeginElement(self, element, attrs):
self.data = []
handler = getattr(self, "begin_" + element, None)
diff --git a/Lib/test/test_plistlib.py b/Lib/test/test_plistlib.py
index 7859ad05723..bcdae924d91 100644
--- a/Lib/test/test_plistlib.py
+++ b/Lib/test/test_plistlib.py
@@ -86,6 +86,19 @@ TESTDATA = """<?xml version="1.0" encoding="UTF-8"?>
</plist>
""".replace(" " * 8, "\t") # Apple as well as plistlib.py output hard tabs
+XML_PLIST_WITH_ENTITY=b'''\
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd" [
+ <!ENTITY entity "replacement text">
+ ]>
+<plist version="1.0">
+ <dict>
+ <key>A</key>
+ <string>&entity;</string>
+ </dict>
+</plist>
+'''
+
class TestPlistlib(unittest.TestCase):
@@ -195,6 +208,11 @@ class TestPlistlib(unittest.TestCase):
self.assertEqual(test1, result1)
self.assertEqual(test2, result2)
+ def test_xml_plist_with_entity_decl(self):
+ with self.assertRaisesRegexp(plistlib.InvalidFileException,
+ "XML entity declarations are not supported"):
+ plistlib.readPlistFromString(XML_PLIST_WITH_ENTITY)
+
def test_main():
test_support.run_unittest(TestPlistlib)
diff --git a/Misc/NEWS.d/next/Security/2020-10-19-10-56-27.bpo-42051.EU_B7u.rst b/Misc/NEWS.d/next/Security/2020-10-19-10-56-27.bpo-42051.EU_B7u.rst
new file mode 100644
index 00000000000..31b44b229f6
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2020-10-19-10-56-27.bpo-42051.EU_B7u.rst
@@ -0,0 +1,3 @@
+The :mod:`plistlib` module no longer accepts entity declarations in XML
+plist files to avoid XML vulnerabilities. This should not affect users as
+entity declarations are not used in regular plist files.
\ No newline at end of file

136
00408-cve-2022-48560.patch Normal file
View File

@ -0,0 +1,136 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Lumir Balhar <lbalhar@redhat.com>
Date: Thu, 23 Nov 2023 13:25:44 +0100
Subject: [PATCH] 00408-cve-2022-48560.patch
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Security fix for CVE-2022-48560: python3: use after free in heappushpop()
of heapq module
Resolved upstream: https://github.com/python/cpython/issues/83602
Backported from Python 3.6.11.
Co-authored-by: Pablo Galindo <Pablogsal@gmail.com>
Co-authored-by: Lumír Balhar <lbalhar@redhat.com>
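
The hazard is reproducible from pure Python: a comparison method can mutate the heap while the C implementation still holds borrowed references into the list. A minimal demonstration of the attack shape (on a patched interpreter this raises cleanly instead of touching freed memory):

    import heapq

    class Evil(int):
        def __lt__(self, other):
            heap[:] = []          # resize the list mid-comparison
            return NotImplemented

    heap = []
    heapq.heappush(heap, Evil(0))
    try:
        heapq.heappushpop(heap, 1)
    except (IndexError, RuntimeError) as exc:
        print(type(exc).__name__)  # IndexError on a fixed build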
---
Lib/test/test_heapq.py | 32 ++++++++++++++++++++++++++++++++
Modules/_heapqmodule.c | 31 ++++++++++++++++++++++++-------
2 files changed, 56 insertions(+), 7 deletions(-)
diff --git a/Lib/test/test_heapq.py b/Lib/test/test_heapq.py
index c4de593bb82..29cefb5d1a6 100644
--- a/Lib/test/test_heapq.py
+++ b/Lib/test/test_heapq.py
@@ -396,6 +396,38 @@ class TestErrorHandling(TestCase):
with self.assertRaises((IndexError, RuntimeError)):
self.module.heappop(heap)
+ def test_comparison_operator_modifiying_heap(self):
+ # See bpo-39421: Strong references need to be taken
+ # when comparing objects as they can alter the heap
+ class EvilClass(int):
+ def __lt__(self, o):
+ heap[:] = []
+ return NotImplemented
+
+ heap = []
+ self.module.heappush(heap, EvilClass(0))
+ self.assertRaises(IndexError, self.module.heappushpop, heap, 1)
+
+ def test_comparison_operator_modifiying_heap_two_heaps(self):
+
+ class h(int):
+ def __lt__(self, o):
+ list2[:] = []
+ return NotImplemented
+
+ class g(int):
+ def __lt__(self, o):
+ list1[:] = []
+ return NotImplemented
+
+ list1, list2 = [], []
+
+ self.module.heappush(list1, h(0))
+ self.module.heappush(list2, g(0))
+
+ self.assertRaises((IndexError, RuntimeError), self.module.heappush, list1, g(1))
+ self.assertRaises((IndexError, RuntimeError), self.module.heappush, list2, h(1))
+
class TestErrorHandlingPython(TestErrorHandling):
module = py_heapq
diff --git a/Modules/_heapqmodule.c b/Modules/_heapqmodule.c
index 5b0ef691545..c013aee1ebf 100644
--- a/Modules/_heapqmodule.c
+++ b/Modules/_heapqmodule.c
@@ -52,7 +52,11 @@ _siftdown(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
while (pos > startpos) {
parentpos = (pos - 1) >> 1;
parent = PyList_GET_ITEM(heap, parentpos);
+ Py_INCREF(newitem);
+ Py_INCREF(parent);
cmp = cmp_lt(newitem, parent);
+ Py_DECREF(parent);
+ Py_DECREF(newitem);
if (cmp == -1)
return -1;
if (size != PyList_GET_SIZE(heap)) {
@@ -93,9 +97,13 @@ _siftup(PyListObject *heap, Py_ssize_t pos)
childpos = 2*pos + 1; /* leftmost child position */
rightpos = childpos + 1;
if (rightpos < endpos) {
- cmp = cmp_lt(
- PyList_GET_ITEM(heap, childpos),
- PyList_GET_ITEM(heap, rightpos));
+ PyObject* a = PyList_GET_ITEM(heap, childpos);
+ PyObject* b = PyList_GET_ITEM(heap, rightpos);
+ Py_INCREF(a);
+ Py_INCREF(b);
+ cmp = cmp_lt(a, b);
+ Py_DECREF(a);
+ Py_DECREF(b);
if (cmp == -1)
return -1;
if (cmp == 0)
@@ -236,7 +244,10 @@ heappushpop(PyObject *self, PyObject *args)
return item;
}
- cmp = cmp_lt(PyList_GET_ITEM(heap, 0), item);
+ PyObject* top = PyList_GET_ITEM(heap, 0);
+ Py_INCREF(top);
+ cmp = cmp_lt(top, item);
+ Py_DECREF(top);
if (cmp == -1)
return NULL;
if (cmp == 0) {
@@ -395,7 +406,9 @@ _siftdownmax(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
while (pos > startpos){
parentpos = (pos - 1) >> 1;
parent = PyList_GET_ITEM(heap, parentpos);
+ Py_INCREF(parent);
cmp = cmp_lt(parent, newitem);
+ Py_DECREF(parent);
if (cmp == -1) {
Py_DECREF(newitem);
return -1;
@@ -436,9 +449,13 @@ _siftupmax(PyListObject *heap, Py_ssize_t pos)
childpos = 2*pos + 1; /* leftmost child position */
rightpos = childpos + 1;
if (rightpos < endpos) {
- cmp = cmp_lt(
- PyList_GET_ITEM(heap, rightpos),
- PyList_GET_ITEM(heap, childpos));
+ PyObject* a = PyList_GET_ITEM(heap, rightpos);
+ PyObject* b = PyList_GET_ITEM(heap, childpos);
+ Py_INCREF(a);
+ Py_INCREF(b);
+ cmp = cmp_lt(a, b);
+ Py_DECREF(a);
+ Py_DECREF(b);
if (cmp == -1) {
Py_DECREF(newitem);
return -1;

View File

@ -1,732 +0,0 @@
diff -up Python-2.7.2/Lib/hashlib.py.hashlib-fips Python-2.7.2/Lib/hashlib.py
--- Python-2.7.2/Lib/hashlib.py.hashlib-fips 2011-06-11 11:46:24.000000000 -0400
+++ Python-2.7.2/Lib/hashlib.py 2011-09-14 00:21:26.194252001 -0400
@@ -6,9 +6,12 @@
__doc__ = """hashlib module - A common interface to many hash functions.
-new(name, string='') - returns a new hash object implementing the
- given hash function; initializing the hash
- using the given string data.
+new(name, string='', usedforsecurity=True)
+ - returns a new hash object implementing the given hash function;
+ initializing the hash using the given string data.
+
+ "usedforsecurity" is a non-standard extension for better supporting
+ FIPS-compliant environments (see below)
Named constructor functions are also available, these are much faster
than using new():
@@ -24,6 +27,20 @@ the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
+Our implementation of hashlib uses OpenSSL.
+
+OpenSSL has a "FIPS mode", which, if enabled, may restrict the available hashes
+to only those that are compliant with FIPS regulations. For example, it may
+deny the use of MD5, on the grounds that this is not secure for uses such as
+authentication, system integrity checking, or digital signatures.
+
+If you need to use such a hash for non-security purposes (such as indexing into
+a data structure for speed), you can override the keyword argument
+"usedforsecurity" from True to False to signify that your code is not relying
+on the hash for security purposes, and this will allow the hash to be usable
+even in FIPS mode. This is not a standard feature of Python 2.7's hashlib, and
+is included here to better support FIPS mode.
+
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
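
In practice the usedforsecurity extension described in the hunk above looks like this on an interpreter that carries this downstream patch (stock Python 2.7 hashlib does not accept the keyword):

    import hashlib

    # Fine even in FIPS mode: the caller vouches that MD5 is not being
    # used for security (here, as a cheap cache key).
    h = hashlib.new('md5', b'some cache key', usedforsecurity=False)
    print(h.hexdigest())

    # Without the override, FIPS-enforcing OpenSSL raises ValueError:
    # hashlib.new('md5')  # ValueError: ... unknown cipher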
@@ -63,76 +80,41 @@ algorithms = __always_supported
'pbkdf2_hmac')
-def __get_builtin_constructor(name):
- try:
- if name in ('SHA1', 'sha1'):
- import _sha
- return _sha.new
- elif name in ('MD5', 'md5'):
- import _md5
- return _md5.new
- elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
- import _sha256
- bs = name[3:]
- if bs == '256':
- return _sha256.sha256
- elif bs == '224':
- return _sha256.sha224
- elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
- import _sha512
- bs = name[3:]
- if bs == '512':
- return _sha512.sha512
- elif bs == '384':
- return _sha512.sha384
- except ImportError:
- pass # no extension module, this hash is unsupported.
-
- raise ValueError('unsupported hash type ' + name)
-
-
def __get_openssl_constructor(name):
try:
f = getattr(_hashlib, 'openssl_' + name)
# Allow the C module to raise ValueError. The function will be
# defined but the hash not actually available thanks to OpenSSL.
- f()
+ #
+ # We pass "usedforsecurity=False" to disable FIPS-based restrictions:
+ # at this stage we're merely seeing if the function is callable,
+ # rather than using it for actual work.
+ f(usedforsecurity=False)
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
- return __get_builtin_constructor(name)
+ raise
-
-def __py_new(name, string=''):
- """new(name, string='') - Return a new hashing object using the named algorithm;
- optionally initialized with a string.
- """
- return __get_builtin_constructor(name)(string)
-
-
-def __hash_new(name, string=''):
+def __hash_new(name, string='', usedforsecurity=True):
"""new(name, string='') - Return a new hashing object using the named algorithm;
optionally initialized with a string.
+ Override 'usedforsecurity' to False when using for non-security purposes in
+ a FIPS environment
"""
try:
- return _hashlib.new(name, string)
+ return _hashlib.new(name, string, usedforsecurity)
except ValueError:
- # If the _hashlib module (OpenSSL) doesn't support the named
- # hash, try using our builtin implementations.
- # This allows for SHA224/256 and SHA384/512 support even though
- # the OpenSSL library prior to 0.9.8 doesn't provide them.
- return __get_builtin_constructor(name)(string)
-
+ raise
try:
import _hashlib
new = __hash_new
__get_hash = __get_openssl_constructor
algorithms_available = algorithms_available.union(
_hashlib.openssl_md_meth_names)
except ImportError:
- new = __py_new
- __get_hash = __get_builtin_constructor
+ # We don't build the legacy modules
+ raise
for __func_name in __always_supported:
# try them all, some may not work due to the OpenSSL
@@ -143,4 +125,4 @@ for __func_name in __always_supported:
# Cleanup locals()
del __always_supported, __func_name, __get_hash
-del __py_new, __hash_new, __get_openssl_constructor
+del __hash_new, __get_openssl_constructor
diff -up Python-2.7.2/Lib/test/test_hashlib.py.hashlib-fips Python-2.7.2/Lib/test/test_hashlib.py
--- Python-2.7.2/Lib/test/test_hashlib.py.hashlib-fips 2011-06-11 11:46:25.000000000 -0400
+++ Python-2.7.2/Lib/test/test_hashlib.py 2011-09-14 01:08:55.525254195 -0400
@@ -32,6 +32,19 @@ def hexstr(s):
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
+def openssl_enforces_fips():
+ # Use the "openssl" command (if present) to try to determine if the local
+ # OpenSSL is configured to enforce FIPS
+ from subprocess import Popen, PIPE
+ try:
+ p = Popen(['openssl', 'md5'],
+ stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ except OSError:
+ # "openssl" command not found
+ return False
+ stdout, stderr = p.communicate(input=b'abc')
+ return b'unknown cipher' in stderr
+OPENSSL_ENFORCES_FIPS = openssl_enforces_fips()
class HashLibTestCase(unittest.TestCase):
supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
@@ -61,10 +74,10 @@ class HashLibTestCase(unittest.TestCase)
# of hashlib.new given the algorithm name.
for algorithm, constructors in self.constructors_to_test.items():
constructors.add(getattr(hashlib, algorithm))
- def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm):
+ def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm, usedforsecurity=True):
if data is None:
- return hashlib.new(_alg)
- return hashlib.new(_alg, data)
+ return hashlib.new(_alg, usedforsecurity=usedforsecurity)
+ return hashlib.new(_alg, data, usedforsecurity=usedforsecurity)
constructors.add(_test_algorithm_via_hashlib_new)
_hashlib = self._conditional_import_module('_hashlib')
@@ -78,28 +91,13 @@ class HashLibTestCase(unittest.TestCase)
if constructor:
constructors.add(constructor)
- _md5 = self._conditional_import_module('_md5')
- if _md5:
- self.constructors_to_test['md5'].add(_md5.new)
- _sha = self._conditional_import_module('_sha')
- if _sha:
- self.constructors_to_test['sha1'].add(_sha.new)
- _sha256 = self._conditional_import_module('_sha256')
- if _sha256:
- self.constructors_to_test['sha224'].add(_sha256.sha224)
- self.constructors_to_test['sha256'].add(_sha256.sha256)
- _sha512 = self._conditional_import_module('_sha512')
- if _sha512:
- self.constructors_to_test['sha384'].add(_sha512.sha384)
- self.constructors_to_test['sha512'].add(_sha512.sha512)
-
super(HashLibTestCase, self).__init__(*args, **kwargs)
def test_hash_array(self):
a = array.array("b", range(10))
constructors = self.constructors_to_test.itervalues()
for cons in itertools.chain.from_iterable(constructors):
- c = cons(a)
+ c = cons(a, usedforsecurity=False)
c.hexdigest()
def test_algorithms_attribute(self):
@@ -115,28 +113,9 @@ class HashLibTestCase(unittest.TestCase)
self.assertRaises(ValueError, hashlib.new, 'spam spam spam spam spam')
self.assertRaises(TypeError, hashlib.new, 1)
- def test_get_builtin_constructor(self):
- get_builtin_constructor = hashlib.__dict__[
- '__get_builtin_constructor']
- self.assertRaises(ValueError, get_builtin_constructor, 'test')
- try:
- import _md5
- except ImportError:
- pass
- # This forces an ImportError for "import _md5" statements
- sys.modules['_md5'] = None
- try:
- self.assertRaises(ValueError, get_builtin_constructor, 'md5')
- finally:
- if '_md5' in locals():
- sys.modules['_md5'] = _md5
- else:
- del sys.modules['_md5']
- self.assertRaises(TypeError, get_builtin_constructor, 3)
-
def test_hexdigest(self):
for name in self.supported_hash_names:
- h = hashlib.new(name)
+ h = hashlib.new(name, usedforsecurity=False)
self.assertTrue(hexstr(h.digest()) == h.hexdigest())
def test_large_update(self):
@@ -145,16 +125,16 @@ class HashLibTestCase(unittest.TestCase)
abcs = aas + bees + cees
for name in self.supported_hash_names:
- m1 = hashlib.new(name)
+ m1 = hashlib.new(name, usedforsecurity=False)
m1.update(aas)
m1.update(bees)
m1.update(cees)
- m2 = hashlib.new(name)
+ m2 = hashlib.new(name, usedforsecurity=False)
m2.update(abcs)
self.assertEqual(m1.digest(), m2.digest(), name+' update problem.')
- m3 = hashlib.new(name, abcs)
+ m3 = hashlib.new(name, abcs, usedforsecurity=False)
self.assertEqual(m1.digest(), m3.digest(), name+' new problem.')
def check(self, name, data, digest):
@@ -162,7 +142,7 @@ class HashLibTestCase(unittest.TestCase)
# 2 is for hashlib.name(...) and hashlib.new(name, ...)
self.assertGreaterEqual(len(constructors), 2)
for hash_object_constructor in constructors:
- computed = hash_object_constructor(data).hexdigest()
+ computed = hash_object_constructor(data, usedforsecurity=False).hexdigest()
self.assertEqual(
computed, digest,
"Hash algorithm %s constructed using %s returned hexdigest"
@@ -172,7 +152,8 @@ class HashLibTestCase(unittest.TestCase)
def check_unicode(self, algorithm_name):
# Unicode objects are not allowed as input.
- expected = hashlib.new(algorithm_name, str(u'spam')).hexdigest()
+ expected = hashlib.new(algorithm_name, str(u'spam'),
+ usedforsecurity=False).hexdigest()
self.check(algorithm_name, u'spam', expected)
def test_unicode(self):
@@ -354,6 +335,70 @@ class HashLibTestCase(unittest.TestCase)
self.assertEqual(expected_hash, hasher.hexdigest())
+ def test_issue9146(self):
+ # Ensure that various ways to use "MD5" from "hashlib" don't segfault:
+ m = hashlib.md5(usedforsecurity=False)
+ m.update(b'abc\n')
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = hashlib.new('md5', usedforsecurity=False)
+ m.update(b'abc\n')
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = hashlib.md5(b'abc\n', usedforsecurity=False)
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = hashlib.new('md5', b'abc\n', usedforsecurity=False)
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ def assertRaisesUnknownCipher(self, callable_obj=None, *args, **kwargs):
+ try:
+ callable_obj(*args, **kwargs)
+ except ValueError, e:
+ if not e.args[0].endswith('unknown cipher'):
+ self.fail('Incorrect exception raised')
+ else:
+ self.fail('Exception was not raised')
+
+ @unittest.skipUnless(OPENSSL_ENFORCES_FIPS,
+ 'FIPS enforcement required for this test.')
+ def test_hashlib_fips_mode(self):
+ # Ensure that we raise a ValueError on vanilla attempts to use MD5
+ # in hashlib in a FIPS-enforced setting:
+ self.assertRaisesUnknownCipher(hashlib.md5)
+ self.assertRaisesUnknownCipher(hashlib.new, 'md5')
+
+ @unittest.skipUnless(OPENSSL_ENFORCES_FIPS,
+ 'FIPS enforcement required for this test.')
+ def test_hashopenssl_fips_mode(self):
+ # Verify the _hashlib module's handling of md5:
+ import _hashlib
+
+ assert hasattr(_hashlib, 'openssl_md5')
+
+ # Ensure that _hashlib raises a ValueError on vanilla attempts to
+ # use MD5 in a FIPS-enforced setting:
+ self.assertRaisesUnknownCipher(_hashlib.openssl_md5)
+ self.assertRaisesUnknownCipher(_hashlib.new, 'md5')
+
+ # Ensure that in such a setting we can whitelist a callsite with
+ # usedforsecurity=False and have it succeed:
+ m = _hashlib.openssl_md5(usedforsecurity=False)
+ m.update('abc\n')
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = _hashlib.new('md5', usedforsecurity=False)
+ m.update('abc\n')
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = _hashlib.openssl_md5('abc\n', usedforsecurity=False)
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+ m = _hashlib.new('md5', 'abc\n', usedforsecurity=False)
+ self.assertEquals(m.hexdigest(), "0bee89b07a248e27c83fc3d5951213c1")
+
+
+
class KDFTests(unittest.TestCase):
pbkdf2_test_vectors = [
(b'password', b'salt', 1, None),
diff -up Python-2.7.2/Modules/Setup.dist.hashlib-fips Python-2.7.2/Modules/Setup.dist
--- Python-2.7.2/Modules/Setup.dist.hashlib-fips 2011-09-14 00:21:26.163252001 -0400
+++ Python-2.7.2/Modules/Setup.dist 2011-09-14 00:21:26.201252001 -0400
@@ -248,14 +248,14 @@ imageop imageop.c # Operations on images
# Message-Digest Algorithm, described in RFC 1321. The necessary files
# md5.c and md5.h are included here.
-_md5 md5module.c md5.c
+#_md5 md5module.c md5.c
# The _sha module implements the SHA checksum algorithms.
# (NIST's Secure Hash Algorithms.)
-_sha shamodule.c
-_sha256 sha256module.c
-_sha512 sha512module.c
+#_sha shamodule.c
+#_sha256 sha256module.c
+#_sha512 sha512module.c
# SGI IRIX specific modules -- off by default.
diff -up Python-2.7.2/setup.py.hashlib-fips Python-2.7.2/setup.py
--- Python-2.7.2/setup.py.hashlib-fips 2011-09-14 00:21:25.722252001 -0400
+++ Python-2.7.2/setup.py 2011-09-14 00:21:26.203252001 -0400
@@ -768,21 +768,6 @@ class PyBuildExt(build_ext):
print ("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
- if COMPILED_WITH_PYDEBUG or not have_usable_openssl:
- # The _sha module implements the SHA1 hash algorithm.
- exts.append( Extension('_sha', ['shamodule.c']) )
- # The _md5 module implements the RSA Data Security, Inc. MD5
- # Message-Digest Algorithm, described in RFC 1321. The
- # necessary files md5.c and md5.h are included here.
- exts.append( Extension('_md5',
- sources = ['md5module.c', 'md5.c'],
- depends = ['md5.h']) )
-
- min_sha2_openssl_ver = 0x00908000
- if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver:
- # OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
- exts.append( Extension('_sha256', ['sha256module.c']) )
- exts.append( Extension('_sha512', ['sha512module.c']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
--- Python-2.7.8/Modules/_hashopenssl.c.orig 2014-06-30 04:05:41.000000000 +0200
+++ Python-2.7.8/Modules/_hashopenssl.c 2014-07-14 14:21:59.546386572 +0200
@@ -36,6 +36,8 @@
#endif
/* EVP is the preferred interface to hashing in OpenSSL */
+#include <openssl/ssl.h>
+#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/err.h>
@@ -67,11 +69,19 @@
static PyTypeObject EVPtype;
+/* Struct to hold all the cached information we need on a specific algorithm.
+ We have one of these per algorithm */
+typedef struct {
+ PyObject *name_obj;
+ EVP_MD_CTX ctxs[2];
+ /* ctx_ptrs will point to ctxs unless an error occurred, when it will
+ be NULL: */
+ EVP_MD_CTX *ctx_ptrs[2];
+ PyObject *error_msgs[2];
+} EVPCachedInfo;
-#define DEFINE_CONSTS_FOR_NEW(Name) \
- static PyObject *CONST_ ## Name ## _name_obj = NULL; \
- static EVP_MD_CTX CONST_new_ ## Name ## _ctx; \
- static EVP_MD_CTX *CONST_new_ ## Name ## _ctx_p = NULL;
+#define DEFINE_CONSTS_FOR_NEW(Name) \
+ static EVPCachedInfo cached_info_ ##Name;
DEFINE_CONSTS_FOR_NEW(md5)
DEFINE_CONSTS_FOR_NEW(sha1)
@@ -117,6 +127,48 @@
}
}
+static void
+mc_ctx_init(EVP_MD_CTX *ctx, int usedforsecurity)
+{
+ EVP_MD_CTX_init(ctx);
+
+ /*
+ If the user has declared that this digest is being used in a
+ non-security role (e.g. indexing into a data structure), set
+ the exception flag for openssl to allow it
+ */
+ if (!usedforsecurity) {
+#ifdef EVP_MD_CTX_FLAG_NON_FIPS_ALLOW
+ EVP_MD_CTX_set_flags(ctx,
+ EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+#endif
+ }
+}
+
+/* Get an error msg for the last error as a PyObject */
+static PyObject *
+error_msg_for_last_error(void)
+{
+ char *errstr;
+
+ errstr = ERR_error_string(ERR_peek_last_error(), NULL);
+ ERR_clear_error();
+
+ return PyString_FromString(errstr); /* Can be NULL */
+}
+
+static void
+set_evp_exception(void)
+{
+ char *errstr;
+
+ errstr = ERR_error_string(ERR_peek_last_error(), NULL);
+ ERR_clear_error();
+
+ PyErr_SetString(PyExc_ValueError, errstr);
+}
+
+
/* Internal methods for a hash object */
static void
@@ -315,14 +367,15 @@
static int
EVP_tp_init(EVPobject *self, PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"name", "string", NULL};
+ static char *kwlist[] = {"name", "string", "usedforsecurity", NULL};
PyObject *name_obj = NULL;
+ int usedforsecurity = 1;
Py_buffer view = { 0 };
char *nameStr;
const EVP_MD *digest;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|s*:HASH", kwlist,
- &name_obj, &view)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|s*i:HASH", kwlist,
+ &name_obj, &view, &usedforsecurity)) {
return -1;
}
@@ -338,7 +391,12 @@
PyBuffer_Release(&view);
return -1;
}
- EVP_DigestInit(&self->ctx, digest);
+ mc_ctx_init(&self->ctx, usedforsecurity);
+ if (!EVP_DigestInit_ex(&self->ctx, digest, NULL)) {
+ set_evp_exception();
+ PyBuffer_Release(&view);
+ return -1;
+ }
self->name = name_obj;
Py_INCREF(self->name);
@@ -422,7 +480,8 @@
static PyObject *
EVPnew(PyObject *name_obj,
const EVP_MD *digest, const EVP_MD_CTX *initial_ctx,
- const unsigned char *cp, Py_ssize_t len)
+ const unsigned char *cp, Py_ssize_t len,
+ int usedforsecurity)
{
EVPobject *self;
@@ -437,7 +496,12 @@
if (initial_ctx) {
EVP_MD_CTX_copy(&self->ctx, initial_ctx);
} else {
- EVP_DigestInit(&self->ctx, digest);
+ mc_ctx_init(&self->ctx, usedforsecurity);
+ if (!EVP_DigestInit_ex(&self->ctx, digest, NULL)) {
+ set_evp_exception();
+ Py_DECREF(self);
+ return NULL;
+ }
}
if (cp && len) {
@@ -461,20 +525,28 @@
An optional string argument may be provided and will be\n\
automatically hashed.\n\
\n\
-The MD5 and SHA1 algorithms are always supported.\n");
+The MD5 and SHA1 algorithms are always supported.\n\
+\n\
+An optional \"usedforsecurity=True\" keyword argument is provided for use in\n\
+environments that enforce FIPS-based restrictions. Some implementations of\n\
+OpenSSL can be configured to prevent the usage of non-secure algorithms (such\n\
+as MD5). If you have a non-security use for these algorithms (e.g. a hash\n\
+table), you can override this argument by marking the callsite as\n\
+\"usedforsecurity=False\".");
static PyObject *
EVP_new(PyObject *self, PyObject *args, PyObject *kwdict)
{
- static char *kwlist[] = {"name", "string", NULL};
+ static char *kwlist[] = {"name", "string", "usedforsecurity", NULL};
PyObject *name_obj = NULL;
Py_buffer view = { 0 };
PyObject *ret_obj;
char *name;
const EVP_MD *digest;
+ int usedforsecurity = 1;
- if (!PyArg_ParseTupleAndKeywords(args, kwdict, "O|s*:new", kwlist,
- &name_obj, &view)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwdict, "O|s*i:new", kwlist,
+ &name_obj, &view, &usedforsecurity)) {
return NULL;
}
@@ -487,7 +559,7 @@
digest = EVP_get_digestbyname(name);
ret_obj = EVPnew(name_obj, digest, NULL, (unsigned char*)view.buf,
- view.len);
+ view.len, usedforsecurity);
PyBuffer_Release(&view);
return ret_obj;
@@ -713,51 +785,111 @@
/*
- * This macro generates constructor function definitions for specific
- * hash algorithms. These constructors are much faster than calling
- * the generic one passing it a python string and are noticably
- * faster than calling a python new() wrapper. Thats important for
+ * This macro and function generates a family of constructor function
+ * definitions for specific hash algorithms. These constructors are much
+ * faster than calling the generic one passing it a python string and are
+ * noticeably faster than calling a python new() wrapper. That's important for
* code that wants to make hashes of a bunch of small strings.
*/
#define GEN_CONSTRUCTOR(NAME) \
static PyObject * \
- EVP_new_ ## NAME (PyObject *self, PyObject *args) \
+ EVP_new_ ## NAME (PyObject *self, PyObject *args, PyObject *kwdict) \
{ \
- Py_buffer view = { 0 }; \
- PyObject *ret_obj; \
- \
- if (!PyArg_ParseTuple(args, "|s*:" #NAME , &view)) { \
- return NULL; \
- } \
- \
- ret_obj = EVPnew( \
- CONST_ ## NAME ## _name_obj, \
- NULL, \
- CONST_new_ ## NAME ## _ctx_p, \
- (unsigned char*)view.buf, view.len); \
- PyBuffer_Release(&view); \
- return ret_obj; \
+ return implement_specific_EVP_new(self, args, kwdict, \
+ "|s*i:" #NAME, \
+ &cached_info_ ## NAME ); \
}
+static PyObject *
+implement_specific_EVP_new(PyObject *self, PyObject *args, PyObject *kwdict,
+ const char *format,
+ EVPCachedInfo *cached_info)
+{
+ static char *kwlist[] = {"string", "usedforsecurity", NULL};
+ Py_buffer view = { 0 };
+ int usedforsecurity = 1;
+ int idx;
+ PyObject *ret_obj = NULL;
+
+ assert(cached_info);
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwdict, format, kwlist,
+ &view, &usedforsecurity)) {
+ return NULL;
+ }
+
+ idx = usedforsecurity ? 1 : 0;
+
+ /*
+ * If an error occurred during creation of the global content, the ctx_ptr
+ * will be NULL, and the error_msg will hopefully be non-NULL:
+ */
+ if (cached_info->ctx_ptrs[idx]) {
+ /* We successfully initialized this context; copy it: */
+ ret_obj = EVPnew(cached_info->name_obj,
+ NULL,
+ cached_info->ctx_ptrs[idx],
+ (unsigned char*)view.buf, view.len,
+ usedforsecurity);
+ } else {
+ /* Some kind of error happened initializing the global context for
+ this (digest, usedforsecurity) pair.
+ Raise an exception with the saved error message: */
+ if (cached_info->error_msgs[idx]) {
+ PyErr_SetObject(PyExc_ValueError, cached_info->error_msgs[idx]);
+ } else {
+ PyErr_SetString(PyExc_ValueError, "Error initializing hash");
+ }
+ }
+
+ PyBuffer_Release(&view);
+
+ return ret_obj;
+}
+
/* a PyMethodDef structure for the constructor */
#define CONSTRUCTOR_METH_DEF(NAME) \
- {"openssl_" #NAME, (PyCFunction)EVP_new_ ## NAME, METH_VARARGS, \
+ {"openssl_" #NAME, (PyCFunction)EVP_new_ ## NAME, \
+ METH_VARARGS |METH_KEYWORDS, \
PyDoc_STR("Returns a " #NAME \
" hash object; optionally initialized with a string") \
}
-/* used in the init function to setup a constructor: initialize OpenSSL
- constructor constants if they haven't been initialized already. */
-#define INIT_CONSTRUCTOR_CONSTANTS(NAME) do { \
- if (CONST_ ## NAME ## _name_obj == NULL) { \
- CONST_ ## NAME ## _name_obj = PyString_FromString(#NAME); \
- if (EVP_get_digestbyname(#NAME)) { \
- CONST_new_ ## NAME ## _ctx_p = &CONST_new_ ## NAME ## _ctx; \
- EVP_DigestInit(CONST_new_ ## NAME ## _ctx_p, EVP_get_digestbyname(#NAME)); \
- } \
- } \
+/*
+ Macro/function pair to set up the constructors.
+
+ Try to initialize a context for each hash twice, once with
+ EVP_MD_CTX_FLAG_NON_FIPS_ALLOW and once without.
+
+  Any that have errors during initialization will end up with a NULL ctx_ptrs
+  entry, and error_msgs will be set (unless we're very low on memory)
+*/
+#define INIT_CONSTRUCTOR_CONSTANTS(NAME) do { \
+ init_constructor_constant(&cached_info_ ## NAME, #NAME); \
} while (0);
+static void
+init_constructor_constant(EVPCachedInfo *cached_info, const char *name)
+{
+ assert(cached_info);
+ cached_info->name_obj = PyString_FromString(name);
+ if (EVP_get_digestbyname(name)) {
+ int i;
+ for (i=0; i<2; i++) {
+ mc_ctx_init(&cached_info->ctxs[i], i);
+ if (EVP_DigestInit_ex(&cached_info->ctxs[i],
+ EVP_get_digestbyname(name), NULL)) {
+ /* Success: */
+ cached_info->ctx_ptrs[i] = &cached_info->ctxs[i];
+ } else {
+ /* Failure: */
+ cached_info->ctx_ptrs[i] = NULL;
+ cached_info->error_msgs[i] = error_msg_for_last_error();
+ }
+ }
+ }
+}
+
GEN_CONSTRUCTOR(md5)
GEN_CONSTRUCTOR(sha1)
#ifdef _OPENSSL_SUPPORTS_SHA2
@@ -794,14 +926,11 @@
{
PyObject *m, *openssl_md_meth_names;
+ SSL_load_error_strings();
+ SSL_library_init();
OpenSSL_add_all_digests();
ERR_load_crypto_strings();
- /* TODO build EVP_functions openssl_* entries dynamically based
- * on what hashes are supported rather than listing many
- * but having some be unsupported. Only init appropriate
- * constants. */
-
Py_TYPE(&EVPtype) = &PyType_Type;
if (PyType_Ready(&EVPtype) < 0)
return;

View File

@ -1,249 +0,0 @@
diff --git a/Lib/ensurepip/__init__.py b/Lib/ensurepip/__init__.py
index 5021ebf..63c763a 100644
--- a/Lib/ensurepip/__init__.py
+++ b/Lib/ensurepip/__init__.py
@@ -7,6 +7,7 @@ import pkgutil
import shutil
import sys
import tempfile
+from ensurepip import rewheel
__all__ = ["version", "bootstrap"]
@@ -29,6 +30,8 @@ def _run_pip(args, additional_paths=None):
# Install the bundled software
import pip._internal
+ if args[0] in ["install", "list", "wheel"]:
+ args.append('--pre')
return pip._internal.main(args)
@@ -93,21 +96,40 @@ def _bootstrap(root=None, upgrade=False, user=False,
# omit pip and easy_install
os.environ["ENSUREPIP_OPTIONS"] = "install"
+ whls = []
+ rewheel_dir = None
+ # try to see if we have system-wide versions of _PROJECTS
+ dep_records = rewheel.find_system_records([p[0] for p in _PROJECTS])
+ # TODO: check if system-wide versions are the newest ones
+ # if --upgrade is used?
+ if all(dep_records):
+ # if we have all _PROJECTS installed system-wide, we'll recreate
+ # wheels from them and install those
+ rewheel_dir = tempfile.mkdtemp()
+ for dr in dep_records:
+ new_whl = rewheel.rewheel_from_record(dr, rewheel_dir)
+ whls.append(os.path.join(rewheel_dir, new_whl))
+ else:
+ # if we don't have all the _PROJECTS installed system-wide,
+ # let's just fall back to bundled wheels
+ for project, version in _PROJECTS:
+ whl = os.path.join(
+ os.path.dirname(__file__),
+ "_bundled",
+ "{}-{}-py2.py3-none-any.whl".format(project, version)
+ )
+ whls.append(whl)
+
tmpdir = tempfile.mkdtemp()
try:
# Put our bundled wheels into a temporary directory and construct the
# additional paths that need added to sys.path
additional_paths = []
- for project, version in _PROJECTS:
- wheel_name = "{}-{}-py2.py3-none-any.whl".format(project, version)
- whl = pkgutil.get_data(
- "ensurepip",
- "_bundled/{}".format(wheel_name),
- )
- with open(os.path.join(tmpdir, wheel_name), "wb") as fp:
- fp.write(whl)
-
- additional_paths.append(os.path.join(tmpdir, wheel_name))
+ for whl in whls:
+ shutil.copy(whl, tmpdir)
+ additional_paths.append(os.path.join(tmpdir, os.path.basename(whl)))
+ if rewheel_dir:
+ shutil.rmtree(rewheel_dir)
# Construct the arguments to be passed to the pip command
args = ["install", "--no-index", "--find-links", tmpdir]
diff --git a/Lib/ensurepip/rewheel/__init__.py b/Lib/ensurepip/rewheel/__init__.py
new file mode 100644
index 0000000..75c2094
--- /dev/null
+++ b/Lib/ensurepip/rewheel/__init__.py
@@ -0,0 +1,158 @@
+import argparse
+import codecs
+import csv
+import email.parser
+import os
+import io
+import re
+import site
+import subprocess
+import sys
+import zipfile
+
+def run():
+ parser = argparse.ArgumentParser(description='Recreate wheel of package with given RECORD.')
+ parser.add_argument('record_path',
+ help='Path to RECORD file')
+ parser.add_argument('-o', '--output-dir',
+ help='Dir where to place the wheel, defaults to current working dir.',
+ dest='outdir',
+ default=os.path.curdir)
+
+ ns = parser.parse_args()
+ retcode = 0
+ try:
+ print(rewheel_from_record(**vars(ns)))
+ except BaseException as e:
+ print('Failed: {}'.format(e))
+ retcode = 1
+ sys.exit(retcode)
+
+def find_system_records(projects):
+ """Return list of paths to RECORD files for system-installed projects.
+
+ If a project is not installed, the resulting list contains None instead
+ of a path to its RECORD
+ """
+ records = []
+ # get system site-packages dirs
+ if hasattr(sys, 'real_prefix'):
+ #we are in python2 virtualenv and sys.real_prefix is the original sys.prefix
+ _orig_prefixes = site.PREFIXES
+ setattr(site, 'PREFIXES', [sys.real_prefix]*2)
+ sys_sitepack = site.getsitepackages()
+ setattr(site, 'PREFIXES', _orig_prefixes)
+ elif hasattr(sys, 'base_prefix'): # python3 venv doesn't inject real_prefix to sys
+ # we are on python3 and base(_exec)_prefix is unchanged in venv
+ sys_sitepack = site.getsitepackages([sys.base_prefix, sys.base_exec_prefix])
+ else:
+ # we are in python2 without virtualenv
+ sys_sitepack = site.getsitepackages()
+
+ sys_sitepack = [sp for sp in sys_sitepack if os.path.exists(sp)]
+ # try to find all projects in all system site-packages
+ for project in projects:
+ path = None
+ for sp in sys_sitepack:
+ dist_info_re = os.path.join(sp, project) + '-[^\{0}]+\.dist-info'.format(os.sep)
+ candidates = [os.path.join(sp, p) for p in os.listdir(sp)]
+ # filter out candidate dirs based on the above regexp
+ filtered = [c for c in candidates if re.match(dist_info_re, c)]
+ # if we have 0 or 2 or more dirs, something is wrong...
+ if len(filtered) == 1:
+ path = filtered[0]
+ if path is not None:
+ records.append(os.path.join(path, 'RECORD'))
+ else:
+ records.append(None)
+ return records
+
+def rewheel_from_record(record_path, outdir):
+ """Recreates a whee of package with given record_path and returns path
+ to the newly created wheel."""
+ site_dir = os.path.dirname(os.path.dirname(record_path))
+ record_relpath = record_path[len(site_dir):].strip(os.path.sep)
+ to_write, to_omit = get_records_to_pack(site_dir, record_relpath)
+ new_wheel_name = get_wheel_name(record_path)
+ new_wheel_path = os.path.join(outdir, new_wheel_name + '.whl')
+
+ new_wheel = zipfile.ZipFile(new_wheel_path, mode='w', compression=zipfile.ZIP_DEFLATED)
+ # we need to write a new record with just the files that we will write,
+ # e.g. not binaries and *.pyc/*.pyo files
+ if sys.version_info[0] < 3:
+ new_record = io.BytesIO()
+ else:
+ new_record = io.StringIO()
+ writer = csv.writer(new_record)
+
+ # handle files that we can write straight away
+ for f, sha_hash, size in to_write:
+ new_wheel.write(os.path.join(site_dir, f), arcname=f)
+ writer.writerow([f, sha_hash,size])
+
+ # rewrite the old wheel file with a new computed one
+ writer.writerow([record_relpath, '', ''])
+ new_wheel.writestr(record_relpath, new_record.getvalue())
+
+ new_wheel.close()
+
+ return new_wheel.filename
+
+def get_wheel_name(record_path):
+ """Return proper name of the wheel, without .whl."""
+
+ wheel_info_path = os.path.join(os.path.dirname(record_path), 'WHEEL')
+ with codecs.open(wheel_info_path, encoding='utf-8') as wheel_info_file:
+ wheel_info = email.parser.Parser().parsestr(wheel_info_file.read().encode('utf-8'))
+
+ metadata_path = os.path.join(os.path.dirname(record_path), 'METADATA')
+ with codecs.open(metadata_path, encoding='utf-8') as metadata_file:
+ metadata = email.parser.Parser().parsestr(metadata_file.read().encode('utf-8'))
+
+ # construct name parts according to wheel spec
+ distribution = metadata.get('Name')
+ version = metadata.get('Version')
+ build_tag = '' # nothing for now
+ lang_tag = []
+ for t in wheel_info.get_all('Tag'):
+ lang_tag.append(t.split('-')[0])
+ lang_tag = '.'.join(lang_tag)
+ abi_tag, plat_tag = wheel_info.get('Tag').split('-')[1:3]
+ # leave out build tag, if it is empty
+ to_join = filter(None, [distribution, version, build_tag, lang_tag, abi_tag, plat_tag])
+ return '-'.join(list(to_join))
+
+def get_records_to_pack(site_dir, record_relpath):
+ """Accepts path of sitedir and path of RECORD file relative to it.
+ Returns two lists:
+ - list of files that can be written to new RECORD straight away
+ - list of files that shouldn't be written or need some processing
+ (pyc and pyo files, scripts)
+ """
+ record_file_path = os.path.join(site_dir, record_relpath)
+ with codecs.open(record_file_path, encoding='utf-8') as record_file:
+ record_contents = record_file.read()
+ # temporary fix for https://github.com/pypa/pip/issues/1376
+ # we need to ignore files under ".data" directory
+ data_dir = os.path.dirname(record_relpath).strip(os.path.sep)
+ data_dir = data_dir[:-len('dist-info')] + 'data'
+
+ to_write = []
+ to_omit = []
+ for l in record_contents.splitlines():
+ spl = l.split(',')
+ if len(spl) == 3:
+ # new record will omit (or write differently):
+ # - abs paths, paths with ".." (entry points),
+ # - pyc+pyo files
+ # - the old RECORD file
+ # TODO: is there any better way to recognize an entry point?
+ if os.path.isabs(spl[0]) or spl[0].startswith('..') or \
+ spl[0].endswith('.pyc') or spl[0].endswith('.pyo') or \
+ spl[0] == record_relpath or spl[0].startswith(data_dir):
+ to_omit.append(spl)
+ else:
+ to_write.append(spl)
+ else:
+ pass # bad RECORD or empty line
+ return to_write, to_omit
diff --git a/Makefile.pre.in b/Makefile.pre.in
index 877698c..2c43611 100644
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -1065,7 +1065,7 @@ LIBSUBDIRS= lib-tk lib-tk/test lib-tk/test/test_tkinter \
test/tracedmodules \
encodings compiler hotshot \
email email/mime email/test email/test/data \
- ensurepip ensurepip/_bundled \
+ ensurepip ensurepip/_bundled ensurepip/rewheel \
json json/tests \
sqlite3 sqlite3/test \
logging bsddb bsddb/test csv importlib wsgiref \
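In short, _bootstrap now prefers wheels recreated from system-wide RECORD files and falls back to the bundled wheels when any project is missing. A rough usage sketch of the rewheel helpers defined above, assuming pip and setuptools are installed system-wide (the project list, the version in the comment, and the temporary directory are illustrative):

import tempfile
from ensurepip import rewheel

records = rewheel.find_system_records(["pip", "setuptools"])
if all(records):  # every project has a system RECORD file
    outdir = tempfile.mkdtemp()
    for record in records:
        # prints the path of a freshly rebuilt wheel, e.g.
        # <outdir>/pip-9.0.3-py2.py3-none-any.whl (the name is built from
        # METADATA and WHEEL, per get_wheel_name above)
        print(rewheel.rewheel_from_record(record, outdir))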


@ -1,52 +0,0 @@
--- Python-2.7.15-orig/Python/pythonrun.c
+++ Python-2.7.15/Python/pythonrun.c
@@ -180,6 +182,49 @@
char buf[128];
#endif
extern void _Py_ReadyTypes(void);
+ char *py2_allow_flag = getenv("RHEL_ALLOW_PYTHON2_FOR_BUILD");
+
+ // Fail unless a specific workaround is applied
+ if ((!py2_allow_flag || strcmp(py2_allow_flag, "1") != 0)
+ && (strstr(Py_GetProgramName(), "for-tests") == NULL)
+ ) {
+ fprintf(stderr,
+ "\n"
+ "ERROR: Python 2 is disabled in RHEL8.\n"
+ "\n"
+ "- For guidance on porting to Python 3, see the\n"
+ " Conservative Python3 Porting Guide:\n"
+ " http://portingguide.readthedocs.io/\n"
+ "\n"
+ "- If you need Python 2 at runtime:\n"
+ " - Use the python27 module\n"
+ "\n"
+ "- If you do not have access to BZ#1533919:\n"
+ " - Use the python27 module\n"
+ "\n"
+ "- If you need to use Python 2 only at RPM build time:\n"
+ " - File a bug blocking BZ#1533919:\n"
+ " https://bugzilla.redhat.com/show_bug.cgi?id=1533919\n"
+ " - Set the environment variable RHEL_ALLOW_PYTHON2_FOR_BUILD=1\n"
+ " (Note that if you do not file the bug as above,\n"
+ " this workaround will break without warning in the future.)\n"
+ "\n"
+ "- If you need to use Python 2 only for tests:\n"
+ " - File a bug blocking BZ#1533919:\n"
+ " https://bugzilla.redhat.com/show_bug.cgi?id=1533919\n"
+ " (If your test tool does not have a Bugzilla component,\n"
+ " feel free to use `python2`.)\n"
+ " - Use /usr/bin/python2-for-tests instead of python2 to run\n"
+ " your tests.\n"
+ " (Note that if you do not file the bug as above,\n"
+ " this workaround will break without warning in the future.)\n"
+ "\n"
+ "For details, see https://hurl.corp.redhat.com/rhel8-py2\n"
+ "\n"
+ );
+ fflush(stderr);
+ Py_FatalError("Python 2 is disabled");
+ }
if (initialized)
return;
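The guard above checks only an environment variable and the program name, so the documented build-time workaround amounts to exporting the flag before invoking the interpreter. A minimal sketch, assuming an RHEL 8 build environment with this patch applied (the setup.py invocation is illustrative):

import os
import subprocess

env = dict(os.environ)
env["RHEL_ALLOW_PYTHON2_FOR_BUILD"] = "1"  # the opt-out named in the error text
subprocess.check_call(["python2", "setup.py", "build"], env=env)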


@ -1,21 +0,0 @@
diff -urN Python-2.7.13/Modules/Setup.dist Python-2.7.13_modul/Modules/Setup.dist
--- Python-2.7.13/Modules/Setup.dist 2017-04-21 14:57:13.767444374 +0200
+++ Python-2.7.13_modul/Modules/Setup.dist 2017-04-21 14:56:49.658953833 +0200
@@ -326,7 +326,7 @@
# every system.
# *** Always uncomment this (leave the leading underscore in!):
-_tkinter _tkinter.c tkappinit.c -DWITH_APPINIT \
+#_tkinter _tkinter.c tkappinit.c -DWITH_APPINIT \
# *** Uncomment and edit to reflect where your Tcl/Tk libraries are:
# -L/usr/local/lib \
# *** Uncomment and edit to reflect where your Tcl/Tk headers are:
@@ -345,7 +345,7 @@
# *** Uncomment and edit for TOGL extension only:
# -DWITH_TOGL togl.c \
# *** Uncomment and edit to reflect your Tcl/Tk versions:
- -ltk -ltcl \
+# -ltk -ltcl \
# *** Uncomment and edit to reflect where your X11 libraries are:
# -L/usr/X11R6/lib \
# *** Or uncomment this for Solaris:
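Commenting out the _tkinter entry removes only the static build from Modules/Setup.dist; the interpreter may still build the module dynamically via setup.py, or the distribution may ship it in a separate subpackage. A quick availability probe that assumes nothing about how the build was split:

try:
    import Tkinter  # the Python 2 module name; _tkinter is the C backend
except ImportError:
    print("Tkinter/_tkinter is not available in this interpreter")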

1392
python-gdb.py Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

40
rpminspect.yaml Normal file

@ -0,0 +1,40 @@
# exclude test XML data (not always valid) from XML validity check:
xml:
ignore:
- /usr/lib*/python*/test/xmltestdata/*
- /usr/lib*/python*/test/xmltestdata/*/*
# exclude _socket from ipv4 only functions check, it has both ipv4 and ipv6 only
badfuncs:
allowed:
/usr/lib*/python*/lib-dynload/_socket.*:
- inet_aton
- inet_ntoa
# don't report changed content of compiled files
# that is expected with every toolchain update and not reproducible yet
changedfiles:
# note that this is a posix regex, so no \d
exclude_path: (\.so(\.[0-9]+(\.[0-9]+)?)?$|^/usr/bin/python[0-9]+\.[0-9]+d?m?$)
# files change size all the time, we don't need to VERIFY it
# however, the INFO is useful, so we don't disable the check entirely
filesize:
# artificially large number, TODO a better way
size_threshold: 100000
debuginfo:
ignore:
# libpython3.so doesn't contain compiled code
- /usr/lib/debug/usr/lib*/libpython3.so*debug
# completely disabled inspections:
inspections:
# we know about our patches, no need to report anything
patches: off
# Ignore the runpath which we have enabled explicitly
# pyexpat
runpath:
ignore:
- /usr/lib*/python*/lib-dynload/pyexpat.so
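The changedfiles exclusion above is a POSIX regex, but aside from POSIX-specific character classes the same pattern works with Python's re module, which makes it easy to sanity-check. A quick sketch (the sample paths are made up):

import re

pattern = r'(\.so(\.[0-9]+(\.[0-9]+)?)?$|^/usr/bin/python[0-9]+\.[0-9]+d?m?$)'
for path in ('/usr/lib64/libpython2.7.so.1.0', '/usr/bin/python2.7'):
    # both sample paths match: the first via the .so suffix alternative,
    # the second via the /usr/bin/pythonX.Y alternative
    print('{} {}'.format(path, bool(re.search(pattern, path))))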

1
sources Normal file

@ -0,0 +1 @@
SHA512 (Python-2.7.18-noexe.tar.xz) = c74b0fcf4cf01ae33b1f9cdfc104d8679bd4718daa093db72c2664688d966ece736fd6d82c195a227d8de85bc7d9d149aaa1d52bc3dbe7d3413076adae9fb5b6
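The sources file maps the lookaside-cache tarball to its expected checksum; dist-git tooling downloads the tarball and verifies it against this digest. A minimal verification sketch, assuming the tarball sits in the working directory (the hash is abbreviated here; use the full value from the line above):

import hashlib

expected = "c74b0fcf..."  # full SHA512 from the sources line above
h = hashlib.sha512()
with open("Python-2.7.18-noexe.tar.xz", "rb") as f:
    # hash the file in 1 MiB chunks to keep memory use flat
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected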