Compare commits
No commits in common. "c8-stream-5" and "c8-stream-6" have entirely different histories.
.gitignore
@@ -1,2 +1,2 @@
-SOURCES/redis-5.0.3.tar.gz
-SOURCES/redis-doc-a1e79fc.tar.gz
+SOURCES/redis-6.2.7.tar.gz
+SOURCES/redis-doc-8d4bf9b.tar.gz
@@ -1,2 +1,2 @@
-a43c24ea6365482323b78e21752d610756efcc39 SOURCES/redis-5.0.3.tar.gz
-f2d0dc6e21bf416d4ff32868a2f0fee415391057 SOURCES/redis-doc-a1e79fc.tar.gz
+b01ef3f117c9815dea41bf2609e489a03c3a5ab1 SOURCES/redis-6.2.7.tar.gz
+45ec7c3b4a034891252507febace7e25ee64b4d9 SOURCES/redis-doc-8d4bf9b.tar.gz
@@ -1,18 +1,18 @@
-From c7958ad1c0d615b81276ec2d4dbc1bf6a67dcc4d Mon Sep 17 00:00:00 2001
+From d68953c34d4d6987883ddf6158c3c69e7500667f Mon Sep 17 00:00:00 2001
 From: Remi Collet <fedora@famillecollet.com>
 Date: Thu, 8 Sep 2016 14:51:15 +0200
-Subject: [PATCH 1/2] 1st man pageis for - redis-cli - redis-benchmark -
+Subject: [PATCH 1/3] 1st man pageis for - redis-cli - redis-benchmark -
  redis-check-aof - redis-check-rdb - redis-server - redis.conf
 
 as redis-sentinel is a symlink to redis-server, same page can be used (also symlinked)
 redis.conf can also be used for sentinel.conf
 ---
- man/man1/redis-benchmark.1 | 132 ++++++++++++++++++++++++++++++++++
- man/man1/redis-check-aof.1 | 60 ++++++++++++++++
- man/man1/redis-check-rdb.1 | 53 ++++++++++++++
- man/man1/redis-cli.1 | 171 +++++++++++++++++++++++++++++++++++++++++++++
- man/man1/redis-server.1 | 117 +++++++++++++++++++++++++++++++
- man/man5/redis.conf.5 | 57 +++++++++++++++
+ man/man1/redis-benchmark.1 | 132 ++++++++++++++++++++++++++++
+ man/man1/redis-check-aof.1 | 60 +++++++++++++
+ man/man1/redis-check-rdb.1 | 53 ++++++++++++
+ man/man1/redis-cli.1 | 171 +++++++++++++++++++++++++++++++++++++
+ man/man1/redis-server.1 | 117 +++++++++++++++++++++++++
+ man/man5/redis.conf.5 | 57 +++++++++++++
 6 files changed, 590 insertions(+)
 create mode 100644 man/man1/redis-benchmark.1
 create mode 100644 man/man1/redis-check-aof.1
@@ -648,5 +648,5 @@ index 0000000..1e0c9c9
 +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 --
-2.13.5
+2.24.1
@@ -1,26 +0,0 @@
From 992c773e70462a6fbe1536e18e673c9ab55d5901 Mon Sep 17 00:00:00 2001
From: Remi Collet <fedora@famillecollet.com>
Date: Fri, 9 Sep 2016 17:23:27 +0200
Subject: [PATCH 2/2] install redis-check-rdb as a symlink instead of duplicating
the binary

---
src/Makefile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Makefile b/src/Makefile
index fdbe36a..c3083f8 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -287,6 +287,6 @@ install: all
$(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(INSTALL_BIN)
$(REDIS_INSTALL) $(REDIS_BENCHMARK_NAME) $(INSTALL_BIN)
$(REDIS_INSTALL) $(REDIS_CLI_NAME) $(INSTALL_BIN)
- $(REDIS_INSTALL) $(REDIS_CHECK_RDB_NAME) $(INSTALL_BIN)
- $(REDIS_INSTALL) $(REDIS_CHECK_AOF_NAME) $(INSTALL_BIN)
@ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME)
+ @ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_CHECK_RDB_NAME)
+ @ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_CHECK_AOF_NAME)
--
2.13.5
@@ -1,117 +0,0 @@
From 9f13b2bd4967334b1701c6eccdf53760cb13f79e Mon Sep 17 00:00:00 2001
From: John Sully <john@csquare.ca>
Date: Thu, 14 Mar 2019 14:02:16 -0400
Subject: [PATCH] Fix hyperloglog corruption

---
src/hyperloglog.c | 6 ++++++
1 file changed, 6 insertions(+)

diff --git a/src/hyperloglog.c b/src/hyperloglog.c
index fc21ea0065d..e993bf26e1d 100644
--- a/src/hyperloglog.c
+++ b/src/hyperloglog.c
@@ -614,6 +614,10 @@ int hllSparseToDense(robj *o) {
} else {
runlen = HLL_SPARSE_VAL_LEN(p);
regval = HLL_SPARSE_VAL_VALUE(p);
+ if ((runlen + idx) > HLL_REGISTERS) {
+ sdsfree(dense);
+ return C_ERR;
+ }
while(runlen--) {
HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval);
idx++;
@@ -1088,6 +1092,8 @@ int hllMerge(uint8_t *max, robj *hll) {
} else {
runlen = HLL_SPARSE_VAL_LEN(p);
regval = HLL_SPARSE_VAL_VALUE(p);
+ if ((runlen + i) > HLL_REGISTERS)
+ return C_ERR;
while(runlen--) {
if (regval > max[i]) max[i] = regval;
i++;
From e216ceaf0e099536fe3658a29dcb725d812364e0 Mon Sep 17 00:00:00 2001
From: antirez <antirez@gmail.com>
Date: Fri, 15 Mar 2019 17:16:06 +0100
Subject: [PATCH] HyperLogLog: handle wrong offset in the base case.

---
src/hyperloglog.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/hyperloglog.c b/src/hyperloglog.c
index 526510b43b9..1e7ce3dceb7 100644
--- a/src/hyperloglog.c
+++ b/src/hyperloglog.c
@@ -614,10 +614,7 @@ int hllSparseToDense(robj *o) {
} else {
runlen = HLL_SPARSE_VAL_LEN(p);
regval = HLL_SPARSE_VAL_VALUE(p);
- if ((runlen + idx) > HLL_REGISTERS) {
- sdsfree(dense);
- return C_ERR;
- }
+ if ((runlen + idx) > HLL_REGISTERS) break; /* Overflow. */
while(runlen--) {
HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval);
idx++;
@@ -1097,8 +1094,7 @@ int hllMerge(uint8_t *max, robj *hll) {
} else {
runlen = HLL_SPARSE_VAL_LEN(p);
regval = HLL_SPARSE_VAL_VALUE(p);
- if ((runlen + i) > HLL_REGISTERS)
- return C_ERR;
+ if ((runlen + i) > HLL_REGISTERS) break; /* Overflow. */
while(runlen--) {
if (regval > max[i]) max[i] = regval;
i++;
From 4208666797b5831eefc022ae46ab5747200cd671 Mon Sep 17 00:00:00 2001
From: antirez <antirez@gmail.com>
Date: Fri, 15 Mar 2019 13:52:29 +0100
Subject: [PATCH] HyperLogLog: dense/sparse repr parsing fuzz test.

---
tests/unit/hyperloglog.tcl | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)

diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl
index 7d36b7a351f..6a9c47b11c5 100644
--- a/tests/unit/hyperloglog.tcl
+++ b/tests/unit/hyperloglog.tcl
@@ -115,6 +115,35 @@ start_server {tags {"hll"}} {
set e
} {*WRONGTYPE*}

+ test {Fuzzing dense/sparse encoding: Redis should always detect errors} {
+ for {set j 0} {$j < 10000} {incr j} {
+ r del hll
+ set items {}
+ set numitems [randomInt 3000]
+ for {set i 0} {$i < $numitems} {incr i} {
+ lappend items [expr {rand()}]
+ }
+ r pfadd hll {*}$items
+
+ # Corrupt it in some random way.
+ for {set i 0} {$i < 5} {incr i} {
+ set len [r strlen hll]
+ set pos [randomInt $len]
+ set byte [randstring 1 1 binary]
+ r setrange hll $pos $byte
+ # Don't modify more bytes 50% of times
+ if {rand() < 0.5} break
+ }
+
+ # Use the hyperloglog to check if it crashes
+ # Redis in some way.
+ catch {
+ r pfcount hll
+ r pfdebug getreg hll
+ }
+ }
+ }
+
test {PFADD, PFCOUNT, PFMERGE type checking works} {
r set foo bar
catch {r pfadd foo 1} e
@@ -1,27 +0,0 @@
From a4b90be9fcd5e1668ac941cabce3b1ab38dbe326 Mon Sep 17 00:00:00 2001
From: antirez <antirez@gmail.com>
Date: Fri, 15 Mar 2019 17:10:16 +0100
Subject: [PATCH] HyperLogLog: enlarge reghisto variable for safety.

---
src/hyperloglog.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/hyperloglog.c b/src/hyperloglog.c
index e993bf26e1d..526510b43b9 100644
--- a/src/hyperloglog.c
+++ b/src/hyperloglog.c
@@ -1017,7 +1017,12 @@ uint64_t hllCount(struct hllhdr *hdr, int *invalid) {
double m = HLL_REGISTERS;
double E;
int j;
- int reghisto[HLL_Q+2] = {0};
+ /* Note that reghisto could be just HLL_Q+1, becuase this is the
+ * maximum frequency of the "000...1" sequence the hash function is
+ * able to return. However it is slow to check for sanity of the
+ * input: instead we history array at a safe size: overflows will
+ * just write data to wrong, but correctly allocated, places. */
+ int reghisto[64] = {0};

/* Compute register histogram */
if (hdr->encoding == HLL_DENSE) {
@@ -1,120 +0,0 @@
Backported for 5.0.3



From a4b813d8b844094fcd77c511af596866043b20c8 Mon Sep 17 00:00:00 2001
From: "meir@redislabs.com" <meir@redislabs.com>
Date: Sun, 13 Jun 2021 14:27:18 +0300
Subject: [PATCH] Fix invalid memory write on lua stack overflow
{CVE-2021-32626}
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When LUA call our C code, by default, the LUA stack has room for 20
elements. In most cases, this is more than enough but sometimes it's not
and the caller must verify the LUA stack size before he pushes elements.

On 3 places in the code, there was no verification of the LUA stack size.
On specific inputs this missing verification could have lead to invalid
memory write:
1. On 'luaReplyToRedisReply', one might return a nested reply that will
explode the LUA stack.
2. On 'redisProtocolToLuaType', the Redis reply might be deep enough
to explode the LUA stack (notice that currently there is no such
command in Redis that returns such a nested reply, but modules might
do it)
3. On 'ldbRedis', one might give a command with enough arguments to
explode the LUA stack (all the arguments will be pushed to the LUA
stack)

This commit is solving all those 3 issues by calling 'lua_checkstack' and
verify that there is enough room in the LUA stack to push elements. In
case 'lua_checkstack' returns an error (there is not enough room in the
LUA stack and it's not possible to increase the stack), we will do the
following:
1. On 'luaReplyToRedisReply', we will return an error to the user.
2. On 'redisProtocolToLuaType' we will exit with panic (we assume this
scenario is rare because it can only happen with a module).
3. On 'ldbRedis', we return an error.

(cherry picked from commit d32a3f74f2a343846b50920e95754a955c1a10a9)
---
src/scripting.c | 36 ++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)

diff --git a/src/scripting.c b/src/scripting.c
index db1e4d4b5f1..153b942404e 100644
--- a/src/scripting.c
+++ b/src/scripting.c
@@ -125,6 +125,16 @@ void sha1hex(char *digest, char *script, size_t len) {
*/

char *redisProtocolToLuaType(lua_State *lua, char* reply) {
+
+ if (!lua_checkstack(lua, 5)) {
+ /*
+ * Increase the Lua stack if needed, to make sure there is enough room
+ * to push 5 elements to the stack. On failure, exit with panic.
+ * Notice that we need, in the worst case, 5 elements because redisProtocolToLuaType_Aggregate
+ * might push 5 elements to the Lua stack.*/
+ serverPanic("lua stack limit reach when parsing redis.call reply");
+ }
+
char *p = reply;

switch(*p) {
@@ -275,6 +285,17 @@ void luaSortArray(lua_State *lua) {
* ------------------------------------------------------------------------- */

void luaReplyToRedisReply(client *c, lua_State *lua) {
+
+ if (!lua_checkstack(lua, 4)) {
+ /* Increase the Lua stack if needed to make sure there is enough room
+ * to push 4 elements to the stack. On failure, return error.
+ * Notice that we need, in the worst case, 4 elements because returning a map might
+ * require push 4 elements to the Lua stack.*/
+ addReplyErrorFormat(c, "reached lua stack limit");
+ lua_pop(lua,1); // pop the element from the stack
+ return;
+ }
+
int t = lua_type(lua,-1);

switch(t) {
@@ -292,6 +313,9 @@ void luaReplyToRedisReply(client *c, lua_State *lua) {
* Error are returned as a single element table with 'err' field.
* Status replies are returned as single element table with 'ok'
* field. */
+
+ /* Handle error reply. */
+ /* we took care of the stack size on function start */
lua_pushstring(lua,"err");
lua_gettable(lua,-2);
t = lua_type(lua,-1);
@@ -320,6 +344,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) {

lua_pop(lua,1); /* Discard the 'ok' field value we popped */
while(1) {
+ /* we took care of the stack size on function start */
lua_pushnumber(lua,j++);
lua_gettable(lua,-2);
t = lua_type(lua,-1);
@@ -2231,6 +2256,17 @@ void ldbEval(lua_State *lua, sds *argv, int argc) {
void ldbRedis(lua_State *lua, sds *argv, int argc) {
int j, saved_rc = server.lua_replicate_commands;

+ if (!lua_checkstack(lua, argc + 1)) {
+ /* Increase the Lua stack if needed to make sure there is enough room
+ * to push 'argc + 1' elements to the stack. On failure, return error.
+ * Notice that we need, in worst case, 'argc + 1' elements because we push all the arguments
+ * given by the user (without the first argument) and we also push the 'redis' global table and
+ * 'redis.call' function so:
+ * (1 (redis table)) + (1 (redis.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/
+ ldbLogRedisReply("max lua stack reached");
+ return;
+ }
+
lua_getglobal(lua,"redis");
lua_pushstring(lua,"call");
lua_gettable(lua,-2); /* Stack: redis, redis.call */
@@ -1,775 +0,0 @@
Backported for 5.0.3



From 6facfb7a103b26b9a602253a738b2130afb7c5d3 Mon Sep 17 00:00:00 2001
From: Oran Agra <oran@redislabs.com>
Date: Thu, 3 Jun 2021 12:10:02 +0300
Subject: [PATCH] Fix ziplist and listpack overflows and truncations
(CVE-2021-32627, CVE-2021-32628)

- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.

(cherry picked from commit 68e221a3f98a427805d31c1760b4cdf37ba810ab)
---
src/geo.c | 5 +-
src/listpack.c | 2 +-
src/quicklist.c | 17 ++++-
src/rdb.c | 36 ++++++---
src/server.h | 2 +-
src/t_hash.c | 13 +++-
src/t_list.c | 25 ++++++
src/t_stream.c | 48 +++++++---
src/t_zset.c | 43 +++++++----
src/ziplist.c | 17 ++++-
src/ziplist.h | 1 +
tests/support/util.tcl | 21 +++++
tests/unit/violations.tcl | 156 ++++++++++++++++++++++++++++++++++++++
13 files changed, 338 insertions(+), 48 deletions(-)
create mode 100644 tests/unit/violations.tcl

diff --git a/src/geo.c b/src/geo.c
index f1d3f18d46e7..b94fcc1b3d70 100644
--- a/src/geo.c
+++ b/src/geo.c
@@ -635,7 +635,7 @@ void georadiusGeneric(client *c, int fla
robj *zobj;
zset *zs;
int i;
- size_t maxelelen = 0;
+ size_t maxelelen = 0, totelelen = 0;

if (returned_items) {
zobj = createZsetObject();
@@ -650,13 +650,14 @@ void georadiusGeneric(client *c, int fla
size_t elelen = sdslen(gp->member);

if (maxelelen < elelen) maxelelen = elelen;
+ totelelen += elelen;
znode = zslInsert(zs->zsl,score,gp->member);
serverAssert(dictAdd(zs->dict,gp->member,&znode->score) == DICT_OK);
gp->member = NULL;
}

if (returned_items) {
- zsetConvertToZiplistIfNeeded(zobj,maxelelen);
+ zsetConvertToZiplistIfNeeded(zobj,maxelelen,totelelen);
setKey(c->db,storekey,zobj);
decrRefCount(zobj);
notifyKeyspaceEvent(NOTIFY_LIST,"georadiusstore",storekey,
diff --git a/src/listpack.c b/src/listpack.c
index e1f4d9a02ee8..cd5583ccb258 100644
--- a/src/listpack.c
+++ b/src/listpack.c
@@ -283,7 +283,7 @@ int lpEncodeGetType(unsigned char *ele, uint32_t size, unsigned char *intenc, ui
} else {
if (size < 64) *enclen = 1+size;
else if (size < 4096) *enclen = 2+size;
- else *enclen = 5+size;
+ else *enclen = 5+(uint64_t)size;
return LP_ENCODING_STRING;
}
}
diff --git a/src/quicklist.c b/src/quicklist.c
index 7b5484116785..d5cc758b2fa0 100644
--- a/src/quicklist.c
+++ b/src/quicklist.c
@@ -29,6 +29,7 @@
*/

#include <string.h> /* for memcpy */
+#include "redisassert.h"
#include "quicklist.h"
#include "zmalloc.h"
#include "ziplist.h"
@@ -43,11 +44,16 @@
#define REDIS_STATIC static
#endif

-/* Optimization levels for size-based filling */
+/* Optimization levels for size-based filling.
+ * Note that the largest possible limit is 16k, so even if each record takes
+ * just one byte, it still won't overflow the 16 bit count field. */
static const size_t optimization_level[] = {4096, 8192, 16384, 32768, 65536};

/* Maximum size in bytes of any multi-element ziplist.
- * Larger values will live in their own isolated ziplists. */
+ * Larger values will live in their own isolated ziplists.
+ * This is used only if we're limited by record count. when we're limited by
+ * size, the maximum limit is bigger, but still safe.
+ * 8k is a recommended / default size limit */
#define SIZE_SAFETY_LIMIT 8192

/* Minimum ziplist size in bytes for attempting compression. */
@@ -441,6 +447,8 @@ REDIS_STATIC int _quicklistNodeAllowInsert(const quicklistNode *node,
unsigned int new_sz = node->sz + sz + ziplist_overhead;
if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(new_sz, fill)))
return 1;
+ /* when we return 1 above we know that the limit is a size limit (which is
+ * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */
else if (!sizeMeetsSafetyLimit(new_sz))
return 0;
else if ((int)node->count < fill)
@@ -460,6 +468,8 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a,
unsigned int merge_sz = a->sz + b->sz - 11;
if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(merge_sz, fill)))
return 1;
+ /* when we return 1 above we know that the limit is a size limit (which is
+ * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */
else if (!sizeMeetsSafetyLimit(merge_sz))
return 0;
else if ((int)(a->count + b->count) <= fill)
@@ -479,6 +489,7 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a,
* Returns 1 if new head created. */
int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) {
quicklistNode *orig_head = quicklist->head;
+ assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */
if (likely(
_quicklistNodeAllowInsert(quicklist->head, quicklist->fill, sz))) {
quicklist->head->zl =
@@ -502,6 +513,7 @@ int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) {
* Returns 1 if new tail created. */
int quicklistPushTail(quicklist *quicklist, void *value, size_t sz) {
quicklistNode *orig_tail = quicklist->tail;
+ assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */
if (likely(
_quicklistNodeAllowInsert(quicklist->tail, quicklist->fill, sz))) {
quicklist->tail->zl =
@@ -835,6 +847,7 @@ REDIS_STATIC void _quicklistInsert(quicklist *quicklist, quicklistEntry *entry,
int fill = quicklist->fill;
quicklistNode *node = entry->node;
quicklistNode *new_node = NULL;
+ assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */

if (!node) {
/* we have no reference node, so let's create only node in the list */
diff --git a/src/rdb.c b/src/rdb.c
index 3c58a1eaf7fb..c7dc724f3df6 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -1452,7 +1452,7 @@ robj *rdbLoadObject(int rdbtype, rio *rd
} else if (rdbtype == RDB_TYPE_ZSET_2 || rdbtype == RDB_TYPE_ZSET) {
/* Read list/set value. */
uint64_t zsetlen;
- size_t maxelelen = 0;
+ size_t maxelelen = 0, totelelen = 0;
zset *zs;

if ((zsetlen = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
@@ -1479,6 +1479,7 @@ robj *rdbLoadObject(int rdbtype, rio *rd

/* Don't care about integer-encoded strings. */
if (sdslen(sdsele) > maxelelen) maxelelen = sdslen(sdsele);
+ totelelen += sdslen(sdsele);

znode = zslInsert(zs->zsl,score,sdsele);
dictAdd(zs->dict,sdsele,&znode->score);
@@ -1486,8 +1487,11 @@ robj *rdbLoadObject(int rdbtype, rio *rd

/* Convert *after* loading, since sorted sets are not stored ordered. */
if (zsetLength(o) <= server.zset_max_ziplist_entries &&
- maxelelen <= server.zset_max_ziplist_value)
- zsetConvert(o,OBJ_ENCODING_ZIPLIST);
+ maxelelen <= server.zset_max_ziplist_value &&
+ ziplistSafeToAdd(NULL, totelelen))
+ {
+ zsetConvert(o,OBJ_ENCODING_ZIPLIST);
+ }
} else if (rdbtype == RDB_TYPE_HASH) {
uint64_t len;
int ret;
@@ -1511,21 +1515,25 @@ robj *rdbLoadObject(int rdbtype, rio *rd
if ((value = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL))
== NULL) return NULL;

- /* Add pair to ziplist */
- o->ptr = ziplistPush(o->ptr, (unsigned char*)field,
- sdslen(field), ZIPLIST_TAIL);
- o->ptr = ziplistPush(o->ptr, (unsigned char*)value,
- sdslen(value), ZIPLIST_TAIL);
-
/* Convert to hash table if size threshold is exceeded */
if (sdslen(field) > server.hash_max_ziplist_value ||
- sdslen(value) > server.hash_max_ziplist_value)
+ sdslen(value) > server.hash_max_ziplist_value ||
+ !ziplistSafeToAdd(o->ptr, sdslen(field)+sdslen(value)))
{
- sdsfree(field);
- sdsfree(value);
hashTypeConvert(o, OBJ_ENCODING_HT);
+ ret = dictAdd((dict*)o->ptr, field, value);
+ if (ret == DICT_ERR) {
+ rdbExitReportCorruptRDB("Duplicate hash fields detected");
+ }
break;
}
+
+ /* Add pair to ziplist */
+ o->ptr = ziplistPush(o->ptr, (unsigned char*)field,
+ sdslen(field), ZIPLIST_TAIL);
+ o->ptr = ziplistPush(o->ptr, (unsigned char*)value,
+ sdslen(value), ZIPLIST_TAIL);
+
sdsfree(field);
sdsfree(value);
}
@@ -1594,6 +1602,10 @@ robj *rdbLoadObject(int rdbtype, rio *rd
while ((zi = zipmapNext(zi, &fstr, &flen, &vstr, &vlen)) != NULL) {
if (flen > maxlen) maxlen = flen;
if (vlen > maxlen) maxlen = vlen;
+ if (!ziplistSafeToAdd(zl, (size_t)flen + vlen)) {
+ rdbExitReportCorruptRDB("Hash zipmap too big (%u)", flen);
+ }
+
zl = ziplistPush(zl, fstr, flen, ZIPLIST_TAIL);
zl = ziplistPush(zl, vstr, vlen, ZIPLIST_TAIL);
}
diff --git a/src/server.h b/src/server.h
index ca868939cf6d..164a82271f44 100644
--- a/src/server.h
+++ b/src/server.h
@@ -1677,7 +1677,7 @@ unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range);
unsigned char *zzlLastInRange(unsigned char *zl, zrangespec *range);
unsigned long zsetLength(const robj *zobj);
void zsetConvert(robj *zobj, int encoding);
-void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen);
+void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen);
int zsetScore(robj *zobj, sds member, double *score);
unsigned long zslGetRank(zskiplist *zsl, double score, sds o);
int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore);
diff --git a/src/t_hash.c b/src/t_hash.c
index 0ca152df78cc..109522c1322f 100644
--- a/src/t_hash.c
+++ b/src/t_hash.c
@@ -39,17 +39,22 @@
* as their string length can be queried in constant time. */
void hashTypeTryConversion(robj *o, robj **argv, int start, int end) {
int i;
+ size_t sum = 0;

if (o->encoding != OBJ_ENCODING_ZIPLIST) return;

for (i = start; i <= end; i++) {
- if (sdsEncodedObject(argv[i]) &&
- sdslen(argv[i]->ptr) > server.hash_max_ziplist_value)
- {
+ if (!sdsEncodedObject(argv[i]))
+ continue;
+ size_t len = sdslen(argv[i]->ptr);
+ if (len > server.hash_max_ziplist_value) {
hashTypeConvert(o, OBJ_ENCODING_HT);
- break;
+ return;
}
+ sum += len;
}
+ if (!ziplistSafeToAdd(o->ptr, sum))
+ hashTypeConvert(o, OBJ_ENCODING_HT);
}

/* Get the value from a ziplist encoded hash, identified by field.
diff --git a/src/t_list.c b/src/t_list.c
index de417f4705f4..67541554f616 100644
--- a/src/t_list.c
+++ b/src/t_list.c
@@ -29,6 +29,8 @@

#include "server.h"

+#define LIST_MAX_ITEM_SIZE ((1ull<<32)-1024)
+
/*-----------------------------------------------------------------------------
* List API
*----------------------------------------------------------------------------*/
@@ -196,6 +198,14 @@ void listTypeConvert(robj *subject, int enc) {

void pushGenericCommand(client *c, int where) {
int j, pushed = 0;
+
+ for (j = 2; j < c->argc; j++) {
+ if (sdslen(c->argv[j]->ptr) > LIST_MAX_ITEM_SIZE) {
+ addReplyError(c, "Element too large");
+ return;
+ }
+ }
+
robj *lobj = lookupKeyWrite(c->db,c->argv[1]);

if (lobj && lobj->type != OBJ_LIST) {
@@ -277,6 +287,11 @@ void linsertCommand(client *c) {
return;
}

+ if (sdslen(c->argv[4]->ptr) > LIST_MAX_ITEM_SIZE) {
+ addReplyError(c, "Element too large");
+ return;
+ }
+
if ((subject = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL ||
checkType(c,subject,OBJ_LIST)) return;

@@ -344,6 +359,11 @@ void lsetCommand(client *c) {
long index;
robj *value = c->argv[3];

+ if (sdslen(value->ptr) > LIST_MAX_ITEM_SIZE) {
+ addReplyError(c, "Element too large");
+ return;
+ }
+
if ((getLongFromObjectOrReply(c, c->argv[2], &index, NULL) != C_OK))
return;

@@ -493,6 +513,11 @@ void lremCommand(client *c) {
long toremove;
long removed = 0;

+ if (sdslen(obj->ptr) > LIST_MAX_ITEM_SIZE) {
+ addReplyError(c, "Element too large");
+ return;
+ }
+
if ((getLongFromObjectOrReply(c, c->argv[2], &toremove, NULL) != C_OK))
return;

diff --git a/src/t_stream.c b/src/t_stream.c
index d7754985dd03..e7263d68a28f 100644
--- a/src/t_stream.c
+++ b/src/t_stream.c
@@ -40,6 +40,12 @@
#define STREAM_ITEM_FLAG_DELETED (1<<0) /* Entry is delted. Skip it. */
#define STREAM_ITEM_FLAG_SAMEFIELDS (1<<1) /* Same fields as master entry. */

+/* Don't let listpacks grow too big, even if the user config allows it.
+ * doing so can lead to an overflow (trying to store more than 32bit length
+ * into the listpack header), or actually an assertion since lpInsert
+ * will return NULL. */
+#define STREAM_LISTPACK_MAX_SIZE (1<<30)
+
void streamFreeCG(streamCG *cg);
void streamFreeNACK(streamNACK *na);
size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer);
@@ -170,12 +176,31 @@ int streamCompareID(streamID *a, streamI
*
* The function returns C_OK if the item was added, this is always true
* if the ID was generated by the function. However the function may return
- * C_ERR if an ID was given via 'use_id', but adding it failed since the
- * current top ID is greater or equal. */
+ * C_ERR in several cases:
+ * 1. If an ID was given via 'use_id', but adding it failed since the
+ * current top ID is greater or equal. errno will be set to EDOM.
+ * 2. If a size of a single element or the sum of the elements is too big to
+ * be stored into the stream. errno will be set to ERANGE. */
int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_id, streamID *use_id) {
/* If an ID was given, check that it's greater than the last entry ID
* or return an error. */
- if (use_id && streamCompareID(use_id,&s->last_id) <= 0) return C_ERR;
+ if (use_id && streamCompareID(use_id,&s->last_id) <= 0) {
+ errno = EDOM;
+ return C_ERR;
+ }
+
+ /* Avoid overflow when trying to add an element to the stream (listpack
+ * can only host up to 32bit length sttrings, and also a total listpack size
+ * can't be bigger than 32bit length. */
+ size_t totelelen = 0;
+ for (int64_t i = 0; i < numfields*2; i++) {
+ sds ele = argv[i]->ptr;
+ totelelen += sdslen(ele);
+ }
+ if (totelelen > STREAM_LISTPACK_MAX_SIZE) {
+ errno = ERANGE;
+ return C_ERR;
+ }

/* Add the new entry. */
raxIterator ri;
@@ -241,9 +266,10 @@ int streamAppendItem(stream *s, robj **a
* if we need to switch to the next one. 'lp' will be set to NULL if
* the current node is full. */
if (lp != NULL) {
- if (server.stream_node_max_bytes &&
- lp_bytes > server.stream_node_max_bytes)
- {
+ size_t node_max_bytes = server.stream_node_max_bytes;
+ if (node_max_bytes == 0 || node_max_bytes > STREAM_LISTPACK_MAX_SIZE)
+ node_max_bytes = STREAM_LISTPACK_MAX_SIZE;
+ if (lp_bytes + totelelen >= node_max_bytes) {
lp = NULL;
} else if (server.stream_node_max_entries) {
int64_t count = lpGetInteger(lpFirst(lp));
@@ -1224,11 +1250,13 @@ void xaddCommand(client *c) {

/* Append using the low level function and return the ID. */
if (streamAppendItem(s,c->argv+field_pos,(c->argc-field_pos)/2,
- &id, id_given ? &id : NULL)
- == C_ERR)
+ &id, id_given ? &id : NULL) == C_ERR)
{
- addReplyError(c,"The ID specified in XADD is equal or smaller than the "
- "target stream top item");
+ if (errno == EDOM)
+ addReplyError(c,"The ID specified in XADD is equal or smaller than "
+ "the target stream top item");
+ else
+ addReplyError(c,"Elements are too large to be stored");
return;
}
addReplyStreamID(c,&id);
diff --git a/src/t_zset.c b/src/t_zset.c
index 56ea39607b52..989d5855e1ea 100644
--- a/src/t_zset.c
+++ b/src/t_zset.c
@@ -1237,15 +1237,18 @@ void zsetConvert(robj *zobj, int encodin
}

/* Convert the sorted set object into a ziplist if it is not already a ziplist
- * and if the number of elements and the maximum element size is within the
- * expected ranges. */
-void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen) {
+ * and if the number of elements and the maximum element size and total elements size
+ * are within the expected ranges. */
+void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen) {
if (zobj->encoding == OBJ_ENCODING_ZIPLIST) return;
zset *zset = zobj->ptr;

if (zset->zsl->length <= server.zset_max_ziplist_entries &&
- maxelelen <= server.zset_max_ziplist_value)
- zsetConvert(zobj,OBJ_ENCODING_ZIPLIST);
+ maxelelen <= server.zset_max_ziplist_value &&
+ ziplistSafeToAdd(NULL, totelelen))
+ {
+ zsetConvert(zobj,OBJ_ENCODING_ZIPLIST);
+ }
}

/* Return (by reference) the score of the specified member of the sorted set
@@ -1354,21 +1357,28 @@ int zsetAdd(robj *zobj, double score, sd
}
return 1;
} else if (!xx) {
- /* Optimize: check if the element is too large or the list
+ /* check if the element is too large or the list
* becomes too long *before* executing zzlInsert. */
- zobj->ptr = zzlInsert(zobj->ptr,ele,score);
- if (zzlLength(zobj->ptr) > server.zset_max_ziplist_entries)
- zsetConvert(zobj,OBJ_ENCODING_SKIPLIST);
- if (sdslen(ele) > server.zset_max_ziplist_value)
+ if (zzlLength(zobj->ptr)+1 > server.zset_max_ziplist_entries ||
+ sdslen(ele) > server.zset_max_ziplist_value ||
+ !ziplistSafeToAdd(zobj->ptr, sdslen(ele)))
+ {
zsetConvert(zobj,OBJ_ENCODING_SKIPLIST);
- if (newscore) *newscore = score;
- *flags |= ZADD_ADDED;
- return 1;
+ } else {
+ zobj->ptr = zzlInsert(zobj->ptr,ele,score);
+ if (newscore) *newscore = score;
+ *flags |= ZADD_ADDED;
+ return 1;
+ }
} else {
*flags |= ZADD_NOP;
return 1;
}
- } else if (zobj->encoding == OBJ_ENCODING_SKIPLIST) {
+ }
+
+ /* Note that the above block handling ziplist would have either returned or
+ * converted the key to skiplist. */
+ if (zobj->encoding == OBJ_ENCODING_SKIPLIST) {
zset *zs = zobj->ptr;
zskiplistNode *znode;
dictEntry *de;
@@ -2180,7 +2190,7 @@ void zunionInterGenericCommand(client *c
zsetopsrc *src;
zsetopval zval;
sds tmp;
- size_t maxelelen = 0;
+ size_t maxelelen = 0, totelelen = 0;
robj *dstobj;
zset *dstzset;
zskiplistNode *znode;
@@ -2304,6 +2314,7 @@ void zunionInterGenericCommand(client *c
tmp = zuiNewSdsFromValue(&zval);
znode = zslInsert(dstzset->zsl,score,tmp);
dictAdd(dstzset->dict,tmp,&znode->score);
+ totelelen += sdslen(tmp);
if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp);
}
}
@@ -2340,6 +2351,7 @@ void zunionInterGenericCommand(client *c
/* Remember the longest single element encountered,
* to understand if it's possible to convert to ziplist
* at the end. */
+ totelelen += sdslen(tmp);
if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp);
/* Update the element with its initial score. */
dictSetKey(accumulator, de, tmp);
@@ -2380,7 +2392,7 @@ void zunionInterGenericCommand(client *c
if (dbDelete(c->db,dstkey))
touched = 1;
if (dstzset->zsl->length) {
- zsetConvertToZiplistIfNeeded(dstobj,maxelelen);
+ zsetConvertToZiplistIfNeeded(dstobj,maxelelen,totelelen);
dbAdd(c->db,dstkey,dstobj);
addReplyLongLong(c,zsetLength(dstobj));
signalModifiedKey(c->db,dstkey);
diff --git a/src/ziplist.c b/src/ziplist.c
index dbd804b11dfc..1a8566698972 100644
--- a/src/ziplist.c
+++ b/src/ziplist.c
@@ -265,6 +265,17 @@
ZIPLIST_LENGTH(zl) = intrev16ifbe(intrev16ifbe(ZIPLIST_LENGTH(zl))+incr); \
}

+/* Don't let ziplists grow over 1GB in any case, don't wanna risk overflow in
+ * zlbytes*/
+#define ZIPLIST_MAX_SAFETY_SIZE (1<<30)
+int ziplistSafeToAdd(unsigned char* zl, size_t add) {
+ size_t len = zl? ziplistBlobLen(zl): 0;
+ if (len + add > ZIPLIST_MAX_SAFETY_SIZE)
+ return 0;
+ return 1;
+}
+
+
/* We use this function to receive information about a ziplist entry.
* Note that this is not how the data is actually encoded, is just what we
* get filled by a function in order to operate more easily. */
@@ -586,7 +597,8 @@ unsigned char *ziplistNew(void) {
}

/* Resize the ziplist. */
-unsigned char *ziplistResize(unsigned char *zl, unsigned int len) {
+unsigned char *ziplistResize(unsigned char *zl, size_t len) {
+ assert(len < UINT32_MAX);
zl = zrealloc(zl,len);
ZIPLIST_BYTES(zl) = intrev32ifbe(len);
zl[len-1] = ZIP_END;
@@ -898,6 +910,9 @@ unsigned char *ziplistMerge(unsigned char **first, unsigned char **second) {
/* Combined zl length should be limited within UINT16_MAX */
zllength = zllength < UINT16_MAX ? zllength : UINT16_MAX;

+ /* larger values can't be stored into ZIPLIST_BYTES */
+ assert(zlbytes < UINT32_MAX);
+
/* Save offset positions before we start ripping memory apart. */
size_t first_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*first));
size_t second_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*second));
diff --git a/src/ziplist.h b/src/ziplist.h
index 964a47f6dc29..f6ba6c8be47d 100644
--- a/src/ziplist.h
+++ b/src/ziplist.h
@@ -49,6 +49,7 @@ unsigned char *ziplistFind(unsigned char *p, unsigned char *vstr, unsigned int v
unsigned int ziplistLen(unsigned char *zl);
size_t ziplistBlobLen(unsigned char *zl);
void ziplistRepr(unsigned char *zl);
+int ziplistSafeToAdd(unsigned char* zl, size_t add);

#ifdef REDIS_TEST
int ziplistTest(int argc, char *argv[]);
diff --git a/tests/support/util.tcl b/tests/support/util.tcl
index 74f491e483a5..46b56cc2822a 100644
--- a/tests/support/util.tcl
+++ b/tests/support/util.tcl
@@ -99,6 +99,27 @@ proc wait_for_ofs_sync {r1 r2} {
}
}

+# count current log lines in server's stdout
+proc count_log_lines {srv_idx} {
+ set _ [string trim [exec wc -l < [srv $srv_idx stdout]]]
+}
+
+# returns the number of times a line with that pattern appears in a file
+proc count_message_lines {file pattern} {
+ set res 0
+ # exec fails when grep exists with status other than 0 (when the patter wasn't found)
+ catch {
+ set res [string trim [exec grep $pattern $file 2> /dev/null | wc -l]]
+ }
+ return $res
+}
+
+# returns the number of times a line with that pattern appears in the log
+proc count_log_message {srv_idx pattern} {
+ set stdout [srv $srv_idx stdout]
+ return [count_message_lines $stdout $pattern]
+}
+
# Random integer between 0 and max (excluded).
proc randomInt {max} {
expr {int(rand()*$max)}
diff --git a/tests/unit/violations.tcl b/tests/unit/violations.tcl
new file mode 100644
index 000000000000..d87b9236528e
--- /dev/null
+++ b/tests/unit/violations.tcl
@@ -0,0 +1,156 @@
+# These tests consume massive amounts of memory, and are not
+# suitable to be executed as part of the normal test suite
+set ::str500 [string repeat x 500000000] ;# 500mb
+
+# Utility function to write big argument into redis client connection
+proc write_big_bulk {size} {
+ r write "\$$size\r\n"
+ while {$size >= 500000000} {
+ r write $::str500
+ incr size -500000000
+ }
+ if {$size > 0} {
+ r write [string repeat x $size]
+ }
+ r write "\r\n"
+}
+
+# One XADD with one huge 5GB field
+# Expected to fail resulting in an empty stream
+start_server [list overrides [list save ""] ] {
+ test {XADD one huge field} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n"
+ r write "\$1\r\nA\r\n"
+ write_big_bulk 5000000000 ;#5gb
+ r flush
+ catch {r read} err
+ assert_match {*too large*} $err
+ r xlen S1
+ } {0}
+}
+
+# One XADD with one huge (exactly nearly) 4GB field
+# This uncovers the overflow in lpEncodeGetType
+# Expected to fail resulting in an empty stream
+start_server [list overrides [list save ""] ] {
+ test {XADD one huge field - 1} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n"
+ r write "\$1\r\nA\r\n"
+ write_big_bulk 4294967295 ;#4gb-1
+ r flush
+ catch {r read} err
+ assert_match {*too large*} $err
+ r xlen S1
+ } {0}
+}
+
+# Gradually add big stream fields using repeated XADD calls
+start_server [list overrides [list save ""] ] {
+ test {several XADD big fields} {
+ r config set stream-node-max-bytes 0
+ for {set j 0} {$j<10} {incr j} {
+ r xadd stream * 1 $::str500 2 $::str500
+ }
+ r ping
+ r xlen stream
+ } {10}
+}
+
+# Add over 4GB to a single stream listpack (one XADD command)
+# Expected to fail resulting in an empty stream
+start_server [list overrides [list save ""] ] {
+ test {single XADD big fields} {
+ r write "*23\r\n\$4\r\nXADD\r\n\$1\r\nS\r\n\$1\r\n*\r\n"
+ for {set j 0} {$j<10} {incr j} {
+ r write "\$1\r\n$j\r\n"
+ write_big_bulk 500000000 ;#500mb
+ }
+ r flush
+ catch {r read} err
+ assert_match {*too large*} $err
+ r xlen S
+ } {0}
+}
+
+# Gradually add big hash fields using repeated HSET calls
+# This reproduces the overflow in the call to ziplistResize
+# Object will be converted to hashtable encoding
+start_server [list overrides [list save ""] ] {
+ r config set hash-max-ziplist-value 1000000000 ;#1gb
+ test {hash with many big fields} {
+ for {set j 0} {$j<10} {incr j} {
+ r hset h $j $::str500
+ }
+ r object encoding h
+ } {hashtable}
+}
+
+# Add over 4GB to a single hash field (one HSET command)
+# Object will be converted to hashtable encoding
+start_server [list overrides [list save ""] ] {
+ test {hash with one huge field} {
+ catch {r config set hash-max-ziplist-value 10000000000} ;#10gb
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*4\r\n\$4\r\nHSET\r\n\$2\r\nH1\r\n"
+ r write "\$1\r\nA\r\n"
+ write_big_bulk 5000000000 ;#5gb
+ r flush
+ r read
+ r object encoding H1
+ } {hashtable}
+}
+
+# Add over 4GB to a single list member (one LPUSH command)
+# Currently unsupported, and expected to fail rather than being truncated
+# Expected to fail resulting in a non-existing list
+start_server [list overrides [list save ""] ] {
+ test {list with one huge field} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*3\r\n\$5\r\nLPUSH\r\n\$2\r\nL1\r\n"
+ write_big_bulk 5000000000 ;#5gb
+ r flush
+ catch {r read} err
+ assert_match {*too large*} $err
+ r exists L1
+ } {0}
+}
+
+# SORT which attempts to store an element larger than 4GB into a list.
+# Currently unsupported and results in an assertion instead of truncation
+start_server [list overrides [list save ""] ] {
+ test {SORT adds huge field to list} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*3\r\n\$3\r\nSET\r\n\$2\r\nS1\r\n"
+ write_big_bulk 5000000000 ;#5gb
+ r flush
+ r read
+ assert_equal [r strlen S1] 5000000000
+ r set S2 asdf
+ r sadd myset 1 2
+ r mset D1 1 D2 2
+ catch {r sort myset by D* get S* store mylist}
+ # assert_equal [count_log_message 0 "crashed by signal"] 0 - not suitable for 6.0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+# SORT which stores an integer encoded element into a list.
+# Just for coverage, no news here.
+start_server [list overrides [list save ""] ] {
+ test {SORT adds integer field to list} {
+ r set S1 asdf
+ r set S2 123 ;# integer encoded
+ assert_encoding "int" S2
+ r sadd myset 1 2
+ r mset D1 1 D2 2
+ r sort myset by D* get S* store mylist
+ r llen mylist
+ } {2}
+}
@@ -1,69 +0,0 @@
From 71be97294abf3657710a044157ebbc8a21489da3 Mon Sep 17 00:00:00 2001
From: Oran Agra <oran@redislabs.com>
Date: Wed, 9 Jun 2021 17:31:39 +0300
Subject: [PATCH] Prevent unauthenticated client from easily consuming lots of
memory (CVE-2021-32675)

This change sets a low limit for multibulk and bulk length in the
protocol for unauthenticated connections, so that they can't easily
cause redis to allocate massive amounts of memory by sending just a few
characters on the network.
The new limits are 10 arguments of 16kb each (instead of 1m of 512mb)

(cherry picked from commit 3d221e81f3b680543e34942579af190b049ff283)
---
src/networking.c | 8 ++++++++
tests/unit/auth.tcl | 16 ++++++++++++++++
2 files changed, 24 insertions(+)

diff --git a/src/networking.c b/src/networking.c
index bfaded9b4d0..2b8588094d2 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -1309,6 +1309,10 @@ int processMultibulkBuffer(client *c) {
addReplyError(c,"Protocol error: invalid multibulk length");
setProtocolError("invalid mbulk count",c);
return C_ERR;
+ } else if (ll > 10 && server.requirepass && !c->authenticated) {
+ addReplyError(c, "Protocol error: unauthenticated multibulk length");
+ setProtocolError("unauth mbulk count", c);
+ return C_ERR;
}

c->qb_pos = (newline-c->querybuf)+2;
@@ -1354,6 +1358,10 @@ int processMultibulkBuffer(client *c) {
addReplyError(c,"Protocol error: invalid bulk length");
setProtocolError("invalid bulk length",c);
return C_ERR;
+ } else if (ll > 16384 && server.requirepass && !c->authenticated) {
+ addReplyError(c, "Protocol error: unauthenticated bulk length");
+ setProtocolError("unauth bulk length", c);
+ return C_ERR;
}

c->qb_pos = newline-c->querybuf+2;
diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl
index 633cda95c92..f5da728e845 100644
--- a/tests/unit/auth.tcl
+++ b/tests/unit/auth.tcl
@@ -24,4 +24,20 @@ start_server {tags {"auth"} overrides {requirepass foobar}} {
r set foo 100
r incr foo
} {101}
+
+ test {For unauthenticated clients multibulk and bulk length are limited} {
+ set rr [redis [srv "host"] [srv "port"] 0]
+ $rr write "*100\r\n"
+ $rr flush
+ catch {[$rr read]} e
+ assert_match {*unauthenticated multibulk length*} $e
+ $rr close
+
+ set rr [redis [srv "host"] [srv "port"] 0]
+ $rr write "*1\r\n\$100000000\r\n"
+ $rr flush
+ catch {[$rr read]} e
+ assert_match {*unauthenticated bulk length*} $e
+ $rr close
+ }
}
@@ -1,73 +0,0 @@
Backported for 5.0.3



From c043ba77cf9bbf73e964fd9b8681c0cc4bd2662e Mon Sep 17 00:00:00 2001
From: Oran Agra <oran@redislabs.com>
Date: Sun, 26 Sep 2021 15:42:17 +0300
Subject: [PATCH] Fix Integer overflow issue with intsets (CVE-2021-32687)

The vulnerability involves changing the default set-max-intset-entries
configuration parameter to a very large value and constructing specially
crafted commands to manipulate sets

(cherry picked from commit 4cb7075edaaf0584c74eb080d838ca8f56c190e3)
---
 src/intset.c | 4 +++-
 src/rdb.c    | 4 +++-
 src/t_set.c  | 5 ++++-
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/intset.c b/src/intset.c
index 4445a5ca6c56..288e19adff18 100644
--- a/src/intset.c
+++ b/src/intset.c
@@ -34,6 +34,7 @@
 #include "intset.h"
 #include "zmalloc.h"
 #include "endianconv.h"
+#include "redisassert.h"
 
 /* Note that these encodings are ordered, so:
  * INTSET_ENC_INT16 < INTSET_ENC_INT32 < INTSET_ENC_INT64. */
@@ -103,7 +104,8 @@ intset *intsetNew(void) {
 
 /* Resize the intset */
 static intset *intsetResize(intset *is, uint32_t len) {
-    uint32_t size = len*intrev32ifbe(is->encoding);
+    uint64_t size = (uint64_t)len*intrev32ifbe(is->encoding);
+    assert(size <= SIZE_MAX - sizeof(intset));
     is = zrealloc(is,sizeof(intset)+size);
     return is;
 }
diff --git a/src/rdb.c b/src/rdb.c
index afbbd8ca450c..3c58a1eaf7fb 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -1411,7 +1411,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) {
         if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
 
         /* Use a regular set when there are too many entries. */
-        if (len > server.set_max_intset_entries) {
+        size_t max_entries = server.set_max_intset_entries;
+        if (max_entries >= 1<<30) max_entries = 1<<30;
+        if (len > max_entries) {
             o = createSetObject();
             /* It's faster to expand the dict to the right size asap in order
              * to avoid rehashing */
diff --git a/src/t_set.c b/src/t_set.c
index f67073fe6bb1..db5a8cb757bb 100644
--- a/src/t_set.c
+++ b/src/t_set.c
@@ -66,7 +66,10 @@ int setTypeAdd(robj *subject, sds value) {
         if (success) {
             /* Convert to regular set when the intset contains
              * too many entries. */
-            if (intsetLen(subject->ptr) > server.set_max_intset_entries)
+            size_t max_entries = server.set_max_intset_entries;
+            /* limit to 1G entries due to intset internals. */
+            if (max_entries >= 1<<30) max_entries = 1<<30;
+            if (intsetLen(subject->ptr) > max_entries)
                 setTypeConvert(subject,OBJ_ENCODING_HT);
             return 1;
         }
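For context on the intset fix above: multiplying a 32-bit length by the per-element encoding width wraps silently once the product exceeds UINT32_MAX, so the unpatched intsetResize() could ask zrealloc() for far less memory than the set actually needs. A standalone sketch of the widen-then-assert pattern the patch uses (not Redis code: intset_hdr and safe_resize_bytes are made-up stand-ins for the real struct and helpers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the intset header; the real layout lives in src/intset.h. */
typedef struct { uint32_t encoding, length; } intset_hdr;

/* Do the multiply in 64 bits, then refuse any size that cannot fit in a
 * size_t together with the header, as the patched intsetResize() does. */
static uint64_t safe_resize_bytes(uint32_t len, uint32_t encoding) {
    uint64_t size = (uint64_t)len * encoding;
    assert(size <= SIZE_MAX - sizeof(intset_hdr));
    return sizeof(intset_hdr) + size;
}

int main(void) {
    uint32_t len = 0x20000001, enc = 8;      /* > 512M entries, 8 bytes each  */
    uint32_t wrapped = len * enc;            /* 32-bit multiply wraps to 8    */
    uint64_t widened = (uint64_t)len * enc;  /* what the patched code computes */
    printf("wrapped=%u widened=%llu bytes=%llu\n",
           wrapped,
           (unsigned long long)widened,
           (unsigned long long)safe_resize_bytes(len, enc));
    return 0;
}

On a 64-bit build the widened product passes the assert; on a 32-bit build the assert aborts instead of letting the wrapped value reach the allocator, which is the behaviour the patch adds.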
@@ -1,94 +0,0 @@
Backported for 5.0.3



From 48f04a82a0ac542341fb644a4cfbebadd5c59a33 Mon Sep 17 00:00:00 2001
From: Yossi Gottlieb <yossigo@gmail.com>
Date: Mon, 22 Feb 2021 15:41:32 +0200
Subject: [PATCH] Fix integer overflow (CVE-2021-21309). (#8522)

On 32-bit systems, setting the proto-max-bulk-len config parameter to a high value may result with integer overflow and a subsequent heap overflow when parsing an input bulk (CVE-2021-21309).

This fix has two parts:

Set a reasonable limit to the config parameter.
Add additional checks to prevent the problem in other potential but unknown code paths.

(cherry picked from commit d32f2e9999ce003bad0bd2c3bca29f64dcce4433)

Fix MSVR reported issue.
---
 src/config.c  | 16 ++++++++--------
 src/sds.c     |  3 +++
 src/zmalloc.c | 10 ++++++++++
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/src/sds.c b/src/sds.c
index cd60946bdd32..12c9da356d9b 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -96,6 +96,7 @@ sds sdsnewlen(const void *init, size_t initlen) {
     int hdrlen = sdsHdrSize(type);
     unsigned char *fp; /* flags pointer. */
 
+    assert(hdrlen+initlen+1 > initlen); /* Catch size_t overflow */
     sh = s_malloc(hdrlen+initlen+1);
     if (init==SDS_NOINIT)
         init = NULL;
@@ -214,6 +215,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
     len = sdslen(s);
     sh = (char*)s-sdsHdrSize(oldtype);
     newlen = (len+addlen);
+    assert(newlen > len); /* Catch size_t overflow */
     if (newlen < SDS_MAX_PREALLOC)
         newlen *= 2;
     else
@@ -227,6 +229,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
     if (type == SDS_TYPE_5) type = SDS_TYPE_8;
 
     hdrlen = sdsHdrSize(type);
+    assert(hdrlen+newlen+1 > len); /* Catch size_t overflow */
     if (oldtype==type) {
         newsh = s_realloc(sh, hdrlen+newlen+1);
         if (newsh == NULL) return NULL;

From 2b0ac7427ba5a6e1bc89380e960b138af893bbdd Mon Sep 17 00:00:00 2001
From: YiyuanGUO <yguoaz@gmail.com>
Date: Wed, 29 Sep 2021 10:20:35 +0300
Subject: [PATCH] Fix integer overflow in _sdsMakeRoomFor (CVE-2021-41099)

---
 src/sds.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/sds.c b/src/sds.c
index 12c9da356d9b..73d9807ae3c0 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -205,7 +205,7 @@ void sdsclear(sds s) {
 sds sdsMakeRoomFor(sds s, size_t addlen) {
     void *sh, *newsh;
     size_t avail = sdsavail(s);
-    size_t len, newlen;
+    size_t len, newlen, reqlen;
     char type, oldtype = s[-1] & SDS_TYPE_MASK;
     int hdrlen;
 
@@ -214,7 +214,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
 
     len = sdslen(s);
     sh = (char*)s-sdsHdrSize(oldtype);
-    newlen = (len+addlen);
+    reqlen = newlen = (len+addlen);
     assert(newlen > len); /* Catch size_t overflow */
     if (newlen < SDS_MAX_PREALLOC)
         newlen *= 2;
@@ -229,7 +229,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
     if (type == SDS_TYPE_5) type = SDS_TYPE_8;
 
     hdrlen = sdsHdrSize(type);
-    assert(hdrlen+newlen+1 > len); /* Catch size_t overflow */
+    assert(hdrlen + newlen + 1 > reqlen); /* Catch size_t overflow */
     if (oldtype==type) {
         newsh = s_realloc(sh, hdrlen+newlen+1);
         if (newsh == NULL) return NULL;
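All of the sds hunks above rely on the same idea: the lengths are unsigned, so a wrapped addition is detectable because the result ends up smaller than one of its operands. A minimal standalone illustration (hypothetical helper name, not the real sds API, with a made-up preallocation cap standing in for SDS_MAX_PREALLOC):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Overflow-checked "make room" in the spirit of the patched sdsMakeRoomFor():
 * reqlen remembers the exact requested length so the final check is done
 * against it even after newlen has been rounded up for preallocation. */
static size_t checked_alloc_len(size_t len, size_t addlen, size_t hdrlen) {
    size_t reqlen, newlen;
    reqlen = newlen = len + addlen;
    assert(newlen > len);                   /* catch size_t overflow */
    if (newlen < 1024 * 1024) newlen *= 2;  /* stand-in for SDS_MAX_PREALLOC */
    assert(hdrlen + newlen + 1 > reqlen);   /* catch overflow after rounding */
    return hdrlen + newlen + 1;
}

int main(void) {
    /* A well-behaved request. */
    printf("%zu\n", checked_alloc_len(10, 20, 3));
    /* A CVE-style request: addlen close to SIZE_MAX makes len + addlen wrap;
     * the first assert aborts instead of under-allocating. */
    /* checked_alloc_len(10, (size_t)-1, 3); */
    return 0;
}

The CVE-2021-41099 follow-up makes the final assert compare against the originally requested length (reqlen) rather than the old string length, since newlen may have been doubled, and potentially wrapped, by the preallocation step.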
117 SOURCES/redis-config.patch (new normal file)
@@ -0,0 +1,117 @@
Revert to 6.0.8 behavior to save configuration file
to fix "CONFIG REWRITE" when using /etc/redis.conf
as new behavior expect a writable directory

Revert: 90555566ed5cbd3e1c3df1293ba3bbf6098e34c3

See discussion about this breaking change in
https://github.com/redis/redis/issues/8051

diff -up ./src/config.c.rev ./src/config.c
--- ./src/config.c.rev 2022-05-09 14:48:31.118296748 +0200
+++ ./src/config.c     2022-05-09 14:48:41.571163767 +0200
@@ -1605,62 +1605,60 @@ void rewriteConfigRemoveOrphaned(struct
     dictReleaseIterator(di);
 }
 
-/* This function replaces the old configuration file with the new content
- * in an atomic manner.
+/* This function overwrites the old configuration file with the new content.
+ *
+ * 1) The old file length is obtained.
+ * 2) If the new content is smaller, padding is added.
+ * 3) A single write(2) call is used to replace the content of the file.
+ * 4) Later the file is truncated to the length of the new content.
+ *
+ * This way we are sure the file is left in a consistent state even if the
+ * process is stopped between any of the four operations.
  *
  * The function returns 0 on success, otherwise -1 is returned and errno
- * is set accordingly. */
+ * set accordingly. */
 int rewriteConfigOverwriteFile(char *configfile, sds content) {
-    int fd = -1;
-    int retval = -1;
-    char tmp_conffile[PATH_MAX];
-    const char *tmp_suffix = ".XXXXXX";
-    size_t offset = 0;
-    ssize_t written_bytes = 0;
-
-    int tmp_path_len = snprintf(tmp_conffile, sizeof(tmp_conffile), "%s%s", configfile, tmp_suffix);
-    if (tmp_path_len <= 0 || (unsigned int)tmp_path_len >= sizeof(tmp_conffile)) {
-        serverLog(LL_WARNING, "Config file full path is too long");
-        errno = ENAMETOOLONG;
-        return retval;
+    int retval = 0;
+    int fd = open(configfile,O_RDWR|O_CREAT,0644);
+    int content_size = sdslen(content), padding = 0;
+    struct stat sb;
+    sds content_padded;
+
+    /* 1) Open the old file (or create a new one if it does not
+     * exist), get the size. */
+    if (fd == -1) return -1; /* errno set by open(). */
+    if (fstat(fd,&sb) == -1) {
+        close(fd);
+        return -1; /* errno set by fstat(). */
     }
 
-#ifdef _GNU_SOURCE
-    fd = mkostemp(tmp_conffile, O_CLOEXEC);
-#else
-    /* There's a theoretical chance here to leak the FD if a module thread forks & execv in the middle */
-    fd = mkstemp(tmp_conffile);
-#endif
-
-    if (fd == -1) {
-        serverLog(LL_WARNING, "Could not create tmp config file (%s)", strerror(errno));
-        return retval;
+    /* 2) Pad the content at least match the old file size. */
+    content_padded = sdsdup(content);
+    if (content_size < sb.st_size) {
+        /* If the old file was bigger, pad the content with
+         * a newline plus as many "#" chars as required. */
+        padding = sb.st_size - content_size;
+        content_padded = sdsgrowzero(content_padded,sb.st_size);
+        content_padded[content_size] = '\n';
+        memset(content_padded+content_size+1,'#',padding-1);
     }
 
-    while (offset < sdslen(content)) {
-         written_bytes = write(fd, content + offset, sdslen(content) - offset);
-         if (written_bytes <= 0) {
-             if (errno == EINTR) continue; /* FD is blocking, no other retryable errors */
-             serverLog(LL_WARNING, "Failed after writing (%zd) bytes to tmp config file (%s)", offset, strerror(errno));
-             goto cleanup;
-         }
-         offset+=written_bytes;
+    /* 3) Write the new content using a single write(2). */
+    if (write(fd,content_padded,strlen(content_padded)) == -1) {
+        retval = -1;
+        goto cleanup;
     }
 
-    if (fsync(fd))
-        serverLog(LL_WARNING, "Could not sync tmp config file to disk (%s)", strerror(errno));
-    else if (fchmod(fd, 0644 & ~server.umask) == -1)
-        serverLog(LL_WARNING, "Could not chmod config file (%s)", strerror(errno));
-    else if (rename(tmp_conffile, configfile) == -1)
-        serverLog(LL_WARNING, "Could not rename tmp config file (%s)", strerror(errno));
-    else {
-        retval = 0;
-        serverLog(LL_DEBUG, "Rewritten config file (%s) successfully", configfile);
-    }
+    /* 4) Truncate the file to the right length if we used padding. */
+    if (padding) {
+        if (ftruncate(fd,content_size) == -1) {
+            /* Non critical error... */
+        }
+    }
 
 cleanup:
+    sdsfree(content_padded);
     close(fd);
-    if (retval) unlink(tmp_conffile);
     return retval;
 }
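The reverted rewriteConfigOverwriteFile() above rewrites the existing config file in place rather than writing a temporary file and rename()-ing it over /etc/redis.conf, so CONFIG REWRITE keeps working when the configuration lives in a directory the redis user cannot write to (only the file itself needs to be writable). A rough standalone sketch of the same pad / single-write / truncate sequence, using plain POSIX calls instead of the Redis sds and logging helpers:

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* In-place overwrite following the steps documented in the patch:
 * stat the old file, pad the new content with '\n' plus '#' filler if the
 * old file was longer, replace the content with one write(2), then truncate
 * back to the real length. The file never disappears, so an interruption
 * at any point still leaves a parseable config behind. */
int overwrite_in_place(const char *path, const char *content, size_t len) {
    int retval = 0;
    int fd = open(path, O_RDWR | O_CREAT, 0644);
    struct stat sb;
    if (fd == -1) return -1;
    if (fstat(fd, &sb) == -1) { close(fd); return -1; }

    size_t total = (size_t)sb.st_size > len ? (size_t)sb.st_size : len;
    char *buf = malloc(total);
    if (buf == NULL) { close(fd); return -1; }
    memcpy(buf, content, len);
    if (total > len) {              /* old file was longer: pad with a comment */
        buf[len] = '\n';
        memset(buf + len + 1, '#', total - len - 1);
    }

    if (write(fd, buf, total) != (ssize_t)total) retval = -1;
    else if (total > len && ftruncate(fd, len) == -1) retval = -1;

    free(buf);
    close(fd);
    return retval;
}

int main(void) {
    const char *demo = "maxmemory 256mb\nsave \"\"\n";
    return overwrite_in_place("/tmp/redis-demo.conf", demo, strlen(demo)) == 0 ? 0 : 1;
}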
@@ -1,7 +1,14 @@
 # If you need to change max open file limit
 # for example, when you change maxclient in configuration
-# you can change the LimitNOFILE value below
-# see "man systemd.exec" for information
+# you can change the LimitNOFILE value below.
+# See "man systemd.exec" for more information.
 
+# Slave nodes on large system may take lot of time to start.
+# You may need to uncomment TimeoutStartSec and TimeoutStopSec
+# directives below and raise their value.
+# See "man systemd.service" for more information.
+
 [Service]
 LimitNOFILE=10240
+#TimeoutStartSec=90s
+#TimeoutStopSec=90s
@@ -3,7 +3,7 @@ Description=Redis Sentinel
 After=network.target
 
 [Service]
-ExecStart=/usr/bin/redis-sentinel /etc/redis-sentinel.conf --supervised systemd
+ExecStart=/usr/bin/redis-sentinel /etc/redis-sentinel.conf --daemonize no --supervised systemd
 ExecStop=/usr/libexec/redis-shutdown redis-sentinel
 Type=notify
 User=redis
@@ -3,7 +3,7 @@ Description=Redis persistent key-value database
 After=network.target
 
 [Service]
-ExecStart=/usr/bin/redis-server /etc/redis.conf --supervised systemd
+ExecStart=/usr/bin/redis-server /etc/redis.conf --daemonize no --supervised systemd
 ExecStop=/usr/libexec/redis-shutdown
 Type=notify
 User=redis
@@ -8,25 +8,25 @@
 #
 
 # Tests fail in mock, not in local build.
-%global with_tests 0%{?_with_tests:1}
+%bcond_with tests
 
 # Commit IDs for the (unversioned) redis-doc repository
 # https://fedoraproject.org/wiki/Packaging:SourceURL "Commit Revision"
-%global doc_commit a1e79fc9b2f42f04a8ab59c05c3228931adcd0a6
+%global doc_commit 8d4bf9bc476829a84a055c049be72634d6e938df
 %global short_doc_commit %(c=%{doc_commit}; echo ${c:0:7})
 
 # %%{rpmmacrodir} not usable on EL-6
 %global macrosdir %(d=%{_rpmconfigdir}/macros.d; [ -d $d ] || d=%{_sysconfdir}/rpm; echo $d)
 
 Name: redis
-Version: 5.0.3
-Release: 5%{?dist}
+Version: 6.2.7
+Release: 1%{?dist}
 Summary: A persistent key-value database
 # redis, jemalloc, linenoise, lzf, hiredis are BSD
 # lua is MIT
 License: BSD and MIT
-URL: http://redis.io
-Source0: http://download.redis.io/releases/%{name}-%{version}.tar.gz
+URL: https://redis.io
+Source0: https://download.redis.io/releases/%{name}-%{version}.tar.gz
 Source1: %{name}.logrotate
 Source2: %{name}-sentinel.service
 Source3: %{name}.service
@@ -46,23 +46,19 @@ Source10: https://github.com/antirez/%{name}-doc/archive/%{doc_commit}/
 # Update configuration for Fedora
 # https://github.com/antirez/redis/pull/3491 - man pages
 Patch0001: 0001-1st-man-pageis-for-redis-cli-redis-benchmark-redis-c.patch
-# https://github.com/antirez/redis/pull/3494 - symlink
-Patch0002: 0002-install-redis-check-rdb-as-a-symlink-instead-of-dupl.patch
+# revert BC break
+Patch0003: redis-config.patch
 
 # Security patches
-Patch100: redis-CVE-2019-10192.patch
-Patch101: redis-CVE-2019-10193.patch
-Patch102: redis-CVE-2021-41099.patch
-Patch103: redis-CVE-2021-32687.patch
-Patch104: redis-CVE-2021-32626.patch
-Patch105: redis-CVE-2021-32627.patch
-Patch106: redis-CVE-2021-32675.patch
 
-%if 0%{?with_tests}
+BuildRequires: gcc
+%if %{with tests}
 BuildRequires: procps-ng
 BuildRequires: tcl
 %endif
-BuildRequires: systemd
+BuildRequires: pkgconfig(libsystemd)
+BuildRequires: systemd-devel
+BuildRequires: openssl-devel
 # Required for redis-shutdown
 Requires: /bin/awk
 Requires: logrotate
@@ -70,10 +66,14 @@ Requires(pre): shadow-utils
 Requires(post): systemd
 Requires(preun): systemd
 Requires(postun): systemd
-Provides: bundled(hiredis)
-Provides: bundled(lua-libs)
-Provides: bundled(linenoise)
-Provides: bundled(jemalloc) = 4.0.3
+# from deps/hiredis/hiredis.h
+Provides: bundled(hiredis) = 1.0.0
+# from deps/jemalloc/VERSION
+Provides: bundled(jemalloc) = 5.1.0
+# from deps/lua/src/lua.h
+Provides: bundled(lua-libs) = 5.1.5
+# from deps/linenoise/linenoise.h
+Provides: bundled(linenoise) = 1.0
 Provides: bundled(lzf)
 
 %global redis_modules_abi 1
@@ -132,23 +132,15 @@ administration and development.
 %setup -q
 mv ../%{name}-doc-%{doc_commit} doc
 %patch0001 -p1
-%patch0002 -p1
-
-%patch100 -p1 -b .cve-2019-10192
-%patch101 -p1 -b .cve-2019-10193
-%patch102 -p1 -b .cve-2021-41099
-%patch103 -p1 -b .cve-2021-32687
-%patch104 -p1 -b .cve-2021-32626
-%patch105 -p1 -b .cve-2021-32627
-%patch106 -p1 -b .cve-2021-32675
+%patch0003 -p1 -b .rev
 
 mv deps/lua/COPYRIGHT COPYRIGHT-lua
 mv deps/jemalloc/COPYING COPYING-jemalloc
 mv deps/hiredis/COPYING COPYING-hiredis
 
-# Configuration file changes and additions
+# Configuration file changes
 sed -i -e 's|^logfile .*$|logfile /var/log/redis/redis.log|g' redis.conf
-sed -i -e '$ alogfile /var/log/redis/sentinel.log' sentinel.conf
+sed -i -e 's|^logfile .*$|logfile /var/log/redis/sentinel.log|g' sentinel.conf
 sed -i -e 's|^dir .*$|dir /var/lib/redis|g' redis.conf
 
 # Module API version safety check
@@ -159,7 +151,7 @@ if test "$api" != "%{redis_modules_abi}"; then
 exit 1
 fi
 
-%global make_flags DEBUG="" V="echo" LDFLAGS="%{?__global_ldflags}" CFLAGS+="%{optflags} -fPIC" INSTALL="install -p" PREFIX=%{buildroot}%{_prefix}
+%global make_flags DEBUG="" V="echo" LDFLAGS="%{?__global_ldflags}" CFLAGS+="%{optflags} -fPIC" INSTALL="install -p" PREFIX=%{buildroot}%{_prefix} BUILD_WITH_SYSTEMD=yes BUILD_TLS=yes
 
 %build
 make %{?_smp_mflags} %{make_flags} all
@@ -221,7 +213,7 @@ mkdir -p %{buildroot}%{macrosdir}
 install -pDm644 %{S:9} %{buildroot}%{macrosdir}/macros.%{name}
 
 %check
-%if 0%{?with_tests}
+%if %{with tests}
 # https://github.com/antirez/redis/issues/1417 (for "taskset -c 1")
 taskset -c 1 make %{make_flags} test
 make %{make_flags} test-sentinel
@@ -276,21 +268,27 @@ exit 0
 %dir %attr(0755, redis, redis) %ghost %{_localstatedir}/run/%{name}
 
 %files devel
+# main package is not required
 %license COPYING
 %{_includedir}/%{name}module.h
 %{macrosdir}/*
 
 %files doc
+# main package is not required
+%license COPYING
 %docdir %{_docdir}/%{name}
 %{_docdir}/%{name}
 
 
 %changelog
-* Mon Oct 11 2021 Remi Collet <rcollet@redhat.com> - 5.0.3-5
+* Mon May 9 2022 Remi Collet <rcollet@redhat.com> - 6.2.7-1
+- rebase to 6.2.7 #1999873
+
+* Mon Oct 11 2021 Remi Collet <rcollet@redhat.com> - 6.0.9-5
 - fix denial of service via Redis Standard Protocol (RESP) request
 CVE-2021-32675
 
-* Thu Oct 7 2021 Remi Collet <rcollet@redhat.com> - 5.0.3-4
+* Fri Oct 8 2021 Remi Collet <rcollet@redhat.com> - 6.0.9-4
 - fix lua scripts can overflow the heap-based Lua stack
 CVE-2021-32626
 - fix integer overflow issue with Streams
@@ -302,6 +300,20 @@ exit 0
 - fix integer overflow issue with strings
 CVE-2021-41099
 
+* Wed May 12 2021 Remi Collet <rcollet@redhat.com> - 6.0.9-3
+- fix integer overflow via STRALGO LCS command
+CVE-2021-29477
+
+* Tue Nov 24 2020 Remi Collet <rcollet@redhat.com> - 6.0.9-2
+- revert "simplify config rewrite file" and keep
+configuration in /etc
+
+* Thu Oct 29 2020 Remi Collet <rcollet@redhat.com> - 6.0.9-1
+- update to 6.0.9
+
+* Tue Oct 20 2020 Remi Collet <rcollet@redhat.com> - 6.0.8-1
+- update to 6.0.8 for new stream #1862063
+
 * Thu Jul 11 2019 Remi Collet <rcollet@redhat.com> - 5.0.3-2
 - fix Heap buffer overflow in HyperLogLog triggered by malicious client
 CVE-2019-10192