Compare commits


No commits in common. "stream-postgresql-15-rhel-8.10.0" and "c8-stream-10" have entirely different histories.

29 changed files with 3395 additions and 1684 deletions

.gitignore vendored Executable file → Normal file (21 lines changed)

@ -1,17 +1,4 @@
-/postgresql-15.2.tar.bz2
-/postgresql-15.2.tar.bz2.sha256
-/postgresql-setup-8.8.tar.gz
-/postgresql-15.2-US.pdf
-/postgresql-13.7.tar.bz2
-/postgresql-13.7.tar.bz2.sha256
-/postgresql-15.3.tar.bz2
-/postgresql-15.3.tar.bz2.sha256
-/postgresql-15.3-US.pdf
-/postgresql-15.5.tar.bz2
-/postgresql-15.5.tar.bz2.sha256
-/postgresql-15.5-US.pdf
-/postgresql-15.8.tar.bz2
-/postgresql-15.8.tar.bz2.sha256
-/postgresql-15.8-US.pdf
-/postgresql-13.16.tar.bz2
-/postgresql-13.16.tar.bz2.sha256
+SOURCES/postgresql-10.23-US.pdf
+SOURCES/postgresql-10.23.tar.bz2
+SOURCES/postgresql-9.2.24.tar.bz2
+SOURCES/postgresql-setup-8.7.tar.gz

.postgresql.metadata Normal file (4 lines changed)

@ -0,0 +1,4 @@
a416c245ff0815fbde534bc49b0a07ffdd373894 SOURCES/postgresql-10.23-US.pdf
2df7b4b3751112f3cb543c3ea81e45531bebc7a1 SOURCES/postgresql-10.23.tar.bz2
63d6966ccdbab6aae1f9754fdb8e341ada1ef653 SOURCES/postgresql-9.2.24.tar.bz2
fb97095dc9648f9c31d58fcb406831da5e419ddf SOURCES/postgresql-setup-8.7.tar.gz

Makefile.regress → SOURCES/Makefile.regress Executable file → Normal file (4 lines changed)

@ -47,9 +47,7 @@ installcheck-parallel: cleandirs
 cleandirs:
 	-rm -rf testtablespace results
 	mkdir testtablespace results
-	if test -x /usr/bin/chcon && ! test -f /.dockerenv; then \
-		/usr/bin/chcon -u system_u -r object_r -t postgresql_db_t testtablespace results ; \
-	fi
+	[ -x /usr/bin/chcon ] && /usr/bin/chcon -u system_u -r object_r -t postgresql_db_t testtablespace results
 # old interfaces follow...

View File

@ -0,0 +1,13 @@
diff --git a/contrib/dblink/expected/dblink.out b/contrib/dblink/expected/dblink.out
index 6ceabb453c..6516d4f131 100644
--- a/contrib/dblink/expected/dblink.out
+++ b/contrib/dblink/expected/dblink.out
@@ -879,7 +879,7 @@ $d$;
CREATE USER MAPPING FOR public SERVER fdtest
OPTIONS (server 'localhost'); -- fail, can't specify server here
ERROR: invalid option "server"
-HINT: Valid options in this context are: user, password
+HINT: Valid options in this context are: user, password, sslpassword
CREATE USER MAPPING FOR public SERVER fdtest OPTIONS (user :'USER');
GRANT USAGE ON FOREIGN SERVER fdtest TO regress_dblink_user;
GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO regress_dblink_user;

View File

@ -0,0 +1,249 @@
From 681d9e4621aac0a9c71364b6f54f00f6d8c4337f Mon Sep 17 00:00:00 2001
From 8d525d7b9545884a3e0d79adcd61543f9ae2ae28 Mon Sep 17 00:00:00 2001
From: Noah Misch <noah@leadboat.com>
Date: Mon, 8 May 2023 06:14:07 -0700
Subject: Replace last PushOverrideSearchPath() call with
set_config_option().
The two methods don't cooperate, so set_config_option("search_path",
...) has been ineffective under non-empty overrideStack. This defect
enabled an attacker having database-level CREATE privilege to execute
arbitrary code as the bootstrap superuser. While that particular attack
requires v13+ for the trusted extension attribute, other attacks are
feasible in all supported versions.
Standardize on the combination of NewGUCNestLevel() and
set_config_option("search_path", ...). It is newer than
PushOverrideSearchPath(), more-prevalent, and has no known
disadvantages. The "override" mechanism remains for now, for
compatibility with out-of-tree code. Users should update such code,
which likely suffers from the same sort of vulnerability closed here.
Back-patch to v11 (all supported versions).
Alexander Lakhin. Reported by Alexander Lakhin.
Security: CVE-2023-2454
---
contrib/seg/Makefile | 2 +-
contrib/seg/expected/security.out | 32 ++++++++++++++++++
contrib/seg/sql/security.sql | 32 ++++++++++++++++++
src/backend/catalog/namespace.c | 4 +++
src/backend/commands/schemacmds.c | 37 ++++++++++++++------
src/test/regress/expected/namespace.out | 45 +++++++++++++++++++++++++
src/test/regress/sql/namespace.sql | 24 +++++++++++++
7 files changed, 165 insertions(+), 11 deletions(-)
create mode 100644 contrib/seg/expected/security.out
create mode 100644 contrib/seg/sql/security.sql
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 14e57adee2..73ddb67882 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -3515,6 +3515,10 @@ OverrideSearchPathMatchesCurrent(OverrideSearchPath *path)
/*
* PushOverrideSearchPath - temporarily override the search path
*
+ * Do not use this function; almost any usage introduces a security
+ * vulnerability. It exists for the benefit of legacy code running in
+ * non-security-sensitive environments.
+ *
* We allow nested overrides, hence the push/pop terminology. The GUC
* search_path variable is ignored while an override is active.
*
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 48590247f8..b6a71154a8 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -30,6 +30,7 @@
#include "commands/schemacmds.h"
#include "miscadmin.h"
#include "parser/parse_utilcmd.h"
+#include "parser/scansup.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/builtins.h"
@@ -53,14 +54,16 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString,
{
const char *schemaName = stmt->schemaname;
Oid namespaceId;
- OverrideSearchPath *overridePath;
List *parsetree_list;
ListCell *parsetree_item;
Oid owner_uid;
Oid saved_uid;
int save_sec_context;
+ int save_nestlevel;
+ char *nsp = namespace_search_path;
AclResult aclresult;
ObjectAddress address;
+ StringInfoData pathbuf;
GetUserIdAndSecContext(&saved_uid, &save_sec_context);
@@ -153,14 +156,26 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString,
CommandCounterIncrement();
/*
- * Temporarily make the new namespace be the front of the search path, as
- * well as the default creation target namespace. This will be undone at
- * the end of this routine, or upon error.
+ * Prepend the new schema to the current search path.
+ *
+ * We use the equivalent of a function SET option to allow the setting to
+ * persist for exactly the duration of the schema creation. guc.c also
+ * takes care of undoing the setting on error.
*/
- overridePath = GetOverrideSearchPath(CurrentMemoryContext);
- overridePath->schemas = lcons_oid(namespaceId, overridePath->schemas);
- /* XXX should we clear overridePath->useTemp? */
- PushOverrideSearchPath(overridePath);
+ save_nestlevel = NewGUCNestLevel();
+
+ initStringInfo(&pathbuf);
+ appendStringInfoString(&pathbuf, quote_identifier(schemaName));
+
+ while (scanner_isspace(*nsp))
+ nsp++;
+
+ if (*nsp != '\0')
+ appendStringInfo(&pathbuf, ", %s", nsp);
+
+ (void) set_config_option("search_path", pathbuf.data,
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0, false);
/*
* Report the new schema to possibly interested event triggers. Note we
@@ -215,8 +230,10 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString,
CommandCounterIncrement();
}
- /* Reset search path to normal state */
- PopOverrideSearchPath();
+ /*
+ * Restore the GUC variable search_path we set above.
+ */
+ AtEOXact_GUC(true, save_nestlevel);
/* Reset current user and security context */
SetUserIdAndSecContext(saved_uid, save_sec_context);
diff --git a/src/test/regress/expected/namespace.out b/src/test/regress/expected/namespace.out
index 2564d1b080..a62fd8ded0 100644
--- a/src/test/regress/expected/namespace.out
+++ b/src/test/regress/expected/namespace.out
@@ -1,6 +1,14 @@
--
-- Regression tests for schemas (namespaces)
--
+-- set the whitespace-only search_path to test that the
+-- GUC list syntax is preserved during a schema creation
+SELECT pg_catalog.set_config('search_path', ' ', false);
+ set_config
+------------
+
+(1 row)
+
CREATE SCHEMA test_schema_1
CREATE UNIQUE INDEX abc_a_idx ON abc (a)
CREATE VIEW abc_view AS
@@ -9,6 +17,43 @@ CREATE SCHEMA test_schema_1
a serial,
b int UNIQUE
);
+-- verify that the correct search_path restored on abort
+SET search_path to public;
+BEGIN;
+SET search_path to public, test_schema_1;
+CREATE SCHEMA test_schema_2
+ CREATE VIEW abc_view AS SELECT c FROM abc;
+ERROR: column "c" does not exist
+LINE 2: CREATE VIEW abc_view AS SELECT c FROM abc;
+ ^
+COMMIT;
+SHOW search_path;
+ search_path
+-------------
+ public
+(1 row)
+
+-- verify that the correct search_path preserved
+-- after creating the schema and on commit
+BEGIN;
+SET search_path to public, test_schema_1;
+CREATE SCHEMA test_schema_2
+ CREATE VIEW abc_view AS SELECT a FROM abc;
+SHOW search_path;
+ search_path
+-----------------------
+ public, test_schema_1
+(1 row)
+
+COMMIT;
+SHOW search_path;
+ search_path
+-----------------------
+ public, test_schema_1
+(1 row)
+
+DROP SCHEMA test_schema_2 CASCADE;
+NOTICE: drop cascades to view test_schema_2.abc_view
-- verify that the objects were created
SELECT COUNT(*) FROM pg_class WHERE relnamespace =
(SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_1');
diff --git a/src/test/regress/sql/namespace.sql b/src/test/regress/sql/namespace.sql
index 6b12c96193..3474f5ecf4 100644
--- a/src/test/regress/sql/namespace.sql
+++ b/src/test/regress/sql/namespace.sql
@@ -2,6 +2,10 @@
-- Regression tests for schemas (namespaces)
--
+-- set the whitespace-only search_path to test that the
+-- GUC list syntax is preserved during a schema creation
+SELECT pg_catalog.set_config('search_path', ' ', false);
+
CREATE SCHEMA test_schema_1
CREATE UNIQUE INDEX abc_a_idx ON abc (a)
@@ -13,6 +17,26 @@ CREATE SCHEMA test_schema_1
b int UNIQUE
);
+-- verify that the correct search_path restored on abort
+SET search_path to public;
+BEGIN;
+SET search_path to public, test_schema_1;
+CREATE SCHEMA test_schema_2
+ CREATE VIEW abc_view AS SELECT c FROM abc;
+COMMIT;
+SHOW search_path;
+
+-- verify that the correct search_path preserved
+-- after creating the schema and on commit
+BEGIN;
+SET search_path to public, test_schema_1;
+CREATE SCHEMA test_schema_2
+ CREATE VIEW abc_view AS SELECT a FROM abc;
+SHOW search_path;
+COMMIT;
+SHOW search_path;
+DROP SCHEMA test_schema_2 CASCADE;
+
-- verify that the objects were created
SELECT COUNT(*) FROM pg_class WHERE relnamespace =
(SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_1');
diff --git a/contrib/sepgsql/expected/ddl.out b/contrib/sepgsql/expected/ddl.out
index e8da587564..15d2b9c5e7 100644
--- a/contrib/sepgsql/expected/ddl.out
+++ b/contrib/sepgsql/expected/ddl.out
@@ -24,7 +24,6 @@ LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_reg
CREATE USER regress_sepgsql_test_user;
CREATE SCHEMA regtest_schema;
LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
-LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
GRANT ALL ON SCHEMA regtest_schema TO regress_sepgsql_test_user;
SET search_path = regtest_schema, public;
CREATE TABLE regtest_table (x serial primary key, y text);
--
2.41.0
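
The core of the backport above is the GUC save/set/restore idiom: open a nest level with NewGUCNestLevel(), set search_path through set_config_option() with GUC_ACTION_SAVE, and undo it with AtEOXact_GUC(). A minimal backend-style sketch of that idiom follows; it is not part of the packaged patch, the function name is made up for illustration, and it assumes the PostgreSQL backend headers and the same call signatures used in the hunks above.

/* Illustrative sketch only (not from the packaged patch): the GUC
 * save/set/restore idiom adopted by the CVE-2023-2454 fix.  Assumes
 * PostgreSQL backend headers; run_with_schema_first_in_path() is a
 * hypothetical caller standing in for CreateSchemaCommand().
 */
#include "postgres.h"
#include "lib/stringinfo.h"
#include "utils/builtins.h"	/* quote_identifier() */
#include "utils/guc.h"		/* NewGUCNestLevel(), set_config_option(), AtEOXact_GUC() */

static void
run_with_schema_first_in_path(const char *schemaName)
{
	/* Open a GUC nest level; anything set with GUC_ACTION_SAVE after this
	 * point is undone by AtEOXact_GUC() below, or automatically on error. */
	int			save_nestlevel = NewGUCNestLevel();
	StringInfoData pathbuf;

	initStringInfo(&pathbuf);
	appendStringInfoString(&pathbuf, quote_identifier(schemaName));

	/* Unlike PushOverrideSearchPath(), this goes through the regular GUC
	 * machinery, so it cooperates with other set_config_option() callers. */
	(void) set_config_option("search_path", pathbuf.data,
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0, false);

	/* ... do the work that must see the new search_path first ... */

	/* Restore only what was changed since NewGUCNestLevel(). */
	AtEOXact_GUC(true, save_nestlevel);
}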

View File

@ -0,0 +1,114 @@
From ca73753b090c33bc69ce299b4d7fff891a77b8ad Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Mon, 8 May 2023 10:12:44 -0400
Subject: Handle RLS dependencies in inlined set-returning
functions properly.
If an SRF in the FROM clause references a table having row-level
security policies, and we inline that SRF into the calling query,
we neglected to mark the plan as potentially dependent on which
role is executing it. This could lead to later executions in the
same session returning or hiding rows that should have been hidden
or returned instead.
Our thanks to Wolfgang Walther for reporting this problem.
Stephen Frost and Tom Lane
Security: CVE-2023-2455
---
src/backend/optimizer/util/clauses.c | 7 ++++++
src/test/regress/expected/rowsecurity.out | 27 +++++++++++++++++++++++
src/test/regress/sql/rowsecurity.sql | 20 +++++++++++++++++
3 files changed, 54 insertions(+)
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index a9c7bc342e..11269fee3e 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -5205,6 +5205,13 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
*/
record_plan_function_dependency(root, func_oid);
+ /*
+ * We must also notice if the inserted query adds a dependency on the
+ * calling role due to RLS quals.
+ */
+ if (querytree->hasRowSecurity)
+ root->glob->dependsOnRole = true;
+
return querytree;
/* Here if func is not inlinable: release temp memory and return NULL */
diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
index 38f53ed486..e278346420 100644
--- a/src/test/regress/expected/rowsecurity.out
+++ b/src/test/regress/expected/rowsecurity.out
@@ -4427,6 +4427,33 @@ SELECT * FROM rls_tbl;
DROP TABLE rls_tbl;
RESET SESSION AUTHORIZATION;
+-- CVE-2023-2455: inlining an SRF may introduce an RLS dependency
+create table rls_t (c text);
+insert into rls_t values ('invisible to bob');
+alter table rls_t enable row level security;
+grant select on rls_t to regress_rls_alice, regress_rls_bob;
+create policy p1 on rls_t for select to regress_rls_alice using (true);
+create policy p2 on rls_t for select to regress_rls_bob using (false);
+create function rls_f () returns setof rls_t
+ stable language sql
+ as $$ select * from rls_t $$;
+prepare q as select current_user, * from rls_f();
+set role regress_rls_alice;
+execute q;
+ current_user | c
+-------------------+------------------
+ regress_rls_alice | invisible to bob
+(1 row)
+
+set role regress_rls_bob;
+execute q;
+ current_user | c
+--------------+---
+(0 rows)
+
+RESET ROLE;
+DROP FUNCTION rls_f();
+DROP TABLE rls_t;
--
-- Clean up objects
--
diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql
index 0fd0cded7d..3d664538a6 100644
--- a/src/test/regress/sql/rowsecurity.sql
+++ b/src/test/regress/sql/rowsecurity.sql
@@ -2127,6 +2127,26 @@ SELECT * FROM rls_tbl;
DROP TABLE rls_tbl;
RESET SESSION AUTHORIZATION;
+-- CVE-2023-2455: inlining an SRF may introduce an RLS dependency
+create table rls_t (c text);
+insert into rls_t values ('invisible to bob');
+alter table rls_t enable row level security;
+grant select on rls_t to regress_rls_alice, regress_rls_bob;
+create policy p1 on rls_t for select to regress_rls_alice using (true);
+create policy p2 on rls_t for select to regress_rls_bob using (false);
+create function rls_f () returns setof rls_t
+ stable language sql
+ as $$ select * from rls_t $$;
+prepare q as select current_user, * from rls_f();
+set role regress_rls_alice;
+execute q;
+set role regress_rls_bob;
+execute q;
+
+RESET ROLE;
+DROP FUNCTION rls_f();
+DROP TABLE rls_t;
+
--
-- Clean up objects
--
--
2.41.0

View File

@ -0,0 +1,576 @@
From d267cea24ea346c739c85bf7bccbd8e8f59da6b3 Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Mon, 6 Nov 2023 10:56:43 -0500
Subject: [PATCH 1/1] Detect integer overflow while computing new array
dimensions.
array_set_element() and related functions allow an array to be
enlarged by assigning to subscripts outside the current array bounds.
While these places were careful to check that the new bounds are
allowable, they neglected to consider the risk of integer overflow
in computing the new bounds. In edge cases, we could compute new
bounds that are invalid but get past the subsequent checks,
allowing bad things to happen. Memory stomps that are potentially
exploitable for arbitrary code execution are possible, and so is
disclosure of server memory.
To fix, perform the hazardous computations using overflow-detecting
arithmetic routines, which fortunately exist in all still-supported
branches.
The test cases added for this generate (after patching) errors that
mention the value of MaxArraySize, which is platform-dependent.
Rather than introduce multiple expected-files, use psql's VERBOSITY
parameter to suppress the printing of the message text. v11 psql
lacks that parameter, so omit the tests in that branch.
Our thanks to Pedro Gallegos for reporting this problem.
Security: CVE-2023-5869
Sign-Off-By: Tianyue Lan <tianyue.lan@oracle.com>
---
src/backend/utils/adt/arrayfuncs.c | 85 ++++++++++++++++++++++------
src/backend/utils/adt/arrayutils.c | 6 --
src/include/utils/array.h | 7 +++
src/test/regress/expected/arrays.out | 17 ++++++
src/test/regress/sql/arrays.sql | 19 +++++++
src/include/common/int.h | 273 +++++++++++++++++++++++++++++++++++++++
create mode 100644 src/include/common/int.h
6 files changed, 383 insertions(+), 24 deletions(-)
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 553c517..7363893 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -22,6 +22,7 @@
#include "access/htup_details.h"
#include "catalog/pg_type.h"
+#include "common/int.h"
#include "funcapi.h"
#include "libpq/pqformat.h"
#include "utils/array.h"
@@ -2309,22 +2310,38 @@ array_set_element(Datum arraydatum,
addedbefore = addedafter = 0;
/*
- * Check subscripts
+ * Check subscripts. We assume the existing subscripts passed
+ * ArrayCheckBounds, so that dim[i] + lb[i] can be computed without
+ * overflow. But we must beware of other overflows in our calculations of
+ * new dim[] values.
*/
if (ndim == 1)
{
if (indx[0] < lb[0])
{
- addedbefore = lb[0] - indx[0];
- dim[0] += addedbefore;
+ /* addedbefore = lb[0] - indx[0]; */
+ /* dim[0] += addedbefore; */
+ if (pg_sub_s32_overflow(lb[0], indx[0], &addedbefore) ||
+ pg_add_s32_overflow(dim[0], addedbefore, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
lb[0] = indx[0];
if (addedbefore > 1)
newhasnulls = true; /* will insert nulls */
}
if (indx[0] >= (dim[0] + lb[0]))
{
- addedafter = indx[0] - (dim[0] + lb[0]) + 1;
- dim[0] += addedafter;
+ /* addedafter = indx[0] - (dim[0] + lb[0]) + 1; */
+ /* dim[0] += addedafter; */
+ if (pg_sub_s32_overflow(indx[0], dim[0] + lb[0], &addedafter) ||
+ pg_add_s32_overflow(addedafter, 1, &addedafter) ||
+ pg_add_s32_overflow(dim[0], addedafter, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
if (addedafter > 1)
newhasnulls = true; /* will insert nulls */
}
@@ -2568,14 +2585,23 @@ array_set_element_expanded(Datum arraydatum,
addedbefore = addedafter = 0;
/*
- * Check subscripts (this logic matches original array_set_element)
+ * Check subscripts (this logic must match array_set_element). We assume
+ * the existing subscripts passed ArrayCheckBounds, so that dim[i] + lb[i]
+ * can be computed without overflow. But we must beware of other
+ * overflows in our calculations of new dim[] values.
*/
if (ndim == 1)
{
if (indx[0] < lb[0])
{
- addedbefore = lb[0] - indx[0];
- dim[0] += addedbefore;
+ /* addedbefore = lb[0] - indx[0]; */
+ /* dim[0] += addedbefore; */
+ if (pg_sub_s32_overflow(lb[0], indx[0], &addedbefore) ||
+ pg_add_s32_overflow(dim[0], addedbefore, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
lb[0] = indx[0];
dimschanged = true;
if (addedbefore > 1)
@@ -2583,8 +2609,15 @@ array_set_element_expanded(Datum arraydatum,
}
if (indx[0] >= (dim[0] + lb[0]))
{
- addedafter = indx[0] - (dim[0] + lb[0]) + 1;
- dim[0] += addedafter;
+ /* addedafter = indx[0] - (dim[0] + lb[0]) + 1; */
+ /* dim[0] += addedafter; */
+ if (pg_sub_s32_overflow(indx[0], dim[0] + lb[0], &addedafter) ||
+ pg_add_s32_overflow(addedafter, 1, &addedafter) ||
+ pg_add_s32_overflow(dim[0], addedafter, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
dimschanged = true;
if (addedafter > 1)
newhasnulls = true; /* will insert nulls */
@@ -2866,7 +2899,10 @@ array_set_slice(Datum arraydatum,
addedbefore = addedafter = 0;
/*
- * Check subscripts
+ * Check subscripts. We assume the existing subscripts passed
+ * ArrayCheckBounds, so that dim[i] + lb[i] can be computed without
+ * overflow. But we must beware of other overflows in our calculations of
+ * new dim[] values.
*/
if (ndim == 1)
{
@@ -2881,18 +2917,31 @@ array_set_slice(Datum arraydatum,
errmsg("upper bound cannot be less than lower bound")));
if (lowerIndx[0] < lb[0])
{
- if (upperIndx[0] < lb[0] - 1)
- newhasnulls = true; /* will insert nulls */
- addedbefore = lb[0] - lowerIndx[0];
- dim[0] += addedbefore;
+ /* addedbefore = lb[0] - lowerIndx[0]; */
+ /* dim[0] += addedbefore; */
+ if (pg_sub_s32_overflow(lb[0], lowerIndx[0], &addedbefore) ||
+ pg_add_s32_overflow(dim[0], addedbefore, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
lb[0] = lowerIndx[0];
+ if (addedbefore > 1)
+ newhasnulls = true; /* will insert nulls */
}
if (upperIndx[0] >= (dim[0] + lb[0]))
{
- if (lowerIndx[0] > (dim[0] + lb[0]))
+ /* addedafter = upperIndx[0] - (dim[0] + lb[0]) + 1; */
+ /* dim[0] += addedafter; */
+ if (pg_sub_s32_overflow(upperIndx[0], dim[0] + lb[0], &addedafter) ||
+ pg_add_s32_overflow(addedafter, 1, &addedafter) ||
+ pg_add_s32_overflow(dim[0], addedafter, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
+ if (addedafter > 1)
newhasnulls = true; /* will insert nulls */
- addedafter = upperIndx[0] - (dim[0] + lb[0]) + 1;
- dim[0] += addedafter;
}
}
else
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index f7c6a51..eb5f2a0 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -63,10 +63,6 @@ ArrayGetOffset0(int n, const int *tup, const int *scale)
* This must do overflow checking, since it is used to validate that a user
* dimensionality request doesn't overflow what we can handle.
*
- * We limit array sizes to at most about a quarter billion elements,
- * so that it's not necessary to check for overflow in quite so many
- * places --- for instance when palloc'ing Datum arrays.
- *
* The multiplication overflow check only works on machines that have int64
* arithmetic, but that is nearly all platforms these days, and doing check
* divides for those that don't seems way too expensive.
@@ -77,8 +73,6 @@ ArrayGetNItems(int ndim, const int *dims)
int32 ret;
int i;
-#define MaxArraySize ((Size) (MaxAllocSize / sizeof(Datum)))
-
if (ndim <= 0)
return 0;
ret = 1;
diff --git a/src/include/utils/array.h b/src/include/utils/array.h
index 905f6b0..3e4c09d 100644
--- a/src/include/utils/array.h
+++ b/src/include/utils/array.h
@@ -65,6 +65,13 @@
#include "utils/expandeddatum.h"
+/*
+ * Maximum number of elements in an array. We limit this to at most about a
+ * quarter billion elements, so that it's not necessary to check for overflow
+ * in quite so many places --- for instance when palloc'ing Datum arrays.
+ */
+#define MaxArraySize ((Size) (MaxAllocSize / sizeof(Datum)))
+
/*
* Arrays are varlena objects, so must meet the varlena convention that
* the first int32 of the object contains the total object size in bytes.
diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out
index c730563..e4ec394 100644
--- a/src/test/regress/expected/arrays.out
+++ b/src/test/regress/expected/arrays.out
@@ -1347,6 +1347,23 @@ insert into arr_pk_tbl(pk, f1[1:2]) values (1, '{6,7,8}') on conflict (pk)
-- then you didn't get an indexscan plan, and something is busted.
reset enable_seqscan;
reset enable_bitmapscan;
+-- test subscript overflow detection
+-- The normal error message includes a platform-dependent limit,
+-- so suppress it to avoid needing multiple expected-files.
+\set VERBOSITY terse
+insert into arr_pk_tbl values(10, '[-2147483648:-2147483647]={1,2}');
+update arr_pk_tbl set f1[2147483647] = 42 where pk = 10;
+ERROR: array size exceeds the maximum allowed (134217727)
+update arr_pk_tbl set f1[2147483646:2147483647] = array[4,2] where pk = 10;
+ERROR: array size exceeds the maximum allowed (134217727)
+-- also exercise the expanded-array case
+do $$ declare a int[];
+begin
+ a := '[-2147483648:-2147483647]={1,2}'::int[];
+ a[2147483647] := 42;
+end $$;
+ERROR: array size exceeds the maximum allowed (134217727)
+\set VERBOSITY default
-- test [not] (like|ilike) (any|all) (...)
select 'foo' like any (array['%a', '%o']); -- t
?column?
diff --git a/src/test/regress/sql/arrays.sql b/src/test/regress/sql/arrays.sql
index 25dd4e2..4ad6e55 100644
--- a/src/test/regress/sql/arrays.sql
+++ b/src/test/regress/sql/arrays.sql
@@ -407,6 +407,25 @@ insert into arr_pk_tbl(pk, f1[1:2]) values (1, '{6,7,8}') on conflict (pk)
reset enable_seqscan;
reset enable_bitmapscan;
+-- test subscript overflow detection
+
+-- The normal error message includes a platform-dependent limit,
+-- so suppress it to avoid needing multiple expected-files.
+\set VERBOSITY terse
+
+insert into arr_pk_tbl values(10, '[-2147483648:-2147483647]={1,2}');
+update arr_pk_tbl set f1[2147483647] = 42 where pk = 10;
+update arr_pk_tbl set f1[2147483646:2147483647] = array[4,2] where pk = 10;
+
+-- also exercise the expanded-array case
+do $$ declare a int[];
+begin
+ a := '[-2147483648:-2147483647]={1,2}'::int[];
+ a[2147483647] := 42;
+end $$;
+
+\set VERBOSITY default
+
-- test [not] (like|ilike) (any|all) (...)
select 'foo' like any (array['%a', '%o']); -- t
select 'foo' like any (array['%a', '%b']); -- f
diff --git a/src/include/common/int.h b/src/include/common/int.h
new file mode 100644
index 0000000..d754798
--- /dev/null
+++ b/src/include/common/int.h
@@ -0,0 +1,273 @@
+/*-------------------------------------------------------------------------
+ *
+ * int.h
+ * Routines to perform integer math, while checking for overflows.
+ *
+ * The routines in this file are intended to be well defined C, without
+ * relying on compiler flags like -fwrapv.
+ *
+ * To reduce the overhead of these routines try to use compiler intrinsics
+ * where available. That's not that important for the 16, 32 bit cases, but
+ * the 64 bit cases can be considerably faster with intrinsics. In case no
+ * intrinsics are available 128 bit math is used where available.
+ *
+ * Copyright (c) 2017-2019, PostgreSQL Global Development Group
+ *
+ * src/include/common/int.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef COMMON_INT_H
+#define COMMON_INT_H
+
+/*
+ * If a + b overflows, return true, otherwise store the result of a + b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_add_s16_overflow(int16 a, int16 b, int16 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_add_overflow(a, b, result);
+#else
+ int32 res = (int32) a + (int32) b;
+
+ if (res > PG_INT16_MAX || res < PG_INT16_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int16) res;
+ return false;
+#endif
+}
+
+/*
+ * If a - b overflows, return true, otherwise store the result of a - b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_sub_s16_overflow(int16 a, int16 b, int16 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_sub_overflow(a, b, result);
+#else
+ int32 res = (int32) a - (int32) b;
+
+ if (res > PG_INT16_MAX || res < PG_INT16_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int16) res;
+ return false;
+#endif
+}
+
+/*
+ * If a * b overflows, return true, otherwise store the result of a * b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_mul_s16_overflow(int16 a, int16 b, int16 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_mul_overflow(a, b, result);
+#else
+ int32 res = (int32) a * (int32) b;
+
+ if (res > PG_INT16_MAX || res < PG_INT16_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int16) res;
+ return false;
+#endif
+}
+
+/*
+ * If a + b overflows, return true, otherwise store the result of a + b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_add_s32_overflow(int32 a, int32 b, int32 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_add_overflow(a, b, result);
+#else
+ int64 res = (int64) a + (int64) b;
+
+ if (res > PG_INT32_MAX || res < PG_INT32_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int32) res;
+ return false;
+#endif
+}
+
+/*
+ * If a - b overflows, return true, otherwise store the result of a - b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_sub_s32_overflow(int32 a, int32 b, int32 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_sub_overflow(a, b, result);
+#else
+ int64 res = (int64) a - (int64) b;
+
+ if (res > PG_INT32_MAX || res < PG_INT32_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int32) res;
+ return false;
+#endif
+}
+
+/*
+ * If a * b overflows, return true, otherwise store the result of a * b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_mul_s32_overflow(int32 a, int32 b, int32 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_mul_overflow(a, b, result);
+#else
+ int64 res = (int64) a * (int64) b;
+
+ if (res > PG_INT32_MAX || res < PG_INT32_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int32) res;
+ return false;
+#endif
+}
+
+/*
+ * If a + b overflows, return true, otherwise store the result of a + b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_add_s64_overflow(int64 a, int64 b, int64 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_add_overflow(a, b, result);
+#elif defined(HAVE_INT128)
+ int128 res = (int128) a + (int128) b;
+
+ if (res > PG_INT64_MAX || res < PG_INT64_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int64) res;
+ return false;
+#else
+ if ((a > 0 && b > 0 && a > PG_INT64_MAX - b) ||
+ (a < 0 && b < 0 && a < PG_INT64_MIN - b))
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = a + b;
+ return false;
+#endif
+}
+
+/*
+ * If a - b overflows, return true, otherwise store the result of a - b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_sub_s64_overflow(int64 a, int64 b, int64 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_sub_overflow(a, b, result);
+#elif defined(HAVE_INT128)
+ int128 res = (int128) a - (int128) b;
+
+ if (res > PG_INT64_MAX || res < PG_INT64_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int64) res;
+ return false;
+#else
+ if ((a < 0 && b > 0 && a < PG_INT64_MIN + b) ||
+ (a > 0 && b < 0 && a > PG_INT64_MAX + b))
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = a - b;
+ return false;
+#endif
+}
+
+/*
+ * If a * b overflows, return true, otherwise store the result of a * b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_mul_s64_overflow(int64 a, int64 b, int64 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_mul_overflow(a, b, result);
+#elif defined(HAVE_INT128)
+ int128 res = (int128) a * (int128) b;
+
+ if (res > PG_INT64_MAX || res < PG_INT64_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int64) res;
+ return false;
+#else
+ /*
+ * Overflow can only happen if at least one value is outside the range
+ * sqrt(min)..sqrt(max) so check that first as the division can be quite a
+ * bit more expensive than the multiplication.
+ *
+ * Multiplying by 0 or 1 can't overflow of course and checking for 0
+ * separately avoids any risk of dividing by 0. Be careful about dividing
+ * INT_MIN by -1 also, note reversing the a and b to ensure we're always
+ * dividing it by a positive value.
+ *
+ */
+ if ((a > PG_INT32_MAX || a < PG_INT32_MIN ||
+ b > PG_INT32_MAX || b < PG_INT32_MIN) &&
+ a != 0 && a != 1 && b != 0 && b != 1 &&
+ ((a > 0 && b > 0 && a > PG_INT64_MAX / b) ||
+ (a > 0 && b < 0 && b < PG_INT64_MIN / a) ||
+ (a < 0 && b > 0 && a < PG_INT64_MIN / b) ||
+ (a < 0 && b < 0 && a < PG_INT64_MAX / b)))
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = a * b;
+ return false;
+#endif
+}
+
+#endif /* COMMON_INT_H */
--
2.39.3
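
The int.h fallbacks above detect overflow by doing the arithmetic in a wider type and range-checking the result before narrowing; arrayfuncs.c then chains pg_sub_s32_overflow()/pg_add_s32_overflow() and raises ERRCODE_PROGRAM_LIMIT_EXCEEDED when any step overflows. A small standalone sketch of the same pattern, not part of the packaged patch and written with plain stdint types so it compiles outside the backend:

/* Standalone illustration (not from the packaged patch) of the
 * overflow-checked arithmetic pattern used by the CVE-2023-5869 fix.
 * Compiles with any C99 compiler: cc -o demo demo.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same contract as pg_add_s32_overflow(): return true on overflow,
 * otherwise store a + b into *result. */
static bool
add_s32_overflow(int32_t a, int32_t b, int32_t *result)
{
	int64_t res = (int64_t) a + (int64_t) b;

	if (res > INT32_MAX || res < INT32_MIN)
		return true;
	*result = (int32_t) res;
	return false;
}

/* Same contract as pg_sub_s32_overflow(), for a - b. */
static bool
sub_s32_overflow(int32_t a, int32_t b, int32_t *result)
{
	int64_t res = (int64_t) a - (int64_t) b;

	if (res > INT32_MAX || res < INT32_MIN)
		return true;
	*result = (int32_t) res;
	return false;
}

int
main(void)
{
	/* Mimic the new regression test: a two-element array with lower bound
	 * INT32_MIN, enlarged by assigning to subscript INT32_MAX. */
	int32_t lb = INT32_MIN;		/* array lower bound */
	int32_t dim = 2;		/* current number of elements */
	int32_t indx = INT32_MAX;	/* subscript being assigned */
	int32_t addedafter;
	int32_t newdim = dim;

	/* addedafter = indx - (dim + lb) + 1; newdim += addedafter;
	 * each step checked for overflow, as in array_set_element(). */
	if (sub_s32_overflow(indx, dim + lb, &addedafter) ||
		add_s32_overflow(addedafter, 1, &addedafter) ||
		add_s32_overflow(newdim, addedafter, &newdim))
		printf("array size exceeds the maximum allowed\n");
	else
		printf("new dimension: %d\n", newdim);
	return 0;
}

With gcc or clang the real header prefers the __builtin_add_overflow() family and only falls back to this widened-arithmetic check when the intrinsic is unavailable.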

View File

@ -0,0 +1 @@
94a4b2528372458e5662c18d406629266667c437198160a18cdfd2c4a4d6eee9 postgresql-10.23.tar.bz2

View File

@ -0,0 +1 @@
a754c02f7051c2f21e52f8669a421b50485afcde9a581674d6106326b189d126 postgresql-9.2.24.tar.bz2

View File

View File

@ -1,16 +1,19 @@
-diff -ur postgresql-15.0/src/backend/utils/misc/postgresql.conf.sample patched/src/backend/utils/misc/postgresql.conf.sample
---- postgresql-15.0/src/backend/utils/misc/postgresql.conf.sample 2022-10-10 22:57:37.000000000 +0200
-+++ patched/src/backend/utils/misc/postgresql.conf.sample 2022-10-13 10:51:57.000000000 +0200
-@@ -448,7 +448,7 @@
- # logging_collector to be on.
+Default to stderr-based logging with a week's worth of daily logfiles.
+diff -Naur postgresql-9.1rc1.orig/src/backend/utils/misc/postgresql.conf.sample postgresql-9.1rc1/src/backend/utils/misc/postgresql.conf.sample
+--- postgresql-9.1rc1.orig/src/backend/utils/misc/postgresql.conf.sample 2011-08-18 17:23:13.000000000 -0400
++++ postgresql-9.1rc1/src/backend/utils/misc/postgresql.conf.sample 2011-08-18 18:39:39.697526799 -0400
+@@ -279,7 +279,7 @@
+ # requires logging_collector to be on.
 # This is used when logging to stderr:
--#logging_collector = off # Enable capturing of stderr, jsonlog,
-+logging_collector = on # Enable capturing of stderr, jsonlog,
- # and csvlog into log files. Required
- # to be on for csvlogs and jsonlogs.
+-#logging_collector = off # Enable capturing of stderr and csvlog
++logging_collector = on # Enable capturing of stderr and csvlog
+ # into log files. Required to be on for
+ # csvlogs.
 # (change requires restart)
-@@ -456,16 +456,16 @@
+@@ -355,11 +355,11 @@
 # These are only used if logging_collector is on:
 #log_directory = 'log' # directory where log files are written,
 # can be absolute or relative to PGDATA
@ -19,6 +22,15 @@ diff -ur postgresql-15.0/src/backend/utils/misc/postgresql.conf.sample patched/s
 # can include strftime() escapes
 #log_file_mode = 0600 # creation mode for log files,
 # begin with 0 to use octal notation
+-#log_truncate_on_rotation = off # If on, an existing log file with the
++log_truncate_on_rotation = on # If on, an existing log file with the
+ # same name as the new log file will be
+ # truncated rather than appended to.
+ # But such truncation only occurs on
+@@ -367,9 +367,9 @@
+ # or size-driven rotation. Default is
+ # off, meaning append to existing files
+ # in all cases.
 -#log_rotation_age = 1d # Automatic rotation of logfiles will
 +log_rotation_age = 1d # Automatic rotation of logfiles will
 # happen after that time. 0 disables.
@ -26,8 +38,4 @@ diff -ur postgresql-15.0/src/backend/utils/misc/postgresql.conf.sample patched/s
 +log_rotation_size = 0 # Automatic rotation of logfiles will
 # happen after that much log output.
 # 0 disables.
--#log_truncate_on_rotation = off # If on, an existing log file with the
-+log_truncate_on_rotation = on # If on, an existing log file with the
- # same name as the new log file will be
- # truncated rather than appended to.
- # But such truncation only occurs on

View File

@ -0,0 +1,37 @@
PostgreSQL ecpg/initdb manual page fixes
This was generated based on automatic Red Hat manual page scan (private
RHBZ#948933).
diff -up ./doc/src/sgml/man1/ecpg.1.man948933 ./doc/src/sgml/man1/ecpg.1
--- ./doc/src/sgml/man1/ecpg.1.man948933 2014-12-16 02:13:15.000000000 +0100
+++ ./doc/src/sgml/man1/ecpg.1 2014-12-23 11:26:37.883644047 +0100
@@ -128,6 +133,11 @@ Allow question mark as placeholder for c
.RE
.RE
.PP
+\fB\-\-regression\fR
+.RS 4
+Run in regression testing mode\&.
+.RE
+.PP
\fB\-t\fR
.RS 4
Turn on autocommit of transactions\&. In this mode, each SQL command is automatically committed unless it is inside an explicit transaction block\&. In the default mode, commands are committed only when
diff -up ./doc/src/sgml/man1/initdb.1.man948933 ./doc/src/sgml/man1/initdb.1
--- ./doc/src/sgml/man1/initdb.1.man948933 2014-12-16 02:13:21.000000000 +0100
+++ ./doc/src/sgml/man1/initdb.1 2014-12-23 11:26:37.883644047 +0100
@@ -281,6 +281,13 @@ determines that an error prevented it fr
.PP
Other options:
.PP
+\fB\-s\fR
+.br
+\fB\-\-show\fR
+.RS 4
+Print the internal settings, then exit\&.
+.RE
+.PP
\fB\-V\fR
.br
\fB\-\-version\fR

View File

View File

@ -0,0 +1,51 @@
diff --git a/src/bin/pg_config/Makefile b/src/bin/pg_config/Makefile
index c410087..e546b7b 100644
--- a/src/bin/pg_config/Makefile
+++ b/src/bin/pg_config/Makefile
@@ -11,28 +11,30 @@
PGFILEDESC = "pg_config - report configuration information"
PGAPPICON=win32
+PG_CONFIG = pg_server_config$(X)
+
subdir = src/bin/pg_config
top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
OBJS= pg_config.o $(WIN32RES)
-all: pg_config
+all: $(PG_CONFIG)
-pg_config: $(OBJS) | submake-libpgport
- $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X)
+$(PG_CONFIG): $(OBJS) | submake-libpgport
+ $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@
install: all installdirs
- $(INSTALL_SCRIPT) pg_config$(X) '$(DESTDIR)$(bindir)/pg_config$(X)'
+ $(INSTALL_SCRIPT) $(PG_CONFIG) '$(DESTDIR)$(bindir)/$(PG_CONFIG)'
installdirs:
$(MKDIR_P) '$(DESTDIR)$(bindir)'
uninstall:
- rm -f '$(DESTDIR)$(bindir)/pg_config$(X)'
+ rm -f '$(DESTDIR)$(bindir)/$(PG_CONFIG)'
clean distclean maintainer-clean:
- rm -f pg_config$(X) $(OBJS)
+ rm -f $(PG_CONFIG) $(OBJS)
rm -rf tmp_check
check:
diff --git a/src/bin/pg_config/nls.mk b/src/bin/pg_config/nls.mk
index 1d41f90ee0..0f34f371cc 100644
--- a/src/bin/pg_config/nls.mk
+++ b/src/bin/pg_config/nls.mk
@@ -1,4 +1,4 @@
# src/bin/pg_config/nls.mk
-CATALOG_NAME = pg_config
+CATALOG_NAME = pg_server_config
AVAIL_LANGUAGES = cs de es fr he it ja ko nb pl pt_BR ro ru sv ta tr zh_CN zh_TW
GETTEXT_FILES = pg_config.c ../../common/config_info.c ../../common/exec.c

View File

@ -42,12 +42,12 @@ diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
 index e278fa0..9ee15d4 100644
 --- a/src/include/pg_config_manual.h
 +++ b/src/include/pg_config_manual.h
-@@ -201,7 +201,7 @@
- * support them yet.
+@@ -169,7 +169,7 @@
+ * here's where to twiddle it. You can also override this at runtime
+ * with the postmaster's -k switch.
  */
- #ifndef WIN32
 -#define DEFAULT_PGSOCKET_DIR "/tmp"
 +#define DEFAULT_PGSOCKET_DIR "/var/run/postgresql"
- #else
- #define DEFAULT_PGSOCKET_DIR ""
- #endif
+ /*
+ * This is the default event source for Windows event log.

postgresql.pam → SOURCES/postgresql.pam Executable file → Normal file (0 lines changed)

View File

rpm-pgsql.patch → SOURCES/rpm-pgsql.patch Executable file → Normal file (0 lines changed)

SPECS/postgresql.spec Normal file (2315 lines changed)

File diff suppressed because it is too large

View File

@ -1,13 +0,0 @@
#! /bin/sh
rm sources .gitignore
set -e
spectool -S *.spec | cut -d' ' -f2 \
| grep -E -e 'postgresql-.*\.tar\.*' -e 'postgresql.*\.pdf' | sort | \
while read line
do
base=`basename "$line"`
echo " * handling $base"
sha512sum --tag "$base" >> sources
echo "/$base" >> .gitignore
done

View File

@ -1,12 +0,0 @@
#! /bin/sh
rm sources
set -e
spectool -S *.spec | cut -d' ' -f2 \
| grep -E -e 'postgresql-.*\.tar\.*' -e 'postgresql.*\.pdf' | sort | \
while read line
do
base=`basename "$line"`
echo " * handling $base"
sha512sum --tag "$base" >> sources
done

View File

@ -1,192 +0,0 @@
From 69db3b0cfccc0687dfbdf56afcfb2f8e536053c6 Mon Sep 17 00:00:00 2001
From: Andrew Dunstan <andrew@dunslane.net>
Date: Sun, 14 May 2017 01:10:18 -0400
Subject: [PATCH] Suppress indentation from Data::Dumper in regression tests
Ultra-modern versions of the perl Data::Dumper module have apparently
changed how they indent output. Instead of trying to keep up we choose
to tell it to suppress all indentation in the hstore_plperl regression
tests.
Backpatch to 9.5 where this feature was introduced.
---
contrib/hstore_plperl/expected/hstore_plperlu.out | 44 ++++++-----------------
contrib/hstore_plperl/sql/hstore_plperlu.sql | 6 ++++
2 files changed, 17 insertions(+), 33 deletions(-)
diff --git a/contrib/hstore_plperl/expected/hstore_plperlu.out b/contrib/hstore_plperl/expected/hstore_plperlu.out
index b09fb78..d719d29 100644
--- a/contrib/hstore_plperl/expected/hstore_plperlu.out
+++ b/contrib/hstore_plperl/expected/hstore_plperlu.out
@@ -20,15 +20,12 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_[0]));
return scalar(keys %{$_[0]});
$$;
SELECT test1('aa=>bb, cc=>NULL'::hstore);
-INFO: $VAR1 = {
- 'aa' => 'bb',
- 'cc' => undef
- };
-
+INFO: $VAR1 = {'aa' => 'bb','cc' => undef};
test1
-------
2
@@ -39,12 +36,12 @@ LANGUAGE plperlu
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_[0]));
return scalar(keys %{$_[0]});
$$;
SELECT test1none('aa=>bb, cc=>NULL'::hstore);
INFO: $VAR1 = '"aa"=>"bb", "cc"=>NULL';
-
test1none
-----------
0
@@ -56,15 +53,12 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_[0]));
return scalar(keys %{$_[0]});
$$;
SELECT test1list('aa=>bb, cc=>NULL'::hstore);
-INFO: $VAR1 = {
- 'aa' => 'bb',
- 'cc' => undef
- };
-
+INFO: $VAR1 = {'aa' => 'bb','cc' => undef};
test1list
-----------
2
@@ -77,18 +71,12 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_[0]->[0], $_[0]->[1]));
return scalar(keys %{$_[0]});
$$;
SELECT test1arr(array['aa=>bb, cc=>NULL'::hstore, 'dd=>ee']);
-INFO: $VAR1 = {
- 'aa' => 'bb',
- 'cc' => undef
- };
-$VAR2 = {
- 'dd' => 'ee'
- };
-
+INFO: $VAR1 = {'aa' => 'bb','cc' => undef};$VAR2 = {'dd' => 'ee'};
test1arr
----------
2
@@ -101,6 +89,7 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
$rv = spi_exec_query(q{SELECT 'aa=>bb, cc=>NULL'::hstore AS col1});
elog(INFO, Dumper($rv->{rows}[0]->{col1}));
@@ -111,13 +100,8 @@ $rv = spi_exec_prepared($plan, {}, $val);
elog(INFO, Dumper($rv->{rows}[0]->{col1}));
$$;
SELECT test3();
-INFO: $VAR1 = {
- 'aa' => 'bb',
- 'cc' => undef
- };
-
+INFO: $VAR1 = {'aa' => 'bb','cc' => undef};
INFO: $VAR1 = '"a"=>"1", "b"=>"boo", "c"=>NULL';
-
test3
-------
@@ -138,6 +122,7 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_TD->{new}));
if ($_TD->{new}{a} == 1) {
$_TD->{new}{b} = {a => 1, b => 'boo', c => undef};
@@ -147,14 +132,7 @@ return "MODIFY";
$$;
CREATE TRIGGER test4 BEFORE UPDATE ON test1 FOR EACH ROW EXECUTE PROCEDURE test4();
UPDATE test1 SET a = a;
-INFO: $VAR1 = {
- 'a' => '1',
- 'b' => {
- 'aa' => 'bb',
- 'cc' => undef
- }
- };
-
+INFO: $VAR1 = {'a' => '1','b' => {'aa' => 'bb','cc' => undef}};
SELECT * FROM test1;
a | b
---+---------------------------------
diff --git a/contrib/hstore_plperl/sql/hstore_plperlu.sql b/contrib/hstore_plperl/sql/hstore_plperlu.sql
index 8d8508c..c714b35 100644
--- a/contrib/hstore_plperl/sql/hstore_plperlu.sql
+++ b/contrib/hstore_plperl/sql/hstore_plperlu.sql
@@ -15,6 +15,7 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_[0]));
return scalar(keys %{$_[0]});
$$;
@@ -26,6 +27,7 @@ LANGUAGE plperlu
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_[0]));
return scalar(keys %{$_[0]});
$$;
@@ -38,6 +40,7 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_[0]));
return scalar(keys %{$_[0]});
$$;
@@ -52,6 +55,7 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_[0]->[0], $_[0]->[1]));
return scalar(keys %{$_[0]});
$$;
@@ -66,6 +70,7 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
$rv = spi_exec_query(q{SELECT 'aa=>bb, cc=>NULL'::hstore AS col1});
elog(INFO, Dumper($rv->{rows}[0]->{col1}));
@@ -90,6 +95,7 @@ TRANSFORM FOR TYPE hstore
AS $$
use Data::Dumper;
$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
elog(INFO, Dumper($_TD->{new}));
if ($_TD->{new}{a} == 1) {
$_TD->{new}{b} = {a => 1, b => 'boo', c => undef};
--
2.1.4

View File

@ -1,43 +0,0 @@
We don't build/install interfaces by upstream's implicit rules.
This patch is used in two places: postgresql.spec and libecpg.spec -- keep those
in sync!
Related: rhbz#1618698
diff --git a/src/Makefile b/src/Makefile
index bcdbd95..4bea236 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -20,7 +20,6 @@ SUBDIRS = \
backend/utils/mb/conversion_procs \
backend/snowball \
include \
- interfaces \
backend/replication/libpqwalreceiver \
backend/replication/pgoutput \
fe_utils \
diff --git a/src/Makefile.global.in b/src/Makefile.global.in
index b9d86ac..29df69f 100644
--- a/src/Makefile.global.in
+++ b/src/Makefile.global.in
@@ -549,7 +549,7 @@ endif
# How to link to libpq. (This macro may be used as-is by backend extensions.
# Client-side code should go through libpq_pgport or libpq_pgport_shlib,
# instead.)
-libpq = -L$(libpq_builddir) -lpq
+libpq = -lpq
# libpq_pgport is for use by client executables (not libraries) that use libpq.
# We force clients to pull symbols from the non-shared libraries libpgport
@@ -579,7 +579,6 @@ endif
# Commonly used submake targets
submake-libpq: | submake-generated-headers
- $(MAKE) -C $(libpq_builddir) all
submake-libpgport: | submake-generated-headers
$(MAKE) -C $(top_builddir)/src/port all
--
2.21.0

View File

@ -1,49 +0,0 @@
PostgreSQL ecpg/initdb manual page fixes
This was generated based on automatic Red Hat manual page scan (private
RHBZ#948933).
diff -up postgresql-13.1/doc/src/sgml/man1/ecpg.1.patch6 postgresql-13.1/doc/src/sgml/man1/ecpg.1
--- postgresql-13.1/doc/src/sgml/man1/ecpg.1.patch6 2020-11-09 23:38:03.000000000 +0100
+++ postgresql-13.1/doc/src/sgml/man1/ecpg.1 2020-11-18 09:26:40.547324791 +0100
@@ -81,6 +81,11 @@ ORACLE\&.
Define a C preprocessor symbol\&.
.RE
.PP
+\fB\-h \fR
+.RS 4
+Parse a header file, this option includes option \fB\-c\fR\&.
+.RE
+.PP
\fB\-h\fR
.RS 4
Process header files\&. When this option is specified, the output file extension becomes
@@ -144,6 +149,11 @@ Allow question mark as placeholder for c
.RE
.RE
.PP
+\fB\-\-regression\fR
+.RS 4
+Run in regression testing mode\&.
+.RE
+.PP
\fB\-t\fR
.RS 4
Turn on autocommit of transactions\&. In this mode, each SQL command is automatically committed unless it is inside an explicit transaction block\&. In the default mode, commands are committed only when
diff -up postgresql-13.1/doc/src/sgml/man1/initdb.1.patch6 postgresql-13.1/doc/src/sgml/man1/initdb.1
--- postgresql-13.1/doc/src/sgml/man1/initdb.1.patch6 2020-11-09 23:38:05.000000000 +0100
+++ postgresql-13.1/doc/src/sgml/man1/initdb.1 2020-11-18 09:25:05.082348424 +0100
@@ -311,6 +311,13 @@ determines that an error prevented it fr
.PP
Other options:
.PP
+\fB\-s\fR
+.br
+\fB\-\-show\fR
+.RS 4
+Print the internal settings, then exit\&.
+.RE
+.PP
\fB\-V\fR
.br
\fB\-\-version\fR

View File

@ -1,12 +0,0 @@
diff -up postgresql-13.1/src/interfaces/Makefile.patch10 postgresql-13.1/src/interfaces/Makefile
--- postgresql-13.1/src/interfaces/Makefile.patch10 2021-02-02 21:33:23.235292305 +0100
+++ postgresql-13.1/src/interfaces/Makefile 2021-02-02 21:33:30.281365440 +0100
@@ -12,7 +12,7 @@ subdir = src/interfaces
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
-SUBDIRS = libpq ecpg
+SUBDIRS = libpq
$(recurse)

View File

@ -1,58 +0,0 @@
We should ideally provide '/bin/pg_config' in postgresql-server-devel, and
provide no pg_config binary in libpq package. But most of the Fedora packages
that use pg_config actually only build against PG libraries (and
postgresql-server-devel isn't needed). So, to avoid the initial rush around the
rhbz#1618698 change, we rather provide a pg_server_config binary, which in turn means
that we'll have to fix only a minimal set of packages which really build
PostgreSQL server modules.
diff -up postgresql-13.1/src/bin/pg_config/Makefile.patch9 postgresql-13.1/src/bin/pg_config/Makefile
--- postgresql-13.1/src/bin/pg_config/Makefile.patch9 2020-11-18 09:28:30.885453275 +0100
+++ postgresql-13.1/src/bin/pg_config/Makefile 2020-11-18 09:31:33.926325327 +0100
@@ -11,6 +11,8 @@
PGFILEDESC = "pg_config - report configuration information"
PGAPPICON=win32
+PG_CONFIG = pg_server_config$(X)
+
subdir = src/bin/pg_config
top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
@@ -19,22 +21,22 @@ OBJS = \
$(WIN32RES) \
pg_config.o
-all: pg_config
+all: $(PG_CONFIG)
-pg_config: $(OBJS) | submake-libpgport
- $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X)
+$(PG_CONFIG): $(OBJS) | submake-libpgport
+ $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@
install: all installdirs
- $(INSTALL_SCRIPT) pg_config$(X) '$(DESTDIR)$(bindir)/pg_config$(X)'
+ $(INSTALL_SCRIPT) $(PG_CONFIG) '$(DESTDIR)$(bindir)/$(PG_CONFIG)'
installdirs:
$(MKDIR_P) '$(DESTDIR)$(bindir)'
uninstall:
- rm -f '$(DESTDIR)$(bindir)/pg_config$(X)'
+ rm -f '$(DESTDIR)$(bindir)/$(PG_CONFIG)'
clean distclean maintainer-clean:
- rm -f pg_config$(X) $(OBJS)
+ rm -f $(PG_CONFIG) $(OBJS)
rm -rf tmp_check
check:
diff -up postgresql-13.1/src/bin/pg_config/nls.mk.patch9 postgresql-13.1/src/bin/pg_config/nls.mk
--- postgresql-13.1/src/bin/pg_config/nls.mk.patch9 2020-11-18 09:28:30.885453275 +0100
+++ postgresql-13.1/src/bin/pg_config/nls.mk 2020-11-18 09:32:00.735599526 +0100
@@ -1,4 +1,4 @@
# src/bin/pg_config/nls.mk
-CATALOG_NAME = pg_config
+CATALOG_NAME = pg_server_config
AVAIL_LANGUAGES = cs de el es fr he it ja ka ko pl pt_BR ru sv tr uk vi zh_CN
GETTEXT_FILES = pg_config.c ../../common/config_info.c ../../common/exec.c

File diff suppressed because it is too large

View File

@ -1,6 +0,0 @@
SHA512 (postgresql-15.8.tar.bz2) = da610d878819179fcafe8adf6cbe72e188537dbaff82457c67013f9089ae8cbb1f6d8c4d5ea62e4ef54c3e2a90060bd1f36b71b8531354279181e574d903940c
SHA512 (postgresql-13.16.tar.bz2) = e874c77e2d1d27d724f45c576c9d815da55f36712d67e3ec0a1f14ce4ac535fb39b6f23cdcd818cff05788e2c1e12f79572d90f8c145d7478aa35157b0b40679
SHA512 (postgresql-setup-8.8.tar.gz) = 4569e5ba83b16556312b89cd6762eb55902eb9265ce9ceb0e0fe18755e1ab7217ea748df465c4402e24d19b55b25702deab92030510dc722db8fcbc0cb639053
SHA512 (postgresql-15.8.tar.bz2.sha256) = c5e316703010fa796aaa27a12350dd3c721513a75d9367ce6c4f740e1b35a3ca50e0ba16794e79c2c827f6e24f344f3345c2b9d2c7e0d2f7a546875105d62771
SHA512 (postgresql-13.16.tar.bz2.sha256) = 5d4027a9c44a080c5bebe66a6409d6b4a28f849c7383506d9401bf36b7fdc4557cf9a37f8a25bbee9514c751310d378ea9d405cfc808d2a9c1d2bed46047603d
SHA512 (postgresql-15.8-US.pdf) = 879a64e49bb0389f1bb1baefff2e638e92ab76134be8f2d47e955897e800be350adbbba70f06daae5c23d2274ff0a4c096b2499e833971ae5324429c8d5ac8fe