Compare commits

...

No commits in common. "c8-stream-10" and "c10s" have entirely different histories.

21 changed files with 1 addition and 3697 deletions

.gitignore vendored

@@ -1,4 +0,0 @@
SOURCES/postgresql-10.23-US.pdf
SOURCES/postgresql-10.23.tar.bz2
SOURCES/postgresql-9.2.24.tar.bz2
SOURCES/postgresql-setup-8.7.tar.gz


@@ -1,4 +0,0 @@
a416c245ff0815fbde534bc49b0a07ffdd373894 SOURCES/postgresql-10.23-US.pdf
2df7b4b3751112f3cb543c3ea81e45531bebc7a1 SOURCES/postgresql-10.23.tar.bz2
63d6966ccdbab6aae1f9754fdb8e341ada1ef653 SOURCES/postgresql-9.2.24.tar.bz2
fb97095dc9648f9c31d58fcb406831da5e419ddf SOURCES/postgresql-setup-8.7.tar.gz


@@ -1,67 +0,0 @@
#
# Simplified makefile for running the PostgreSQL regression tests
# in an RPM installation
#
# maximum simultaneous connections for parallel tests
MAXCONNOPT =
ifdef MAX_CONNECTIONS
MAXCONNOPT += --max-connections=$(MAX_CONNECTIONS)
endif
# locale
NOLOCALE =
ifdef NO_LOCALE
NOLOCALE += --no-locale
endif
srcdir := .
REGRESS_OPTS += --dlpath=.
pg_regress_locale_flags = $(if $(ENCODING),--encoding=$(ENCODING)) $(NOLOCALE)
pg_regress_installcheck = ./pg_regress --inputdir=$(srcdir) --bindir=@bindir@ $(pg_regress_locale_flags)
# Test input and expected files. These are created by pg_regress itself, so we
# don't have a rule to create them. We do need rules to clean them however.
ifile_list := $(subst .source,, $(notdir $(wildcard $(srcdir)/input/*.source)))
input_files := $(foreach file, $(ifile_list), sql/$(file).sql)
ofile_list := $(subst .source,, $(notdir $(wildcard $(srcdir)/output/*.source)))
output_files := $(foreach file, $(ofile_list), expected/$(file).out)
abs_srcdir := $(shell pwd)
abs_builddir := $(shell pwd)
check: installcheck-parallel
installcheck: cleandirs
$(pg_regress_installcheck) $(REGRESS_OPTS) --schedule=$(srcdir)/serial_schedule $(EXTRA_TESTS)
installcheck-parallel: cleandirs
$(pg_regress_installcheck) $(REGRESS_OPTS) --schedule=$(srcdir)/parallel_schedule $(MAXCONNOPT) $(EXTRA_TESTS)
# The tests command the server to write into testtablespace and results.
# On a SELinux-enabled system this will fail unless we mark those directories
# as writable by the server.
cleandirs:
-rm -rf testtablespace results
mkdir testtablespace results
[ -x /usr/bin/chcon ] && /usr/bin/chcon -u system_u -r object_r -t postgresql_db_t testtablespace results
# old interfaces follow...
runcheck: check
runtest: installcheck
runtest-parallel: installcheck-parallel
##
## Clean up
##
clean distclean maintainer-clean:
rm -f $(output_files) $(input_files)
rm -rf testtablespace
rm -rf results tmp_check log
rm -f regression.diffs regression.out regress.out run_check.out


@@ -1,58 +0,0 @@
#! /bin/sh
# This script builds the PDF version of the PostgreSQL documentation.
#
# In principle we could do this as part of the RPM build, but there are
# good reasons not to:
# 1. The build would take longer and have a larger BuildRequires footprint.
# 2. The generated PDF has timestamps in it, which would inevitably result
# in multilib conflicts due to slightly different timestamps.
# So instead, we run this manually when rebasing to a new upstream release,
# and treat the resulting PDF as a separate Source file.
#
# You will need to have the docbook packages installed to run this.
# Expect it to take about 20 minutes and use about 160MB of disk.
set -e
# Pass package version (e.g., 9.1.2) as argument
VERSION=$1
test -z "$VERSION" && VERSION=`awk '/^Version:/ { print $2; }' postgresql.spec`
TARGETFILE=postgresql-$VERSION-US.pdf
test -f "$TARGETFILE" && echo "$TARGETFILE exists" && exit 1
echo Building $TARGETFILE ...
# Unpack postgresql
rm -rf postgresql-$VERSION
tar xfj postgresql-$VERSION.tar.bz2
cd postgresql-$VERSION
# Apply any patches that affect the PDF documentation
# patch -p1 < ../xxx.patch
# Configure ...
./configure >/dev/null
# Build the PDF docs
cd doc/src/sgml
make postgres-US.pdf >make.log
mv -f postgres-US.pdf ../../../../$TARGETFILE
# Clean up
cd ../../../..
rm -rf postgresql-$VERSION
exit 0


@@ -1,13 +0,0 @@
diff --git a/contrib/dblink/expected/dblink.out b/contrib/dblink/expected/dblink.out
index 6ceabb453c..6516d4f131 100644
--- a/contrib/dblink/expected/dblink.out
+++ b/contrib/dblink/expected/dblink.out
@@ -879,7 +879,7 @@ $d$;
CREATE USER MAPPING FOR public SERVER fdtest
OPTIONS (server 'localhost'); -- fail, can't specify server here
ERROR: invalid option "server"
-HINT: Valid options in this context are: user, password
+HINT: Valid options in this context are: user, password, sslpassword
CREATE USER MAPPING FOR public SERVER fdtest OPTIONS (user :'USER');
GRANT USAGE ON FOREIGN SERVER fdtest TO regress_dblink_user;
GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO regress_dblink_user;


@@ -1,249 +0,0 @@
From 681d9e4621aac0a9c71364b6f54f00f6d8c4337f Mon Sep 17 00:00:00 2001
From 8d525d7b9545884a3e0d79adcd61543f9ae2ae28 Mon Sep 17 00:00:00 2001
From: Noah Misch <noah@leadboat.com>
Date: Mon, 8 May 2023 06:14:07 -0700
Subject: Replace last PushOverrideSearchPath() call with
set_config_option().
The two methods don't cooperate, so set_config_option("search_path",
...) has been ineffective under non-empty overrideStack. This defect
enabled an attacker having database-level CREATE privilege to execute
arbitrary code as the bootstrap superuser. While that particular attack
requires v13+ for the trusted extension attribute, other attacks are
feasible in all supported versions.
Standardize on the combination of NewGUCNestLevel() and
set_config_option("search_path", ...). It is newer than
PushOverrideSearchPath(), more-prevalent, and has no known
disadvantages. The "override" mechanism remains for now, for
compatibility with out-of-tree code. Users should update such code,
which likely suffers from the same sort of vulnerability closed here.
Back-patch to v11 (all supported versions).
Reviewed by Alexander Lakhin. Reported by Alexander Lakhin.
Security: CVE-2023-2454
---
contrib/seg/Makefile | 2 +-
contrib/seg/expected/security.out | 32 ++++++++++++++++++
contrib/seg/sql/security.sql | 32 ++++++++++++++++++
src/backend/catalog/namespace.c | 4 +++
src/backend/commands/schemacmds.c | 37 ++++++++++++++------
src/test/regress/expected/namespace.out | 45 +++++++++++++++++++++++++
src/test/regress/sql/namespace.sql | 24 +++++++++++++
7 files changed, 165 insertions(+), 11 deletions(-)
create mode 100644 contrib/seg/expected/security.out
create mode 100644 contrib/seg/sql/security.sql
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 14e57adee2..73ddb67882 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -3515,6 +3515,10 @@ OverrideSearchPathMatchesCurrent(OverrideSearchPath *path)
/*
* PushOverrideSearchPath - temporarily override the search path
*
+ * Do not use this function; almost any usage introduces a security
+ * vulnerability. It exists for the benefit of legacy code running in
+ * non-security-sensitive environments.
+ *
* We allow nested overrides, hence the push/pop terminology. The GUC
* search_path variable is ignored while an override is active.
*
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 48590247f8..b6a71154a8 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -30,6 +30,7 @@
#include "commands/schemacmds.h"
#include "miscadmin.h"
#include "parser/parse_utilcmd.h"
+#include "parser/scansup.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/builtins.h"
@@ -53,14 +54,16 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString,
{
const char *schemaName = stmt->schemaname;
Oid namespaceId;
- OverrideSearchPath *overridePath;
List *parsetree_list;
ListCell *parsetree_item;
Oid owner_uid;
Oid saved_uid;
int save_sec_context;
+ int save_nestlevel;
+ char *nsp = namespace_search_path;
AclResult aclresult;
ObjectAddress address;
+ StringInfoData pathbuf;
GetUserIdAndSecContext(&saved_uid, &save_sec_context);
@@ -153,14 +156,26 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString,
CommandCounterIncrement();
/*
- * Temporarily make the new namespace be the front of the search path, as
- * well as the default creation target namespace. This will be undone at
- * the end of this routine, or upon error.
+ * Prepend the new schema to the current search path.
+ *
+ * We use the equivalent of a function SET option to allow the setting to
+ * persist for exactly the duration of the schema creation. guc.c also
+ * takes care of undoing the setting on error.
*/
- overridePath = GetOverrideSearchPath(CurrentMemoryContext);
- overridePath->schemas = lcons_oid(namespaceId, overridePath->schemas);
- /* XXX should we clear overridePath->useTemp? */
- PushOverrideSearchPath(overridePath);
+ save_nestlevel = NewGUCNestLevel();
+
+ initStringInfo(&pathbuf);
+ appendStringInfoString(&pathbuf, quote_identifier(schemaName));
+
+ while (scanner_isspace(*nsp))
+ nsp++;
+
+ if (*nsp != '\0')
+ appendStringInfo(&pathbuf, ", %s", nsp);
+
+ (void) set_config_option("search_path", pathbuf.data,
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0, false);
/*
* Report the new schema to possibly interested event triggers. Note we
@@ -215,8 +230,10 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString,
CommandCounterIncrement();
}
- /* Reset search path to normal state */
- PopOverrideSearchPath();
+ /*
+ * Restore the GUC variable search_path we set above.
+ */
+ AtEOXact_GUC(true, save_nestlevel);
/* Reset current user and security context */
SetUserIdAndSecContext(saved_uid, save_sec_context);
diff --git a/src/test/regress/expected/namespace.out b/src/test/regress/expected/namespace.out
index 2564d1b080..a62fd8ded0 100644
--- a/src/test/regress/expected/namespace.out
+++ b/src/test/regress/expected/namespace.out
@@ -1,6 +1,14 @@
--
-- Regression tests for schemas (namespaces)
--
+-- set the whitespace-only search_path to test that the
+-- GUC list syntax is preserved during a schema creation
+SELECT pg_catalog.set_config('search_path', ' ', false);
+ set_config
+------------
+
+(1 row)
+
CREATE SCHEMA test_schema_1
CREATE UNIQUE INDEX abc_a_idx ON abc (a)
CREATE VIEW abc_view AS
@@ -9,6 +17,43 @@ CREATE SCHEMA test_schema_1
a serial,
b int UNIQUE
);
+-- verify that the correct search_path restored on abort
+SET search_path to public;
+BEGIN;
+SET search_path to public, test_schema_1;
+CREATE SCHEMA test_schema_2
+ CREATE VIEW abc_view AS SELECT c FROM abc;
+ERROR: column "c" does not exist
+LINE 2: CREATE VIEW abc_view AS SELECT c FROM abc;
+ ^
+COMMIT;
+SHOW search_path;
+ search_path
+-------------
+ public
+(1 row)
+
+-- verify that the correct search_path preserved
+-- after creating the schema and on commit
+BEGIN;
+SET search_path to public, test_schema_1;
+CREATE SCHEMA test_schema_2
+ CREATE VIEW abc_view AS SELECT a FROM abc;
+SHOW search_path;
+ search_path
+-----------------------
+ public, test_schema_1
+(1 row)
+
+COMMIT;
+SHOW search_path;
+ search_path
+-----------------------
+ public, test_schema_1
+(1 row)
+
+DROP SCHEMA test_schema_2 CASCADE;
+NOTICE: drop cascades to view test_schema_2.abc_view
-- verify that the objects were created
SELECT COUNT(*) FROM pg_class WHERE relnamespace =
(SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_1');
diff --git a/src/test/regress/sql/namespace.sql b/src/test/regress/sql/namespace.sql
index 6b12c96193..3474f5ecf4 100644
--- a/src/test/regress/sql/namespace.sql
+++ b/src/test/regress/sql/namespace.sql
@@ -2,6 +2,10 @@
-- Regression tests for schemas (namespaces)
--
+-- set the whitespace-only search_path to test that the
+-- GUC list syntax is preserved during a schema creation
+SELECT pg_catalog.set_config('search_path', ' ', false);
+
CREATE SCHEMA test_schema_1
CREATE UNIQUE INDEX abc_a_idx ON abc (a)
@@ -13,6 +17,26 @@ CREATE SCHEMA test_schema_1
b int UNIQUE
);
+-- verify that the correct search_path restored on abort
+SET search_path to public;
+BEGIN;
+SET search_path to public, test_schema_1;
+CREATE SCHEMA test_schema_2
+ CREATE VIEW abc_view AS SELECT c FROM abc;
+COMMIT;
+SHOW search_path;
+
+-- verify that the correct search_path preserved
+-- after creating the schema and on commit
+BEGIN;
+SET search_path to public, test_schema_1;
+CREATE SCHEMA test_schema_2
+ CREATE VIEW abc_view AS SELECT a FROM abc;
+SHOW search_path;
+COMMIT;
+SHOW search_path;
+DROP SCHEMA test_schema_2 CASCADE;
+
-- verify that the objects were created
SELECT COUNT(*) FROM pg_class WHERE relnamespace =
(SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_1');
diff --git a/contrib/sepgsql/expected/ddl.out b/contrib/sepgsql/expected/ddl.out
index e8da587564..15d2b9c5e7 100644
--- a/contrib/sepgsql/expected/ddl.out
+++ b/contrib/sepgsql/expected/ddl.out
@@ -24,7 +24,6 @@ LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_reg
CREATE USER regress_sepgsql_test_user;
CREATE SCHEMA regtest_schema;
LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
-LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
GRANT ALL ON SCHEMA regtest_schema TO regress_sepgsql_test_user;
SET search_path = regtest_schema, public;
CREATE TABLE regtest_table (x serial primary key, y text);
--
2.41.0
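
The commit message above advises out-of-tree code to replace the PushOverrideSearchPath()/PopOverrideSearchPath() pair with the NewGUCNestLevel() + set_config_option("search_path", ...) combination. The backend-only C sketch below distills that pattern from the schemacmds.c hunk; it compiles only against PostgreSQL server headers, and the helper name and its schema argument are hypothetical illustrations, not part of this patch.

```c
/* Hedged sketch: temporarily prepend a schema to search_path the way the
 * patched CreateSchemaCommand() does (assumes PostgreSQL 10 backend headers). */
#include "postgres.h"

#include "lib/stringinfo.h"
#include "utils/builtins.h"     /* quote_identifier() */
#include "utils/guc.h"          /* NewGUCNestLevel(), set_config_option(), AtEOXact_GUC() */

static void
with_schema_on_search_path(const char *schemaName)  /* hypothetical helper */
{
	StringInfoData pathbuf;
	int			save_nestlevel;

	/* Open a GUC nest level so the change is scoped and undone on error. */
	save_nestlevel = NewGUCNestLevel();

	initStringInfo(&pathbuf);
	appendStringInfoString(&pathbuf, quote_identifier(schemaName));

	/* GUC_ACTION_SAVE makes the setting last only until AtEOXact_GUC(). */
	(void) set_config_option("search_path", pathbuf.data,
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0, false);

	/* ... do work that relies on the temporary search_path ... */

	/* Restore search_path (and any other GUCs set at this nest level). */
	AtEOXact_GUC(true, save_nestlevel);
}
```

Because guc.c discards everything set at that nest level on error, the override cannot outlive an aborted operation, which is the property the old push/pop mechanism failed to guarantee.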


@@ -1,114 +0,0 @@
From ca73753b090c33bc69ce299b4d7fff891a77b8ad Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Mon, 8 May 2023 10:12:44 -0400
Subject: Handle RLS dependencies in inlined set-returning
functions properly.
If an SRF in the FROM clause references a table having row-level
security policies, and we inline that SRF into the calling query,
we neglected to mark the plan as potentially dependent on which
role is executing it. This could lead to later executions in the
same session returning or hiding rows that should have been hidden
or returned instead.
Our thanks to Wolfgang Walther for reporting this problem.
Stephen Frost and Tom Lane
Security: CVE-2023-2455
---
src/backend/optimizer/util/clauses.c | 7 ++++++
src/test/regress/expected/rowsecurity.out | 27 +++++++++++++++++++++++
src/test/regress/sql/rowsecurity.sql | 20 +++++++++++++++++
3 files changed, 54 insertions(+)
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index a9c7bc342e..11269fee3e 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -5205,6 +5205,13 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
*/
record_plan_function_dependency(root, func_oid);
+ /*
+ * We must also notice if the inserted query adds a dependency on the
+ * calling role due to RLS quals.
+ */
+ if (querytree->hasRowSecurity)
+ root->glob->dependsOnRole = true;
+
return querytree;
/* Here if func is not inlinable: release temp memory and return NULL */
diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
index 38f53ed486..e278346420 100644
--- a/src/test/regress/expected/rowsecurity.out
+++ b/src/test/regress/expected/rowsecurity.out
@@ -4427,6 +4427,33 @@ SELECT * FROM rls_tbl;
DROP TABLE rls_tbl;
RESET SESSION AUTHORIZATION;
+-- CVE-2023-2455: inlining an SRF may introduce an RLS dependency
+create table rls_t (c text);
+insert into rls_t values ('invisible to bob');
+alter table rls_t enable row level security;
+grant select on rls_t to regress_rls_alice, regress_rls_bob;
+create policy p1 on rls_t for select to regress_rls_alice using (true);
+create policy p2 on rls_t for select to regress_rls_bob using (false);
+create function rls_f () returns setof rls_t
+ stable language sql
+ as $$ select * from rls_t $$;
+prepare q as select current_user, * from rls_f();
+set role regress_rls_alice;
+execute q;
+ current_user | c
+-------------------+------------------
+ regress_rls_alice | invisible to bob
+(1 row)
+
+set role regress_rls_bob;
+execute q;
+ current_user | c
+--------------+---
+(0 rows)
+
+RESET ROLE;
+DROP FUNCTION rls_f();
+DROP TABLE rls_t;
--
-- Clean up objects
--
diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql
index 0fd0cded7d..3d664538a6 100644
--- a/src/test/regress/sql/rowsecurity.sql
+++ b/src/test/regress/sql/rowsecurity.sql
@@ -2127,6 +2127,26 @@ SELECT * FROM rls_tbl;
DROP TABLE rls_tbl;
RESET SESSION AUTHORIZATION;
+-- CVE-2023-2455: inlining an SRF may introduce an RLS dependency
+create table rls_t (c text);
+insert into rls_t values ('invisible to bob');
+alter table rls_t enable row level security;
+grant select on rls_t to regress_rls_alice, regress_rls_bob;
+create policy p1 on rls_t for select to regress_rls_alice using (true);
+create policy p2 on rls_t for select to regress_rls_bob using (false);
+create function rls_f () returns setof rls_t
+ stable language sql
+ as $$ select * from rls_t $$;
+prepare q as select current_user, * from rls_f();
+set role regress_rls_alice;
+execute q;
+set role regress_rls_bob;
+execute q;
+
+RESET ROLE;
+DROP FUNCTION rls_f();
+DROP TABLE rls_t;
+
--
-- Clean up objects
--
--
2.41.0


@@ -1,576 +0,0 @@
From d267cea24ea346c739c85bf7bccbd8e8f59da6b3 Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Mon, 6 Nov 2023 10:56:43 -0500
Subject: [PATCH 1/1] Detect integer overflow while computing new array
dimensions.
array_set_element() and related functions allow an array to be
enlarged by assigning to subscripts outside the current array bounds.
While these places were careful to check that the new bounds are
allowable, they neglected to consider the risk of integer overflow
in computing the new bounds. In edge cases, we could compute new
bounds that are invalid but get past the subsequent checks,
allowing bad things to happen. Memory stomps that are potentially
exploitable for arbitrary code execution are possible, and so is
disclosure of server memory.
To fix, perform the hazardous computations using overflow-detecting
arithmetic routines, which fortunately exist in all still-supported
branches.
The test cases added for this generate (after patching) errors that
mention the value of MaxArraySize, which is platform-dependent.
Rather than introduce multiple expected-files, use psql's VERBOSITY
parameter to suppress the printing of the message text. v11 psql
lacks that parameter, so omit the tests in that branch.
Our thanks to Pedro Gallegos for reporting this problem.
Security: CVE-2023-5869
Sign-Off-By: Tianyue Lan <tianyue.lan@oracle.com>
---
src/backend/utils/adt/arrayfuncs.c | 85 ++++++++++++++++++++++------
src/backend/utils/adt/arrayutils.c | 6 --
src/include/utils/array.h | 7 +++
src/test/regress/expected/arrays.out | 17 ++++++
src/test/regress/sql/arrays.sql | 19 +++++++
src/include/common/int.h | 273 +++++++++++++++++++++++++++++++++++++++
create mode 100644 src/include/common/int.h
6 files changed, 383 insertions(+), 24 deletions(-)
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 553c517..7363893 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -22,6 +22,7 @@
#include "access/htup_details.h"
#include "catalog/pg_type.h"
+#include "common/int.h"
#include "funcapi.h"
#include "libpq/pqformat.h"
#include "utils/array.h"
@@ -2309,22 +2310,38 @@ array_set_element(Datum arraydatum,
addedbefore = addedafter = 0;
/*
- * Check subscripts
+ * Check subscripts. We assume the existing subscripts passed
+ * ArrayCheckBounds, so that dim[i] + lb[i] can be computed without
+ * overflow. But we must beware of other overflows in our calculations of
+ * new dim[] values.
*/
if (ndim == 1)
{
if (indx[0] < lb[0])
{
- addedbefore = lb[0] - indx[0];
- dim[0] += addedbefore;
+ /* addedbefore = lb[0] - indx[0]; */
+ /* dim[0] += addedbefore; */
+ if (pg_sub_s32_overflow(lb[0], indx[0], &addedbefore) ||
+ pg_add_s32_overflow(dim[0], addedbefore, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
lb[0] = indx[0];
if (addedbefore > 1)
newhasnulls = true; /* will insert nulls */
}
if (indx[0] >= (dim[0] + lb[0]))
{
- addedafter = indx[0] - (dim[0] + lb[0]) + 1;
- dim[0] += addedafter;
+ /* addedafter = indx[0] - (dim[0] + lb[0]) + 1; */
+ /* dim[0] += addedafter; */
+ if (pg_sub_s32_overflow(indx[0], dim[0] + lb[0], &addedafter) ||
+ pg_add_s32_overflow(addedafter, 1, &addedafter) ||
+ pg_add_s32_overflow(dim[0], addedafter, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
if (addedafter > 1)
newhasnulls = true; /* will insert nulls */
}
@@ -2568,14 +2585,23 @@ array_set_element_expanded(Datum arraydatum,
addedbefore = addedafter = 0;
/*
- * Check subscripts (this logic matches original array_set_element)
+ * Check subscripts (this logic must match array_set_element). We assume
+ * the existing subscripts passed ArrayCheckBounds, so that dim[i] + lb[i]
+ * can be computed without overflow. But we must beware of other
+ * overflows in our calculations of new dim[] values.
*/
if (ndim == 1)
{
if (indx[0] < lb[0])
{
- addedbefore = lb[0] - indx[0];
- dim[0] += addedbefore;
+ /* addedbefore = lb[0] - indx[0]; */
+ /* dim[0] += addedbefore; */
+ if (pg_sub_s32_overflow(lb[0], indx[0], &addedbefore) ||
+ pg_add_s32_overflow(dim[0], addedbefore, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
lb[0] = indx[0];
dimschanged = true;
if (addedbefore > 1)
@@ -2583,8 +2609,15 @@ array_set_element_expanded(Datum arraydatum,
}
if (indx[0] >= (dim[0] + lb[0]))
{
- addedafter = indx[0] - (dim[0] + lb[0]) + 1;
- dim[0] += addedafter;
+ /* addedafter = indx[0] - (dim[0] + lb[0]) + 1; */
+ /* dim[0] += addedafter; */
+ if (pg_sub_s32_overflow(indx[0], dim[0] + lb[0], &addedafter) ||
+ pg_add_s32_overflow(addedafter, 1, &addedafter) ||
+ pg_add_s32_overflow(dim[0], addedafter, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
dimschanged = true;
if (addedafter > 1)
newhasnulls = true; /* will insert nulls */
@@ -2866,7 +2899,10 @@ array_set_slice(Datum arraydatum,
addedbefore = addedafter = 0;
/*
- * Check subscripts
+ * Check subscripts. We assume the existing subscripts passed
+ * ArrayCheckBounds, so that dim[i] + lb[i] can be computed without
+ * overflow. But we must beware of other overflows in our calculations of
+ * new dim[] values.
*/
if (ndim == 1)
{
@@ -2881,18 +2917,31 @@ array_set_slice(Datum arraydatum,
errmsg("upper bound cannot be less than lower bound")));
if (lowerIndx[0] < lb[0])
{
- if (upperIndx[0] < lb[0] - 1)
- newhasnulls = true; /* will insert nulls */
- addedbefore = lb[0] - lowerIndx[0];
- dim[0] += addedbefore;
+ /* addedbefore = lb[0] - lowerIndx[0]; */
+ /* dim[0] += addedbefore; */
+ if (pg_sub_s32_overflow(lb[0], lowerIndx[0], &addedbefore) ||
+ pg_add_s32_overflow(dim[0], addedbefore, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
lb[0] = lowerIndx[0];
+ if (addedbefore > 1)
+ newhasnulls = true; /* will insert nulls */
}
if (upperIndx[0] >= (dim[0] + lb[0]))
{
- if (lowerIndx[0] > (dim[0] + lb[0]))
+ /* addedafter = upperIndx[0] - (dim[0] + lb[0]) + 1; */
+ /* dim[0] += addedafter; */
+ if (pg_sub_s32_overflow(upperIndx[0], dim[0] + lb[0], &addedafter) ||
+ pg_add_s32_overflow(addedafter, 1, &addedafter) ||
+ pg_add_s32_overflow(dim[0], addedafter, &dim[0]))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxArraySize)));
+ if (addedafter > 1)
newhasnulls = true; /* will insert nulls */
- addedafter = upperIndx[0] - (dim[0] + lb[0]) + 1;
- dim[0] += addedafter;
}
}
else
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index f7c6a51..eb5f2a0 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -63,10 +63,6 @@ ArrayGetOffset0(int n, const int *tup, const int *scale)
* This must do overflow checking, since it is used to validate that a user
* dimensionality request doesn't overflow what we can handle.
*
- * We limit array sizes to at most about a quarter billion elements,
- * so that it's not necessary to check for overflow in quite so many
- * places --- for instance when palloc'ing Datum arrays.
- *
* The multiplication overflow check only works on machines that have int64
* arithmetic, but that is nearly all platforms these days, and doing check
* divides for those that don't seems way too expensive.
@@ -77,8 +73,6 @@ ArrayGetNItems(int ndim, const int *dims)
int32 ret;
int i;
-#define MaxArraySize ((Size) (MaxAllocSize / sizeof(Datum)))
-
if (ndim <= 0)
return 0;
ret = 1;
diff --git a/src/include/utils/array.h b/src/include/utils/array.h
index 905f6b0..3e4c09d 100644
--- a/src/include/utils/array.h
+++ b/src/include/utils/array.h
@@ -65,6 +65,13 @@
#include "utils/expandeddatum.h"
+/*
+ * Maximum number of elements in an array. We limit this to at most about a
+ * quarter billion elements, so that it's not necessary to check for overflow
+ * in quite so many places --- for instance when palloc'ing Datum arrays.
+ */
+#define MaxArraySize ((Size) (MaxAllocSize / sizeof(Datum)))
+
/*
* Arrays are varlena objects, so must meet the varlena convention that
* the first int32 of the object contains the total object size in bytes.
diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out
index c730563..e4ec394 100644
--- a/src/test/regress/expected/arrays.out
+++ b/src/test/regress/expected/arrays.out
@@ -1347,6 +1347,23 @@ insert into arr_pk_tbl(pk, f1[1:2]) values (1, '{6,7,8}') on conflict (pk)
-- then you didn't get an indexscan plan, and something is busted.
reset enable_seqscan;
reset enable_bitmapscan;
+-- test subscript overflow detection
+-- The normal error message includes a platform-dependent limit,
+-- so suppress it to avoid needing multiple expected-files.
+\set VERBOSITY terse
+insert into arr_pk_tbl values(10, '[-2147483648:-2147483647]={1,2}');
+update arr_pk_tbl set f1[2147483647] = 42 where pk = 10;
+ERROR: array size exceeds the maximum allowed (134217727)
+update arr_pk_tbl set f1[2147483646:2147483647] = array[4,2] where pk = 10;
+ERROR: array size exceeds the maximum allowed (134217727)
+-- also exercise the expanded-array case
+do $$ declare a int[];
+begin
+ a := '[-2147483648:-2147483647]={1,2}'::int[];
+ a[2147483647] := 42;
+end $$;
+ERROR: array size exceeds the maximum allowed (134217727)
+\set VERBOSITY default
-- test [not] (like|ilike) (any|all) (...)
select 'foo' like any (array['%a', '%o']); -- t
?column?
diff --git a/src/test/regress/sql/arrays.sql b/src/test/regress/sql/arrays.sql
index 25dd4e2..4ad6e55 100644
--- a/src/test/regress/sql/arrays.sql
+++ b/src/test/regress/sql/arrays.sql
@@ -407,6 +407,25 @@ insert into arr_pk_tbl(pk, f1[1:2]) values (1, '{6,7,8}') on conflict (pk)
reset enable_seqscan;
reset enable_bitmapscan;
+-- test subscript overflow detection
+
+-- The normal error message includes a platform-dependent limit,
+-- so suppress it to avoid needing multiple expected-files.
+\set VERBOSITY terse
+
+insert into arr_pk_tbl values(10, '[-2147483648:-2147483647]={1,2}');
+update arr_pk_tbl set f1[2147483647] = 42 where pk = 10;
+update arr_pk_tbl set f1[2147483646:2147483647] = array[4,2] where pk = 10;
+
+-- also exercise the expanded-array case
+do $$ declare a int[];
+begin
+ a := '[-2147483648:-2147483647]={1,2}'::int[];
+ a[2147483647] := 42;
+end $$;
+
+\set VERBOSITY default
+
-- test [not] (like|ilike) (any|all) (...)
select 'foo' like any (array['%a', '%o']); -- t
select 'foo' like any (array['%a', '%b']); -- f
diff --git a/src/include/common/int.h b/src/include/common/int.h
new file mode 100644
index 0000000..d754798
--- /dev/null
+++ b/src/include/common/int.h
@@ -0,0 +1,273 @@
+/*-------------------------------------------------------------------------
+ *
+ * int.h
+ * Routines to perform integer math, while checking for overflows.
+ *
+ * The routines in this file are intended to be well defined C, without
+ * relying on compiler flags like -fwrapv.
+ *
+ * To reduce the overhead of these routines try to use compiler intrinsics
+ * where available. That's not that important for the 16, 32 bit cases, but
+ * the 64 bit cases can be considerably faster with intrinsics. In case no
+ * intrinsics are available 128 bit math is used where available.
+ *
+ * Copyright (c) 2017-2019, PostgreSQL Global Development Group
+ *
+ * src/include/common/int.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef COMMON_INT_H
+#define COMMON_INT_H
+
+/*
+ * If a + b overflows, return true, otherwise store the result of a + b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_add_s16_overflow(int16 a, int16 b, int16 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_add_overflow(a, b, result);
+#else
+ int32 res = (int32) a + (int32) b;
+
+ if (res > PG_INT16_MAX || res < PG_INT16_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int16) res;
+ return false;
+#endif
+}
+
+/*
+ * If a - b overflows, return true, otherwise store the result of a - b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_sub_s16_overflow(int16 a, int16 b, int16 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_sub_overflow(a, b, result);
+#else
+ int32 res = (int32) a - (int32) b;
+
+ if (res > PG_INT16_MAX || res < PG_INT16_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int16) res;
+ return false;
+#endif
+}
+
+/*
+ * If a * b overflows, return true, otherwise store the result of a * b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_mul_s16_overflow(int16 a, int16 b, int16 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_mul_overflow(a, b, result);
+#else
+ int32 res = (int32) a * (int32) b;
+
+ if (res > PG_INT16_MAX || res < PG_INT16_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int16) res;
+ return false;
+#endif
+}
+
+/*
+ * If a + b overflows, return true, otherwise store the result of a + b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_add_s32_overflow(int32 a, int32 b, int32 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_add_overflow(a, b, result);
+#else
+ int64 res = (int64) a + (int64) b;
+
+ if (res > PG_INT32_MAX || res < PG_INT32_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int32) res;
+ return false;
+#endif
+}
+
+/*
+ * If a - b overflows, return true, otherwise store the result of a - b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_sub_s32_overflow(int32 a, int32 b, int32 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_sub_overflow(a, b, result);
+#else
+ int64 res = (int64) a - (int64) b;
+
+ if (res > PG_INT32_MAX || res < PG_INT32_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int32) res;
+ return false;
+#endif
+}
+
+/*
+ * If a * b overflows, return true, otherwise store the result of a * b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_mul_s32_overflow(int32 a, int32 b, int32 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_mul_overflow(a, b, result);
+#else
+ int64 res = (int64) a * (int64) b;
+
+ if (res > PG_INT32_MAX || res < PG_INT32_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int32) res;
+ return false;
+#endif
+}
+
+/*
+ * If a + b overflows, return true, otherwise store the result of a + b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_add_s64_overflow(int64 a, int64 b, int64 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_add_overflow(a, b, result);
+#elif defined(HAVE_INT128)
+ int128 res = (int128) a + (int128) b;
+
+ if (res > PG_INT64_MAX || res < PG_INT64_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int64) res;
+ return false;
+#else
+ if ((a > 0 && b > 0 && a > PG_INT64_MAX - b) ||
+ (a < 0 && b < 0 && a < PG_INT64_MIN - b))
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = a + b;
+ return false;
+#endif
+}
+
+/*
+ * If a - b overflows, return true, otherwise store the result of a - b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_sub_s64_overflow(int64 a, int64 b, int64 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_sub_overflow(a, b, result);
+#elif defined(HAVE_INT128)
+ int128 res = (int128) a - (int128) b;
+
+ if (res > PG_INT64_MAX || res < PG_INT64_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int64) res;
+ return false;
+#else
+ if ((a < 0 && b > 0 && a < PG_INT64_MIN + b) ||
+ (a > 0 && b < 0 && a > PG_INT64_MAX + b))
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = a - b;
+ return false;
+#endif
+}
+
+/*
+ * If a * b overflows, return true, otherwise store the result of a * b into
+ * *result. The content of *result is implementation defined in case of
+ * overflow.
+ */
+static inline bool
+pg_mul_s64_overflow(int64 a, int64 b, int64 *result)
+{
+#if defined(HAVE__BUILTIN_OP_OVERFLOW)
+ return __builtin_mul_overflow(a, b, result);
+#elif defined(HAVE_INT128)
+ int128 res = (int128) a * (int128) b;
+
+ if (res > PG_INT64_MAX || res < PG_INT64_MIN)
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = (int64) res;
+ return false;
+#else
+ /*
+ * Overflow can only happen if at least one value is outside the range
+ * sqrt(min)..sqrt(max) so check that first as the division can be quite a
+ * bit more expensive than the multiplication.
+ *
+ * Multiplying by 0 or 1 can't overflow of course and checking for 0
+ * separately avoids any risk of dividing by 0. Be careful about dividing
+ * INT_MIN by -1 also, note reversing the a and b to ensure we're always
+ * dividing it by a positive value.
+ *
+ */
+ if ((a > PG_INT32_MAX || a < PG_INT32_MIN ||
+ b > PG_INT32_MAX || b < PG_INT32_MIN) &&
+ a != 0 && a != 1 && b != 0 && b != 1 &&
+ ((a > 0 && b > 0 && a > PG_INT64_MAX / b) ||
+ (a > 0 && b < 0 && b < PG_INT64_MIN / a) ||
+ (a < 0 && b > 0 && a < PG_INT64_MIN / b) ||
+ (a < 0 && b < 0 && a < PG_INT64_MAX / b)))
+ {
+ *result = 0x5EED; /* to avoid spurious warnings */
+ return true;
+ }
+ *result = a * b;
+ return false;
+#endif
+}
+
+#endif /* COMMON_INT_H */
--
2.39.3
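
To make the caller pattern introduced in arrayfuncs.c easier to follow, here is a small standalone sketch that repeats the new-bound computation using the same int64-widening fallback the int.h routines use when __builtin_*_overflow is unavailable. It is only an illustration: PostgreSQL's int32 and ereport() are replaced with stdint.h types and printf(), and the input values mirror the new regression test.

```c
/* Standalone sketch of the overflow-checked bound computation performed by
 * the patched array_set_element(); assumes C99 + stdint.h. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same logic as the non-builtin fallback of pg_add_s32_overflow() above. */
static bool
add_s32_overflow(int32_t a, int32_t b, int32_t *result)
{
	int64_t res = (int64_t) a + (int64_t) b;

	if (res > INT32_MAX || res < INT32_MIN)
	{
		*result = 0x5EED;	/* value is unspecified on overflow */
		return true;
	}
	*result = (int32_t) res;
	return false;
}

static bool
sub_s32_overflow(int32_t a, int32_t b, int32_t *result)
{
	int64_t res = (int64_t) a - (int64_t) b;

	if (res > INT32_MAX || res < INT32_MIN)
	{
		*result = 0x5EED;
		return true;
	}
	*result = (int32_t) res;
	return false;
}

int
main(void)
{
	/* Two-element array with lower bound INT32_MIN, assigned at subscript
	 * INT32_MAX -- the same shape as the new regression test. */
	int32_t lb = INT32_MIN;
	int32_t dim = 2;
	int32_t indx = INT32_MAX;
	int32_t addedafter;

	if (sub_s32_overflow(indx, dim + lb, &addedafter) ||
		add_s32_overflow(addedafter, 1, &addedafter) ||
		add_s32_overflow(dim, addedafter, &dim))
		printf("array size exceeds the maximum allowed\n");
	else
		printf("new dimension: %d\n", dim);
	return 0;
}
```

Run as-is, this reports the oversized request instead of silently wrapping the dimension, which is exactly the failure mode the CVE-2023-5869 fix closes off.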


@@ -1 +0,0 @@
94a4b2528372458e5662c18d406629266667c437198160a18cdfd2c4a4d6eee9 postgresql-10.23.tar.bz2


@@ -1 +0,0 @@
a754c02f7051c2f21e52f8669a421b50485afcde9a581674d6106326b189d126 postgresql-9.2.24.tar.bz2


@@ -1,4 +0,0 @@
[ -f /etc/profile ] && source /etc/profile
PGDATA=/var/lib/pgsql/data
export PGDATA


@@ -1,41 +0,0 @@
Default to stderr-based logging with a week's worth of daily logfiles.
diff -Naur postgresql-9.1rc1.orig/src/backend/utils/misc/postgresql.conf.sample postgresql-9.1rc1/src/backend/utils/misc/postgresql.conf.sample
--- postgresql-9.1rc1.orig/src/backend/utils/misc/postgresql.conf.sample 2011-08-18 17:23:13.000000000 -0400
+++ postgresql-9.1rc1/src/backend/utils/misc/postgresql.conf.sample 2011-08-18 18:39:39.697526799 -0400
@@ -279,7 +279,7 @@
# requires logging_collector to be on.
# This is used when logging to stderr:
-#logging_collector = off # Enable capturing of stderr and csvlog
+logging_collector = on # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
@@ -355,11 +355,11 @@
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
-#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
+log_filename = 'postgresql-%a.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
-#log_truncate_on_rotation = off # If on, an existing log file with the
+log_truncate_on_rotation = on # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
@@ -367,9 +367,9 @@
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
-#log_rotation_age = 1d # Automatic rotation of logfiles will
+log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
-#log_rotation_size = 10MB # Automatic rotation of logfiles will
+log_rotation_size = 0 # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.


@@ -1,37 +0,0 @@
PostgreSQL ecpg/initdb manual page fixes
This was generated based on automatic Red Hat manual page scan (private
RHBZ#948933).
diff -up ./doc/src/sgml/man1/ecpg.1.man948933 ./doc/src/sgml/man1/ecpg.1
--- ./doc/src/sgml/man1/ecpg.1.man948933 2014-12-16 02:13:15.000000000 +0100
+++ ./doc/src/sgml/man1/ecpg.1 2014-12-23 11:26:37.883644047 +0100
@@ -128,6 +133,11 @@ Allow question mark as placeholder for c
.RE
.RE
.PP
+\fB\-\-regression\fR
+.RS 4
+Run in regression testing mode\&.
+.RE
+.PP
\fB\-t\fR
.RS 4
Turn on autocommit of transactions\&. In this mode, each SQL command is automatically committed unless it is inside an explicit transaction block\&. In the default mode, commands are committed only when
diff -up ./doc/src/sgml/man1/initdb.1.man948933 ./doc/src/sgml/man1/initdb.1
--- ./doc/src/sgml/man1/initdb.1.man948933 2014-12-16 02:13:21.000000000 +0100
+++ ./doc/src/sgml/man1/initdb.1 2014-12-23 11:26:37.883644047 +0100
@@ -281,6 +281,13 @@ determines that an error prevented it fr
.PP
Other options:
.PP
+\fB\-s\fR
+.br
+\fB\-\-show\fR
+.RS 4
+Print the internal settings, then exit\&.
+.RE
+.PP
\fB\-V\fR
.br
\fB\-\-version\fR


@@ -1,33 +0,0 @@
diff --git a/src/Makefile b/src/Makefile
index febbced..9737b55 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -20,7 +20,6 @@ SUBDIRS = \
backend/utils/mb/conversion_procs \
backend/snowball \
include \
- interfaces \
backend/replication/libpqwalreceiver \
backend/replication/pgoutput \
fe_utils \
diff --git a/src/Makefile.global.in b/src/Makefile.global.in
index 4ed5174..d0e0dae 100644
--- a/src/Makefile.global.in
+++ b/src/Makefile.global.in
@@ -457,7 +457,7 @@ endif
# This macro is for use by libraries linking to libpq. (Because libpgport
# isn't created with the same link flags as libpq, it can't be used.)
-libpq = -L$(libpq_builddir) -lpq
+libpq = -lpq
# This macro is for use by client executables (not libraries) that use libpq.
# We force clients to pull symbols from the non-shared libraries libpgport
@@ -483,7 +483,6 @@ endif
# Commonly used submake targets
submake-libpq:
- $(MAKE) -C $(libpq_builddir) all
submake-libpgport:
$(MAKE) -C $(top_builddir)/src/port all


@@ -1,51 +0,0 @@
diff --git a/src/bin/pg_config/Makefile b/src/bin/pg_config/Makefile
index c410087..e546b7b 100644
--- a/src/bin/pg_config/Makefile
+++ b/src/bin/pg_config/Makefile
@@ -11,28 +11,30 @@
PGFILEDESC = "pg_config - report configuration information"
PGAPPICON=win32
+PG_CONFIG = pg_server_config$(X)
+
subdir = src/bin/pg_config
top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
OBJS= pg_config.o $(WIN32RES)
-all: pg_config
+all: $(PG_CONFIG)
-pg_config: $(OBJS) | submake-libpgport
- $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X)
+$(PG_CONFIG): $(OBJS) | submake-libpgport
+ $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@
install: all installdirs
- $(INSTALL_SCRIPT) pg_config$(X) '$(DESTDIR)$(bindir)/pg_config$(X)'
+ $(INSTALL_SCRIPT) $(PG_CONFIG) '$(DESTDIR)$(bindir)/$(PG_CONFIG)'
installdirs:
$(MKDIR_P) '$(DESTDIR)$(bindir)'
uninstall:
- rm -f '$(DESTDIR)$(bindir)/pg_config$(X)'
+ rm -f '$(DESTDIR)$(bindir)/$(PG_CONFIG)'
clean distclean maintainer-clean:
- rm -f pg_config$(X) $(OBJS)
+ rm -f $(PG_CONFIG) $(OBJS)
rm -rf tmp_check
check:
diff --git a/src/bin/pg_config/nls.mk b/src/bin/pg_config/nls.mk
index 1d41f90ee0..0f34f371cc 100644
--- a/src/bin/pg_config/nls.mk
+++ b/src/bin/pg_config/nls.mk
@@ -1,4 +1,4 @@
# src/bin/pg_config/nls.mk
-CATALOG_NAME = pg_config
+CATALOG_NAME = pg_server_config
AVAIL_LANGUAGES = cs de es fr he it ja ko nb pl pt_BR ro ru sv ta tr zh_CN zh_TW
GETTEXT_FILES = pg_config.c ../../common/config_info.c ../../common/exec.c


@@ -1,53 +0,0 @@
Change the built-in default socket directory to be /var/run/postgresql.
For backwards compatibility with (probably non-libpq-based) clients that
might still expect to find the socket in /tmp, also create a socket in
/tmp. This is to resolve communication problems with clients operating
under systemd's PrivateTmp environment, which won't be using the same
global /tmp directory as the server; see bug #825448.
Note that we apply the socket directory change at the level of the
hard-wired defaults in the C code, not by just twiddling the setting in
postgresql.conf.sample; this is so that the change will take effect on
server package update, without requiring any existing postgresql.conf
to be updated. (Of course, a user who dislikes this behavior can still
override it via postgresql.conf.)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 9481f2d..75532c7 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -3196,7 +3196,7 @@ static struct config_string ConfigureNamesString[] =
},
&Unix_socket_directories,
#ifdef HAVE_UNIX_SOCKETS
- DEFAULT_PGSOCKET_DIR,
+ DEFAULT_PGSOCKET_DIR ", /tmp",
#else
"",
#endif
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index feeff9e..3e3d784 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -1234,7 +1234,7 @@ setup_config(void)
#ifdef HAVE_UNIX_SOCKETS
snprintf(repltok, sizeof(repltok), "#unix_socket_directories = '%s'",
- DEFAULT_PGSOCKET_DIR);
+ DEFAULT_PGSOCKET_DIR ", /tmp");
#else
snprintf(repltok, sizeof(repltok), "#unix_socket_directories = ''");
#endif
diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index e278fa0..9ee15d4 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -169,7 +169,7 @@
* here's where to twiddle it. You can also override this at runtime
* with the postmaster's -k switch.
*/
-#define DEFAULT_PGSOCKET_DIR "/tmp"
+#define DEFAULT_PGSOCKET_DIR "/var/run/postgresql"
/*
* This is the default event source for Windows event log.
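
Because the patch above moves the hard-wired default socket directory to /var/run/postgresql (while keeping a compatibility socket in /tmp for clients running under systemd's PrivateTmp), a client that spells out its socket path must point at the new location. The libpq snippet below is a minimal illustrative sketch, not something this package ships; the dbname and the explicit host directory are assumptions.

```c
/* Hedged example: connect through the packaged default socket directory.
 * Build roughly as: cc example.c -lpq (header/library paths may vary). */
#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

int
main(void)
{
	/* Setting "host" to a directory selects a Unix-domain socket in it;
	 * /var/run/postgresql matches the patched DEFAULT_PGSOCKET_DIR. */
	PGconn	   *conn = PQconnectdb("host=/var/run/postgresql dbname=postgres");

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return EXIT_FAILURE;
	}
	printf("connected via %s\n", PQhost(conn));
	PQfinish(conn);
	return EXIT_SUCCESS;
}
```

Clients that simply rely on libpq's built-in default need no change at all, since the same patch rewrites that default in the C code rather than in postgresql.conf.sample.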


@@ -1,3 +0,0 @@
#%PAM-1.0
auth include password-auth
account include password-auth


@@ -1 +0,0 @@
d /run/postgresql 0755 postgres postgres -


@@ -1,72 +0,0 @@
For the RPMs, we want the custom installation directories to end in
/pgsql not /postgresql. This is historical but not worth changing.
Notice that this patch also makes the appending of /pgsql unconditional.
This is to avoid unexpected behavior if the RPM is built in a working
directory whose path happens to include "postgres" or "pgsql" already.
However, datadir and sysconfdir are already set up in the specfile's
configure call, so we do not have to append anything to them.
diff -Naur postgresql-9.0.1.orig/src/Makefile.global.in postgresql-9.0.1/src/Makefile.global.in
--- postgresql-9.0.1.orig/src/Makefile.global.in 2010-10-01 10:25:44.000000000 -0400
+++ postgresql-9.0.1/src/Makefile.global.in 2010-10-11 11:52:05.224975308 -0400
@@ -55,8 +55,7 @@
# Installation directories
#
# These are set by the equivalent --xxxdir configure options. We
-# append "postgresql" to some of them, if the string does not already
-# contain "pgsql" or "postgres", in order to avoid directory clutter.
+# append "pgsql" to some of them, in order to avoid directory clutter.
#
# In a PGXS build, we cannot use the values inserted into Makefile.global
# by configure, since the installation tree may have been relocated.
@@ -74,45 +73,23 @@
bindir := @bindir@
datadir := @datadir@
-ifeq "$(findstring pgsql, $(datadir))" ""
-ifeq "$(findstring postgres, $(datadir))" ""
-override datadir := $(datadir)/postgresql
-endif
-endif
sysconfdir := @sysconfdir@
-ifeq "$(findstring pgsql, $(sysconfdir))" ""
-ifeq "$(findstring postgres, $(sysconfdir))" ""
-override sysconfdir := $(sysconfdir)/postgresql
-endif
-endif
libdir := @libdir@
pkglibdir = $(libdir)
-ifeq "$(findstring pgsql, $(pkglibdir))" ""
-ifeq "$(findstring postgres, $(pkglibdir))" ""
-override pkglibdir := $(pkglibdir)/postgresql
-endif
-endif
+override pkglibdir := $(pkglibdir)/pgsql
includedir := @includedir@
pkgincludedir = $(includedir)
-ifeq "$(findstring pgsql, $(pkgincludedir))" ""
-ifeq "$(findstring postgres, $(pkgincludedir))" ""
-override pkgincludedir := $(pkgincludedir)/postgresql
-endif
-endif
+override pkgincludedir := $(pkgincludedir)/pgsql
mandir := @mandir@
docdir := @docdir@
-ifeq "$(findstring pgsql, $(docdir))" ""
-ifeq "$(findstring postgres, $(docdir))" ""
-override docdir := $(docdir)/postgresql
-endif
-endif
+override docdir := $(docdir)/pgsql
htmldir := @htmldir@

File diff suppressed because it is too large

dead.package Normal file

@@ -0,0 +1 @@
postgresql was removed due to minimization efforts prior to public launch