16 changes: 1 addition & 15 deletions contrib/postgres_fdw/Makefile
@@ -1,12 +1,9 @@
# contrib/postgres_fdw/Makefile

MODULE_big = postgres_fdw
OBJS_FRONTEND = \
link-canary.o

OBJS = \
$(WIN32RES) \
$(OBJS_FRONTEND) \
connection.o \
deparse.o \
option.o \
@@ -16,7 +13,7 @@ OBJS = \
PGFILEDESC = "postgres_fdw - foreign data wrapper for PostgreSQL"

PG_CPPFLAGS = -I$(libpq_srcdir)
SHLIB_LINK_INTERNAL = -Wl,-Bsymbolic -Wl,-Bstatic -Wl,-Bstatic $(libpq) -Wl,-Bdynamic
SHLIB_LINK_INTERNAL = $(libpq)
Review comment from a Member: look 667f0c3

EXTENSION = postgres_fdw
DATA = postgres_fdw--1.0.sql postgres_fdw--1.0--1.1.sql
@@ -36,17 +33,6 @@ include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif

link-canary.c : % : $(top_srcdir)/src/common/%
rm -f $@ && $(LN_S) $< .

link-canary.o: link-canary.c
$(CC) $(CFLAGS) -DFRONTEND $(CPPFLAGS) -c $< -o $@

clean: clean-symlinks

clean-symlinks:
rm -f link-canary.c

# For postgres_fdw test
export PG_PORT=5432
installcheck: install prep_postgres
18 changes: 9 additions & 9 deletions contrib/postgres_fdw/expected/gp2pg_postgres_fdw.out
@@ -93,13 +93,13 @@ CREATE TABLE "S 1"."T 4" (
);
-- Disable autovacuum for these tables to avoid unexpected effects of that
ALTER TABLE "S 1"."T 1" SET (autovacuum_enabled = 'false');
WARNING: autovacuum is not supported in Greenplum
WARNING: autovacuum is not supported in Cloudberry
ALTER TABLE "S 1"."T 2" SET (autovacuum_enabled = 'false');
WARNING: autovacuum is not supported in Greenplum
WARNING: autovacuum is not supported in Cloudberry
ALTER TABLE "S 1"."T 3" SET (autovacuum_enabled = 'false');
WARNING: autovacuum is not supported in Greenplum
WARNING: autovacuum is not supported in Cloudberry
ALTER TABLE "S 1"."T 4" SET (autovacuum_enabled = 'false');
WARNING: autovacuum is not supported in Greenplum
WARNING: autovacuum is not supported in Cloudberry
INSERT INTO "S 1"."T 1"
SELECT id,
id % 10,
@@ -7735,7 +7735,7 @@ CREATE TABLE a (aa TEXT);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'aa' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
ALTER TABLE a SET (autovacuum_enabled = 'false');
WARNING: autovacuum is not supported in Greenplum
WARNING: autovacuum is not supported in Cloudberry
\! env PGOPTIONS='' psql -p ${PG_PORT} contrib_regression -c 'CREATE TABLE loct (aa TEXT, bb TEXT);'
CREATE TABLE
\! env PGOPTIONS='' psql -p ${PG_PORT} contrib_regression -c 'ALTER TABLE loct SET (autovacuum_enabled = 'false');'
@@ -7849,9 +7849,9 @@ ALTER TABLE
\! env PGOPTIONS='' psql -p ${PG_PORT} contrib_regression -c 'alter table loct2 set (autovacuum_enabled = 'false');'
ALTER TABLE
alter table loct1 set (autovacuum_enabled = 'false');
WARNING: autovacuum is not supported in Greenplum
WARNING: autovacuum is not supported in Cloudberry
alter table loct2 set (autovacuum_enabled = 'false');
WARNING: autovacuum is not supported in Greenplum
WARNING: autovacuum is not supported in Cloudberry
create table foo (f1 int, f2 int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'f1' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
@@ -7863,9 +7863,9 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
create foreign table bar2 (f3 int) inherits (bar)
server pgserver options (table_name 'loct2');
alter table foo set (autovacuum_enabled = 'false');
WARNING: autovacuum is not supported in Greenplum
WARNING: autovacuum is not supported in Cloudberry
alter table bar set (autovacuum_enabled = 'false');
WARNING: autovacuum is not supported in Greenplum
WARNING: autovacuum is not supported in Cloudberry
insert into foo values(1,1);
insert into foo values(3,3);
insert into foo2 values(2,2,2);
1,034 changes: 525 additions & 509 deletions contrib/postgres_fdw/expected/gp2pg_postgres_fdw_optimizer.out

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions contrib/postgres_fdw/expected/gp_postgres_fdw_optimizer.out
@@ -1,5 +1,5 @@
-- ===================================================================
-- Greenplum-specific features for postgres_fdw
-- Cloudberry-specific features for postgres_fdw
-- ===================================================================
-- ===================================================================
-- Create source tables and populate with data
@@ -667,9 +667,9 @@ SELECT * FROM postgres_fdw_gp."GP 1" ORDER BY f1;

TRUNCATE TABLE postgres_fdw_gp."GP 1";
-- ===================================================================
-- validate writes on coordinator (mpp_execute set to coordinator)
-- validate writes on master (mpp_execute set to master)
-- ===================================================================
ALTER FOREIGN TABLE gp_ft1 OPTIONS ( SET mpp_execute 'coordinator' );
ALTER FOREIGN TABLE gp_ft1 OPTIONS ( SET mpp_execute 'master' );
EXPLAIN (COSTS FALSE) INSERT INTO gp_ft1 SELECT * FROM table_dist_rand;
QUERY PLAN
------------------------------------------------
@@ -1365,27 +1365,27 @@ ALTER TABLE sub_part_1_prt_1 EXCHANGE PARTITION for(1) WITH TABLE sub_part_1_prt
ALTER TABLE sub_part_1_prt_1 EXCHANGE PARTITION for(2) WITH TABLE sub_part_1_prt_1_2_prt_two_foreign;
-- explain with ORCA should fall back to planner, rather than raise ERROR
explain select * from sub_part;
QUERY PLAN
---------------------------------------------------------------------------------------------
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Append (cost=100.00..14631.81 rows=641404 width=12)
-> Foreign Scan on sub_part_1_prt_1_2_prt_one (cost=100.00..383.06 rows=9102 width=12)
-> Foreign Scan on sub_part_1_prt_1_2_prt_two (cost=100.00..383.06 rows=9102 width=12)
-> Foreign Scan on sub_part_1_prt_1_2_prt_one sub_part_1 (cost=100.00..383.06 rows=9102 width=12)
-> Foreign Scan on sub_part_1_prt_1_2_prt_two sub_part_2 (cost=100.00..383.06 rows=9102 width=12)
-> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..1332.33 rows=77900 width=12)
-> Seq Scan on sub_part_1_prt_2_2_prt_one (cost=0.00..293.67 rows=25967 width=12)
-> Seq Scan on sub_part_1_prt_2_2_prt_one sub_part_3 (cost=0.00..293.67 rows=25967 width=12)
-> Gather Motion 3:1 (slice2; segments: 3) (cost=0.00..1332.33 rows=77900 width=12)
-> Seq Scan on sub_part_1_prt_2_2_prt_two (cost=0.00..293.67 rows=25967 width=12)
-> Seq Scan on sub_part_1_prt_2_2_prt_two sub_part_4 (cost=0.00..293.67 rows=25967 width=12)
-> Gather Motion 3:1 (slice3; segments: 3) (cost=0.00..1332.33 rows=77900 width=12)
-> Seq Scan on sub_part_1_prt_3_2_prt_one (cost=0.00..293.67 rows=25967 width=12)
-> Seq Scan on sub_part_1_prt_3_2_prt_one sub_part_5 (cost=0.00..293.67 rows=25967 width=12)
-> Gather Motion 3:1 (slice4; segments: 3) (cost=0.00..1332.33 rows=77900 width=12)
-> Seq Scan on sub_part_1_prt_3_2_prt_two (cost=0.00..293.67 rows=25967 width=12)
-> Seq Scan on sub_part_1_prt_3_2_prt_two sub_part_6 (cost=0.00..293.67 rows=25967 width=12)
-> Gather Motion 3:1 (slice5; segments: 3) (cost=0.00..1332.33 rows=77900 width=12)
-> Seq Scan on sub_part_1_prt_4_2_prt_one (cost=0.00..293.67 rows=25967 width=12)
-> Seq Scan on sub_part_1_prt_4_2_prt_one sub_part_7 (cost=0.00..293.67 rows=25967 width=12)
-> Gather Motion 3:1 (slice6; segments: 3) (cost=0.00..1332.33 rows=77900 width=12)
-> Seq Scan on sub_part_1_prt_4_2_prt_two (cost=0.00..293.67 rows=25967 width=12)
-> Seq Scan on sub_part_1_prt_4_2_prt_two sub_part_8 (cost=0.00..293.67 rows=25967 width=12)
-> Gather Motion 3:1 (slice7; segments: 3) (cost=0.00..1332.33 rows=77900 width=12)
-> Seq Scan on sub_part_1_prt_5_2_prt_one (cost=0.00..293.67 rows=25967 width=12)
-> Seq Scan on sub_part_1_prt_5_2_prt_one sub_part_9 (cost=0.00..293.67 rows=25967 width=12)
-> Gather Motion 3:1 (slice8; segments: 3) (cost=0.00..1332.33 rows=77900 width=12)
-> Seq Scan on sub_part_1_prt_5_2_prt_two (cost=0.00..293.67 rows=25967 width=12)
-> Seq Scan on sub_part_1_prt_5_2_prt_two sub_part_10 (cost=0.00..293.67 rows=25967 width=12)
Optimizer: Postgres query optimizer
(20 rows)

44 changes: 2 additions & 42 deletions contrib/postgres_fdw/postgres_fdw.c
@@ -5881,27 +5881,7 @@ apply_server_options(PgFdwRelationInfo *fpinfo)
DefElem *def = (DefElem *) lfirst(lc);

if (strcmp(def->defname, "use_remote_estimate") == 0)
{
/*
* GPDB_13_MERGE_FIXME: For updable statement, different forked Backends by Master
* (QD and entrydb instances)
* will hold Exclusive lock on the same table, which causes lock hang issue.
* For Postgres, there is only one backend, and connnections have been shared,
* so the issue doesn't exist.
*
* For example, following query will hang:
* SELECT *
* FROM ft1, ft2, ft4, ft5, local_tbl
* WHERE ft1.c1 = ft2.c1 AND
* ft1.c2 = ft4.c1 AND
* ft1.c2 = ft5.c1 AND
* ft1.c2 = local_tbl.c1 AND
* ft1.c1 < 100 AND
* ft2.c1 < 100 FOR UPDATE;
*/
elog(WARNING, "fdw option 'use_remote_estimate' is not supported.");
fpinfo->use_remote_estimate = false;
}
fpinfo->use_remote_estimate = defGetBoolean(def);
else if (strcmp(def->defname, "fdw_startup_cost") == 0)
(void) parse_real(defGetString(def), &fpinfo->fdw_startup_cost, 0,
NULL);
@@ -5933,27 +5913,7 @@ apply_table_options(PgFdwRelationInfo *fpinfo)
DefElem *def = (DefElem *) lfirst(lc);

if (strcmp(def->defname, "use_remote_estimate") == 0)
{
/*
* GPDB_13_MERGE_FIXME: For updable statement, different forked Backends by Master
* (QD and entrydb instances)
* will hold Exclusive lock on the same table, which causes lock hang issue.
* For Postgres, there is only one backend, and connnections have been shared,
* so the issue doesn't exist.
*
* For example, following query will hang:
* SELECT *
* FROM ft1, ft2, ft4, ft5, local_tbl
* WHERE ft1.c1 = ft2.c1 AND
* ft1.c2 = ft4.c1 AND
* ft1.c2 = ft5.c1 AND
* ft1.c2 = local_tbl.c1 AND
* ft1.c1 < 100 AND
* ft2.c1 < 100 FOR UPDATE;
*/
elog(WARNING, "fdw option 'use_remote_estimate' is not supported.");
fpinfo->use_remote_estimate = false;
}
fpinfo->use_remote_estimate = defGetBoolean(def);
else if (strcmp(def->defname, "fetch_size") == 0)
(void) parse_int(defGetString(def), &fpinfo->fetch_size, 0, NULL);
else if (strcmp(def->defname, "async_capable") == 0)
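With the GPDB_13_MERGE_FIXME workaround removed, apply_server_options and apply_table_options go back to the stock PostgreSQL behavior: use_remote_estimate is parsed as an ordinary boolean instead of being forced to false with a WARNING. A minimal sketch of the resulting option loop follows; it is abbreviated, not the verbatim merged code, and the includes and the fpinfo->server->options iteration are assumptions based on the surrounding upstream postgres_fdw sources. parse_real is postgres_fdw's internal helper, as seen in the diff context above.

/*
 * Sketch of apply_server_options after the patch (abbreviated).
 */
#include "postgres.h"

#include "commands/defrem.h"
#include "postgres_fdw.h"

static void
apply_server_options(PgFdwRelationInfo *fpinfo)
{
	ListCell   *lc;

	foreach(lc, fpinfo->server->options)
	{
		DefElem    *def = (DefElem *) lfirst(lc);

		if (strcmp(def->defname, "use_remote_estimate") == 0)
			/* no WARNING, no forced false: honor the user's setting */
			fpinfo->use_remote_estimate = defGetBoolean(def);
		else if (strcmp(def->defname, "fdw_startup_cost") == 0)
			(void) parse_real(defGetString(def), &fpinfo->fdw_startup_cost, 0,
							  NULL);
		/* ... remaining option branches are unchanged and omitted here ... */
	}
}

apply_table_options gets the same treatment, so a table-level OPTIONS (use_remote_estimate 'true') is now honored as well.
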
26 changes: 0 additions & 26 deletions contrib/postgres_fdw/postgres_fdw.h
@@ -19,33 +19,7 @@
#include "nodes/pathnodes.h"
#include "utils/relcache.h"

/* GPDB_13_MERGE_FIXME: Do we still needs this patch? */
/* postgres_fdw is compiled as a backend, it needs the server's
* header files such as executor/tuptable.h. It also needs libpq
* to connect to a remote postgres database, so it's statically
* linked to libpq.a which is compiled as a frontend using
* -DFRONTEND.
*
* But the struct PQconninfoOption's length is different between
* backend and frontend, there is no "connofs" field in frontend.
* When postgres_fdw calls the function "PQconndefaults" implemented
* in libpq.a and uses the returned PQconninfoOption variable, it crashs,
* because the PQconninfoOption variable returned by libpq.a doesn't contain
* the "connofs" value, but the postgres_fdw thinks it has, so it crashes.
*
* We define FRONTEND here to include frontend libpq header files.
*/
#ifdef LIBPQ_FE_H
#error "postgres_fdw.h" must be included before "libpq-fe.h"
#endif /* LIBPQ_FE_H */

#ifndef FRONTEND
#define FRONTEND
#include "libpq-fe.h"
#undef FRONTEND
#else
#include "libpq-fe.h"
#endif /* FRONTEND */

/*
* FDW-specific planner information kept in RelOptInfo.fdw_private for a
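Together with the Makefile change above (dropping link-canary.o and the -Bstatic libpq link), removing this block means postgres_fdw includes libpq-fe.h directly, as upstream PostgreSQL does. The added side is not visible in the diff, so the following is only an assumed sketch of the post-patch include section, modeled on upstream postgres_fdw.h:

/* contrib/postgres_fdw/postgres_fdw.h -- assumed include section after the patch */
#include "foreign/foreign.h"
#include "lib/stringinfo.h"
#include "libpq-fe.h"			/* plain include; no FRONTEND redefinition needed */
#include "nodes/pathnodes.h"
#include "utils/relcache.h"
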
62 changes: 62 additions & 0 deletions contrib/postgres_fdw/postgres_setup.bash
@@ -0,0 +1,62 @@
#!/bin/bash
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ ! -d testdata ]; then
mkdir testdata
fi
pushd ${DIR}/testdata
GPPORT=${PGPORT}
GPOPTIONS=${PGOPTIONS}
export PGPORT=${PG_PORT}
# Set PGOPTIONS to be empty and restart the GP cluster.
# PGOPTIONS='-c optimizer=off' is sometimes set on the GP cluster and would be
# forwarded to PG through postgres_fdw, but PG does not recognize the
# 'optimizer' setting. PGOPTIONS is only used by psql and is not needed for
# the GP cluster itself.
export PGOPTIONS=''
pgbin="pgsql"

# install postgres
if [ ! -d "${pgbin}" ] ; then
mkdir ${pgbin}
if [ ! -d postgresql-14.4 ]; then
wget https://ftp.postgresql.org/pub/source/v14.4/postgresql-14.4.tar.gz
tar -xf postgresql-14.4.tar.gz
fi
pushd postgresql-14.4
./configure --prefix=${DIR}/testdata/${pgbin}
make -sj$(nproc) MAKELEVEL=0 install
rm -rf ../postgresql-14.4.tar.gz
popd
fi

# start postgres 1
# there may already be a postgres instance running; stop it anyway
if [ -d "pgdata" ] ; then
${pgbin}/bin/pg_ctl -D pgdata stop || true
rm -r pgdata
fi
${pgbin}/bin/initdb -D pgdata
${pgbin}/bin/pg_ctl -D pgdata -l pglog start

# init postgres 1
${pgbin}/bin/dropdb --if-exists contrib_regression
${pgbin}/bin/createdb contrib_regression

# start postgres 2
# listening on port 5555
# there may already be a postgres instance running; stop it anyway
if [ -d "pgdata2" ] ; then
${pgbin}/bin/pg_ctl -D pgdata2 stop || true
rm -r pgdata2
fi
${pgbin}/bin/initdb -D pgdata2
${pgbin}/bin/pg_ctl -D pgdata2 -l pglog2 -o "-p 5555" start

# init postgres 2
${pgbin}/bin/dropdb -p 5555 --if-exists contrib_regression
${pgbin}/bin/createdb -p 5555 contrib_regression

export PGPORT=${GPPORT}
# export PGOPTIONS=${GPOPTIONS}
popd
gpstop -ar
6 changes: 5 additions & 1 deletion contrib/postgres_fdw/sql/gp2pg_postgres_fdw.sql
@@ -8,6 +8,10 @@
-- 2.3 gpdb don't support REINDEX CONCURRENTLY.
-- 3. gpdb will generate different PLAN from postgres, such as join local table and remote table, select for update/share and so on.

-- start_matchignore
-- m/^DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: .*$
-- end_matchignore

-- ===================================================================
-- create FDW objects
-- ===================================================================
@@ -303,7 +307,7 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- Nu
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l)
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r)
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE 1 = c1; -- OpExpr(r)
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- SubscriptingRef
28 changes: 0 additions & 28 deletions contrib/postgres_fdw/sql/gp_postgres_fdw.sql
@@ -20,34 +20,6 @@ $d$;

CREATE USER MAPPING IF NOT EXISTS FOR CURRENT_USER SERVER loopback;

CREATE EXTENSION postgres_fdw;

CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw;
DO $d$
BEGIN
EXECUTE $$CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (dbname '$$||current_database()||$$',
port '$$||current_setting('port')||$$'
)$$;
EXECUTE $$CREATE SERVER loopback2 FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (dbname '$$||current_database()||$$',
port '$$||current_setting('port')||$$'
)$$;
EXECUTE $$CREATE SERVER loopback3 FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (dbname '$$||current_database()||$$',
port '$$||current_setting('port')||$$'
)$$;
END;
$d$;

CREATE USER MAPPING FOR public SERVER testserver1
OPTIONS (user 'value', password 'value');
CREATE USER MAPPING FOR CURRENT_USER SERVER loopback;
CREATE USER MAPPING FOR CURRENT_USER SERVER loopback2;
CREATE USER MAPPING FOR public SERVER loopback3;

CREATE SCHEMA "S 1";

CREATE TABLE table_dist_rand
(
f1 int,
9 changes: 9 additions & 0 deletions contrib/postgres_fdw/sql/mpp_postgres_fdw.sql
@@ -1,3 +1,8 @@
-- start_ignore
alter system set optimizer = off;
select pg_reload_conf();
-- end_ignore

CREATE EXTENSION IF NOT EXISTS postgres_fdw;

CREATE SERVER testserver2 FOREIGN DATA WRAPPER postgres_fdw;
@@ -135,3 +140,7 @@ explain (costs off) select count(*) from fs1, fs2 where fs1.a = fs2.a and fs1.gp
select count(*) from fs1,fs2 where fs1.a = fs2.a and fs1.gp_foreign_server = fs2.gp_foreign_server;

reset enable_parallel;
-- start_ignore
alter system reset optimizer;
select pg_reload_conf();
-- end_ignore