diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index cbaf49669..d6a0dce1c 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -45,7 +45,7 @@ jobs: include: - ruby: "3.1" platform: "x64-mingw-ucrt" - PGVERSION: 14.2-1-windows-x64 + PGVERSION: 15.1-1-windows-x64 - ruby: "2.5" platform: "x64-mingw32" PGVERSION: 10.20-1-windows @@ -56,7 +56,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Set up Ruby - uses: MSP-Greg/ruby-setup-ruby@win-ucrt-1 + uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby }} @@ -83,4 +83,4 @@ jobs: - run: bundle install - run: gem install --local pg-*${{ matrix.platform }}.gem --verbose - name: Run specs - run: ruby -rpg -S rspec spec/**/*_spec.rb + run: ruby -rpg -S rspec -fd spec/**/*_spec.rb diff --git a/.github/workflows/source-gem.yml b/.github/workflows/source-gem.yml index 504c40e42..3a6158741 100644 --- a/.github/workflows/source-gem.yml +++ b/.github/workflows/source-gem.yml @@ -31,15 +31,15 @@ jobs: include: - os: windows ruby: "head" - PGVERSION: 14.2-1-windows-x64 - PGVER: "14" + PGVERSION: 15.1-1-windows-x64 + PGVER: "15" - os: windows ruby: "2.5" - PGVERSION: 9.3.25-1-windows-x64 - PGVER: "9.3" + PGVERSION: 9.4.26-1-windows-x64 + PGVER: "9.4" - os: ubuntu ruby: "head" - PGVER: "14" + PGVER: "15" - os: ubuntu ruby: "3.1" PGVER: "12" @@ -51,21 +51,22 @@ jobs: PGVER: "13" - os: ubuntu ruby: "truffleruby-head" - PGVER: "14" + PGVER: "15" - os: macos ruby: "head" - PGVERSION: 14.2-1-osx - PGVER: "14" + PGVERSION: 15.1-1-osx + PGVER: "15" runs-on: ${{ matrix.os }}-latest env: PGVERSION: ${{ matrix.PGVERSION }} PGVER: ${{ matrix.PGVER }} + MAKE: make -j2 V=1 steps: - uses: actions/checkout@v2 - name: Set up Ruby - uses: MSP-Greg/ruby-setup-ruby@win-ucrt-1 + uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby }} @@ -107,8 +108,9 @@ jobs: if: matrix.os == 'macos' run: | wget 
https://get.enterprisedb.com/postgresql/postgresql-$PGVERSION-binaries.zip && \ - unzip postgresql-$PGVERSION-binaries.zip && \ - echo `pwd`/pgsql/bin >> $GITHUB_PATH + sudo mkdir -p /Library/PostgreSQL && \ + sudo unzip postgresql-$PGVERSION-binaries.zip -d /Library/PostgreSQL/$PGVER && \ + echo /Library/PostgreSQL/$PGVER/bin >> $GITHUB_PATH - run: gem update --system - run: bundle install @@ -116,7 +118,6 @@ jobs: - run: gem install --local *.gem --verbose - name: Run specs - continue-on-error: ${{ matrix.ruby == 'truffleruby-head' }} env: PG_DEBUG: 0 run: ruby -rpg -S rspec spec/**/*_spec.rb -cfdoc diff --git a/History.rdoc b/History.rdoc index d657a7914..44d428383 100644 --- a/History.rdoc +++ b/History.rdoc @@ -1,3 +1,111 @@ +== v1.4.5 [2022-11-17] Lars Kanis + +- Return the libpq default port when blank in conninfo. #492 +- Add PG::DEF_PGPORT constant and use it in specs. #492 +- Fix name resolution when empty or `nil` port is given. +- Update error codes to PostgreSQL-15. +- Update Windows fat binary gem to PostgreSQL-15.1 AND OpenSSL-1.1.1s. + + +== v1.4.4 [2022-10-11] Lars Kanis + +- Revert to let libpq do the host iteration while connecting. #485 + Ensure that parameter `connect_timeout` is still respected. +- Handle multiple hosts in the connection string, where only one host has writable session. #476 +- Add some useful information to PG::Connection#inspect. #487 +- Support new pgresult_stream_any API in sequel_pg-1.17.0. #481 +- Update Windows fat binary gem to PostgreSQL-14.5. + + +== v1.4.3 [2022-08-09] Lars Kanis + +- Avoid memory bloat possible in put_copy_data in pg-1.4.0 to 1.4.2. #473 +- Use Encoding::BINARY for JOHAB, removing some useless code. #472 + + +== v1.4.2 [2022-07-27] Lars Kanis + +Bugfixes: + +- Properly handle empty host parameter when connecting. #471 +- Update Windows fat binary gem to OpenSSL-1.1.1q. + + +== v1.4.1 [2022-06-24] Lars Kanis + +Bugfixes: + +- Fix another ruby-2.7 keyword warning. 
#465 +- Allow PG::Error to be created without arguments. #466 + + +== v1.4.0 [2022-06-20] Lars Kanis + +Added: + +- Add PG::Connection#hostaddr, present since PostgreSQL-12. #453 +- Add PG::Connection.conninfo_parse to wrap PQconninfoParse. #453 + +Bugfixes: + +- Try IPv6 and IPv4 addresses, if DNS resolves to both. #452 +- Re-add block-call semantics to PG::Connection.new accidently removed in pg-1.3.0. #454 +- Handle client error after all data consumed in #copy_data for output. #455 +- Avoid spurious keyword argument warning on Ruby 2.7. #456 +- Change connection setup to respect connect_timeout parameter. #459 +- Fix indefinite hang in case of connection error on Windows #458 +- Set connection attribute of PG::Error in various places where it was missing. #461 +- Fix transaction leak on early break/return. #463 +- Update Windows fat binary gem to OpenSSL-1.1.1o and PostgreSQL-14.4. + +Enhancements: + +- Don't flush at each put_copy_data call, but flush at get_result. #462 + + +== v1.3.5 [2022-03-31] Lars Kanis + +Bugfixes: + +- Handle PGRES_COMMAND_OK in pgresult_stream_any. #447 + Fixes usage when trying to stream the result of a procedure call that returns no results. + +Enhancements: + +- Rename BasicTypeRegistry#define_default_types to #register_default_types to use a more consistent terminology. + Keeping define_default_types for compatibility. +- BasicTypeRegistry: return self instead of objects by accident. + This allows call chaining. +- Add some April fun. #449 + +Documentation: +- Refine documentation of conn.socket_io and conn.connect_poll + + +== v1.3.4 [2022-03-10] Lars Kanis + +Bugfixes: + +- Don't leak IO in case of connection errors. #439 + Previously it was kept open until the PG::Connection was garbage collected. +- Fix a performance regession in conn.get_result noticed in single row mode. #442 +- Fix occasional error Errno::EBADF (Bad file descriptor) while connecting. 
#444 +- Fix compatibility of res.stream_each* methods with Fiber.scheduler. #446 +- Remove FL_TEST and FL_SET, which are MRI-internal. #437 + +Enhancements: + +- Allow pgresult_stream_any to be used by sequel_pg. #443 + + +== v1.3.3 [2022-02-22] Lars Kanis + +Bugfixes: + +- Fix omission of the third digit of IPv4 addresses in connection URI. #435 +- Fix wrong permission of certs/larskanis-2022.pem in the pg-1.3.2.gem. #432 + + == v1.3.2 [2022-02-14] Lars Kanis Bugfixes: @@ -55,7 +163,7 @@ API Enhancements: - Run Connection.ping in a second thread. - Make discard_results scheduler friendly - Do all socket waiting through the conn.socket_io object. - - Avoid PG.connect blocking while address resolution by automatically providing the +hostaddr+ parameter. + - Avoid PG.connect blocking while address resolution by automatically providing the +hostaddr+ parameter and resolving in Ruby instead of libpq. - On Windows Fiber.scheduler support requires Ruby-3.1+. It is also only partly usable since may ruby IO methods are not yet scheduler aware on Windows. - Add support for pipeline mode of PostgreSQL-14. #401 diff --git a/README.rdoc b/README.rdoc index ab6834898..cd7fa5d5b 100644 --- a/README.rdoc +++ b/README.rdoc @@ -171,12 +171,31 @@ The following type maps are prefilled with type mappings from the PG::BasicTypeR To report bugs, suggest features, or check out the source with Git, {check out the project page}[https://github.com/ged/ruby-pg]. -After checking out the source, run: +After checking out the source, install all dependencies: - $ rake newb + $ bundle install -This task will install any missing dependencies, run the tests/specs, and -generate the API documentation. 
+Cleanup extension files, packaging files, test databases: + + $ rake clean + +Compile extension: + + $ rake compile + +Run tests/specs with PostgreSQL tools like `initdb` in the path: + + $ PATH=$PATH:/usr/lib/postgresql/14/bin rake test + +Or run a specific test with the line number: + + $ PATH=$PATH:/usr/lib/postgresql/14/bin rspec -Ilib -fd spec/pg/connection_spec.rb:455 + +Generate the API documentation: + + $ rake docs + +Make sure, that all bugs and new features are verified by tests. The current maintainers are Michael Granger and Lars Kanis . @@ -184,7 +203,7 @@ Lars Kanis . == Copying -Copyright (c) 1997-2019 by the authors. +Copyright (c) 1997-2022 by the authors. * Jeff Davis * Guy Decoux (ts) diff --git a/Rakefile b/Rakefile index 505c9f5fe..c5f2a956b 100644 --- a/Rakefile +++ b/Rakefile @@ -16,14 +16,14 @@ LIBDIR = BASEDIR + 'lib' EXTDIR = BASEDIR + 'ext' PKGDIR = BASEDIR + 'pkg' TMPDIR = BASEDIR + 'tmp' -TESTDIR = BASEDIR + "tmp_test_specs" +TESTDIR = BASEDIR + "tmp_test_*" DLEXT = RbConfig::CONFIG['DLEXT'] EXT = LIBDIR + "pg_ext.#{DLEXT}" GEMSPEC = 'pg.gemspec' -CLOBBER.include( TESTDIR.to_s ) +CLEAN.include( TESTDIR.to_s ) CLEAN.include( PKGDIR.to_s, TMPDIR.to_s ) CLEAN.include "lib/*/libpq.dll" CLEAN.include "lib/pg_ext.*" @@ -92,7 +92,7 @@ end desc "Update list of server error codes" task :update_error_codes do - URL_ERRORCODES_TXT = "http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob_plain;f=src/backend/utils/errcodes.txt;hb=refs/tags/REL_14_0" + URL_ERRORCODES_TXT = "http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob_plain;f=src/backend/utils/errcodes.txt;hb=refs/tags/REL_15_0" ERRORCODES_TXT = "ext/errorcodes.txt" sh "wget #{URL_ERRORCODES_TXT.inspect} -O #{ERRORCODES_TXT.inspect} || curl #{URL_ERRORCODES_TXT.inspect} -o #{ERRORCODES_TXT.inspect}" diff --git a/Rakefile.cross b/Rakefile.cross index 617d275cd..a9e13f860 100644 --- a/Rakefile.cross +++ b/Rakefile.cross @@ -7,6 +7,7 @@ require 'rake/clean' require 
'rake/extensiontask' require 'rake/extensioncompiler' require 'ostruct' +require_relative 'rakelib/task_extension' MISCDIR = BASEDIR + 'misc' @@ -20,6 +21,7 @@ end class CrossLibrary < OpenStruct include Rake::DSL + prepend TaskExtension def initialize(for_platform, openssl_config, toolchain) super() @@ -29,8 +31,8 @@ class CrossLibrary < OpenStruct self.host_platform = toolchain # Cross-compilation constants - self.openssl_version = ENV['OPENSSL_VERSION'] || '1.1.1m' - self.postgresql_version = ENV['POSTGRESQL_VERSION'] || '14.2' + self.openssl_version = ENV['OPENSSL_VERSION'] || '1.1.1s' + self.postgresql_version = ENV['POSTGRESQL_VERSION'] || '15.1' # Check if symlinks work in the current working directory. # This fails, if rake-compiler-dock is running on a Windows box. diff --git a/ext/errorcodes.def b/ext/errorcodes.def index ca6708820..5c501d790 100644 --- a/ext/errorcodes.def +++ b/ext/errorcodes.def @@ -366,6 +366,10 @@ VALUE klass = define_error_class( "SqlJsonScalarRequired", "22" ); register_error_class( "2203F", klass ); } +{ + VALUE klass = define_error_class( "SqlJsonItemCannotBeCastToTargetType", "22" ); + register_error_class( "2203G", klass ); +} { VALUE klass = define_error_class( "IntegrityConstraintViolation", NULL ); register_error_class( "23000", klass ); diff --git a/ext/errorcodes.rb b/ext/errorcodes.rb old mode 100755 new mode 100644 diff --git a/ext/errorcodes.txt b/ext/errorcodes.txt index 9874a7780..62418a051 100644 --- a/ext/errorcodes.txt +++ b/ext/errorcodes.txt @@ -2,7 +2,7 @@ # errcodes.txt # PostgreSQL error codes # -# Copyright (c) 2003-2021, PostgreSQL Global Development Group +# Copyright (c) 2003-2022, PostgreSQL Global Development Group # # This list serves as the basis for generating source files containing error # codes. 
It is kept in a common format to make sure all these source files have @@ -222,6 +222,7 @@ Section: Class 22 - Data Exception 2203D E ERRCODE_TOO_MANY_JSON_ARRAY_ELEMENTS too_many_json_array_elements 2203E E ERRCODE_TOO_MANY_JSON_OBJECT_MEMBERS too_many_json_object_members 2203F E ERRCODE_SQL_JSON_SCALAR_REQUIRED sql_json_scalar_required +2203G E ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE sql_json_item_cannot_be_cast_to_target_type Section: Class 23 - Integrity Constraint Violation diff --git a/ext/extconf.rb b/ext/extconf.rb old mode 100755 new mode 100644 index 409c27602..9bf1c2e5f --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -37,12 +37,12 @@ if pgconfig && pgconfig != 'ignore' $stderr.puts "Using config values from %s" % [ pgconfig ] - incdir = `"#{pgconfig}" --includedir`.chomp - libdir = `"#{pgconfig}" --libdir`.chomp + incdir = IO.popen([pgconfig, "--includedir"], &:read).chomp + libdir = IO.popen([pgconfig, "--libdir"], &:read).chomp dir_config 'pg', incdir, libdir # Windows traditionally stores DLLs beside executables, not in libdir - dlldir = RUBY_PLATFORM=~/mingw|mswin/ ? `"#{pgconfig}" --bindir`.chomp : libdir + dlldir = RUBY_PLATFORM=~/mingw|mswin/ ? IO.popen([pgconfig, "--bindir"], &:read).chomp : libdir elsif checking_for "libpq per pkg-config" do _cflags, ldflags, _libs = pkg_config("libpq") @@ -87,7 +87,7 @@ module PG have_library( 'libpq', 'PQconnectdb', ['libpq-fe.h'] ) || have_library( 'ms/libpq', 'PQconnectdb', ['libpq-fe.h'] ) -rescue SystemExit => err +rescue SystemExit install_text = case RUBY_PLATFORM when /linux/ <<-EOT diff --git a/ext/pg.c b/ext/pg.c index 0f4517893..7f362cf9f 100644 --- a/ext/pg.c +++ b/ext/pg.c @@ -47,7 +47,6 @@ */ #include "pg.h" -#include "pg_config.h" int pg_skip_deprecation_warning; VALUE rb_mPG; @@ -127,26 +126,6 @@ const char * const (pg_enc_pg2ruby_mapping[][2]) = { static struct st_table *enc_pg2ruby; -/* - * Look up the JOHAB encoding, creating it as a dummy encoding if it's not - * already defined. 
- */ -static rb_encoding * -pg_find_or_create_johab(void) -{ - static const char * const aliases[] = { "JOHAB", "Windows-1361", "CP1361" }; - int enc_index; - size_t i; - - for (i = 0; i < sizeof(aliases)/sizeof(aliases[0]); ++i) { - enc_index = rb_enc_find_index(aliases[i]); - if (enc_index > 0) return rb_enc_from_index(enc_index); - } - - enc_index = rb_define_dummy_encoding(aliases[0]); - return rb_enc_from_index(enc_index); -} - /* * Return the given PostgreSQL encoding ID as an rb_encoding. * @@ -187,10 +166,6 @@ pg_get_pg_encname_as_rb_encoding( const char *pg_encname ) return rb_enc_find( pg_enc_pg2ruby_mapping[i][1] ); } - /* JOHAB isn't a builtin encoding, so make up a dummy encoding if it's seen */ - if ( strncmp(pg_encname, "JOHAB", 5) == 0 ) - return pg_find_or_create_johab(); - /* Fallthrough to ASCII-8BIT */ return rb_ascii8bit_encoding(); } @@ -377,7 +352,7 @@ pg_s_init_ssl(VALUE self, VALUE do_ssl) **************************************************************************/ void -Init_pg_ext() +Init_pg_ext(void) { if( RTEST(rb_eval_string("ENV['PG_SKIP_DEPRECATION_WARNING']")) ){ /* Set all bits to disable all deprecation warnings. 
*/ @@ -704,6 +679,9 @@ Init_pg_ext() rb_define_const(rb_mPGconstants, "INVALID_OID", INT2FIX(InvalidOid)); rb_define_const(rb_mPGconstants, "InvalidOid", INT2FIX(InvalidOid)); + /* PostgreSQL compiled in default port */ + rb_define_const(rb_mPGconstants, "DEF_PGPORT", INT2FIX(DEF_PGPORT)); + /* Add the constants to the toplevel namespace */ rb_include_module( rb_mPG, rb_mPGconstants ); diff --git a/ext/pg.h b/ext/pg.h index 8fdf5bff2..615708f56 100644 --- a/ext/pg.h +++ b/ext/pg.h @@ -57,6 +57,7 @@ #endif /* PostgreSQL headers */ +#include "pg_config.h" #include "libpq-fe.h" #include "libpq/libpq-fs.h" /* large-object interface */ #include "pg_config_manual.h" @@ -344,6 +345,7 @@ void pg_typemap_compact _(( void * )); PGconn *pg_get_pgconn _(( VALUE )); t_pg_connection *pg_get_connection _(( VALUE )); +VALUE pgconn_block _(( int, VALUE *, VALUE )); VALUE pg_new_result _(( PGresult *, VALUE )); VALUE pg_new_result_autoclear _(( PGresult *, VALUE )); diff --git a/ext/pg_binary_decoder.c b/ext/pg_binary_decoder.c index 8e42f4e44..0ed8d892a 100644 --- a/ext/pg_binary_decoder.c +++ b/ext/pg_binary_decoder.c @@ -205,7 +205,7 @@ pg_bin_dec_timestamp(t_pg_coder *conv, const char *val, int len, int tuple, int */ void -init_pg_binary_decoder() +init_pg_binary_decoder(void) { /* This module encapsulates all decoder classes with binary input format */ rb_mPG_BinaryDecoder = rb_define_module_under( rb_mPG, "BinaryDecoder" ); diff --git a/ext/pg_binary_encoder.c b/ext/pg_binary_encoder.c index 2d97b4213..61266b041 100644 --- a/ext/pg_binary_encoder.c +++ b/ext/pg_binary_encoder.c @@ -139,7 +139,7 @@ pg_bin_enc_from_base64(t_pg_coder *conv, VALUE value, char *out, VALUE *intermed } void -init_pg_binary_encoder() +init_pg_binary_encoder(void) { /* This module encapsulates all encoder classes with binary output format */ rb_mPG_BinaryEncoder = rb_define_module_under( rb_mPG, "BinaryEncoder" ); diff --git a/ext/pg_coder.c b/ext/pg_coder.c index a6facab98..04219638a 100644 --- 
a/ext/pg_coder.c +++ b/ext/pg_coder.c @@ -537,7 +537,7 @@ pg_coder_dec_func(t_pg_coder *this, int binary) void -init_pg_coder() +init_pg_coder(void) { s_id_encode = rb_intern("encode"); s_id_decode = rb_intern("decode"); diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 4f9138b17..f99739e43 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -24,11 +24,32 @@ static VALUE pgconn_set_default_encoding( VALUE self ); static VALUE pgconn_wait_for_flush( VALUE self ); static void pgconn_set_internal_encoding_index( VALUE ); static const rb_data_type_t pg_connection_type; +static VALUE pgconn_async_flush(VALUE self); /* * Global functions */ +/* + * Convenience function to raise connection errors + */ +#ifdef __GNUC__ +__attribute__((format(printf, 3, 4))) +#endif +static void +pg_raise_conn_error( VALUE klass, VALUE self, const char *format, ...) +{ + VALUE msg, error; + va_list ap; + + va_start(ap, format); + msg = rb_vsprintf(format, ap); + va_end(ap); + error = rb_exc_new_str(klass, msg); + rb_iv_set(error, "@connection", self); + rb_exc_raise(error); +} + /* * Fetch the PG::Connection object data pointer. 
*/ @@ -52,7 +73,7 @@ pg_get_connection_safe( VALUE self ) TypedData_Get_Struct( self, t_pg_connection, &pg_connection_type, this); if ( !this->pgconn ) - rb_raise( rb_eConnectionBad, "connection is closed" ); + pg_raise_conn_error( rb_eConnectionBad, self, "connection is closed"); return this; } @@ -70,8 +91,9 @@ pg_get_pgconn( VALUE self ) t_pg_connection *this; TypedData_Get_Struct( self, t_pg_connection, &pg_connection_type, this); - if ( !this->pgconn ) - rb_raise( rb_eConnectionBad, "connection is closed" ); + if ( !this->pgconn ){ + pg_raise_conn_error( rb_eConnectionBad, self, "connection is closed"); + } return this->pgconn; } @@ -89,9 +111,8 @@ pgconn_close_socket_io( VALUE self ) if ( RTEST(socket_io) ) { #if defined(_WIN32) - if( rb_w32_unwrap_io_handle(this->ruby_sd) ){ - rb_raise(rb_eConnectionBad, "Could not unwrap win32 socket handle"); - } + if( rb_w32_unwrap_io_handle(this->ruby_sd) ) + pg_raise_conn_error( rb_eConnectionBad, self, "Could not unwrap win32 socket handle"); #endif rb_funcall( socket_io, rb_intern("close"), 0 ); } @@ -245,6 +266,7 @@ pgconn_s_allocate( VALUE klass ) this->encoder_for_put_copy_data = Qnil; this->decoder_for_get_copy_data = Qnil; this->trace_stream = Qnil; + rb_ivar_set(self, rb_intern("@calls_to_put_copy_data"), INT2FIX(0)); return self; } @@ -254,7 +276,6 @@ pgconn_s_sync_connect(int argc, VALUE *argv, VALUE klass) { t_pg_connection *this; VALUE conninfo; - VALUE error; VALUE self = pgconn_s_allocate( klass ); this = pg_get_connection( self ); @@ -262,13 +283,10 @@ pgconn_s_sync_connect(int argc, VALUE *argv, VALUE klass) this->pgconn = gvl_PQconnectdb(StringValueCStr(conninfo)); if(this->pgconn == NULL) - rb_raise(rb_ePGerror, "PQconnectdb() unable to allocate structure"); + rb_raise(rb_ePGerror, "PQconnectdb() unable to allocate PGconn structure"); - if (PQstatus(this->pgconn) == CONNECTION_BAD) { - error = rb_exc_new2(rb_eConnectionBad, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - 
rb_exc_raise(error); - } + if (PQstatus(this->pgconn) == CONNECTION_BAD) + pg_raise_conn_error( rb_eConnectionBad, self, "%s", PQerrorMessage(this->pgconn)); pgconn_set_default_encoding( self ); @@ -301,7 +319,6 @@ pgconn_s_connect_start( int argc, VALUE *argv, VALUE klass ) { VALUE rb_conn; VALUE conninfo; - VALUE error; t_pg_connection *this; /* @@ -314,13 +331,10 @@ pgconn_s_connect_start( int argc, VALUE *argv, VALUE klass ) this->pgconn = gvl_PQconnectStart( StringValueCStr(conninfo) ); if( this->pgconn == NULL ) - rb_raise(rb_ePGerror, "PQconnectStart() unable to allocate structure"); + rb_raise(rb_ePGerror, "PQconnectStart() unable to allocate PGconn structure"); - if ( PQstatus(this->pgconn) == CONNECTION_BAD ) { - error = rb_exc_new2(rb_eConnectionBad, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", rb_conn); - rb_exc_raise(error); - } + if ( PQstatus(this->pgconn) == CONNECTION_BAD ) + pg_raise_conn_error( rb_eConnectionBad, rb_conn, "%s", PQerrorMessage(this->pgconn)); if ( rb_block_given_p() ) { return rb_ensure( rb_yield, rb_conn, pgconn_finish, rb_conn ); @@ -376,6 +390,36 @@ pgconn_s_conndefaults(VALUE self) return array; } +/* + * Document-method: PG::Connection.conninfo_parse + * + * call-seq: + * PG::Connection.conninfo_parse(conninfo_string) -> Array + * + * Returns parsed connection options from the provided connection string as an array of hashes. + * Each hash has the same keys as PG::Connection.conndefaults() . + * The values from the +conninfo_string+ are stored in the +:val+ key. 
+ */ +static VALUE +pgconn_s_conninfo_parse(VALUE self, VALUE conninfo) +{ + VALUE array; + char *errmsg = NULL; + PQconninfoOption *options = PQconninfoParse(StringValueCStr(conninfo), &errmsg); + if(errmsg){ + VALUE error = rb_str_new_cstr(errmsg); + PQfreemem(errmsg); + rb_raise(rb_ePGerror, "%"PRIsVALUE, error); + } + array = pgconn_make_conninfo_array( options ); + + PQconninfoFree(options); + + UNUSED( self ); + + return array; +} + #ifdef HAVE_PQENCRYPTPASSWORDCONN static VALUE @@ -396,7 +440,7 @@ pgconn_sync_encrypt_password(int argc, VALUE *argv, VALUE self) rval = rb_str_new2( encrypted ); PQfreemem( encrypted ); } else { - rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); } return rval; @@ -450,17 +494,18 @@ pgconn_s_encrypt_password(VALUE self, VALUE password, VALUE username) * the asynchronous connection is ready * * Example: - * conn = PG::Connection.connect_start("dbname=mydatabase") - * socket = conn.socket_io + * require "io/wait" + * + * conn = PG::Connection.connect_start(dbname: 'mydatabase') * status = conn.connect_poll * while(status != PG::PGRES_POLLING_OK) do * # do some work while waiting for the connection to complete * if(status == PG::PGRES_POLLING_READING) - * if(not select([socket], [], [], 10.0)) + * unless conn.socket_io.wait_readable(10.0) * raise "Asynchronous connection timed out!" * end * elsif(status == PG::PGRES_POLLING_WRITING) - * if(not select([], [socket], [], 10.0)) + * unless conn.socket_io.wait_writable(10.0) * raise "Asynchronous connection timed out!" 
* end * end @@ -475,9 +520,7 @@ pgconn_connect_poll(VALUE self) PostgresPollingStatusType status; status = gvl_PQconnectPoll(pg_get_pgconn(self)); - if ( status == PGRES_POLLING_FAILED ) { - pgconn_close_socket_io(self); - } + pgconn_close_socket_io(self); return INT2FIX((int)status); } @@ -538,7 +581,7 @@ pgconn_reset_start(VALUE self) { pgconn_close_socket_io( self ); if(gvl_PQresetStart(pg_get_pgconn(self)) == 0) - rb_raise(rb_eUnableToSend, "reset has failed"); + pg_raise_conn_error( rb_eUnableToSend, self, "reset has failed"); return Qnil; } @@ -556,9 +599,7 @@ pgconn_reset_poll(VALUE self) PostgresPollingStatusType status; status = gvl_PQresetPoll(pg_get_pgconn(self)); - if ( status == PGRES_POLLING_FAILED ) { - pgconn_close_socket_io(self); - } + pgconn_close_socket_io(self); return INT2FIX((int)status); } @@ -610,7 +651,18 @@ pgconn_pass(VALUE self) * call-seq: * conn.host() * - * Returns the connected server name. + * Returns the server host name of the active connection. + * This can be a host name, an IP address, or a directory path if the connection is via Unix socket. + * (The path case can be distinguished because it will always be an absolute path, beginning with +/+ .) + * + * If the connection parameters specified both host and hostaddr, then +host+ will return the host information. + * If only hostaddr was specified, then that is returned. + * If multiple hosts were specified in the connection parameters, +host+ returns the host actually connected to. + * + * If there is an error producing the host information (perhaps if the connection has not been fully established or there was an error), it returns an empty string. + * + * If multiple hosts were specified in the connection parameters, it is not possible to rely on the result of +host+ until the connection is established. + * The status of the connection can be checked using the function Connection#status . 
*/ static VALUE pgconn_host(VALUE self) @@ -620,6 +672,26 @@ pgconn_host(VALUE self) return rb_str_new2(host); } +/* PQhostaddr() appeared in PostgreSQL-12 together with PQresultMemorySize() */ +#if defined(HAVE_PQRESULTMEMORYSIZE) +/* + * call-seq: + * conn.hostaddr() + * + * Returns the server IP address of the active connection. + * This can be the address that a host name resolved to, or an IP address provided through the hostaddr parameter. + * If there is an error producing the host information (perhaps if the connection has not been fully established or there was an error), it returns an empty string. + * + */ +static VALUE +pgconn_hostaddr(VALUE self) +{ + char *host = PQhostaddr(pg_get_pgconn(self)); + if (!host) return Qnil; + return rb_str_new2(host); +} +#endif + /* * call-seq: * conn.port() @@ -630,7 +702,10 @@ static VALUE pgconn_port(VALUE self) { char* port = PQport(pg_get_pgconn(self)); - return INT2NUM(atoi(port)); + if (!port || port[0] == '\0') + return INT2NUM(DEF_PGPORT); + else + return INT2NUM(atoi(port)); } /* @@ -690,6 +765,9 @@ pgconn_conninfo( VALUE self ) * PG::Constants::CONNECTION_BAD * * ... and other constants of kind PG::Constants::CONNECTION_* + * + * Example: + * PG.constants.grep(/CONNECTION_/).find{|c| PG.const_get(c) == conn.status} # => :CONNECTION_OK */ static VALUE pgconn_status(VALUE self) @@ -814,7 +892,8 @@ pgconn_socket(VALUE self) pg_deprecated(4, ("conn.socket is deprecated and should be replaced by conn.socket_io")); if( (sd = PQsocket(pg_get_pgconn(self))) < 0) - rb_raise(rb_eConnectionBad, "PQsocket() can't get socket descriptor"); + pg_raise_conn_error( rb_eConnectionBad, self, "PQsocket() can't get socket descriptor"); + return INT2NUM(sd); } @@ -822,13 +901,15 @@ pgconn_socket(VALUE self) * call-seq: * conn.socket_io() -> IO * - * Fetch a memorized IO object created from the Connection's underlying socket. - * This object can be used for IO.select to wait for events while running - * asynchronous API calls. 
+ * Fetch an IO object created from the Connection's underlying socket. + * This object can be used per socket_io.wait_readable, socket_io.wait_writable or for IO.select to wait for events while running asynchronous API calls. + * IO#wait_*able is Fiber.scheduler compatible in contrast to IO.select. * - * Using this instead of #socket avoids the problem of the underlying connection - * being closed by Ruby when an IO created using IO.for_fd(conn.socket) - * goes out of scope. In contrast to #socket, it also works on Windows. + * The IO object can change while the connection is established, but is memorized afterwards. + * So be sure not to cache the IO object, but repeat calling conn.socket_io instead. + * + * Using this method also works on Windows in contrast to using #socket . + * It also avoids the problem of the underlying connection being closed by Ruby when an IO created using IO.for_fd(conn.socket) goes out of scope. */ static VALUE pgconn_socket_io(VALUE self) @@ -840,14 +921,15 @@ pgconn_socket_io(VALUE self) VALUE socket_io = this->socket_io; if ( !RTEST(socket_io) ) { - if( (sd = PQsocket(this->pgconn)) < 0) - rb_raise(rb_eConnectionBad, "PQsocket() can't get socket descriptor"); + if( (sd = PQsocket(this->pgconn)) < 0){ + pg_raise_conn_error( rb_eConnectionBad, self, "PQsocket() can't get socket descriptor"); + } #ifdef _WIN32 ruby_sd = rb_w32_wrap_io_handle((HANDLE)(intptr_t)sd, O_RDWR|O_BINARY|O_NOINHERIT); - if( ruby_sd == -1 ){ - rb_raise(rb_eConnectionBad, "Could not wrap win32 socket handle"); - } + if( ruby_sd == -1 ) + pg_raise_conn_error( rb_eConnectionBad, self, "Could not wrap win32 socket handle"); + this->ruby_sd = ruby_sd; #else ruby_sd = sd; @@ -912,7 +994,7 @@ pgconn_backend_key(VALUE self) cancel = (struct pg_cancel*)PQgetCancel(conn); if(cancel == NULL) - rb_raise(rb_ePGerror,"Invalid connection!"); + pg_raise_conn_error( rb_ePGerror, self, "Invalid connection!"); if( cancel->be_pid != PQbackendPID(conn) ) 
rb_raise(rb_ePGerror,"Unexpected binary struct layout - please file a bug report at ruby-pg!"); @@ -1448,8 +1530,7 @@ pgconn_sync_describe_prepared(VALUE self, VALUE stmt_name) * It's not recommended to use explicit sync or async variants but #describe_portal instead, unless you have a good reason to do so. */ static VALUE -pgconn_sync_describe_portal(self, stmt_name) - VALUE self, stmt_name; +pgconn_sync_describe_portal(VALUE self, VALUE stmt_name) { PGresult *result; VALUE rb_pgresult; @@ -1541,9 +1622,9 @@ pgconn_s_escape(VALUE self, VALUE string) if( !singleton ) { size = PQescapeStringConn(pg_get_pgconn(self), RSTRING_PTR(result), RSTRING_PTR(string), RSTRING_LEN(string), &error); - if(error) { - rb_raise(rb_ePGerror, "%s", PQerrorMessage(pg_get_pgconn(self))); - } + if(error) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(pg_get_pgconn(self))); + } else { size = PQescapeString(RSTRING_PTR(result), RSTRING_PTR(string), RSTRING_LEN(string)); } @@ -1639,7 +1720,6 @@ pgconn_escape_literal(VALUE self, VALUE string) { t_pg_connection *this = pg_get_connection_safe( self ); char *escaped = NULL; - VALUE error; VALUE result = Qnil; int enc_idx = this->enc_idx; @@ -1650,12 +1730,8 @@ pgconn_escape_literal(VALUE self, VALUE string) escaped = PQescapeLiteral(this->pgconn, RSTRING_PTR(string), RSTRING_LEN(string)); if (escaped == NULL) - { - error = rb_exc_new2(rb_ePGerror, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - return Qnil; - } + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(this->pgconn)); + result = rb_str_new2(escaped); PQfreemem(escaped); PG_ENCODING_SET_NOCHECK(result, enc_idx); @@ -1678,7 +1754,6 @@ pgconn_escape_identifier(VALUE self, VALUE string) { t_pg_connection *this = pg_get_connection_safe( self ); char *escaped = NULL; - VALUE error; VALUE result = Qnil; int enc_idx = this->enc_idx; @@ -1689,12 +1764,8 @@ pgconn_escape_identifier(VALUE self, VALUE string) escaped = 
PQescapeIdentifier(this->pgconn, RSTRING_PTR(string), RSTRING_LEN(string)); if (escaped == NULL) - { - error = rb_exc_new2(rb_ePGerror, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - return Qnil; - } + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(this->pgconn)); + result = rb_str_new2(escaped); PQfreemem(escaped); PG_ENCODING_SET_NOCHECK(result, enc_idx); @@ -1742,14 +1813,9 @@ static VALUE pgconn_set_single_row_mode(VALUE self) { PGconn *conn = pg_get_pgconn(self); - VALUE error; if( PQsetSingleRowMode(conn) == 0 ) - { - error = rb_exc_new2(rb_ePGerror, PQerrorMessage(conn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - } + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); return self; } @@ -1773,15 +1839,12 @@ static VALUE pgconn_send_query(int argc, VALUE *argv, VALUE self) { t_pg_connection *this = pg_get_connection_safe( self ); - VALUE error; /* If called with no or nil parameters, use PQexec for compatibility */ if ( argc == 1 || (argc >= 2 && argc <= 4 && NIL_P(argv[1]) )) { - if(gvl_PQsendQuery(this->pgconn, pg_cstr_enc(argv[0], this->enc_idx)) == 0) { - error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - } + if(gvl_PQsendQuery(this->pgconn, pg_cstr_enc(argv[0], this->enc_idx)) == 0) + pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pgconn_wait_for_flush( self ); return Qnil; } @@ -1838,7 +1901,6 @@ pgconn_send_query_params(int argc, VALUE *argv, VALUE self) t_pg_connection *this = pg_get_connection_safe( self ); int result; VALUE command, in_res_fmt; - VALUE error; int nParams; int resultFormat; struct query_params_data paramsData = { this->enc_idx }; @@ -1855,11 +1917,9 @@ pgconn_send_query_params(int argc, VALUE *argv, VALUE self) free_query_params( ¶msData ); - if(result == 0) { - error = rb_exc_new2(rb_eUnableToSend, 
PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - } + if(result == 0) + pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pgconn_wait_for_flush( self ); return Qnil; } @@ -1891,7 +1951,6 @@ pgconn_send_prepare(int argc, VALUE *argv, VALUE self) int result; VALUE name, command, in_paramtypes; VALUE param; - VALUE error; int i = 0; int nParams = 0; Oid *paramTypes = NULL; @@ -1920,9 +1979,7 @@ pgconn_send_prepare(int argc, VALUE *argv, VALUE self) xfree(paramTypes); if(result == 0) { - error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); + pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); } pgconn_wait_for_flush( self ); return Qnil; @@ -1966,7 +2023,6 @@ pgconn_send_query_prepared(int argc, VALUE *argv, VALUE self) t_pg_connection *this = pg_get_connection_safe( self ); int result; VALUE name, in_res_fmt; - VALUE error; int nParams; int resultFormat; struct query_params_data paramsData = { this->enc_idx }; @@ -1988,11 +2044,9 @@ pgconn_send_query_prepared(int argc, VALUE *argv, VALUE self) free_query_params( ¶msData ); - if(result == 0) { - error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - } + if(result == 0) + pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pgconn_wait_for_flush( self ); return Qnil; } @@ -2007,14 +2061,11 @@ pgconn_send_query_prepared(int argc, VALUE *argv, VALUE self) static VALUE pgconn_send_describe_prepared(VALUE self, VALUE stmt_name) { - VALUE error; t_pg_connection *this = pg_get_connection_safe( self ); /* returns 0 on failure */ - if(gvl_PQsendDescribePrepared(this->pgconn, pg_cstr_enc(stmt_name, this->enc_idx)) == 0) { - error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); 
- rb_exc_raise(error); - } + if(gvl_PQsendDescribePrepared(this->pgconn, pg_cstr_enc(stmt_name, this->enc_idx)) == 0) + pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pgconn_wait_for_flush( self ); return Qnil; } @@ -2030,14 +2081,11 @@ pgconn_send_describe_prepared(VALUE self, VALUE stmt_name) static VALUE pgconn_send_describe_portal(VALUE self, VALUE portal) { - VALUE error; t_pg_connection *this = pg_get_connection_safe( self ); /* returns 0 on failure */ - if(gvl_PQsendDescribePortal(this->pgconn, pg_cstr_enc(portal, this->enc_idx)) == 0) { - error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - } + if(gvl_PQsendDescribePortal(this->pgconn, pg_cstr_enc(portal, this->enc_idx)) == 0) + pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pgconn_wait_for_flush( self ); return Qnil; } @@ -2070,18 +2118,15 @@ pgconn_sync_get_result(VALUE self) * or *notifies* to see if the state has changed. */ static VALUE -pgconn_consume_input(self) - VALUE self; +pgconn_consume_input(VALUE self) { - VALUE error; PGconn *conn = pg_get_pgconn(self); /* returns 0 on error */ if(PQconsumeInput(conn) == 0) { pgconn_close_socket_io(self); - error = rb_exc_new2(rb_eConnectionBad, PQerrorMessage(conn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); + pg_raise_conn_error( rb_eConnectionBad, self, "%s", PQerrorMessage(conn)); } + return Qnil; } @@ -2093,18 +2138,15 @@ pgconn_consume_input(self) * #get_result would block. Otherwise returns +false+. */ static VALUE -pgconn_is_busy(self) - VALUE self; +pgconn_is_busy(VALUE self) { return gvl_PQisBusy(pg_get_pgconn(self)) ? 
Qtrue : Qfalse; } static VALUE -pgconn_sync_setnonblocking(self, state) - VALUE self, state; +pgconn_sync_setnonblocking(VALUE self, VALUE state) { int arg; - VALUE error; PGconn *conn = pg_get_pgconn(self); if(state == Qtrue) arg = 1; @@ -2113,18 +2155,15 @@ pgconn_sync_setnonblocking(self, state) else rb_raise(rb_eArgError, "Boolean value expected"); - if(PQsetnonblocking(conn, arg) == -1) { - error = rb_exc_new2(rb_ePGerror, PQerrorMessage(conn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - } + if(PQsetnonblocking(conn, arg) == -1) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + return Qnil; } static VALUE -pgconn_sync_isnonblocking(self) - VALUE self; +pgconn_sync_isnonblocking(VALUE self) { return PQisnonblocking(pg_get_pgconn(self)) ? Qtrue : Qfalse; } @@ -2133,14 +2172,10 @@ static VALUE pgconn_sync_flush(VALUE self) { PGconn *conn = pg_get_pgconn(self); - int ret; - VALUE error; - ret = PQflush(conn); - if(ret == -1) { - error = rb_exc_new2(rb_ePGerror, PQerrorMessage(conn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - } + int ret = PQflush(conn); + if(ret == -1) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + return (ret) ? 
Qfalse : Qtrue; } @@ -2154,7 +2189,7 @@ pgconn_sync_cancel(VALUE self) cancel = PQgetCancel(pg_get_pgconn(self)); if(cancel == NULL) - rb_raise(rb_ePGerror,"Invalid connection!"); + pg_raise_conn_error( rb_ePGerror, self, "Invalid connection!"); ret = gvl_PQcancel(cancel, errbuf, sizeof(errbuf)); if(ret == 1) @@ -2343,21 +2378,12 @@ pg_rb_io_wait(VALUE io, VALUE events, VALUE timeout) { static void * wait_socket_readable( VALUE self, struct timeval *ptimeout, void *(*is_readable)(PGconn *)) { - VALUE socket_io; VALUE ret; void *retval; struct timeval aborttime={0,0}, currtime, waittime; VALUE wait_timeout = Qnil; PGconn *conn = pg_get_pgconn(self); - socket_io = pgconn_socket_io(self); - - /* Check for connection errors (PQisBusy is true on connection errors) */ - if ( PQconsumeInput(conn) == 0 ) { - pgconn_close_socket_io(self); - rb_raise( rb_eConnectionBad, "PQconsumeInput() %s", PQerrorMessage(conn) ); - } - if ( ptimeout ) { gettimeofday(&currtime, NULL); timeradd(&currtime, ptimeout, &aborttime); @@ -2372,6 +2398,14 @@ wait_socket_readable( VALUE self, struct timeval *ptimeout, void *(*is_readable) /* Is the given timeout valid? 
*/ if( !ptimeout || (waittime.tv_sec >= 0 && waittime.tv_usec >= 0) ){ + VALUE socket_io; + + /* before we wait for data, make sure everything has been sent */ + pgconn_async_flush(self); + if ((retval=is_readable(conn))) + return retval; + + socket_io = pgconn_socket_io(self); /* Wait for the socket to become readable before checking again */ ret = pg_rb_io_wait(socket_io, RB_INT2NUM(PG_RUBY_IO_READABLE), wait_timeout); } else { @@ -2386,7 +2420,7 @@ wait_socket_readable( VALUE self, struct timeval *ptimeout, void *(*is_readable) /* Check for connection errors (PQisBusy is true on connection errors) */ if ( PQconsumeInput(conn) == 0 ){ pgconn_close_socket_io(self); - rb_raise( rb_eConnectionBad, "PQconsumeInput() %s", PQerrorMessage(conn) ); + pg_raise_conn_error(rb_eConnectionBad, self, "PQconsumeInput() %s", PQerrorMessage(conn)); } } @@ -2399,8 +2433,8 @@ wait_socket_readable( VALUE self, struct timeval *ptimeout, void *(*is_readable) * * Attempts to flush any queued output data to the server. * Returns +true+ if data is successfully flushed, +false+ - * if not (can only return +false+ if connection is - * nonblocking. + * if not. It can only return +false+ if connection is + * in nonblocking mode. * Raises PG::Error if some other failure occurred. 
*/ static VALUE @@ -2536,11 +2570,9 @@ pgconn_sync_put_copy_data(int argc, VALUE *argv, VALUE self) Check_Type(buffer, T_STRING); ret = gvl_PQputCopyData(this->pgconn, RSTRING_PTR(buffer), RSTRING_LENINT(buffer)); - if(ret == -1) { - VALUE error = rb_exc_new2(rb_ePGerror, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - } + if(ret == -1) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(this->pgconn)); + RB_GC_GUARD(intermediate); RB_GC_GUARD(buffer); @@ -2551,7 +2583,6 @@ static VALUE pgconn_sync_put_copy_end(int argc, VALUE *argv, VALUE self) { VALUE str; - VALUE error; int ret; const char *error_message = NULL; t_pg_connection *this = pg_get_connection_safe( self ); @@ -2562,11 +2593,9 @@ pgconn_sync_put_copy_end(int argc, VALUE *argv, VALUE self) error_message = pg_cstr_enc(str, this->enc_idx); ret = gvl_PQputCopyEnd(this->pgconn, error_message); - if(ret == -1) { - error = rb_exc_new2(rb_ePGerror, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); - } + if(ret == -1) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(this->pgconn)); + return (ret) ? 
Qtrue : Qfalse; } @@ -2574,7 +2603,6 @@ static VALUE pgconn_sync_get_copy_data(int argc, VALUE *argv, VALUE self ) { VALUE async_in; - VALUE error; VALUE result; int ret; char *buffer; @@ -2594,10 +2622,8 @@ pgconn_sync_get_copy_data(int argc, VALUE *argv, VALUE self ) } ret = gvl_PQgetCopyData(this->pgconn, &buffer, RTEST(async_in)); - if(ret == -2) { /* error */ - error = rb_exc_new2(rb_ePGerror, PQerrorMessage(this->pgconn)); - rb_iv_set(error, "@connection", self); - rb_exc_raise(error); + if(ret == -2){ /* error */ + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(this->pgconn)); } if(ret == -1) { /* No data left */ return Qnil; @@ -2902,9 +2928,9 @@ pgconn_sync_set_client_encoding(VALUE self, VALUE str) Check_Type(str, T_STRING); - if ( (gvl_PQsetClientEncoding(conn, StringValueCStr(str))) == -1 ) { - rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); - } + if ( (gvl_PQsetClientEncoding(conn, StringValueCStr(str))) == -1 ) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + pgconn_set_internal_encoding_index( self ); return Qnil; @@ -2984,7 +3010,7 @@ get_result_readable(PGconn *conn) * If +true+ is returned, +conn.is_busy+ will return +false+ * and +conn.get_result+ will not block. 
*/ -static VALUE +VALUE pgconn_block( int argc, VALUE *argv, VALUE self ) { struct timeval timeout; struct timeval *ptimeout = NULL; @@ -3072,7 +3098,8 @@ pgconn_async_get_last_result(VALUE self) for(;;) { int status; - pgconn_block( 0, NULL, self ); /* wait for input (without blocking) before reading the last result */ + /* wait for input (without blocking) before reading each result */ + wait_socket_readable(self, NULL, get_result_readable); cur = gvl_PQgetResult(conn); if (cur == NULL) @@ -3536,11 +3563,10 @@ pgconn_enter_pipeline_mode(VALUE self) { PGconn *conn = pg_get_pgconn(self); int res = PQenterPipelineMode(conn); - if( res == 1 ) { - return Qnil; - } else { - rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); - } + if( res != 1 ) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + + return Qnil; } /* @@ -3559,11 +3585,10 @@ pgconn_exit_pipeline_mode(VALUE self) { PGconn *conn = pg_get_pgconn(self); int res = PQexitPipelineMode(conn); - if( res == 1 ) { - return Qnil; - } else { - rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); - } + if( res != 1 ) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + + return Qnil; } @@ -3583,11 +3608,10 @@ pgconn_pipeline_sync(VALUE self) { PGconn *conn = pg_get_pgconn(self); int res = PQpipelineSync(conn); - if( res == 1 ) { - return Qnil; - } else { - rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); - } + if( res != 1 ) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + + return Qnil; } /* @@ -3607,11 +3631,10 @@ pgconn_send_flush_request(VALUE self) { PGconn *conn = pg_get_pgconn(self); int res = PQsendFlushRequest(conn); - if( res == 1 ) { - return Qnil; - } else { - rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); - } + if( res != 1 ) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + + return Qnil; } #endif @@ -3642,7 +3665,7 @@ pgconn_locreat(int argc, VALUE *argv, VALUE self) lo_oid = lo_creat(conn, mode); if (lo_oid == 0) 
- rb_raise(rb_ePGerror, "lo_creat failed"); + pg_raise_conn_error( rb_ePGerror, self, "lo_creat failed"); return UINT2NUM(lo_oid); } @@ -3663,7 +3686,7 @@ pgconn_locreate(VALUE self, VALUE in_lo_oid) ret = lo_create(conn, lo_oid); if (ret == InvalidOid) - rb_raise(rb_ePGerror, "lo_create failed"); + pg_raise_conn_error( rb_ePGerror, self, "lo_create failed"); return UINT2NUM(ret); } @@ -3687,7 +3710,7 @@ pgconn_loimport(VALUE self, VALUE filename) lo_oid = lo_import(conn, StringValueCStr(filename)); if (lo_oid == 0) { - rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); } return UINT2NUM(lo_oid); } @@ -3708,7 +3731,7 @@ pgconn_loexport(VALUE self, VALUE lo_oid, VALUE filename) oid = NUM2UINT(lo_oid); if (lo_export(conn, oid, StringValueCStr(filename)) < 0) { - rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); } return Qnil; } @@ -3739,7 +3762,7 @@ pgconn_loopen(int argc, VALUE *argv, VALUE self) mode = NUM2INT(nmode); if((fd = lo_open(conn, lo_oid, mode)) < 0) { - rb_raise(rb_ePGerror, "can't open large object: %s", PQerrorMessage(conn)); + pg_raise_conn_error( rb_ePGerror, self, "can't open large object: %s", PQerrorMessage(conn)); } return INT2FIX(fd); } @@ -3761,11 +3784,11 @@ pgconn_lowrite(VALUE self, VALUE in_lo_desc, VALUE buffer) Check_Type(buffer, T_STRING); if( RSTRING_LEN(buffer) < 0) { - rb_raise(rb_ePGerror, "write buffer zero string"); + pg_raise_conn_error( rb_ePGerror, self, "write buffer zero string"); } if((n = lo_write(conn, fd, StringValuePtr(buffer), RSTRING_LEN(buffer))) < 0) { - rb_raise(rb_ePGerror, "lo_write failed: %s", PQerrorMessage(conn)); + pg_raise_conn_error( rb_ePGerror, self, "lo_write failed: %s", PQerrorMessage(conn)); } return INT2FIX(n); @@ -3788,16 +3811,12 @@ pgconn_loread(VALUE self, VALUE in_lo_desc, VALUE in_len) VALUE str; char *buffer; - buffer = ALLOC_N(char, len); - if(buffer == 
NULL) - rb_raise(rb_eNoMemError, "ALLOC failed!"); - - if (len < 0){ - rb_raise(rb_ePGerror,"nagative length %d given", len); - } + if (len < 0) + pg_raise_conn_error( rb_ePGerror, self, "negative length %d given", len); + buffer = ALLOC_N(char, len); if((ret = lo_read(conn, lo_desc, buffer, len)) < 0) - rb_raise(rb_ePGerror, "lo_read failed"); + pg_raise_conn_error( rb_ePGerror, self, "lo_read failed"); if(ret == 0) { xfree(buffer); @@ -3827,7 +3846,7 @@ pgconn_lolseek(VALUE self, VALUE in_lo_desc, VALUE offset, VALUE whence) int ret; if((ret = lo_lseek(conn, lo_desc, NUM2INT(offset), NUM2INT(whence))) < 0) { - rb_raise(rb_ePGerror, "lo_lseek failed"); + pg_raise_conn_error( rb_ePGerror, self, "lo_lseek failed"); } return INT2FIX(ret); @@ -3847,7 +3866,7 @@ pgconn_lotell(VALUE self, VALUE in_lo_desc) int lo_desc = NUM2INT(in_lo_desc); if((position = lo_tell(conn, lo_desc)) < 0) - rb_raise(rb_ePGerror,"lo_tell failed"); + pg_raise_conn_error( rb_ePGerror, self, "lo_tell failed"); return INT2FIX(position); } @@ -3866,7 +3885,7 @@ pgconn_lotruncate(VALUE self, VALUE in_lo_desc, VALUE in_len) size_t len = NUM2INT(in_len); if(lo_truncate(conn,lo_desc,len) < 0) - rb_raise(rb_ePGerror,"lo_truncate failed"); + pg_raise_conn_error( rb_ePGerror, self, "lo_truncate failed"); return Qnil; } @@ -3884,7 +3903,7 @@ pgconn_loclose(VALUE self, VALUE in_lo_desc) int lo_desc = NUM2INT(in_lo_desc); if(lo_close(conn,lo_desc) < 0) - rb_raise(rb_ePGerror,"lo_close failed"); + pg_raise_conn_error( rb_ePGerror, self, "lo_close failed"); return Qnil; } @@ -3902,7 +3921,7 @@ pgconn_lounlink(VALUE self, VALUE in_oid) Oid oid = NUM2UINT(in_oid); if(lo_unlink(conn,oid) < 0) - rb_raise(rb_ePGerror,"lo_unlink failed"); + pg_raise_conn_error( rb_ePGerror, self, "lo_unlink failed"); return Qnil; } @@ -4309,7 +4328,7 @@ pgconn_field_name_type_get(VALUE self) * Document-class: PG::Connection */ void -init_pg_connection() +init_pg_connection(void) { s_id_encode = rb_intern("encode"); 
s_id_autoclose_set = rb_intern("autoclose="); @@ -4336,6 +4355,7 @@ init_pg_connection() rb_define_singleton_method(rb_cPGconn, "quote_ident", pgconn_s_quote_ident, 1); rb_define_singleton_method(rb_cPGconn, "connect_start", pgconn_s_connect_start, -1); rb_define_singleton_method(rb_cPGconn, "conndefaults", pgconn_s_conndefaults, 0); + rb_define_singleton_method(rb_cPGconn, "conninfo_parse", pgconn_s_conninfo_parse, 1); rb_define_singleton_method(rb_cPGconn, "sync_ping", pgconn_s_sync_ping, -1); rb_define_singleton_method(rb_cPGconn, "sync_connect", pgconn_s_sync_connect, -1); @@ -4353,6 +4373,9 @@ init_pg_connection() rb_define_method(rb_cPGconn, "user", pgconn_user, 0); rb_define_method(rb_cPGconn, "pass", pgconn_pass, 0); rb_define_method(rb_cPGconn, "host", pgconn_host, 0); +#if defined(HAVE_PQRESULTMEMORYSIZE) + rb_define_method(rb_cPGconn, "hostaddr", pgconn_hostaddr, 0); +#endif rb_define_method(rb_cPGconn, "port", pgconn_port, 0); rb_define_method(rb_cPGconn, "tty", pgconn_tty, 0); rb_define_method(rb_cPGconn, "conninfo", pgconn_conninfo, 0); diff --git a/ext/pg_copy_coder.c b/ext/pg_copy_coder.c index aa91b589c..c8f58b2ca 100644 --- a/ext/pg_copy_coder.c +++ b/ext/pg_copy_coder.c @@ -592,7 +592,7 @@ pg_text_dec_copy_row(t_pg_coder *conv, const char *input_line, int len, int _tup void -init_pg_copycoder() +init_pg_copycoder(void) { /* Document-class: PG::CopyCoder < PG::Coder * diff --git a/ext/pg_errors.c b/ext/pg_errors.c index cee6af1a9..8e04d3792 100644 --- a/ext/pg_errors.c +++ b/ext/pg_errors.c @@ -70,7 +70,7 @@ lookup_error_class(const char *sqlstate) } void -init_pg_errors() +init_pg_errors(void) { rb_hErrors = rb_hash_new(); rb_define_const( rb_mPG, "ERROR_CLASSES", rb_hErrors ); diff --git a/ext/pg_record_coder.c b/ext/pg_record_coder.c index d6c4c5092..3dbcf94a2 100644 --- a/ext/pg_record_coder.c +++ b/ext/pg_record_coder.c @@ -344,10 +344,12 @@ record_isspace(char ch) * oids = conn.exec( "SELECT (NULL::complex).*" ) * # Build a type map 
(PG::TypeMapByColumn) for decoding the "complex" type * dtm = PG::BasicTypeMapForResults.new(conn).build_column_map( oids ) - * # Register a record decoder for decoding our type "complex" - * PG::BasicTypeRegistry.register_coder(PG::TextDecoder::Record.new(type_map: dtm, name: "complex")) - * # Apply the basic type registry to all results retrieved from the server - * conn.type_map_for_results = PG::BasicTypeMapForResults.new(conn) + * # Build a type map and populate with basic types + * btr = PG::BasicTypeRegistry.new.register_default_types + * # Register a new record decoder for decoding our type "complex" + * btr.register_coder(PG::TextDecoder::Record.new(type_map: dtm, name: "complex")) + * # Apply our basic type registry to all results retrieved from the server + * conn.type_map_for_results = PG::BasicTypeMapForResults.new(conn, registry: btr) * # Now queries decode the "complex" type (and many basic types) automatically * conn.exec("SELECT * FROM my_table").to_a * # => [{"v1"=>[2.0, 3.0], "v2"=>[4.0, 5.0]}, {"v1"=>[6.0, 7.0], "v2"=>[8.0, 9.0]}] @@ -492,7 +494,7 @@ pg_text_dec_record(t_pg_coder *conv, char *input_line, int len, int _tuple, int void -init_pg_recordcoder() +init_pg_recordcoder(void) { /* Document-class: PG::RecordCoder < PG::Coder * diff --git a/ext/pg_result.c b/ext/pg_result.c index 8306be1c0..27ba6bafa 100644 --- a/ext/pg_result.c +++ b/ext/pg_result.c @@ -1382,22 +1382,21 @@ pgresult_type_map_get(VALUE self) } -static void -yield_hash(VALUE self, int ntuples, int nfields) +static int +yield_hash(VALUE self, int ntuples, int nfields, void *data) { int tuple_num; - t_pg_result *this = pgresult_get_this(self); UNUSED(nfields); for(tuple_num = 0; tuple_num < ntuples; tuple_num++) { rb_yield(pgresult_aref(self, INT2NUM(tuple_num))); } - pgresult_clear( this ); + return 1; /* clear the result */ } -static void -yield_array(VALUE self, int ntuples, int nfields) +static int +yield_array(VALUE self, int ntuples, int nfields, void *data) { int row; 
t_pg_result *this = pgresult_get_this(self); @@ -1413,11 +1412,11 @@ yield_array(VALUE self, int ntuples, int nfields) rb_yield( rb_ary_new4( nfields, row_values )); } - pgresult_clear( this ); + return 1; /* clear the result */ } -static void -yield_tuple(VALUE self, int ntuples, int nfields) +static int +yield_tuple(VALUE self, int ntuples, int nfields, void *data) { int tuple_num; t_pg_result *this = pgresult_get_this(self); @@ -1434,10 +1433,12 @@ yield_tuple(VALUE self, int ntuples, int nfields) VALUE tuple = pgresult_tuple(copy, INT2FIX(tuple_num)); rb_yield( tuple ); } + return 0; /* don't clear the result */ } -static VALUE -pgresult_stream_any(VALUE self, void (*yielder)(VALUE, int, int)) +/* Non-static, and data pointer for use by sequel_pg */ +VALUE +pgresult_stream_any(VALUE self, int (*yielder)(VALUE, int, int, void*), void* data) { t_pg_result *this; int nfields; @@ -1456,6 +1457,7 @@ pgresult_stream_any(VALUE self, void (*yielder)(VALUE, int, int)) switch( PQresultStatus(pgresult) ){ case PGRES_TUPLES_OK: + case PGRES_COMMAND_OK: if( ntuples == 0 ) return self; rb_raise( rb_eInvalidResultStatus, "PG::Result is not in single row mode"); @@ -1465,14 +1467,21 @@ pgresult_stream_any(VALUE self, void (*yielder)(VALUE, int, int)) pg_result_check( self ); } - yielder( self, ntuples, nfields ); + if( yielder( self, ntuples, nfields, data ) ){ + pgresult_clear( this ); + } + + if( gvl_PQisBusy(pgconn) ){ + /* wait for input (without blocking) before reading each result */ + pgconn_block( 0, NULL, this->connection ); + } pgresult = gvl_PQgetResult(pgconn); if( pgresult == NULL ) - rb_raise( rb_eNoResultError, "no result received - possibly an intersection with another result retrieval"); + rb_raise( rb_eNoResultError, "no result received - possibly an intersection with another query"); if( nfields != PQnfields(pgresult) ) - rb_raise( rb_eInvalidChangeOfResultFields, "number of fields must not change in single row mode"); + rb_raise( 
rb_eInvalidChangeOfResultFields, "number of fields changed in single row mode from %d to %d - this is a sign for intersection with another query", nfields, PQnfields(pgresult)); this->pgresult = pgresult; } @@ -1516,7 +1525,7 @@ pgresult_stream_any(VALUE self, void (*yielder)(VALUE, int, int)) static VALUE pgresult_stream_each(VALUE self) { - return pgresult_stream_any(self, yield_hash); + return pgresult_stream_any(self, yield_hash, NULL); } /* @@ -1532,7 +1541,7 @@ pgresult_stream_each(VALUE self) static VALUE pgresult_stream_each_row(VALUE self) { - return pgresult_stream_any(self, yield_array); + return pgresult_stream_any(self, yield_array, NULL); } /* @@ -1549,7 +1558,7 @@ pgresult_stream_each_tuple(VALUE self) /* allocate VALUEs that are shared between all streamed tuples */ ensure_init_for_tuple(self); - return pgresult_stream_any(self, yield_tuple); + return pgresult_stream_any(self, yield_tuple, NULL); } /* @@ -1610,7 +1619,7 @@ pgresult_field_name_type_get(VALUE self) } void -init_pg_result() +init_pg_result(void) { sym_string = ID2SYM(rb_intern("string")); sym_symbol = ID2SYM(rb_intern("symbol")); diff --git a/ext/pg_text_decoder.c b/ext/pg_text_decoder.c index cdf47e062..120a2babe 100644 --- a/ext/pg_text_decoder.c +++ b/ext/pg_text_decoder.c @@ -923,7 +923,7 @@ pg_text_dec_inet(t_pg_coder *conv, const char *val, int len, int tuple, int fiel } void -init_pg_text_decoder() +init_pg_text_decoder(void) { rb_require("ipaddr"); s_IPAddr = rb_funcall(rb_cObject, rb_intern("const_get"), 1, rb_str_new2("IPAddr")); diff --git a/ext/pg_text_encoder.c b/ext/pg_text_encoder.c index 2690c92f5..7cfc49793 100644 --- a/ext/pg_text_encoder.c +++ b/ext/pg_text_encoder.c @@ -775,7 +775,7 @@ pg_text_enc_to_base64(t_pg_coder *conv, VALUE value, char *out, VALUE *intermedi void -init_pg_text_encoder() +init_pg_text_encoder(void) { s_id_encode = rb_intern("encode"); s_id_to_i = rb_intern("to_i"); diff --git a/ext/pg_tuple.c b/ext/pg_tuple.c index d78e72594..d779e3062 100644 
--- a/ext/pg_tuple.c +++ b/ext/pg_tuple.c @@ -471,10 +471,7 @@ pg_tuple_dump(VALUE self) values = rb_ary_new4(this->num_fields, &this->values[0]); a = rb_ary_new3(2, field_names, values); - if (FL_TEST(self, FL_EXIVAR)) { - rb_copy_generic_ivar(a, self); - FL_SET(a, FL_EXIVAR); - } + rb_copy_generic_ivar(a, self); return a; } @@ -542,16 +539,13 @@ pg_tuple_load(VALUE self, VALUE a) RTYPEDDATA_DATA(self) = this; - if (FL_TEST(a, FL_EXIVAR)) { - rb_copy_generic_ivar(self, a); - FL_SET(self, FL_EXIVAR); - } + rb_copy_generic_ivar(self, a); return self; } void -init_pg_tuple() +init_pg_tuple(void) { rb_cPG_Tuple = rb_define_class_under( rb_mPG, "Tuple", rb_cObject ); rb_define_alloc_func( rb_cPG_Tuple, pg_tuple_s_allocate ); diff --git a/ext/pg_type_map.c b/ext/pg_type_map.c index 8918ece61..1ba353686 100644 --- a/ext/pg_type_map.c +++ b/ext/pg_type_map.c @@ -176,7 +176,7 @@ pg_typemap_with_default_type_map(VALUE self, VALUE typemap) } void -init_pg_type_map() +init_pg_type_map(void) { s_id_fit_to_query = rb_intern("fit_to_query"); s_id_fit_to_result = rb_intern("fit_to_result"); diff --git a/ext/pg_type_map_all_strings.c b/ext/pg_type_map_all_strings.c index ce702b6a8..a55903d22 100644 --- a/ext/pg_type_map_all_strings.c +++ b/ext/pg_type_map_all_strings.c @@ -105,7 +105,7 @@ pg_tmas_s_allocate( VALUE klass ) void -init_pg_type_map_all_strings() +init_pg_type_map_all_strings(void) { /* * Document-class: PG::TypeMapAllStrings < PG::TypeMap diff --git a/ext/pg_type_map_by_class.c b/ext/pg_type_map_by_class.c index 2b8ca75f8..77347217c 100644 --- a/ext/pg_type_map_by_class.c +++ b/ext/pg_type_map_by_class.c @@ -247,7 +247,7 @@ pg_tmbk_coders( VALUE self ) } void -init_pg_type_map_by_class() +init_pg_type_map_by_class(void) { /* * Document-class: PG::TypeMapByClass < PG::TypeMap diff --git a/ext/pg_type_map_by_column.c b/ext/pg_type_map_by_column.c index 5028a088b..56ca4049c 100644 --- a/ext/pg_type_map_by_column.c +++ b/ext/pg_type_map_by_column.c @@ -243,7 +243,7 @@ 
pg_tmbc_s_allocate( VALUE klass ) } VALUE -pg_tmbc_allocate() +pg_tmbc_allocate(void) { return pg_tmbc_s_allocate(rb_cTypeMapByColumn); } @@ -320,7 +320,7 @@ pg_tmbc_coders(VALUE self) } void -init_pg_type_map_by_column() +init_pg_type_map_by_column(void) { s_id_decode = rb_intern("decode"); s_id_encode = rb_intern("encode"); diff --git a/ext/pg_type_map_by_mri_type.c b/ext/pg_type_map_by_mri_type.c index ee99bc668..c7e50fdcd 100644 --- a/ext/pg_type_map_by_mri_type.c +++ b/ext/pg_type_map_by_mri_type.c @@ -286,7 +286,7 @@ pg_tmbmt_coders( VALUE self ) } void -init_pg_type_map_by_mri_type() +init_pg_type_map_by_mri_type(void) { /* * Document-class: PG::TypeMapByMriType < PG::TypeMap diff --git a/ext/pg_type_map_by_oid.c b/ext/pg_type_map_by_oid.c index f6a24df6f..e0ba5ec26 100644 --- a/ext/pg_type_map_by_oid.c +++ b/ext/pg_type_map_by_oid.c @@ -356,7 +356,7 @@ pg_tmbo_build_column_map( VALUE self, VALUE result ) void -init_pg_type_map_by_oid() +init_pg_type_map_by_oid(void) { s_id_decode = rb_intern("decode"); diff --git a/ext/pg_type_map_in_ruby.c b/ext/pg_type_map_in_ruby.c index 94c803f2f..267d33539 100644 --- a/ext/pg_type_map_in_ruby.c +++ b/ext/pg_type_map_in_ruby.c @@ -299,7 +299,7 @@ pg_tmir_s_allocate( VALUE klass ) void -init_pg_type_map_in_ruby() +init_pg_type_map_in_ruby(void) { s_id_fit_to_result = rb_intern("fit_to_result"); s_id_fit_to_query = rb_intern("fit_to_query"); diff --git a/lib/pg.rb b/lib/pg.rb index adcc4094a..a8375b98d 100644 --- a/lib/pg.rb +++ b/lib/pg.rb @@ -59,14 +59,14 @@ class NotInBlockingMode < PG::Error # Get the PG library version. # # +include_buildnum+ is no longer used and any value passed will be ignored. - def self::version_string( include_buildnum=nil ) - return "%s %s" % [ self.name, VERSION ] + def self.version_string( include_buildnum=nil ) + "%s %s" % [ self.name, VERSION ] end ### Convenience alias for PG::Connection.new. 
- def self::connect( *args, **kwargs ) - return PG::Connection.new( *args, **kwargs ) + def self.connect( *args, &block ) + Connection.new( *args, &block ) end diff --git a/lib/pg/basic_type_registry.rb b/lib/pg/basic_type_registry.rb index a8b9cb143..8db915998 100644 --- a/lib/pg/basic_type_registry.rb +++ b/lib/pg/basic_type_registry.rb @@ -22,7 +22,7 @@ # end # # conn = PG.connect -# regi = PG::BasicTypeRegistry.new.define_default_types +# regi = PG::BasicTypeRegistry.new.register_default_types # regi.register_type(0, 'inet', InetEncoder, InetDecoder) # conn.type_map_for_results = PG::BasicTypeMapForResults.new(conn, registry: regi) class PG::BasicTypeRegistry @@ -184,6 +184,7 @@ def register_coder(coder) name = coder.name || raise(ArgumentError, "name of #{coder.inspect} must be defined") h[:encoder][name] = coder if coder.respond_to?(:encode) h[:decoder][name] = coder if coder.respond_to?(:decode) + self end # Register the given +encoder_class+ and/or +decoder_class+ for casting a PostgreSQL type. @@ -193,6 +194,7 @@ def register_coder(coder) def register_type(format, name, encoder_class, decoder_class) register_coder(encoder_class.new(name: name, format: format)) if encoder_class register_coder(decoder_class.new(name: name, format: format)) if decoder_class + self end # Alias the +old+ type to the +new+ type. 
@@ -205,10 +207,11 @@ def alias_type(format, new, old) @coders_by_name[format][ende].delete(new) end end + self end # Populate the registry with all builtin types of ruby-pg - def define_default_types + def register_default_types register_type 0, 'int2', PG::TextEncoder::Integer, PG::TextDecoder::Integer alias_type 0, 'int4', 'int2' alias_type 0, 'int8', 'int2' @@ -281,8 +284,10 @@ def define_default_types self end + alias define_default_types register_default_types + # @private - DEFAULT_TYPE_REGISTRY = PG::BasicTypeRegistry.new.define_default_types + DEFAULT_TYPE_REGISTRY = PG::BasicTypeRegistry.new.register_default_types # Delegate class method calls to DEFAULT_TYPE_REGISTRY class << self diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 37031b908..39db3502a 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -46,37 +46,6 @@ def self.connect_hash_to_string( hash ) hash.map { |k,v| "#{k}=#{quote_connstr(v)}" }.join( ' ' ) end - # Decode a connection string to Hash options - # - # Value are properly unquoted and unescaped. - def self.connect_string_to_hash( str ) - options = {} - key = nil - value = String.new - str.scan(/\G\s*(?>([^\s\\\']+)\s*=\s*|([^\s\\\']+)|'((?:[^\'\\]|\\.)*)'|(\\.?)|(\S))(\s|\z)?/m) do - |k, word, sq, esc, garbage, sep| - raise ArgumentError, "unterminated quoted string in connection info string: #{str.inspect}" if garbage - if k - key = k - else - value << (word || (sq || esc).gsub(/\\(.)/, '\\1')) - end - if sep - raise ArgumentError, "missing = after #{value.inspect}" unless key - options[key.to_sym] = value - key = nil - value = String.new - end - end - options - end - - # URI defined in RFC3986 - # This regexp is modified to allow host to specify multiple comma separated components captured as and to disallow comma in hostnames. 
- # Taken from: https://github.com/ruby/ruby/blob/be04006c7d2f9aeb7e9d8d09d945b3a9c7850202/lib/uri/rfc3986_parser.rb#L6 - HOST_AND_PORT = /(?(?(?\[(?:(?(?:\h{1,4}:){6}(?\h{1,4}:\h{1,4}|(?(?[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]|\d)\.\g\.\g\.\g))|::(?:\h{1,4}:){5}\g|\h{1,4}?::(?:\h{1,4}:){4}\g|(?:(?:\h{1,4}:)?\h{1,4})?::(?:\h{1,4}:){3}\g|(?:(?:\h{1,4}:){,2}\h{1,4})?::(?:\h{1,4}:){2}\g|(?:(?:\h{1,4}:){,3}\h{1,4})?::\h{1,4}:\g|(?:(?:\h{1,4}:){,4}\h{1,4})?::\g|(?:(?:\h{1,4}:){,5}\h{1,4})?::\h{1,4}|(?:(?:\h{1,4}:){,6}\h{1,4})?::)|(?v\h+\.[!$&-.0-;=A-Z_a-z~]+))\])|\g|(?(?:%\h\h|[-\.!$&-+0-9;=A-Z_a-z~])+))?(?::(?\d*))?)/ - POSTGRESQL_URI = /\A(?(?[A-Za-z][+\-.0-9A-Za-z]*):(?\/\/(?(?:(?(?:%\h\h|[!$&-.0-;=A-Z_a-z~])*)@)?(?#{HOST_AND_PORT}(?:,\g)*))(?(?:\/(?(?:%\h\h|[!$&-.0-;=@-Z_a-z~])*))*)|(?\/(?:(?(?:%\h\h|[!$&-.0-;=@-Z_a-z~])+)(?:\/\g)*)?)|(?\g(?:\/\g)*)|(?))(?:\?(?[^#]*))?(?:\#(?(?:%\h\h|[!$&-.0-;=@-Z_a-z~\/?])*))?)\z/ - # Parse the connection +args+ into a connection-parameter string. # See PG::Connection.new for valid arguments. # @@ -87,91 +56,64 @@ def self.connect_string_to_hash( str ) # * URI object # * positional arguments # - # The method adds the option "hostaddr" and "fallback_application_name" if they aren't already set. - # The URI and the options string is passed through and "hostaddr" as well as "fallback_application_name" - # are added to the end. - def self::parse_connect_args( *args ) + # The method adds the option "fallback_application_name" if it isn't already set. + # It returns a connection string with "key=value" pairs. + def self.parse_connect_args( *args ) hash_arg = args.last.is_a?( Hash ) ? 
args.pop.transform_keys(&:to_sym) : {} - option_string = "" iopts = {} if args.length == 1 case args.first - when URI, POSTGRESQL_URI - uri = args.first.to_s - uri_match = POSTGRESQL_URI.match(uri) - if uri_match['query'] - iopts = URI.decode_www_form(uri_match['query']).to_h.transform_keys(&:to_sym) - end - # extract "host1,host2" from "host1:5432,host2:5432" - iopts[:host] = uri_match['hostports'].split(',', -1).map do |hostport| - hostmatch = HOST_AND_PORT.match(hostport) - hostmatch['IPv6address'] || hostmatch['IPv4address'] || hostmatch['reg-name']&.gsub(/%(\h\h)/){ $1.hex.chr } - end.join(',') - oopts = {} - when /=/ - # Option string style - option_string = args.first.to_s - iopts = connect_string_to_hash(option_string) - oopts = {} + when URI, /=/, /:\/\// + # Option or URL string style + conn_string = args.first.to_s + iopts = PG::Connection.conninfo_parse(conn_string).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] } else # Positional parameters (only host given) iopts[CONNECT_ARGUMENT_ORDER.first.to_sym] = args.first - oopts = iopts.dup end else - # Positional parameters + # Positional parameters with host and more max = CONNECT_ARGUMENT_ORDER.length raise ArgumentError, - "Extra positional parameter %d: %p" % [ max + 1, args[max] ] if args.length > max + "Extra positional parameter %d: %p" % [ max + 1, args[max] ] if args.length > max CONNECT_ARGUMENT_ORDER.zip( args ) do |(k,v)| iopts[ k.to_sym ] = v if v end iopts.delete(:tty) # ignore obsolete tty parameter - oopts = iopts.dup end iopts.merge!( hash_arg ) - oopts.merge!( hash_arg ) - - # Resolve DNS in Ruby to avoid blocking state while connecting, when it ... - if (host=iopts[:host]) && !iopts[:hostaddr] - hostaddrs = host.split(",", -1).map do |mhost| - if !mhost.empty? 
&& !mhost.start_with?("/") && # isn't UnixSocket - # isn't a path on Windows - (RUBY_PLATFORM !~ /mingw|mswin/ || mhost !~ /\A\w:[\/\\]/) - - if Fiber.respond_to?(:scheduler) && - Fiber.scheduler && - RUBY_VERSION < '3.1.' - - # Use a second thread to avoid blocking of the scheduler. - # `IPSocket.getaddress` isn't fiber aware before ruby-3.1. - Thread.new{ IPSocket.getaddress(mhost) rescue '' }.value - else - IPSocket.getaddress(mhost) rescue '' - end - end - end - oopts[:hostaddr] = hostaddrs.join(",") if hostaddrs.any? - end if !iopts[:fallback_application_name] - oopts[:fallback_application_name] = $0.sub( /^(.{30}).{4,}(.{30})$/ ){ $1+"..."+$2 } + iopts[:fallback_application_name] = $0.sub( /^(.{30}).{4,}(.{30})$/ ){ $1+"..."+$2 } end - if uri - uri += uri_match['query'] ? "&" : "?" - uri += URI.encode_www_form( oopts ) - return uri + return connect_hash_to_string(iopts) + end + + # Return a String representation of the object suitable for debugging. + def inspect + str = self.to_s + str[-1,0] = if finished? + " finished" else - option_string += ' ' unless option_string.empty? && oopts.empty? 
- return option_string + connect_hash_to_string(oopts) + stats = [] + stats << " status=#{ PG.constants.grep(/CONNECTION_/).find{|c| PG.const_get(c) == status} }" if status != CONNECTION_OK + stats << " transaction_status=#{ PG.constants.grep(/PQTRANS_/).find{|c| PG.const_get(c) == transaction_status} }" if transaction_status != PG::PQTRANS_IDLE + stats << " nonblocking=#{ isnonblocking }" if isnonblocking + stats << " pipeline_status=#{ PG.constants.grep(/PQ_PIPELINE_/).find{|c| PG.const_get(c) == pipeline_status} }" if respond_to?(:pipeline_status) && pipeline_status != PG::PQ_PIPELINE_OFF + stats << " client_encoding=#{ get_client_encoding }" if get_client_encoding != "UTF8" + stats << " type_map_for_results=#{ type_map_for_results.to_s }" unless type_map_for_results.is_a?(PG::TypeMapAllStrings) + stats << " type_map_for_queries=#{ type_map_for_queries.to_s }" unless type_map_for_queries.is_a?(PG::TypeMapAllStrings) + stats << " encoder_for_put_copy_data=#{ encoder_for_put_copy_data.to_s }" if encoder_for_put_copy_data + stats << " decoder_for_get_copy_data=#{ decoder_for_get_copy_data.to_s }" if decoder_for_get_copy_data + " host=#{host} port=#{port} user=#{user}#{stats.join}" end + return str end - # call-seq: # conn.copy_data( sql [, coder] ) {|sql_result| ... } -> PG::Result # @@ -241,7 +183,7 @@ def self::parse_connect_args( *args ) # ["more", "data", "to", "copy"] def copy_data( sql, coder=nil ) - raise PG::NotInBlockingMode, "copy_data can not be used in nonblocking mode" if nonblocking? + raise PG::NotInBlockingMode.new("copy_data can not be used in nonblocking mode", connection: self) if nonblocking? 
res = exec( sql ) case res.result_status @@ -273,11 +215,15 @@ def copy_data( sql, coder=nil ) yield res rescue Exception => err cancel - while get_copy_data + begin + while get_copy_data + end + rescue PG::Error + # Ignore error in cleanup to avoid losing original exception end while get_result end - raise + raise err else res = get_last_result if !res || res.result_status != PGRES_COMMAND_OK @@ -285,7 +231,7 @@ def copy_data( sql, coder=nil ) end while get_result end - raise PG::NotAllCopyDataRetrieved, "Not all COPY data retrieved" + raise PG::NotAllCopyDataRetrieved.new("Not all COPY data retrieved", connection: self) end res ensure @@ -310,16 +256,17 @@ class << self # and a +COMMIT+ at the end of the block, or # +ROLLBACK+ if any exception occurs. def transaction + rollback = false exec "BEGIN" - res = yield(self) + yield(self) rescue Exception + rollback = true cancel if transaction_status == PG::PQTRANS_ACTIVE block exec "ROLLBACK" raise - else - exec "COMMIT" - res + ensure + exec "COMMIT" unless rollback end ### Returns an array of Hashes with connection defaults. See ::conndefaults @@ -482,10 +429,20 @@ def isnonblocking # See also #copy_data. # def put_copy_data(buffer, encoder=nil) - until sync_put_copy_data(buffer, encoder) - flush + # sync_put_copy_data does a non-blocking attept to flush data. + until res=sync_put_copy_data(buffer, encoder) + # It didn't flush immediately and allocation of more buffering memory failed. + # Wait for all data sent by doing a blocking flush. + res = flush end - flush + + # And do a blocking flush every 100 calls. + # This is to avoid memory bloat, when sending the data is slower than calls to put_copy_data happen. 
+ if (@calls_to_put_copy_data += 1) > 100 + @calls_to_put_copy_data = 0 + res = flush + end + res end alias async_put_copy_data put_copy_data @@ -505,6 +462,7 @@ def put_copy_end(*args) until sync_put_copy_end(*args) flush end + @calls_to_put_copy_data = 0 flush end alias async_put_copy_end put_copy_end @@ -545,6 +503,7 @@ def encrypt_password( password, username, algorithm=nil ) def reset reset_start async_connect_or_reset(:reset_poll) + self end alias async_reset reset @@ -612,37 +571,72 @@ def cancel alias async_cancel cancel private def async_connect_or_reset(poll_meth) - # Now grab a reference to the underlying socket so we know when the connection is established - socket = socket_io - # Track the progress of the connection, waiting for the socket to become readable/writable before polling it + + if (timeo = conninfo_hash[:connect_timeout].to_i) && timeo > 0 + # Lowest timeout is 2 seconds - like in libpq + timeo = [timeo, 2].max + host_count = conninfo_hash[:host].to_s.count(",") + 1 + stop_time = timeo * host_count + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + poll_status = PG::PGRES_POLLING_WRITING until poll_status == PG::PGRES_POLLING_OK || poll_status == PG::PGRES_POLLING_FAILED - # If the socket needs to read, wait 'til it becomes readable to poll again - case poll_status - when PG::PGRES_POLLING_READING - socket.wait_readable + # Set single timeout to parameter "connect_timeout" but + # don't exceed total connection time of number-of-hosts * connect_timeout. 
+ timeout = [timeo, stop_time - Process.clock_gettime(Process::CLOCK_MONOTONIC)].min if stop_time + event = if !timeout || timeout >= 0 + # If the socket needs to read, wait 'til it becomes readable to poll again + case poll_status + when PG::PGRES_POLLING_READING + if defined?(IO::READABLE) # ruby-3.0+ + socket_io.wait(IO::READABLE | IO::PRIORITY, timeout) + else + IO.select([socket_io], nil, [socket_io], timeout) + end - # ...and the same for when the socket needs to write - when PG::PGRES_POLLING_WRITING - socket.wait_writable + # ...and the same for when the socket needs to write + when PG::PGRES_POLLING_WRITING + if defined?(IO::WRITABLE) # ruby-3.0+ + # Use wait instead of wait_readable, since connection errors are delivered as + # exceptional/priority events on Windows. + socket_io.wait(IO::WRITABLE | IO::PRIORITY, timeout) + else + # io#wait on ruby-2.x doesn't wait for priority, so fallback to IO.select + IO.select(nil, [socket_io], [socket_io], timeout) + end + end + end + # connection to server at "localhost" (127.0.0.1), port 5433 failed: timeout expired (PG::ConnectionBad) + # connection to server on socket "/var/run/postgresql/.s.PGSQL.5433" failed: No such file or directory + unless event + if self.class.send(:host_is_named_pipe?, host) + connhost = "on socket \"#{host}\"" + elsif respond_to?(:hostaddr) + connhost = "at \"#{host}\" (#{hostaddr}), port #{port}" + else + connhost = "at \"#{host}\", port #{port}" + end + raise PG::ConnectionBad.new("connection to server #{connhost} failed: timeout expired", connection: self) end # Check to see if it's finished or failed yet poll_status = send( poll_meth ) end - raise(PG::ConnectionBad, error_message) unless status == PG::CONNECTION_OK + unless status == PG::CONNECTION_OK + msg = error_message + finish + raise PG::ConnectionBad.new(msg, connection: self) + end # Set connection to nonblocking to handle all blocking states in ruby. # That way a fiber scheduler is able to handle IO requests. 
sync_setnonblocking(true) self.flush_data = true set_default_encoding - - self end class << self @@ -697,13 +691,17 @@ class << self # connection will have its +client_encoding+ set accordingly. # # Raises a PG::Error if the connection fails. - def new(*args, **kwargs) - conn = self.connect_start(*args, **kwargs ) or - raise(PG::Error, "Unable to create a new connection") - - raise(PG::ConnectionBad, conn.error_message) if conn.status == PG::CONNECTION_BAD - - conn.send(:async_connect_or_reset, :connect_poll) + def new(*args) + conn = connect_to_hosts(*args) + + if block_given? + begin + return yield conn + ensure + conn.finish + end + end + conn end alias async_connect new alias connect new @@ -711,6 +709,65 @@ def new(*args, **kwargs) alias setdb new alias setdblogin new + private def connect_to_hosts(*args) + option_string = parse_connect_args(*args) + iopts = PG::Connection.conninfo_parse(option_string).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] } + iopts = PG::Connection.conndefaults.each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] }.merge(iopts) + + if iopts[:hostaddr] + # hostaddr is provided -> no need to resolve hostnames + + elsif iopts[:host] && !iopts[:host].empty? && PG.library_version >= 100000 + # Resolve DNS in Ruby to avoid blocking state while connecting. + # Multiple comma-separated values are generated, if the hostname resolves to both IPv4 and IPv6 addresses. + # This requires PostgreSQL-10+, so no DNS resolving is done on earlier versions. 
+ ihosts = iopts[:host].split(",", -1) + iports = iopts[:port].split(",", -1) + iports = [nil] if iports.size == 0 + iports = iports * ihosts.size if iports.size == 1 + raise PG::ConnectionBad, "could not match #{iports.size} port numbers to #{ihosts.size} hosts" if iports.size != ihosts.size + + dests = ihosts.each_with_index.flat_map do |mhost, idx| + unless host_is_named_pipe?(mhost) + if Fiber.respond_to?(:scheduler) && + Fiber.scheduler && + RUBY_VERSION < '3.1.' + + # Use a second thread to avoid blocking of the scheduler. + # `TCPSocket.gethostbyname` isn't fiber aware before ruby-3.1. + hostaddrs = Thread.new{ Addrinfo.getaddrinfo(mhost, nil, nil, :STREAM).map(&:ip_address) rescue [''] }.value + else + hostaddrs = Addrinfo.getaddrinfo(mhost, nil, nil, :STREAM).map(&:ip_address) rescue [''] + end + else + # No hostname to resolve (UnixSocket) + hostaddrs = [nil] + end + hostaddrs.map { |hostaddr| [hostaddr, mhost, iports[idx]] } + end + iopts.merge!( + hostaddr: dests.map{|d| d[0] }.join(","), + host: dests.map{|d| d[1] }.join(","), + port: dests.map{|d| d[2] }.join(",")) + else + # No host given + end + conn = self.connect_start(iopts) or + raise(PG::Error, "Unable to create a new connection") + + raise PG::ConnectionBad, conn.error_message if conn.status == PG::CONNECTION_BAD + + conn.send(:async_connect_or_reset, :connect_poll) + conn + end + + private def host_is_named_pipe?(host_string) + host_string.empty? || host_string.start_with?("/") || # it's UnixSocket? + host_string.start_with?("@") || # it's UnixSocket in the abstract namespace? + # it's a path on Windows? 
+ (RUBY_PLATFORM =~ /mingw|mswin/ && host_string =~ /\A([\/\\]|\w:[\/\\])/) + end + # call-seq: # PG::Connection.ping(connection_hash) -> Integer # PG::Connection.ping(connection_string) -> Integer diff --git a/lib/pg/exceptions.rb b/lib/pg/exceptions.rb index 9347385e2..8940ce7c3 100644 --- a/lib/pg/exceptions.rb +++ b/lib/pg/exceptions.rb @@ -6,7 +6,13 @@ module PG - class Error < StandardError; end + class Error < StandardError + def initialize(msg=nil, connection: nil, result: nil) + @connection = connection + @result = result + super(msg) + end + end end # module PG diff --git a/lib/pg/version.rb b/lib/pg/version.rb index b667309be..94ba433de 100644 --- a/lib/pg/version.rb +++ b/lib/pg/version.rb @@ -1,4 +1,4 @@ module PG # Library version - VERSION = '1.3.2' + VERSION = '1.4.5' end diff --git a/misc/openssl-pg-segfault.rb b/misc/openssl-pg-segfault.rb old mode 100755 new mode 100644 diff --git a/rakelib/task_extension.rb b/rakelib/task_extension.rb new file mode 100644 index 000000000..09534c700 --- /dev/null +++ b/rakelib/task_extension.rb @@ -0,0 +1,46 @@ +# This source code is borrowed from: +# https://github.com/oneclick/rubyinstaller2/blob/b3dcbf69f131e44c78ea3a1c5e0041c223f266ce/lib/ruby_installer/build/utils.rb#L104-L144 + +module TaskExtension + # Extend rake's file task to be defined only once and to check the expected file is indeed generated + # + # The same as #task, but for #file. + # In addition this file task raises an error, if the file that is expected to be generated is not present after the block was executed. + def file(name, *args, &block) + task_once(name, block) do + super(name, *args) do |ta| + block.call(ta).tap do + raise "file #{ta.name} is missing after task executed" unless File.exist?(ta.name) + end + end + end + end + + # Extend rake's task definition to be defined only once, even if called several times + # + # This allows to define common tasks next to specific tasks. 
+ # It is expected that any variation of the task's block is reflected in the task name or namespace. + # If the task name is identical, the task block is executed only once, even if the file task definition is executed twice. + def task(name, *args, &block) + task_once(name, block) do + super + end + end + + private def task_once(name, block) + name = name.keys.first if name.is_a?(Hash) + if block && + Rake::Task.task_defined?(name) && + Rake::Task[name].instance_variable_get('@task_block_location') == block.source_location + # task is already defined for this target and the same block + # So skip double definition of the same action + Rake::Task[name] + elsif block + yield.tap do + Rake::Task[name].instance_variable_set('@task_block_location', block.source_location) + end + else + yield + end + end +end diff --git a/sample/array_insert.rb b/sample/array_insert.rb old mode 100755 new mode 100644 diff --git a/sample/async_api.rb b/sample/async_api.rb old mode 100755 new mode 100644 index 983bfeb59..364330a61 --- a/sample/async_api.rb +++ b/sample/async_api.rb @@ -27,10 +27,6 @@ def output_progress( msg ) abort "Connection failed: %s" % [ conn.error_message ] if conn.status == PG::CONNECTION_BAD -# Now grab a reference to the underlying socket so we know when the -# connection is established -socket = conn.socket_io - # Track the progress of the connection, waiting for the socket to become readable/writable # before polling it poll_status = PG::PGRES_POLLING_WRITING @@ -41,13 +37,13 @@ def output_progress( msg ) case poll_status when PG::PGRES_POLLING_READING output_progress " waiting for socket to become readable" - select( [socket], nil, nil, TIMEOUT ) or + select( [conn.socket_io], nil, nil, TIMEOUT ) or raise "Asynchronous connection timed out!" 
# ...and the same for when the socket needs to write when PG::PGRES_POLLING_WRITING output_progress " waiting for socket to become writable" - select( nil, [socket], nil, TIMEOUT ) or + select( nil, [conn.socket_io], nil, TIMEOUT ) or raise "Asynchronous connection timed out!" end @@ -85,7 +81,7 @@ def output_progress( msg ) # Buffer any incoming data on the socket until a full result is ready. conn.consume_input while conn.is_busy - select( [socket], nil, nil, TIMEOUT ) or + select( [conn.socket_io], nil, nil, TIMEOUT ) or raise "Timeout waiting for query response." conn.consume_input end diff --git a/sample/async_copyto.rb b/sample/async_copyto.rb old mode 100755 new mode 100644 diff --git a/sample/async_mixed.rb b/sample/async_mixed.rb old mode 100755 new mode 100644 diff --git a/sample/check_conn.rb b/sample/check_conn.rb old mode 100755 new mode 100644 diff --git a/sample/copydata.rb b/sample/copydata.rb old mode 100755 new mode 100644 diff --git a/sample/copyfrom.rb b/sample/copyfrom.rb old mode 100755 new mode 100644 diff --git a/sample/copyto.rb b/sample/copyto.rb old mode 100755 new mode 100644 diff --git a/sample/cursor.rb b/sample/cursor.rb old mode 100755 new mode 100644 diff --git a/sample/disk_usage_report.rb b/sample/disk_usage_report.rb old mode 100755 new mode 100644 diff --git a/sample/issue-119.rb b/sample/issue-119.rb old mode 100755 new mode 100644 diff --git a/sample/losample.rb b/sample/losample.rb old mode 100755 new mode 100644 diff --git a/sample/minimal-testcase.rb b/sample/minimal-testcase.rb old mode 100755 new mode 100644 diff --git a/sample/notify_wait.rb b/sample/notify_wait.rb old mode 100755 new mode 100644 diff --git a/sample/pg_statistics.rb b/sample/pg_statistics.rb old mode 100755 new mode 100644 diff --git a/sample/replication_monitor.rb b/sample/replication_monitor.rb old mode 100755 new mode 100644 diff --git a/sample/test_binary_values.rb b/sample/test_binary_values.rb old mode 100755 new mode 100644 diff --git 
a/sample/wal_shipper.rb b/sample/wal_shipper.rb old mode 100755 new mode 100644 diff --git a/sample/warehouse_partitions.rb b/sample/warehouse_partitions.rb old mode 100755 new mode 100644 diff --git a/spec/helpers.rb b/spec/helpers.rb index 602c8468e..927a210ca 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -4,10 +4,12 @@ require 'rspec' require 'shellwords' require 'pg' +require 'openssl' require_relative 'helpers/scheduler.rb' require_relative 'helpers/tcp_gate_scheduler.rb' +require_relative 'helpers/tcp_gate_switcher.rb' -DEFAULT_TEST_DIR_STR = File.join(Dir.pwd, "tmp_test_specs") +DEFAULT_TEST_DIR_STR = Dir.pwd TEST_DIR_STR = ENV['RUBY_PG_TEST_DIR'] || DEFAULT_TEST_DIR_STR TEST_DIRECTORY = Pathname.new(TEST_DIR_STR) DATA_OBJ_MEMSIZE = 40 @@ -21,11 +23,30 @@ def self::included( mod ) if mod.respond_to?( :around ) mod.before( :all ) do - @conn = connect_testing_db + @port = $pg_server.port + @conninfo = $pg_server.conninfo + @unix_socket = $pg_server.unix_socket + @conn = $pg_server.connect + + # Find a local port that is not in use + @port_down = @port + 10 + loop do + @port_down = @port_down + 1 + begin + TCPSocket.new("::1", @port_down) + rescue SystemCallError + begin + TCPSocket.new("127.0.0.1", @port_down) + rescue SystemCallError + break + end + end + end end mod.around( :each ) do |example| begin + @conn.set_client_encoding "UTF8" @conn.set_default_encoding @conn.exec( 'BEGIN' ) unless example.metadata[:without_transaction] desc = example.source_location.join(':') @@ -44,6 +65,11 @@ def self::included( mod ) end @conn.exit_pipeline_mode end + @conn.setnonblocking false + @conn.type_map_for_results = PG::TypeMapAllStrings.new + @conn.type_map_for_queries = PG::TypeMapAllStrings.new + @conn.encoder_for_put_copy_data = nil + @conn.decoder_for_get_copy_data = nil @conn.exec( 'ROLLBACK' ) unless example.metadata[:without_transaction] end end @@ -91,96 +117,99 @@ def self::included( mod ) module_function ############### - ### Create a string that 
contains the ANSI codes specified and return it - def ansi_code( *attributes ) - attributes.flatten! - attributes.collect! {|at| at.to_s } + module Loggable + ### Create a string that contains the ANSI codes specified and return it + def ansi_code( *attributes ) + attributes.flatten! + attributes.collect! {|at| at.to_s } - return '' unless /(?:vt10[03]|xterm(?:-color)?|linux|screen)/i =~ ENV['TERM'] - attributes = ANSI_ATTRIBUTES.values_at( *attributes ).compact.join(';') + return '' unless /(?:vt10[03]|xterm(?:-color)?|linux|screen)/i =~ ENV['TERM'] + attributes = ANSI_ATTRIBUTES.values_at( *attributes ).compact.join(';') - # $stderr.puts " attr is: %p" % [attributes] - if attributes.empty? - return '' - else - return "\e[%sm" % attributes + # $stderr.puts " attr is: %p" % [attributes] + if attributes.empty? + return '' + else + return "\e[%sm" % attributes + end end - end - ### Colorize the given +string+ with the specified +attributes+ and return it, handling - ### line-endings, color reset, etc. - def colorize( *args ) - string = '' + ### Colorize the given +string+ with the specified +attributes+ and return it, handling + ### line-endings, color reset, etc. + def colorize( *args ) + string = '' - if block_given? - string = yield - else - string = args.shift - end + if block_given? + string = yield + else + string = args.shift + end - ending = string[/(\s)$/] || '' - string = string.rstrip + ending = string[/(\s)$/] || '' + string = string.rstrip - return ansi_code( args.flatten ) + string + ansi_code( 'reset' ) + ending - end + return ansi_code( args.flatten ) + string + ansi_code( 'reset' ) + ending + end - ### Output a message with highlighting. - def message( *msg ) - $stderr.puts( colorize(:bold) { msg.flatten.join(' ') } ) - end + ### Output a message with highlighting. 
+ def message( *msg ) + $stderr.puts( colorize(:bold) { msg.flatten.join(' ') } ) + end - ### Output a logging message if $VERBOSE is true - def trace( *msg ) - return unless $VERBOSE - output = colorize( msg.flatten.join(' '), 'yellow' ) - $stderr.puts( output ) - end + ### Output a logging message if $VERBOSE is true + def trace( *msg ) + return unless $VERBOSE + output = colorize( msg.flatten.join(' '), 'yellow' ) + $stderr.puts( output ) + end - ### Return the specified args as a string, quoting any that have a space. - def quotelist( *args ) - return args.flatten.collect {|part| part.to_s =~ /\s/ ? part.to_s.inspect : part.to_s } - end + ### Return the specified args as a string, quoting any that have a space. + def quotelist( *args ) + return args.flatten.collect {|part| part.to_s =~ /\s/ ? part.to_s.inspect : part.to_s } + end - ### Run the specified command +cmd+ with system(), failing if the execution - ### fails. - def run( *cmd ) - cmd.flatten! + ### Run the specified command +cmd+ with system(), failing if the execution + ### fails. + def run( *cmd ) + cmd.flatten! - if cmd.length > 1 - trace( quotelist(*cmd) ) - else - trace( cmd ) - end + if cmd.length > 1 + trace( quotelist(*cmd) ) + else + trace( cmd ) + end - system( *cmd ) - raise "Command failed: [%s]" % [cmd.join(' ')] unless $?.success? - end + system( *cmd ) + raise "Command failed: [%s]" % [cmd.join(' ')] unless $?.success? + end - ### Run the specified command +cmd+ after redirecting stdout and stderr to the specified - ### +logpath+, failing if the execution fails. - def log_and_run( logpath, *cmd ) - cmd.flatten! + ### Run the specified command +cmd+ after redirecting stdout and stderr to the specified + ### +logpath+, failing if the execution fails. + def log_and_run( logpath, *cmd ) + cmd.flatten! 
- if cmd.length > 1 - trace( quotelist(*cmd) ) - else - trace( cmd ) - end + if cmd.length > 1 + trace( quotelist(*cmd) ) + else + trace( cmd ) + end - # Eliminate the noise of creating/tearing down the database by - # redirecting STDERR/STDOUT to a logfile - logfh = File.open( logpath, File::WRONLY|File::CREAT|File::APPEND ) - system( *cmd, [STDOUT, STDERR] => logfh ) + # Eliminate the noise of creating/tearing down the database by + # redirecting STDERR/STDOUT to a logfile + logfh = File.open( logpath, File::WRONLY|File::CREAT|File::APPEND ) + system( *cmd, [STDOUT, STDERR] => logfh ) - raise "Command failed: [%s]" % [cmd.join(' ')] unless $?.success? + raise "Command failed: [%s]" % [cmd.join(' ')] unless $?.success? + end end + extend Loggable ### Check the current directory for directories that look like they're ### testing directories from previous tests, and tell any postgres instances @@ -208,68 +237,225 @@ def stop_existing_postmasters end end - def define_testing_conninfo - ENV['PGPORT'] ||= "54321" - @port = ENV['PGPORT'].to_i - ENV['PGHOST'] = 'localhost' - @conninfo = "host=localhost port=#{@port} dbname=test" - @unix_socket = TEST_DIRECTORY.to_s - end - - ### Set up a PostgreSQL database instance for testing. - def setup_testing_db( description ) - stop_existing_postmasters() + class PostgresServer + include Loggable + + attr_reader :port + attr_reader :conninfo + attr_reader :unix_socket + + ### Set up a PostgreSQL database instance for testing. + def initialize( name, port: 54321, postgresql_conf: '' ) + trace "Setting up test database for #{name}" + @name = name + @port = port + @test_dir = TEST_DIRECTORY + "tmp_test_#{@name}" + @test_pgdata = @test_dir + 'data' + @test_pgdata.mkpath + + @logfile = @test_dir + 'setup.log' + trace "Command output logged to #{@logfile}" + + begin + unless (@test_pgdata+"postgresql.conf").exist? 
+ FileUtils.rm_rf( @test_pgdata, :verbose => $DEBUG ) + trace "Running initdb" + log_and_run @logfile, 'initdb', '-E', 'UTF8', '--no-locale', '-D', @test_pgdata.to_s + end - trace "Setting up test database for #{description}" - @test_pgdata = TEST_DIRECTORY + 'data' - @test_pgdata.mkpath + unless (@test_pgdata+"ruby-pg-server-cert").exist? + trace "Enable SSL" + # Enable SSL in server config + File.open(@test_pgdata+"postgresql.conf", "a+") do |fd| + fd.puts <<-EOT +ssl = on +ssl_ca_file = 'ruby-pg-ca-cert' +ssl_cert_file = 'ruby-pg-server-cert' +ssl_key_file = 'ruby-pg-server-key' +#{postgresql_conf} +EOT + end - define_testing_conninfo + # Enable MD5 authentication in hba config + hba_content = File.read(@test_pgdata+"pg_hba.conf") + File.open(@test_pgdata+"pg_hba.conf", "w") do |fd| + fd.puts <<-EOT +# TYPE DATABASE USER ADDRESS METHOD +host all testusermd5 ::1/128 md5 +EOT + fd.puts hba_content + end - @logfile = TEST_DIRECTORY + 'setup.log' - trace "Command output logged to #{@logfile}" + trace "Generate certificates" + generate_ssl_certs(@test_pgdata.to_s) + end - begin - unless (@test_pgdata+"postgresql.conf").exist? - FileUtils.rm_rf( @test_pgdata, :verbose => $DEBUG ) - trace "Running initdb" - log_and_run @logfile, 'initdb', '-E', 'UTF8', '--no-locale', '-D', @test_pgdata.to_s + trace "Starting postgres" + sopt = "-p #{@port}" + sopt += " -k #{@test_dir.to_s.dump}" unless RUBY_PLATFORM=~/mingw|mswin/i + log_and_run @logfile, 'pg_ctl', '-w', '-o', sopt, + '-D', @test_pgdata.to_s, 'start' + sleep 2 + + td = @test_pgdata + @conninfo = "host=localhost port=#{@port} dbname=test sslrootcert=#{td + 'ruby-pg-ca-cert'} sslcert=#{td + 'ruby-pg-client-cert'} sslkey=#{td + 'ruby-pg-client-key'}" + @unix_socket = @test_dir.to_s + rescue => err + $stderr.puts "%p during test setup: %s" % [ err.class, err.message ] + $stderr.puts "See #{@logfile} for details." 
+ $stderr.puts err.backtrace if $DEBUG + fail end + end + + def generate_ssl_certs(output_dir) + gen = CertGenerator.new(output_dir) - trace "Starting postgres" - unix_socket = ['-o', "-k #{TEST_DIRECTORY.to_s.dump}"] unless RUBY_PLATFORM=~/mingw|mswin/i - log_and_run @logfile, 'pg_ctl', '-w', *unix_socket, - '-D', @test_pgdata.to_s, 'start' - sleep 2 + trace "create ca-key" + ca_key = gen.create_key('ruby-pg-ca-key') + ca_cert = gen.create_ca_cert('ruby-pg-ca-cert', ca_key, '/CN=ruby-pg root key') + trace "create server cert" + key = gen.create_key('ruby-pg-server-key') + csr = gen.create_signing_request('ruby-pg-server-csr', '/CN=localhost', key) + gen.create_cert_from_csr('ruby-pg-server-cert', csr, ca_cert, ca_key, dns_names: %w[localhost] ) + + trace "create client cert" + key = gen.create_key('ruby-pg-client-key') + csr = gen.create_signing_request('ruby-pg-client-csr', '/CN=ruby-pg client', key) + gen.create_cert_from_csr('ruby-pg-client-cert', csr, ca_cert, ca_key) + end + + def create_test_db trace "Creating the test DB" - log_and_run @logfile, 'psql', '-e', '-c', 'DROP DATABASE IF EXISTS test', 'postgres' - log_and_run @logfile, 'createdb', '-e', 'test' + log_and_run @logfile, 'psql', '-p', @port.to_s, '-e', '-c', 'DROP DATABASE IF EXISTS test', 'postgres' + log_and_run @logfile, 'createdb', '-p', @port.to_s, '-e', 'test' + end - rescue => err - $stderr.puts "%p during test setup: %s" % [ err.class, err.message ] - $stderr.puts "See #{@logfile} for details." 
- $stderr.puts err.backtrace if $DEBUG - fail + def connect + conn = PG.connect( @conninfo ) + conn.set_notice_processor do |message| + $stderr.puts( @name + ':' + message ) if $DEBUG + end + + return conn + end + + def teardown + trace "Tearing down test database for #{@name}" + + log_and_run @logfile, 'pg_ctl', '-D', @test_pgdata.to_s, 'stop' end end - def connect_testing_db - define_testing_conninfo - conn = PG.connect( @conninfo ) - conn.set_notice_processor do |message| - $stderr.puts( description + ':' + message ) if $DEBUG + class CertGenerator + attr_reader :output_dir + + def initialize(output_dir='.') + @output_dir = output_dir + @serial = Time.now.to_i end - return conn - end + def next_serial + @serial += 1 + end - def teardown_testing_db - trace "Tearing down test database" + def create_ca_cert(name, ca_key, x509_name, valid_years: 10) + ca_key = OpenSSL::PKey::RSA.new File.read "#{ca_key}" unless ca_key.kind_of?(OpenSSL::PKey::RSA) + ca_name = OpenSSL::X509::Name.parse x509_name - log_and_run @logfile, 'pg_ctl', '-D', @test_pgdata.to_s, 'stop' - end + ca_cert = OpenSSL::X509::Certificate.new + ca_cert.serial = next_serial + ca_cert.version = 2 + ca_cert.not_before = Time.now + ca_cert.not_after = Time.now + valid_years*365*24*60*60 + + ca_cert.public_key = ca_key.public_key + ca_cert.subject = ca_name + ca_cert.issuer = ca_name + + extension_factory = OpenSSL::X509::ExtensionFactory.new + extension_factory.subject_certificate = ca_cert + extension_factory.issuer_certificate = ca_cert + ca_cert.add_extension extension_factory.create_extension('subjectKeyIdentifier', 'hash') + ca_cert.add_extension extension_factory.create_extension('basicConstraints', 'CA:TRUE', true) + ca_cert.add_extension extension_factory.create_extension('keyUsage', 'cRLSign,keyCertSign', true) + + ca_cert.sign ca_key, OpenSSL::Digest::SHA256.new + + File.open "#{output_dir}/#{name}", 'w' do |io| + io.puts ca_cert.to_text + io.write ca_cert.to_pem + end + ca_cert + end + + def 
create_key(name, rsa_size: 2048) + ca_key = OpenSSL::PKey::RSA.new rsa_size + + #cipher = OpenSSL::Cipher.new 'AES-128-CBC' + + File.open "#{output_dir}/#{name}", 'w', 0600 do |io| + io.puts ca_key.to_text + io.write ca_key.export # (cipher) + end + ca_key + end + + def create_signing_request(name, x509_name, key) + key = OpenSSL::PKey::RSA.new File.read "#{key}" unless key.kind_of?(OpenSSL::PKey::RSA) + csr = OpenSSL::X509::Request.new + csr.version = 0 + csr.subject = OpenSSL::X509::Name.parse x509_name + csr.public_key = key.public_key + csr.sign key, OpenSSL::Digest::SHA256.new + + File.open "#{output_dir}/#{name}", 'w' do |io| + io.puts csr.to_text + io.write csr.to_pem + end + csr + end + + def create_cert_from_csr(name, csr, ca_cert, ca_key, valid_years: 10, dns_names: nil) + ca_key = OpenSSL::PKey::RSA.new File.read "#{ca_key}" unless ca_key.kind_of?(OpenSSL::PKey::RSA) + ca_cert = OpenSSL::X509::Certificate.new File.read "#{ca_cert}" unless ca_cert.kind_of?(OpenSSL::X509::Certificate) + csr = OpenSSL::X509::Request.new File.read "#{csr}" unless csr.kind_of?(OpenSSL::X509::Request) + raise 'CSR can not be verified' unless csr.verify csr.public_key + + csr_cert = OpenSSL::X509::Certificate.new + csr_cert.serial = next_serial + csr_cert.version = 2 + csr_cert.not_before = Time.now + csr_cert.not_after = Time.now + valid_years*365*24*60*60 + + csr_cert.subject = csr.subject + csr_cert.public_key = csr.public_key + csr_cert.issuer = ca_cert.subject + + extension_factory = OpenSSL::X509::ExtensionFactory.new + extension_factory.subject_certificate = csr_cert + extension_factory.issuer_certificate = ca_cert + + csr_cert.add_extension extension_factory.create_extension('basicConstraints', 'CA:FALSE') + csr_cert.add_extension extension_factory.create_extension('keyUsage', 'keyEncipherment,dataEncipherment,digitalSignature') + csr_cert.add_extension extension_factory.create_extension('subjectKeyIdentifier', 'hash') + if dns_names + san = dns_names.map{|n| "DNS:#{n}" 
}.join(",") + csr_cert.add_extension extension_factory.create_extension('subjectAltName', san) + end + + csr_cert.sign ca_key, OpenSSL::Digest::SHA256.new + + open "#{output_dir}/#{name}", 'w' do |io| + io.puts csr_cert.to_text + io.write csr_cert.to_pem + end + + csr_cert + end + end def check_for_lingering_connections( conn ) conn.exec( "SELECT * FROM pg_stat_activity" ) do |res| @@ -379,12 +565,103 @@ def wait_for_query_result(conn) def wait_for_flush(conn) until conn.flush() # wait for the socket to become read- or write-ready - readable, writable = IO.select([conn.socket_io], [conn.socket_io]) + readable, _writable = IO.select([conn.socket_io], [conn.socket_io]) if readable.any? conn.consume_input end end end + + def scheduler_setup + # Run examples with gated scheduler + sched = Helpers::TcpGateScheduler.new(external_host: 'localhost', external_port: ENV['PGPORT'].to_i, debug: ENV['PG_DEBUG']=='1') + Fiber.set_scheduler(sched) + @conninfo_gate = @conninfo.gsub(/(^| )port=\d+/, " port=#{sched.internal_port} sslmode=disable") + + # Run examples with default scheduler + #Fiber.set_scheduler(Helpers::Scheduler.new) + #@conninfo_gate = @conninfo + + # Run examples without scheduler + #def Fiber.schedule; yield; end + #@conninfo_gate = @conninfo + end + + def scheduler_teardown + Fiber.set_scheduler(nil) + end + + def scheduler_stop + if Fiber.scheduler && Fiber.scheduler.respond_to?(:finish) + Fiber.scheduler.finish + end + end + + def thread_with_timeout(timeout) + th = Thread.new do + yield + end + unless th.join(timeout) + th.kill + $scheduler_timeout = true + raise("scheduler timeout in:\n#{th.backtrace.join("\n")}") + end + end + + def run_with_scheduler(timeout=10) + thread_with_timeout(timeout) do + scheduler_setup + Fiber.schedule do + conn = PG.connect(@conninfo_gate) + + yield conn + + conn.finish + scheduler_stop + end + end + scheduler_teardown + end + + def gate_setup + # Run examples with gate + gate = Helpers::TcpGateSwitcher.new(external_host: 
'localhost', external_port: ENV['PGPORT'].to_i, debug: ENV['PG_DEBUG']=='1') + @conninfo_gate = @conninfo.gsub(/(^| )port=\d+/, " port=#{gate.internal_port} sslmode=disable") + + # Run examples without gate + #@conninfo_gate = @conninfo + gate + end + + def gate_stop(gate) + gate&.finish + end + + def run_with_gate(timeout=10) + thread_with_timeout(timeout) do + gate = gate_setup + conn = PG.connect(@conninfo_gate) + + yield conn, gate + + conn.finish + gate_stop(gate) + end + end + + # Define environment variables for the time of the given block + # + # All environment variables are restored to the original value or undefined after the block. + def with_env_vars(**kwargs) + kwargs = kwargs.map{|k,v| [k.to_s, v && v.to_s] }.to_h + old_values = kwargs.map{|k,_| [k, ENV[k]] }.to_h + ENV.update(kwargs) + begin + yield + ensure + ENV.update(old_values) + end + end end @@ -411,14 +688,21 @@ def wait_for_flush(conn) config.filter_run_excluding( :postgresql_12 ) if PG.library_version < 120000 config.filter_run_excluding( :postgresql_14 ) if PG.library_version < 140000 config.filter_run_excluding( :unix_socket ) if RUBY_PLATFORM=~/mingw|mswin/i - config.filter_run_excluding( :scheduler ) if RUBY_VERSION < "3.0" + config.filter_run_excluding( :scheduler ) if RUBY_VERSION < "3.0" || !Fiber.respond_to?(:scheduler) config.filter_run_excluding( :scheduler_address_resolve ) if RUBY_VERSION < "3.1" + config.filter_run_excluding( :ipv6 ) if Addrinfo.getaddrinfo("localhost", nil, nil, :STREAM).size < 2 ### Automatically set up and tear down the database config.before(:suite) do |*args| - PG::TestingHelpers.setup_testing_db("the spec suite") + PG::TestingHelpers.stop_existing_postmasters + + ENV['PGHOST'] = 'localhost' + ENV['PGPORT'] ||= "54321" + port = ENV['PGPORT'].to_i + $pg_server = PG::TestingHelpers::PostgresServer.new("specs", port: port) + $pg_server.create_test_db end config.after(:suite) do - PG::TestingHelpers.teardown_testing_db + $pg_server.teardown end end diff --git 
a/spec/helpers/tcp_gate_scheduler.rb b/spec/helpers/tcp_gate_scheduler.rb index 5e06e95e3..9d48f34dc 100644 --- a/spec/helpers/tcp_gate_scheduler.rb +++ b/spec/helpers/tcp_gate_scheduler.rb @@ -277,7 +277,7 @@ def io_wait(io, events, duration) # compare and store the fileno for debugging if conn.observed_fd && conn.observed_fd != io.fileno - raise "observed fd changed: old:#{conn.observed_fd} new:#{io.fileno}" + puts "observed fd changed: old:#{conn.observed_fd} new:#{io.fileno}" end conn.observed_fd = io.fileno diff --git a/spec/helpers/tcp_gate_switcher.rb b/spec/helpers/tcp_gate_switcher.rb new file mode 100644 index 000000000..954edad40 --- /dev/null +++ b/spec/helpers/tcp_gate_switcher.rb @@ -0,0 +1,190 @@ +# frozen_string_literal: true + +# This is a transparent TCP proxy for testing blocking behaviour in a time insensitive way. +# +# It works as a gate between the client and the server, which is enabled or disabled by the spec. +# Data transfer can be blocked per API. +# The TCP communication in a C extension can be verified in a (mostly) timing insensitive way. +# If a call does IO but doesn't handle non-blocking state, the test will block and can be caught by an external timeout. 
+# +# PG.connect +# port:5444 TcpGateSwitcher DB +# ------------- ---------------------------------------- -------- +# | non- | | TCPServer TCPSocket | | | +# | blocking |----->| port 5444 port 5432|----->|Server| +# | specs | | | | port | +# '------|----' |,--> stop_read : <-send data-- | | 5432 | +# '---------------> stop_write: --send data-> | '------' +# '--------------------------------------' + +module Helpers +class TcpGateSwitcher + class Connection + attr_reader :internal_io + attr_reader :external_io + + def initialize(internal_io, external_host, external_port, debug: false) + @internal_io = internal_io + @external_host = external_host + @external_port = external_port + @external_io = nil + @mutex = Mutex.new + @debug = debug + @wait = nil + + Thread.new do + read + end + Thread.new do + write + end + end + + def print_data(desc, data) + return unless @debug + if data.bytesize >= 70 + sdata = data[0..70] + puts "#{desc}: #{sdata.inspect} (... #{data.bytesize} bytes)" + else + puts "#{desc}: #{data.inspect} (#{data.bytesize} bytes)" + end + end + + def puts(*args) + return unless @debug + super + end + + def connect + # Not yet connected? 
+ @mutex.synchronize do + if !@external_io + @external_io = TCPSocket.new(@external_host, @external_port) + puts "connected ext:#{@external_io.inspect} (belongs to int:#{@internal_io.fileno})" + end + end + end + + # transfer data in read direction + def read + connect + + loop do + @wait&.deq + begin + read_str = @external_io.read_nonblock(65536) + print_data("read-transfer #{read_fds}", read_str) + @internal_io.write(read_str) + rescue IO::WaitReadable, Errno::EINTR + @external_io.wait_readable + rescue EOFError, Errno::ECONNRESET + puts "read_eof from #{read_fds}" + @internal_io.close_write + break + end + end + end + + # transfer data in write direction + def write + connect + + # transfer data blocks of up to 65536 bytes + loop do + @wait&.deq + begin + read_str = @internal_io.read_nonblock(65536) + print_data("write-transfer #{write_fds}", read_str) + @external_io.write(read_str) + rescue IO::WaitReadable, Errno::EINTR + @internal_io.wait_readable + rescue EOFError, Errno::ECONNRESET + puts "write_eof from #{write_fds}" + @external_io.close_write + break + end + end + end + + def read_fds + "ext:#{@external_io&.fileno || '-'}->int:#{@internal_io.fileno}" + end + + def write_fds + "int:#{@internal_io.fileno}->ext:#{@external_io&.fileno || '-'}" + end + + # Make sure all data is transferred and both connections are closed. 
+ def finish + puts "finish transfers #{write_fds} and #{read_fds}" + write + read + end + + def start + @wait&.close + @wait = nil + end + + def stop + @wait ||= Queue.new + end + end + + UnknownConnection = Struct.new :fileno, :events + + def initialize(external_host:, external_port:, internal_host: 'localhost', internal_port: 0, debug: false) + super() + @connections = [] + @server_io = TCPServer.new(internal_host, internal_port) + @external_host = external_host + @external_port = external_port + @finish = false + @debug = debug + puts "TcpGate server listening: #{@server_io.inspect}" + + run + end + + def finish + @finish = true + TCPSocket.new('localhost', internal_port).close + end + + def internal_port + @server_io.local_address.ip_port + end + + def start + @connections.each(&:start) + end + + def stop + @connections.each(&:stop) + end + + def run + Thread.new do + # Wait for new connections to the TCP gate + while client=@server_io.accept + if @finish + @connections.each(&:finish) + break + else + conn = Connection.new(client, @external_host, @external_port, debug: @debug) + puts "accept new int:#{conn.internal_io.inspect} from #{conn.internal_io.remote_address.inspect} server fd:#{@server_io.fileno}" + @connections << conn + + # Handle the reading and writing in a separate thread + conn.start + end + + # Remove old connections + @connections.reject! do |conn| + conn.internal_io.closed? || conn.external_io&.closed? 
+ end + end + end + end +end +end diff --git a/spec/pg/basic_type_map_for_results_spec.rb b/spec/pg/basic_type_map_for_results_spec.rb index e99d2287a..9ca01de91 100644 --- a/spec/pg/basic_type_map_for_results_spec.rb +++ b/spec/pg/basic_type_map_for_results_spec.rb @@ -130,7 +130,7 @@ [1, 0].each do |format| it "should convert format #{format} timestamps per TimestampUtc" do - regi = PG::BasicTypeRegistry.new.define_default_types + regi = PG::BasicTypeRegistry.new.register_default_types regi.register_type 0, 'timestamp', nil, PG::TextDecoder::TimestampUtc @conn.type_map_for_results = PG::BasicTypeMapForResults.new(@conn, registry: regi) res = @conn.exec_params( "SELECT CAST('2013-07-31 23:58:59+02' AS TIMESTAMP WITHOUT TIME ZONE), diff --git a/spec/pg/basic_type_registry_spec.rb b/spec/pg/basic_type_registry_spec.rb index 8d3f854fb..374fe626c 100644 --- a/spec/pg/basic_type_registry_spec.rb +++ b/spec/pg/basic_type_registry_spec.rb @@ -7,8 +7,9 @@ describe PG::BasicTypeRegistry do it "can register_type" do regi = PG::BasicTypeRegistry.new - regi.register_type(1, 'int4', PG::BinaryEncoder::Int8, PG::BinaryDecoder::Integer) + res = regi.register_type(1, 'int4', PG::BinaryEncoder::Int8, PG::BinaryDecoder::Integer) + expect( res ).to be( regi ) expect( regi.coders_for(1, :encoder)['int4'] ).to be_kind_of(PG::BinaryEncoder::Int8) expect( regi.coders_for(1, :decoder)['int4'] ).to be_kind_of(PG::BinaryDecoder::Integer) end @@ -16,17 +17,37 @@ it "can alias_type" do regi = PG::BasicTypeRegistry.new regi.register_type(1, 'int4', PG::BinaryEncoder::Int4, PG::BinaryDecoder::Integer) - regi.alias_type(1, 'int8', 'int4') + res = regi.alias_type(1, 'int8', 'int4') + expect( res ).to be( regi ) expect( regi.coders_for(1, :encoder)['int8'] ).to be_kind_of(PG::BinaryEncoder::Int4) expect( regi.coders_for(1, :decoder)['int8'] ).to be_kind_of(PG::BinaryDecoder::Integer) end + it "can register_default_types" do + regi = PG::BasicTypeRegistry.new + res = regi.register_default_types + 
+ expect( res ).to be( regi ) + expect( regi.coders_for(0, :encoder)['float8'] ).to be_kind_of(PG::TextEncoder::Float) + expect( regi.coders_for(0, :decoder)['float8'] ).to be_kind_of(PG::TextDecoder::Float) + end + + it "can define_default_types (alias to register_default_types)" do + regi = PG::BasicTypeRegistry.new + res = regi.define_default_types + + expect( res ).to be( regi ) + expect( regi.coders_for(0, :encoder)['float8'] ).to be_kind_of(PG::TextEncoder::Float) + expect( regi.coders_for(0, :decoder)['float8'] ).to be_kind_of(PG::TextDecoder::Float) + end + it "can register_coder" do regi = PG::BasicTypeRegistry.new enco = PG::BinaryEncoder::Int8.new(name: 'test') - regi.register_coder(enco) + res = regi.register_coder(enco) + expect( res ).to be( regi ) expect( regi.coders_for(1, :encoder)['test'] ).to be(enco) expect( regi.coders_for(1, :decoder)['test'] ).to be_nil end diff --git a/spec/pg/connection_async_spec.rb b/spec/pg/connection_async_spec.rb new file mode 100644 index 000000000..36e6be7e3 --- /dev/null +++ b/spec/pg/connection_async_spec.rb @@ -0,0 +1,138 @@ +# -*- rspec -*- +#encoding: utf-8 + +require_relative '../helpers' + +require 'socket' +require 'pg' + +describe PG::Connection do + + it "tries to connect to localhost with IPv6 and IPv4", :ipv6, :postgresql_10 do + uri = "postgres://localhost:#{@port+1}/test" + expect(described_class).to receive(:parse_connect_args).once.ordered.with(uri, any_args).and_call_original + expect(described_class).to receive(:parse_connect_args).once.ordered.with(hash_including(hostaddr: "::1,127.0.0.1")).and_call_original + expect{ described_class.connect( uri ) }.to raise_error(PG::ConnectionBad) + end + + def interrupt_thread(exc=nil) + start = Time.now + t = Thread.new do + begin + yield + rescue Exception => err + err + end + end + sleep 0.1 + + if exc + t.raise exc, "Stop the query by #{exc}" + else + t.kill + end + t.join + + [t, Time.now - start] + end + + it "can stop a thread that runs a blocking query 
with exec" do + t, duration = interrupt_thread do + @conn.exec( 'select pg_sleep(10)' ) + end + + expect( t.value ).to be_nil + expect( duration ).to be < 10 + @conn.cancel # Stop the query that is still running on the server + end + + describe "#transaction" do + + it "stops a thread that runs a blocking transaction with exec" do + t, duration = interrupt_thread(Interrupt) do + @conn.transaction do |c| + c.exec( 'select pg_sleep(10)' ) + end + end + + expect( t.value ).to be_kind_of( Interrupt ) + expect( duration ).to be < 10 + end + + it "stops a thread that runs a failing transaction with exec" do + t, duration = interrupt_thread(Interrupt) do + @conn.transaction do |c| + c.exec( 'select nonexist' ) + end + end + + expect( t.value ).to be_kind_of( PG::UndefinedColumn ) + expect( duration ).to be < 10 + end + + it "stops a thread that runs a no query but a transacted ruby sleep" do + t, duration = interrupt_thread(Interrupt) do + @conn.transaction do + sleep 10 + end + end + + expect( t.value ).to be_kind_of( Interrupt ) + expect( duration ).to be < 10 + end + + it "doesn't worry about an already finished connection" do + t, _ = interrupt_thread(Interrupt) do + @conn.transaction do + @conn.exec("ROLLBACK") + end + end + + expect( t.value ).to be_kind_of( PG::Result ) + expect( t.value.result_status ).to eq( PG::PGRES_COMMAND_OK ) + end + end + + it "should work together with signal handlers", :unix do + signal_received = false + trap 'USR2' do + signal_received = true + end + + Thread.new do + sleep 0.1 + Process.kill("USR2", Process.pid) + end + @conn.exec("select pg_sleep(0.3)") + expect( signal_received ).to be_truthy + end + + context "OS thread support" do + it "Connection#exec shouldn't block a second thread" do + t = Thread.new do + @conn.exec( "select pg_sleep(1)" ) + end + + sleep 0.1 + expect( t ).to be_alive() + t.kill + @conn.cancel + end + + it "Connection.new shouldn't block a second thread" do + serv = nil + t = Thread.new do + serv = 
TCPServer.new( '127.0.0.1', 54320 ) + expect { + described_class.connect( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) + }.to raise_error(PG::ConnectionBad, /server closed the connection unexpectedly/) + end + + sleep 0.5 + expect( t ).to be_alive() + serv.close + t.join + end + end + +end diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index b3e756b95..caa3985df 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -14,24 +14,81 @@ expect( ObjectSpace.memsize_of(@conn) ).to be > DATA_OBJ_MEMSIZE end - describe "PG::Connection#connect_string_to_hash" do + describe "#inspect", :without_transaction do + it "should print host, port and user of a fresh connection, but not more" do + expect( @conn.inspect ).to match(//) + end + + it "should tell about finished connection" do + conn = PG.connect(@conninfo) + conn.finish + expect( conn.inspect ).to match(//) + end + + it "should tell about connection status" do + conn = PG::Connection.connect_start(@conninfo) + expect( conn.inspect ).to match(/ status=CONNECTION_STARTED/) + end + + it "should tell about pipeline mode", :postgresql_14 do + @conn.enter_pipeline_mode + expect( @conn.inspect ).to match(/ pipeline_status=PQ_PIPELINE_ON/) + end + + it "should tell about transaction_status" do + @conn.send_query "select 8" + expect( @conn.inspect ).to match(/ transaction_status=PQTRANS_ACTIVE/) + end + + it "should tell about nonblocking mode" do + @conn.setnonblocking true + expect( @conn.inspect ).to match(/ nonblocking=true/) + end + + it "should tell about non UTF8 client encoding" do + @conn.set_client_encoding "ISO-8859-1" + expect( @conn.inspect ).to match(/ client_encoding=LATIN1/) + end + + it "should tell about non default type_map_for_results" do + @conn.type_map_for_results = PG::TypeMapByColumn.new([]) + expect( @conn.inspect ).to match(/ type_map_for_results=#/) + end + + it "should tell about non default type_map_for_queries" do + @conn.type_map_for_queries = 
PG::TypeMapByColumn.new([]) + expect( @conn.inspect ).to match(/ type_map_for_queries=#/) + end + + it "should tell about encoder_for_put_copy_data" do + @conn.encoder_for_put_copy_data = PG::TextEncoder::CopyRow.new + expect( @conn.inspect ).to match(/ encoder_for_put_copy_data=#/) + end + + it "should tell about decoder_for_get_copy_data" do + @conn.decoder_for_get_copy_data = PG::TextDecoder::CopyRow.new + expect( @conn.inspect ).to match(/ decoder_for_get_copy_data=#/) + end + end + + describe "PG::Connection#conninfo_parse" do it "encode and decode Hash to connection string to Hash" do hash = { :host => 'pgsql.example.com', :dbname => 'db01', 'sslmode' => 'require', - 'somekey' => '', + 'service' => '', 'password' => "\\ \t\n\"'", } optstring = described_class.connect_hash_to_string(hash) - res = described_class.connect_string_to_hash(optstring) + res = described_class.conninfo_parse(optstring).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] } expect( res ).to eq( hash.transform_keys(&:to_sym) ) end it "decode option string to Hash" do optstring = "host=overwritten host=c:\\\\pipe password = \\\\\\'\" " - res = described_class.connect_string_to_hash(optstring) + res = described_class.conninfo_parse(optstring).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] } expect( res ).to eq({ host: 'c:\pipe', @@ -39,12 +96,25 @@ }) end - it "raises error when decoding invalid option string" do - optstring = "host='abc" - expect{ described_class.connect_string_to_hash(optstring) }.to raise_error(ArgumentError, /unterminated quoted string/) + it "can parse connection info strings kind of key=value" do + ar = PG::Connection.conninfo_parse("user=auser host=somehost port=3334 dbname=db") + expect( ar ).to be_kind_of( Array ) + expect( ar.first ).to be_kind_of( Hash ) + expect( ar.map{|a| a[:keyword] } ).to include( "dbname", "user", "password", "port" ) + expect( ar.map{|a| a[:val] } ).to include( "auser", "somehost", "3334", "db" ) 
+ end + + it "can parse connection info strings kind of URI" do + ar = PG::Connection.conninfo_parse("postgresql://auser@somehost:3334/db") + expect( ar ).to be_kind_of( Array ) + expect( ar.first ).to be_kind_of( Hash ) + expect( ar.map{|a| a[:keyword] } ).to include( "dbname", "user", "password", "port" ) + expect( ar.map{|a| a[:val] } ).to include( "auser", "somehost", "3334", "db" ) + end - optstring = "host" - expect{ described_class.connect_string_to_hash(optstring) }.to raise_error(ArgumentError, /missing = after/) + it "can parse connection info strings with error" do + expect{ PG::Connection.conninfo_parse("host='abc") }.to raise_error(PG::Error, /unterminated quoted string/) + expect{ PG::Connection.conninfo_parse("host") }.to raise_error(PG::Error, /missing "=" after/) end end @@ -88,7 +158,6 @@ expect( optstring ).to match( /(^|\s)host='localhost'/ ) expect( optstring ).to match( /(^|\s)dbname='sales'/ ) expect( optstring ).to match( /(^|\s)options='-c geqo=off'/ ) - expect( optstring ).to match( /(^|\s)hostaddr='(::1|127.0.0.1)'/ ) expect( optstring ).to_not match( /port=/ ) expect( optstring ).to_not match( /tty=/ ) @@ -111,9 +180,8 @@ 'host' => 'www.ruby-lang.org,nonexisting-domaiiin.xyz,localhost' ) expect( optstring ).to be_a( String ) - expect( optstring ).to match( /(^|\s)dbname=original/ ) + expect( optstring ).to match( /(^|\s)dbname='original'/ ) expect( optstring ).to match( /(^|\s)user='jrandom'/ ) - expect( optstring ).to match( /(^|\s)hostaddr='\d+\.\d+\.\d+\.\d+,,(::1|127\.0\.0\.1)'/ ) end it "escapes single quotes and backslashes in connection parameters" do @@ -124,50 +192,38 @@ let(:uri) { 'postgresql://user:pass@pgsql.example.com:222/db01?sslmode=require&hostaddr=4.3.2.1' } - it "accepts an URI" do + it "accepts an URI string" do string = described_class.parse_connect_args( uri ) expect( string ).to be_a( String ) - expect( string ).to match( %r{^postgresql://user:pass@pgsql.example.com:222/db01\?} ) - expect( string ).to match( 
%r{\?.*sslmode=require} ) + expect( string ).to match( %r{^user='user' password='pass' dbname='db01' host='pgsql.example.com' hostaddr='4.3.2.1' port='222' sslmode='require' fallback_application_name} ) + end + it "accepts an URI object" do string = described_class.parse_connect_args( URI.parse(uri) ) expect( string ).to be_a( String ) - expect( string ).to match( %r{^postgresql://user:pass@pgsql.example.com:222/db01\?} ) - expect( string ).to match( %r{\?.*sslmode=require} ) + expect( string ).to match( %r{^user='user' password='pass' dbname='db01' host='pgsql.example.com' hostaddr='4.3.2.1' port='222' sslmode='require' fallback_application_name} ) end it "accepts an URI and adds parameters from hash" do string = described_class.parse_connect_args( uri + "&fallback_application_name=testapp", :connect_timeout => 2 ) expect( string ).to be_a( String ) - expect( string ).to match( %r{^postgresql://user:pass@pgsql.example.com:222/db01\?} ) - expect( string ).to match( %r{\?sslmode=require&} ) - expect( string ).to match( %r{\?.*&fallback_application_name=testapp&} ) - expect( string ).to match( %r{\?.*&connect_timeout=2$} ) - end - - it "accepts an URI and adds hostaddr" do - uri = 'postgresql://www.ruby-lang.org,nonexisting-domaiiin.xyz,localhost' - string = described_class.parse_connect_args( uri ) - - expect( string ).to be_a( String ) - expect( string ).to match( %r{^postgresql://www.ruby-lang.org,nonexisting-domaiiin.xyz,localhost\?hostaddr=\d+\.\d+\.\d+\.\d+%2C%2C(%3A%3A1|127\.0\.0\.1)} ) + expect( string ).to match( %r{^user='user' password='pass' dbname='db01' host='pgsql.example.com' hostaddr='4.3.2.1' port='222' fallback_application_name='testapp' sslmode='require' connect_timeout='2'} ) end it "accepts an URI with a non-standard domain socket directory" do string = described_class.parse_connect_args( 'postgresql://%2Fvar%2Flib%2Fpostgresql/dbname' ) expect( string ).to be_a( String ) - expect( string ).to match( 
%r{^postgresql://%2Fvar%2Flib%2Fpostgresql/dbname} ) + expect( string ).to match( %r{^dbname='dbname' host='/var/lib/postgresql'} ) string = described_class. parse_connect_args( 'postgresql:///dbname', :host => '/var/lib/postgresql' ) expect( string ).to be_a( String ) - expect( string ).to match( %r{^postgresql:///dbname\?} ) - expect( string ).to match( %r{\?.*host=%2Fvar%2Flib%2Fpostgresql} ) + expect( string ).to match( %r{^dbname='dbname' host='/var/lib/postgresql'} ) end it "connects with defaults if no connection parameters are given" do @@ -180,12 +236,11 @@ string = described_class.parse_connect_args( conninfo_with_colon_in_password ) expect( string ).to be_a( String ) - expect( string ).to match( %r{(^|\s)user=a} ) - expect( string ).to match( %r{(^|\s)password=a:a} ) - expect( string ).to match( %r{(^|\s)host=localhost} ) - expect( string ).to match( %r{(^|\s)port=555} ) - expect( string ).to match( %r{(^|\s)dbname=test} ) - expect( string ).to match( %r{(^|\s)hostaddr='(::1|127\.0\.0\.1)'} ) + expect( string ).to match( %r{(^|\s)user='a'} ) + expect( string ).to match( %r{(^|\s)password='a:a'} ) + expect( string ).to match( %r{(^|\s)host='localhost'} ) + expect( string ).to match( %r{(^|\s)port='555'} ) + expect( string ).to match( %r{(^|\s)dbname='test'} ) end it "sets the fallback_application_name on new connections" do @@ -257,50 +312,137 @@ it "emits a suitable error_message at connection errors" do skip("Will be fixed in postgresql-15 on Windows") if RUBY_PLATFORM=~/mingw|mswin/ - expect { - described_class.connect( - :host => 'localhost', - :port => @port, - :dbname => "non-existent") - }.to raise_error do |error| + expect { + described_class.connect( + :host => 'localhost', + :port => @port, + :dbname => "non-existent") + }.to raise_error do |error| expect( error ).to be_an( PG::ConnectionBad ) expect( error.message ).to match( /database "non-existent" does not exist/i ) expect( error.message.encoding ).to eq( Encoding::BINARY ) end end - it 
"connects using URI with multiple hosts", :postgresql_10 do - uri = "postgres://localhost:#{@port},127.0.0.1:#{@port}/test?keepalives=1" + it "times out after connect_timeout seconds" do + TCPServer.open( 'localhost', 54320 ) do |serv| + start_time = Time.now + expect { + described_class.connect( + host: 'localhost', + port: 54320, + connect_timeout: 1, + dbname: "test") + }.to raise_error do |error| + expect( error ).to be_an( PG::ConnectionBad ) + expect( error.message ).to match( /timeout expired/ ) + if PG.library_version >= 120000 + expect( error.message ).to match( /\"localhost\"/ ) + expect( error.message ).to match( /port 54320/ ) + end + end + + expect( Time.now - start_time ).to be_between(1.9, 10).inclusive + end + end + + context "with multiple PostgreSQL servers", :without_transaction do + before :all do + @port_ro = @port + 1 + @dbms = PG::TestingHelpers::PostgresServer.new("read-only", + port: @port_ro, + postgresql_conf: "default_transaction_read_only=on" + ) + end + + after :all do + @dbms&.teardown + end + + it "honors target_session_attrs requirements", :postgresql_10 do + uri = "postgres://localhost:#{@port_ro},localhost:#{@port}/postgres?target_session_attrs=read-write" + PG.connect(uri) do |conn| + expect( conn.port ).to eq( @port ) + end + + uri = "postgres://localhost:#{@port_ro},localhost:#{@port}/postgres?target_session_attrs=any" + PG.connect(uri) do |conn| + expect( conn.port ).to eq( @port_ro ) + end + end + end + + it "stops hosts iteration on authentication errors", :without_transaction, :ipv6, :postgresql_10 do + @conn.exec("DROP USER IF EXISTS testusermd5") + @conn.exec("CREATE USER testusermd5 PASSWORD 'secret'") + + uri = "host=::1,::1,127.0.0.1 port=#{@port_down},#{@port},#{@port} dbname=postgres user=testusermd5 password=wrong" + error_match = if RUBY_PLATFORM=~/mingw|mswin/ + # It's a long standing issue of libpq, that the error text is not correctly returned when both client and server are running on Windows. 
+ # Instead a "Connection refused" is returned. + /authenti.*testusermd5|Connection refused|server closed the connection unexpectedly/i + else + /authenti.*testusermd5/i + end + expect { PG.connect(uri) }.to raise_error(error_match) + + uri = "host=::1,::1,127.0.0.1 port=#{@port_down},#{@port},#{@port} dbname=postgres user=testusermd5 password=secret" + PG.connect(uri) do |conn| + expect( conn.host ).to eq( "::1" ) + expect( conn.port ).to eq( @port ) + end + + uri = "host=::1,::1,127.0.0.1 port=#{@port_down},#{@port_down},#{@port} dbname=postgres user=testusermd5 password=wrong" + PG.connect(uri) do |conn| + expect( conn.host ).to eq( "127.0.0.1" ) + expect( conn.port ).to eq( @port ) + end + end + + it "connects using URI with multiple hosts", :postgresql_12 do - uri = "postgres://localhost:#{@port},127.0.0.1:#{@port}/test?keepalives=1" + uri = "postgres://localhost:#{@port_down},127.0.0.1:#{@port}/test?keepalives=1" tmpconn = described_class.connect( uri ) expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) - expect( tmpconn.conninfo_hash[:host] ).to eq( "localhost,127.0.0.1" ) - expect( tmpconn.conninfo_hash[:hostaddr] ).to match( /\A(::1|127\.0\.0\.1),(::1|127\.0\.0\.1)\z/ ) + expect( tmpconn.port ).to eq( @port ) + expect( tmpconn.host ).to eq( "127.0.0.1" ) + expect( tmpconn.hostaddr ).to match( /\A(::1|127\.0\.0\.1)\z/ ) tmpconn.finish end - it "connects using URI with IPv6 hosts", :postgresql_10 do + it "connects using URI with IPv6 hosts", :postgresql_12, :ipv6 do uri = "postgres://localhost:#{@port},[::1]:#{@port},/test" tmpconn = described_class.connect( uri ) expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) - expect( tmpconn.conninfo_hash[:host] ).to eq( "localhost,::1," ) - expect( tmpconn.conninfo_hash[:hostaddr] ).to match( /\A(::1|127\.0\.0\.1),::1,\z/ ) + expect( tmpconn.host ).to eq( "localhost" ) + expect( tmpconn.hostaddr ).to match( /\A(::1|127\.0\.0\.1)\z/ ) tmpconn.finish end - it "connects using URI with UnixSocket host", :postgresql_10, :unix_socket do + it "connects using URI with UnixSocket host", 
:postgresql_12, :unix_socket do uri = "postgres://#{@unix_socket.gsub("/", "%2F")}:#{@port}/test" tmpconn = described_class.connect( uri ) expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) - expect( tmpconn.conninfo_hash[:host] ).to eq( @unix_socket ) - expect( tmpconn.conninfo_hash[:hostaddr] ).to be_nil + expect( tmpconn.host ).to eq( @unix_socket ) + expect( tmpconn.hostaddr ).to eq( "" ) tmpconn.finish end - it "connects using Hash with multiple hosts", :postgresql_10 do - tmpconn = described_class.connect( host: "#{@unix_socket},127.0.0.1,localhost", port: @port, dbname: "test" ) + it "connects with environment variables" do + skip("Is broken before postgresql-12 on Windows") if RUBY_PLATFORM=~/mingw|mswin/ && PG.library_version < 120000 + + tmpconn = with_env_vars(PGHOST: "localhost", PGPORT: @port, PGDATABASE: "test") do + described_class.connect + end expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) - expect( tmpconn.conninfo_hash[:host] ).to eq( "#{@unix_socket},127.0.0.1,localhost" ) - expect( tmpconn.conninfo_hash[:hostaddr] ).to match( /\A,(::1|127\.0\.0\.1),(::1|127\.0\.0\.1)\z/ ) + expect( tmpconn.host ).to eq( "localhost" ) + tmpconn.finish + end + + it "connects using Hash with multiple hosts", :postgresql_12 do + tmpconn = described_class.connect( host: "#{@unix_socket}xx,127.0.0.1,localhost", port: @port, dbname: "test" ) + expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) + expect( tmpconn.host ).to eq( "127.0.0.1" ) + expect( tmpconn.hostaddr ).to match( /\A127\.0\.0\.1\z/ ) tmpconn.finish end @@ -309,10 +451,10 @@ klass = Class.new(described_class) do alias execute exec end - conn = klass.send(meth, @conninfo) - expect( conn ).to be_a_kind_of( klass ) - expect( conn.execute("SELECT 1") ).to be_a_kind_of( PG::Result ) - conn.close + klass.send(meth, @conninfo) do |conn| + expect( conn ).to be_a_kind_of( klass ) + expect( conn.execute("SELECT 1") ).to be_a_kind_of( PG::Result ) + end end end @@ -372,6 +514,34 @@ res = @conn2.query("SELECT 
4") end + it "can work with changing IO while connection setup", :postgresql_95 do + # The file_no of the socket IO can change while connecting. + # This can happen when alternative hosts are tried, + # while GSS authentication + # and when falling back to unencrypted in sslmode:prefer + + # Consume some file descriptors and free them while the connection is established. + pipes = 100.times.map{ IO.pipe } + Thread.new do + pipes.reverse_each do |ios| + ios.each(&:close) + sleep 0.01 + end + end + + # Connect with SSL, but use a wrong client cert, so that connection is aborted. + # A second connection is then started with a new IO. + # And since the pipes above were freed in the concurrent thread above, there is a high chance that it's a lower file descriptor than before. + conn = PG.connect( @conninfo + " sslcert=tmp_test_specs/data/ruby-pg-ca-cert" ) + expect( conn.ssl_in_use? ).to be_falsey + + # The new connection should work even when the file descriptor has changed. + res = conn.exec("SELECT 1") + expect( res.values ).to eq([["1"]]) + + conn.close + end + it "can use conn.reset_start to restart the connection" do ios = IO.pipe conn = described_class.connect_start( @conninfo ) @@ -468,87 +638,55 @@ end it "rejects to send lots of COPY data" do - skip("takes around an hour to succeed on Windows") if RUBY_PLATFORM=~/mingw|mswin/ + unless RUBY_PLATFORM =~ /i386-mingw|x86_64-darwin|x86_64-linux/ + skip "this spec depends on out-of-memory condition in put_copy_data, which is not reliable on all platforms" + end - conn = described_class.new(@conninfo) - conn.setnonblocking(true) + run_with_gate(60) do |conn, gate| + conn.setnonblocking(true) - res = nil - begin - Timeout.timeout(60) do - conn.exec <<-EOSQL - CREATE TEMP TABLE copytable (col1 TEXT); - - CREATE OR REPLACE FUNCTION delay_input() RETURNS trigger AS $x$ - BEGIN - PERFORM pg_sleep(1); - RETURN NEW; - END; - $x$ LANGUAGE plpgsql; - - CREATE TRIGGER delay_input BEFORE INSERT ON copytable - FOR EACH ROW 
EXECUTE PROCEDURE delay_input(); - EOSQL - - conn.exec( "COPY copytable FROM STDOUT CSV" ) - - data = "x" * 1000 * 1000 - data << "\n" - 20000.times do - res = conn.put_copy_data(data) - break if res == false - end + res = nil + conn.exec <<-EOSQL + CREATE TEMP TABLE copytable (col1 TEXT); + EOSQL + + conn.exec( "COPY copytable FROM STDOUT CSV" ) + gate.stop + + data = "x" * 1000 * 1000 + data << "\n" + 20000.times do |idx| + res = conn.put_copy_data(data) + break if res == false end expect( res ).to be_falsey - rescue Timeout::Error - skip <<-EOT -Unfortunately this test is not reliable. - -It is timing dependent, since it assumes that the ruby process -sends data faster than the PostgreSQL server can process it. -This assumption is wrong in some environments. -EOT - ensure + + gate.start conn.cancel conn.discard_results - conn.finish end end it "needs to flush data after send_query" do - retries = 3 - begin - conn = described_class.new(@conninfo) + run_with_gate(60) do |conn, gate| conn.setnonblocking(true) - data = "x" * 1000 * 1000 * 100 + gate.stop + data = "x" * 1000 * 1000 * 30 res = conn.send_query_params("SELECT LENGTH($1)", [data]) expect( res ).to be_nil res = conn.flush expect( res ).to be_falsey - rescue RSpec::Expectations::ExpectationNotMetError - if (retries-=1) >= 0 - until conn.flush() - IO.select(nil, [conn.socket_io], nil, 10) - end - conn.get_last_result - conn.finish - retry - end - raise - ensure - - until conn.flush() - IO.select(nil, [conn.socket_io], nil, 10) + gate.start + until conn.flush + IO.select(nil, [conn.socket_io], [conn.socket_io], 10) end expect( conn.flush ).to be_truthy res = conn.get_last_result expect( res.values ).to eq( [[data.length.to_s]] ) - - conn.finish end end @@ -565,13 +703,13 @@ while @conn.get_copy_data end end - end.to raise_error(PG::QueryCanceled) + end.to raise_error(PG::QueryCanceled){|err| expect(err).to have_attributes(connection: @conn) } end end it "raises proper error when sending fails" do conn = 
described_class.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) - expect{ conn.exec 'SELECT 1' }.to raise_error(PG::UnableToSend, /no connection/) + expect{ conn.exec 'SELECT 1' }.to raise_error(PG::UnableToSend, /no connection/){|err| expect(err).to have_attributes(connection: conn) } end it "doesn't leave stale server connections after finish" do @@ -593,6 +731,35 @@ expect( @conn.options ).to eq( "" ) end + it "connects without port and then retrieves the default port" do + gate = Helpers::TcpGateSwitcher.new( + external_host: 'localhost', + external_port: ENV['PGPORT'].to_i, + internal_host: "127.0.0.1", + internal_port: PG::DEF_PGPORT, + debug: ENV['PG_DEBUG']=='1') + + PG.connect(host: "localhost", + port: "", + dbname: "test") do |conn| + expect( conn.port ).to eq( PG::DEF_PGPORT ) + end + + PG.connect(hostaddr: "127.0.0.1", + port: nil, + dbname: "test") do |conn| + expect( conn.port ).to eq( PG::DEF_PGPORT ) + end + + gate.finish + rescue Errno::EADDRINUSE => err + skip err.to_s + end + + it "can retrieve hostaddr for the established connection", :postgresql_12 do + expect( @conn.hostaddr ).to match( /^127\.0\.0\.1$|^::1$/ ) + end + it "can set error verbosity" do old = @conn.set_error_verbosity( PG::PQERRORS_TERSE ) new = @conn.set_error_verbosity( old ) @@ -685,97 +852,20 @@ @conn.cancel if notice =~ /foobar/ end @conn.send_query "do $$ BEGIN RAISE NOTICE 'foobar'; PERFORM pg_sleep(10); END; $$ LANGUAGE plpgsql;" - expect{ @conn.get_last_result }.to raise_error(PG::QueryCanceled) + expect{ @conn.get_last_result }.to raise_error(PG::QueryCanceled){|err| expect(err).to have_attributes(connection: @conn) } expect( Time.now - start ).to be < 9.9 end - def interrupt_thread(exc=nil) - start = Time.now - t = Thread.new do - begin - yield - rescue Exception => err - err - end - end - sleep 0.1 - - if exc - t.raise exc, "Stop the query by #{exc}" - else - t.kill - end - t.join - - [t, Time.now - start] - end - - it "can stop a thread that runs 
a blocking query with async_exec" do - t, duration = interrupt_thread do - @conn.async_exec( 'select pg_sleep(10)' ) - end - - expect( t.value ).to be_nil - expect( duration ).to be < 10 - @conn.cancel # Stop the query that is still running on the server - end - describe "#transaction" do - it "stops a thread that runs a blocking transaction with async_exec" do - t, duration = interrupt_thread(Interrupt) do - @conn.transaction do |c| - c.async_exec( 'select pg_sleep(10)' ) - end - end - - expect( t.value ).to be_kind_of( Interrupt ) - expect( duration ).to be < 10 - end - - it "stops a thread that runs a failing transaction with async_exec" do - t, duration = interrupt_thread(Interrupt) do - @conn.transaction do |c| - c.async_exec( 'select nonexist' ) - end - end - - expect( t.value ).to be_kind_of( PG::UndefinedColumn ) - expect( duration ).to be < 10 - end - - it "stops a thread that runs a no query but a transacted ruby sleep" do - t, duration = interrupt_thread(Interrupt) do - @conn.transaction do - sleep 10 - end - end - - expect( t.value ).to be_kind_of( Interrupt ) - expect( duration ).to be < 10 - end - - it "doesn't worry about an already finished connection" do - t, duration = interrupt_thread(Interrupt) do - @conn.transaction do - @conn.async_exec("ROLLBACK") - end - end - - expect( t.value ).to be_kind_of( PG::Result ) - expect( t.value.result_status ).to eq( PG::PGRES_COMMAND_OK ) - end - it "automatically rolls back a transaction if an exception is raised" do # abort the per-example transaction so we can test our own @conn.exec( 'ROLLBACK' ) - - res = nil @conn.exec( "CREATE TABLE pie ( flavor TEXT )" ) begin expect { - res = @conn.transaction do + @conn.transaction do @conn.exec( "INSERT INTO pie VALUES ('rhubarb'), ('cherry'), ('schizophrenia')" ) raise Exception, "Oh noes! All pie is gone!" 
end @@ -788,6 +878,31 @@ def interrupt_thread(exc=nil) end end + it "commits even if the block includes an early break/return" do + # abort the per-example transaction so we can test our own + @conn.exec( 'ROLLBACK' ) + @conn.exec( "CREATE TABLE pie ( flavor TEXT )" ) + + begin + @conn.transaction do + @conn.exec( "INSERT INTO pie VALUES ('rhubarb'), ('cherry'), ('schizophrenia')" ) + # a prior version would neither commit nor rollback when the block included an early break/return + break + end + + # if the previous transaction committed, the result should be visible from another conn/transaction + @conn2 = PG.connect(@conninfo) + begin + res = @conn2.exec( "SELECT * FROM pie" ) + expect( res.ntuples ).to eq( 3 ) + ensure + @conn2.close + end + ensure + @conn.exec( "DROP TABLE pie" ) + end + end + it "passes the connection to the block and returns the block result" do # abort the per-example transaction so we can test our own @conn.exec( 'ROLLBACK' ) @@ -800,20 +915,6 @@ def interrupt_thread(exc=nil) end end - it "should work together with signal handlers", :unix do - signal_received = false - trap 'USR2' do - signal_received = true - end - - Thread.new do - sleep 0.1 - Process.kill("USR2", Process.pid) - end - @conn.async_exec("select pg_sleep(0.3)") - expect( signal_received ).to be_truthy - end - it "not read past the end of a large object" do @conn.transaction do oid = @conn.lo_create( 0 ) @@ -867,7 +968,7 @@ def interrupt_thread(exc=nil) begin conn = described_class.connect( @conninfo ) sleep 0.1 - conn.async_exec( 'NOTIFY woo' ) + conn.exec( 'NOTIFY woo' ) ensure conn.finish end @@ -887,7 +988,7 @@ def interrupt_thread(exc=nil) begin conn = described_class.connect( @conninfo ) sleep 0.1 - conn.async_exec( 'NOTIFY woo' ) + conn.exec( 'NOTIFY woo' ) ensure conn.finish end @@ -1004,167 +1105,193 @@ def interrupt_thread(exc=nil) expect( rval ).to include( '5678', '1234' ) end - it "can process #copy_data output queries" do - rows = [] - res2 = @conn.copy_data( 
"COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do |res| - expect( res.result_status ).to eq( PG::PGRES_COPY_OUT ) - expect( res.nfields ).to eq( 1 ) - while row=@conn.get_copy_data - rows << row - end - end - expect( rows ).to eq( ["1\n", "2\n"] ) - expect( res2.result_status ).to eq( PG::PGRES_COMMAND_OK ) - expect( @conn ).to still_be_usable - end + it "correctly finishes COPY queries passed to #exec" do + @conn.exec( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) - it "can handle incomplete #copy_data output queries" do - expect { - @conn.copy_data( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do |res| - @conn.get_copy_data + results = [] + begin + data = @conn.get_copy_data( true ) + if false == data + @conn.block( 2.0 ) + data = @conn.get_copy_data( true ) end - }.to raise_error(PG::NotAllCopyDataRetrieved, /Not all/) - expect( @conn ).to still_be_usable - end + results << data if data + end until data.nil? - it "can handle client errors in #copy_data for output" do - expect { - @conn.copy_data( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do - raise "boom" - end - }.to raise_error(RuntimeError, "boom") - expect( @conn ).to still_be_usable + expect( results.size ).to eq( 2 ) + expect( results ).to include( "1\n", "2\n" ) end - it "can handle server errors in #copy_data for output" do - @conn.exec "ROLLBACK" - @conn.transaction do - @conn.exec( "CREATE FUNCTION errfunc() RETURNS int AS $$ BEGIN RAISE 'test-error'; END; $$ LANGUAGE plpgsql;" ) - expect { - @conn.copy_data( "COPY (SELECT errfunc()) TO STDOUT" ) do |res| - while @conn.get_copy_data - end - end - }.to raise_error(PG::Error, /test-error/) - end - expect( @conn ).to still_be_usable - end + it "#get_result should send remaining data before waiting" do + str = "abcd" * 2000 + "\n" + @conn.exec( "CREATE TEMP TABLE copytable2 (col1 TEXT)" ) + @conn.exec( "COPY copytable2 FROM STDOUT" ) - it "can process #copy_data input queries" do - @conn.exec( "CREATE TEMP TABLE copytable (col1 TEXT)" ) - 
res2 = @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| - expect( res.result_status ).to eq( PG::PGRES_COPY_IN ) - expect( res.nfields ).to eq( 1 ) - @conn.put_copy_data "1\n" - @conn.put_copy_data "2\n" + 1000.times do + @conn.sync_put_copy_data(str) end - expect( res2.result_status ).to eq( PG::PGRES_COMMAND_OK ) - - expect( @conn ).to still_be_usable - - res = @conn.exec( "SELECT * FROM copytable ORDER BY col1" ) - expect( res.values ).to eq( [["1"], ["2"]] ) + @conn.sync_put_copy_end + res = @conn.get_last_result + expect( res.result_status ).to eq( PG::PGRES_COMMAND_OK ) end - it "can process #copy_data input queries with lots of data" do - str = "abcd" * 2000 + "\n" - @conn.exec( "CREATE TEMP TABLE copytable2 (col1 TEXT)" ) - @conn.copy_data( "COPY copytable2 FROM STDOUT" ) do |res| - 1000.times do - @conn.put_copy_data(str) + describe "#copy_data" do + it "can process #copy_data output queries" do + rows = [] + res2 = @conn.copy_data( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do |res| + expect( res.result_status ).to eq( PG::PGRES_COPY_OUT ) + expect( res.nfields ).to eq( 1 ) + while row=@conn.get_copy_data + rows << row + end end + expect( rows ).to eq( ["1\n", "2\n"] ) + expect( res2.result_status ).to eq( PG::PGRES_COMMAND_OK ) + expect( @conn ).to still_be_usable end - expect( @conn ).to still_be_usable - res = @conn.exec( "SELECT COUNT(*) FROM copytable2" ) - expect( res.values ).to eq( [["1000"]] ) - res = @conn.exec( "SELECT * FROM copytable2 LIMIT 1" ) - expect( res.values ).to eq( [[str.chomp]] ) - end + it "can handle incomplete #copy_data output queries" do + expect { + @conn.copy_data( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do |res| + @conn.get_copy_data + end + }.to raise_error(PG::NotAllCopyDataRetrieved, /Not all/){|err| expect(err).to have_attributes(connection: @conn) } + expect( @conn ).to still_be_usable + end - it "can handle client errors in #copy_data for input" do - @conn.exec "ROLLBACK" - @conn.transaction do 
- @conn.exec( "CREATE TEMP TABLE copytable (col1 TEXT)" ) + it "can handle client errors in #copy_data for output" do expect { - @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| + @conn.copy_data( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do raise "boom" end }.to raise_error(RuntimeError, "boom") + expect( @conn ).to still_be_usable end - expect( @conn ).to still_be_usable - end - - it "can handle server errors in #copy_data for input" do - @conn.exec "ROLLBACK" - @conn.transaction do - @conn.exec( "CREATE TEMP TABLE copytable (col1 INT)" ) + it "can handle client errors after all data is consumed in #copy_data for output" do expect { - @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| - @conn.put_copy_data "xyz\n" + @conn.copy_data( "COPY (SELECT 1) TO STDOUT" ) do |res| + while @conn.get_copy_data + end + raise "boom" end - }.to raise_error(PG::Error, /invalid input syntax for .*integer/) + }.to raise_error(RuntimeError, "boom") + expect( @conn ).to still_be_usable end - expect( @conn ).to still_be_usable - end - it "gracefully handle SQL statements while in #copy_data for input" do - @conn.exec "ROLLBACK" - @conn.transaction do - @conn.exec( "CREATE TEMP TABLE copytable (col1 INT)" ) - expect { - @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| - @conn.exec "SELECT 1" - end - }.to raise_error(PG::Error, /no COPY in progress/) + it "can handle server errors in #copy_data for output" do + @conn.exec "ROLLBACK" + @conn.transaction do + @conn.exec( "CREATE FUNCTION errfunc() RETURNS int AS $$ BEGIN RAISE 'test-error'; END; $$ LANGUAGE plpgsql;" ) + expect { + @conn.copy_data( "COPY (SELECT errfunc()) TO STDOUT" ) do |res| + while @conn.get_copy_data + end + end + }.to raise_error(PG::Error, /test-error/){|err| expect(err).to have_attributes(connection: @conn) } + end + expect( @conn ).to still_be_usable end - expect( @conn ).to still_be_usable - end - it "gracefully handle SQL statements while in #copy_data for output" do - @conn.exec 
"ROLLBACK" - @conn.transaction do - expect { - @conn.copy_data( "COPY (VALUES(1), (2)) TO STDOUT" ) do |res| - @conn.exec "SELECT 3" + it "can process #copy_data input queries" do + @conn.exec( "CREATE TEMP TABLE copytable (col1 TEXT)" ) + res2 = @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| + expect( res.result_status ).to eq( PG::PGRES_COPY_IN ) + expect( res.nfields ).to eq( 1 ) + @conn.put_copy_data "1\n" + @conn.put_copy_data "2\n" + end + expect( res2.result_status ).to eq( PG::PGRES_COMMAND_OK ) + + expect( @conn ).to still_be_usable + + res = @conn.exec( "SELECT * FROM copytable ORDER BY col1" ) + expect( res.values ).to eq( [["1"], ["2"]] ) + end + + it "can process #copy_data input queries with lots of data" do + str = "abcd" * 2000 + "\n" + @conn.exec( "CREATE TEMP TABLE copytable2 (col1 TEXT)" ) + @conn.copy_data( "COPY copytable2 FROM STDOUT" ) do |res| + 1000.times do + @conn.put_copy_data(str) end - }.to raise_error(PG::Error, /no COPY in progress/) + end + expect( @conn ).to still_be_usable + + res = @conn.exec( "SELECT COUNT(*) FROM copytable2" ) + expect( res.values ).to eq( [["1000"]] ) + res = @conn.exec( "SELECT * FROM copytable2 LIMIT 1" ) + expect( res.values ).to eq( [[str.chomp]] ) end - expect( @conn ).to still_be_usable - end - it "should raise an error for non copy statements in #copy_data" do - expect { - @conn.copy_data( "SELECT 1" ){} - }.to raise_error(ArgumentError, /no COPY/) + it "can handle client errors in #copy_data for input" do + @conn.exec "ROLLBACK" + @conn.transaction do + @conn.exec( "CREATE TEMP TABLE copytable (col1 TEXT)" ) + expect { + @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| + raise "boom" + end + }.to raise_error(RuntimeError, "boom") + end - expect( @conn ).to still_be_usable - end + expect( @conn ).to still_be_usable + end - it "correctly finishes COPY queries passed to #async_exec" do - @conn.async_exec( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) + it "can handle server errors in 
#copy_data for input" do + @conn.exec "ROLLBACK" + @conn.transaction do + @conn.exec( "CREATE TEMP TABLE copytable (col1 INT)" ) + expect { + @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| + @conn.put_copy_data "xyz\n" + end + }.to raise_error(PG::Error, /invalid input syntax for .*integer/){|err| expect(err).to have_attributes(connection: @conn) } + end + expect( @conn ).to still_be_usable + end - results = [] - begin - data = @conn.get_copy_data( true ) - if false == data - @conn.block( 2.0 ) - data = @conn.get_copy_data( true ) + it "gracefully handle SQL statements while in #copy_data for input" do + @conn.exec "ROLLBACK" + @conn.transaction do + @conn.exec( "CREATE TEMP TABLE copytable (col1 INT)" ) + expect { + @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| + @conn.exec "SELECT 1" + end + }.to raise_error(PG::Error, /no COPY in progress/){|err| expect(err).to have_attributes(connection: @conn) } end - results << data if data - end until data.nil? + expect( @conn ).to still_be_usable + end - expect( results.size ).to eq( 2 ) - expect( results ).to include( "1\n", "2\n" ) - end + it "gracefully handle SQL statements while in #copy_data for output" do + @conn.exec "ROLLBACK" + @conn.transaction do + expect { + @conn.copy_data( "COPY (VALUES(1), (2)) TO STDOUT" ) do |res| + @conn.exec "SELECT 3" + end + }.to raise_error(PG::Error, /no COPY in progress/){|err| expect(err).to have_attributes(connection: @conn) } + end + expect( @conn ).to still_be_usable + end - it "#copy_data raises error in nonblocking mode" do - @conn.setnonblocking(true) - expect { - @conn.copy_data( "COPY copytable FROM STDOUT" ) - }.to raise_error(PG::NotInBlockingMode) - @conn.setnonblocking(false) + it "should raise an error for non copy statements in #copy_data" do + expect { + @conn.copy_data( "SELECT 1" ){} + }.to raise_error(ArgumentError, /no COPY/) + + expect( @conn ).to still_be_usable + end + + it "#copy_data raises error in nonblocking mode" do + 
@conn.setnonblocking(true) + expect { + @conn.copy_data( "COPY copytable FROM STDOUT" ) + }.to raise_error(PG::NotInBlockingMode){|err| expect(err).to have_attributes(connection: @conn) } + @conn.setnonblocking(false) + end end it "described_class#block shouldn't block a second thread" do @@ -1223,7 +1350,7 @@ def interrupt_thread(exc=nil) describe "connection information related to SSL" do it "can retrieve connection's ssl state", :postgresql_95 do - expect( @conn.ssl_in_use? ).to be false + expect( @conn.ssl_in_use? ).to be true end it "can retrieve connection's ssl attribute_names", :postgresql_95 do @@ -1232,6 +1359,8 @@ def interrupt_thread(exc=nil) it "can retrieve a single ssl connection attribute", :postgresql_95 do expect( @conn.ssl_attribute('dbname') ).to eq( nil ) + expect( @conn.ssl_attribute('protocol') ).to match( /^TLSv/ ) + expect( @conn.ssl_attribute('key_bits') ).to match( /^\d+$/ ) end it "can retrieve all connection's ssl attributes", :postgresql_95 do @@ -1376,11 +1505,18 @@ def interrupt_thread(exc=nil) expect( result ).to eq( { 'one' => '47' } ) end + it "carries the connection in case of connection errors" do + conn = PG.connect(@conninfo) + expect { + conn.exec("select pg_terminate_backend(pg_backend_pid());") + }.to raise_error(PG::Error, /connection has been closed|terminating connection/i){|err| expect(err).to have_attributes(connection: conn) } + end + it "raises a rescue-able error if #finish is called twice", :without_transaction do conn = PG.connect( @conninfo ) conn.finish - expect { conn.finish }.to raise_error( PG::ConnectionBad, /connection is closed/i ) + expect { conn.finish }.to raise_error( PG::ConnectionBad, /connection is closed/i ){|err| expect(err).to have_attributes(connection: conn) } end it "can use conn.reset to restart the connection" do @@ -1390,7 +1526,8 @@ def interrupt_thread(exc=nil) # Close the two pipe file descriptors, so that the file descriptor of # newly established connection is probably distinct from 
the previous one. ios.each(&:close) - conn.reset + res = conn.reset + expect( res ).to eq( conn ) # The new connection should work even when the file descriptor has changed. expect( conn.exec("SELECT 1").values ).to eq([["1"]]) @@ -1402,7 +1539,7 @@ def interrupt_thread(exc=nil) io = conn.socket_io conn.finish expect( io ).to be_closed() - expect { conn.socket_io }.to raise_error( PG::ConnectionBad, /connection is closed/i ) + expect { conn.socket_io }.to raise_error( PG::ConnectionBad, /connection is closed/i ){|err| expect(err).to have_attributes(connection: conn) } end it "closes the IO fetched from #socket_io when the connection is reset", :without_transaction do @@ -1414,15 +1551,15 @@ def interrupt_thread(exc=nil) conn.finish end - it "block should raise ConnectionBad for a closed connection" do + it "consume_input should raise ConnectionBad for a closed connection" do serv = TCPServer.new( '127.0.0.1', 54320 ) conn = described_class.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) while [PG::CONNECTION_STARTED, PG::CONNECTION_MADE].include?(conn.connect_poll) sleep 0.1 end serv.close - expect{ conn.block }.to raise_error(PG::ConnectionBad, /server closed the connection unexpectedly/) - expect{ conn.block }.to raise_error(PG::ConnectionBad, /can't get socket descriptor|connection not open/) + expect{ conn.consume_input }.to raise_error(PG::ConnectionBad, /server closed the connection unexpectedly/){|err| expect(err).to have_attributes(connection: conn) } + expect{ conn.consume_input }.to raise_error(PG::ConnectionBad, /can't get socket descriptor|connection not open/){|err| expect(err).to have_attributes(connection: conn) } end it "calls the block supplied to wait_for_notify with the notify payload if it accepts " + @@ -1590,7 +1727,7 @@ def interrupt_thread(exc=nil) it "raises an error when called at the wrong time" do expect { @conn.set_single_row_mode - }.to raise_error(PG::Error) + }.to raise_error(PG::Error){|err| expect(err).to 
have_attributes(connection: @conn) } end it "should work in single row mode" do @@ -1639,7 +1776,7 @@ def interrupt_thread(exc=nil) res.check first_result ||= res end - end.to raise_error(PG::Error) + end.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } expect( first_result.kind_of?(PG::Result) ).to be_truthy expect( first_result.result_status ).to eq( PG::PGRES_SINGLE_TUPLE ) end @@ -1665,10 +1802,10 @@ def interrupt_thread(exc=nil) end it "raises an error when called with pending results" do - @conn.send_query "select 1" + @conn.send_query_params "select 1", [] expect { @conn.enter_pipeline_mode - }.to raise_error(PG::Error) + }.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } @conn.get_last_result end end @@ -1681,10 +1818,10 @@ def interrupt_thread(exc=nil) it "raises an error when called with pending results" do @conn.enter_pipeline_mode - @conn.send_query "select 1" + @conn.send_query_params "select 1", [] expect { @conn.exit_pipeline_mode - }.to raise_error(PG::Error) + }.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } @conn.pipeline_sync @conn.get_last_result end @@ -1693,7 +1830,7 @@ def interrupt_thread(exc=nil) describe "pipeline_sync" do it "sends a sync message" do @conn.enter_pipeline_mode - @conn.send_query "select 6" + @conn.send_query_params "select 6", [] @conn.pipeline_sync expect( @conn.get_result.result_status ).to eq( PG::PGRES_TUPLES_OK ) expect( @conn.get_result ).to be_nil @@ -1706,14 +1843,14 @@ def interrupt_thread(exc=nil) it "raises an error when not in pipeline mode" do expect { @conn.pipeline_sync - }.to raise_error(PG::Error) + }.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } end end describe "send_flush_request" do it "flushs all results" do @conn.enter_pipeline_mode - @conn.send_query "select 1" + @conn.send_query_params "select 1", [] @conn.send_flush_request @conn.flush expect( 
@conn.get_result.result_status ).to eq( PG::PGRES_TUPLES_OK ) @@ -1722,17 +1859,17 @@ def interrupt_thread(exc=nil) end it "raises an error when called with pending results" do - @conn.send_query "select 1" + @conn.send_query_params "select 1", [] expect { @conn.send_flush_request - }.to raise_error(PG::Error) + }.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } end end describe "get_last_result" do it "delivers PGRES_PIPELINE_SYNC" do @conn.enter_pipeline_mode - @conn.send_query "select 6" + @conn.send_query_params "select 6", [] @conn.pipeline_sync expect( @conn.get_last_result.values ).to eq( [["6"]] ) expect( @conn.get_last_result.result_status ).to eq( PG::PGRES_PIPELINE_SYNC ) @@ -1741,8 +1878,8 @@ def interrupt_thread(exc=nil) it "raises an error for PGRES_PIPELINE_ABORT" do @conn.enter_pipeline_mode - @conn.send_query("garbage") - @conn.send_query("SELECT 7") + @conn.send_query_params("garbage", []) + @conn.send_query_params("SELECT 7", []) @conn.pipeline_sync begin @conn.get_last_result @@ -1805,10 +1942,10 @@ def interrupt_thread(exc=nil) expect( @conn.internal_encoding ).to eq( Encoding::ASCII_8BIT ) end - it "the connection should use JOHAB dummy encoding when it's set to JOHAB" do + it "the connection should use the BINARY encoding when it's set to JOHAB" do @conn.set_client_encoding "JOHAB" val = @conn.exec("SELECT chr(x'3391'::int)").values[0][0] - expect( val.encoding.name ).to eq( "JOHAB" ) + expect( val.encoding ).to eq( Encoding::BINARY ) expect( val.unpack("H*")[0] ).to eq( "dc65" ) end @@ -1870,25 +2007,11 @@ def interrupt_thread(exc=nil) end it "raises appropriate error if set_client_encoding is called with invalid arguments" do - expect { @conn.set_client_encoding( "invalid" ) }.to raise_error(PG::Error, /invalid value/) + expect { @conn.set_client_encoding( "invalid" ) }.to raise_error(PG::Error, /invalid value/){|err| expect(err).to have_attributes(connection: @conn) } expect { @conn.set_client_encoding( 
:invalid ) }.to raise_error(TypeError) expect { @conn.set_client_encoding( nil ) }.to raise_error(TypeError) end - it "can use an encoding with high index for client encoding" do - # Allocate a lot of encoding indices, so that MRI's ENCODING_INLINE_MAX is exceeded - unless Encoding.name_list.include?("pgtest-0") - 256.times do |eidx| - Encoding::UTF_8.replicate("pgtest-#{eidx}") - end - end - - # Now allocate the JOHAB encoding with an unusual high index - @conn.set_client_encoding "JOHAB" - val = @conn.exec("SELECT chr(x'3391'::int)").values[0][0] - expect( val.encoding.name ).to eq( "JOHAB" ) - end - end describe "respect and convert character encoding of input strings" do @@ -2166,34 +2289,6 @@ def interrupt_thread(exc=nil) end end - context "OS thread support" do - it "Connection#exec shouldn't block a second thread" do - t = Thread.new do - @conn.async_exec( "select pg_sleep(1)" ) - end - - sleep 0.1 - expect( t ).to be_alive() - t.kill - @conn.cancel - end - - it "Connection.new shouldn't block a second thread" do - serv = nil - t = Thread.new do - serv = TCPServer.new( '127.0.0.1', 54320 ) - expect { - described_class.async_connect( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) - }.to raise_error(PG::ConnectionBad, /server closed the connection unexpectedly/) - end - - sleep 0.5 - expect( t ).to be_alive() - serv.close - t.join - end - end - describe "type casting" do it "should raise an error on invalid param mapping" do expect{ @@ -2210,7 +2305,7 @@ def interrupt_thread(exc=nil) if @conn.server_version < 100000 expect{ @conn.exec_params( "SELECT $1", [5] ) - }.to raise_error(PG::IndeterminateDatatype) + }.to raise_error(PG::IndeterminateDatatype){|err| expect(err).to have_attributes(connection: @conn) } else # PostgreSQL-10 maps to TEXT type (OID 25) expect( @conn.exec_params( "SELECT $1", [5] ).ftype(0)).to eq(25) diff --git a/spec/pg/connection_sync_spec.rb b/spec/pg/connection_sync_spec.rb index 3911ae920..25f819618 100644 --- 
a/spec/pg/connection_sync_spec.rb +++ b/spec/pg/connection_sync_spec.rb @@ -7,7 +7,7 @@ before :all do @conn.finish PG::Connection.async_api = false - @conn = connect_testing_db + @conn = $pg_server.connect end after :all do @@ -31,6 +31,8 @@ expect( Time.now - start ).to be < 0.9 @conn.cancel + ensure + PG::Connection.async_api = false end it "disables async methods by #async_api" do diff --git a/spec/pg/gc_compact_spec.rb b/spec/pg/gc_compact_spec.rb index 9000a5e2f..d480a1dcf 100644 --- a/spec/pg/gc_compact_spec.rb +++ b/spec/pg/gc_compact_spec.rb @@ -51,11 +51,15 @@ def conv_array(value) CPYENC = PG::TextEncoder::CopyRow.new type_map: TM3 RECENC = PG::TextEncoder::Record.new type_map: TM3 - # Use GC.verify_compaction_references instead of GC.compact . - # This has the advantage that all movable objects are actually moved. - # The downside is that it doubles the heap space of the Ruby process. - # Therefore we call it only once and do several tests afterwards. - GC.verify_compaction_references(toward: :empty, double_heap: true) + begin + # Use GC.verify_compaction_references instead of GC.compact . + # This has the advantage that all movable objects are actually moved. + # The downside is that it doubles the heap space of the Ruby process. + # Therefore we call it only once and do several tests afterwards. 
+ GC.verify_compaction_references(toward: :empty, double_heap: true) + rescue NotImplementedError, NoMethodError => err + skip("GC.compact skipped: #{err}") + end end it "should compact PG::TypeMapByClass #328" do diff --git a/spec/pg/result_spec.rb b/spec/pg/result_spec.rb index 851feec21..63abad84c 100644 --- a/spec/pg/result_spec.rb +++ b/spec/pg/result_spec.rb @@ -210,6 +210,16 @@ expect( @conn.get_result ).to be_nil end + it "can handle commands not returning tuples" do + @conn.send_query( "CREATE TEMP TABLE test_single_row_mode (a int)" ) + @conn.set_single_row_mode + res1 = @conn.get_result + res2 = res1.stream_each_tuple { raise "this shouldn't be called" } + expect( res2 ).to be_equal( res1 ) + expect( @conn.get_result ).to be_nil + @conn.exec( "DROP TABLE test_single_row_mode" ) + end + it "complains when not in single row mode" do @conn.send_query( "SELECT generate_series(2,4)" ) expect{ @@ -231,6 +241,17 @@ @conn.get_result.stream_each_row.to_a }.to raise_error(PG::DivisionByZero) end + + it "raises an error if result number of rows change" do + @conn.send_query( "SELECT 1" ) + @conn.set_single_row_mode + expect{ + @conn.get_result.stream_each_row do + @conn.discard_results + @conn.send_query("SELECT 2,3"); + end + }.to raise_error(PG::InvalidChangeOfResultFields, /from 1 to 2 /) + end end it "inserts nil AS NULL and return NULL as nil" do diff --git a/spec/pg/scheduler_spec.rb b/spec/pg/scheduler_spec.rb index 6fd0357c4..749eb35a1 100644 --- a/spec/pg/scheduler_spec.rb +++ b/spec/pg/scheduler_spec.rb @@ -7,56 +7,6 @@ context "with a Fiber scheduler", :scheduler do - def setup - # Run examples with gated scheduler - sched = Helpers::TcpGateScheduler.new(external_host: 'localhost', external_port: ENV['PGPORT'].to_i, debug: ENV['PG_DEBUG']=='1') - Fiber.set_scheduler(sched) - @conninfo_gate = @conninfo.gsub(/(^| )port=\d+/, " port=#{sched.internal_port}") - - # Run examples with default scheduler - #Fiber.set_scheduler(Helpers::Scheduler.new) - 
#@conninfo_gate = @conninfo - - # Run examples without scheduler - #def Fiber.schedule; yield; end - #@conninfo_gate = @conninfo - end - - def teardown - Fiber.set_scheduler(nil) - end - - def stop_scheduler - if Fiber.scheduler && Fiber.scheduler.respond_to?(:finish) - Fiber.scheduler.finish - end - end - - def thread_with_timeout(timeout) - th = Thread.new do - yield - end - unless th.join(timeout) - th.kill - $scheduler_timeout = true - raise("scheduler timeout in:\n#{th.backtrace.join("\n")}") - end - end - - def run_with_scheduler(timeout=10) - thread_with_timeout(timeout) do - setup - Fiber.schedule do - conn = PG.connect(@conninfo_gate) - - yield conn - - conn.finish - stop_scheduler - end - end - end - it "connects to a server" do run_with_scheduler do |conn| res = conn.exec_params("SELECT 7", []) @@ -109,12 +59,25 @@ def run_with_scheduler(timeout=10) end end + it "connects without host but using environment variables", :postgresql_12, :unix_socket do + run_with_scheduler do + vars = PG::Connection.conninfo_parse(@conninfo_gate).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] } + + tmpconn = with_env_vars(PGHOST: "scheduler-localhost", PGPORT: vars[:port], PGDATABASE: vars[:dbname], PGSSLMODE: vars[:sslmode]) do + PG.connect + end + expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) + expect( tmpconn.host ).to eq( "scheduler-localhost" ) + tmpconn.finish + end + end + it "can connect with DNS lookup", :scheduler_address_resolve do run_with_scheduler do conninfo = @conninfo_gate.gsub(/(^| )host=\w+/, " host=scheduler-localhost") conn = PG.connect(conninfo) opt = conn.conninfo.find { |info| info[:keyword] == 'host' } - expect( opt[:val] ).to eq( 'scheduler-localhost' ) + expect( opt[:val] ).to start_with( 'scheduler-localhost' ) conn.finish end end @@ -158,6 +121,18 @@ def run_with_scheduler(timeout=10) end end + it "can use stream_each_* methods" do + run_with_scheduler do |conn| + conn.send_query( "SELECT generate_series(0,999);" )
+ conn.set_single_row_mode + + res = conn.get_result + rows = res.stream_each_row.to_a + + expect( rows ).to eq( (0..999).map{ [_1.to_s] } ) + end + end + it "can receive COPY data" do run_with_scheduler do |conn| rows = [] diff --git a/spec/pg_spec.rb b/spec/pg_spec.rb index 8e3f25120..3c9d67ca2 100644 --- a/spec/pg_spec.rb +++ b/spec/pg_spec.rb @@ -53,6 +53,10 @@ ]) end + it "can be used to raise errors without text" do + expect{ raise PG::InvalidTextRepresentation }.to raise_error(PG::InvalidTextRepresentation) + end + it "tells about the libpq library path" do expect( PG::POSTGRESQL_LIB_PATH ).to include("/") end @@ -62,4 +66,14 @@ expect( c ).to be_a_kind_of( PG::Connection ) c.close end + + it "can #connect with block" do + bres = PG.connect(@conninfo) do |c| + res = c.exec "SELECT 5" + expect( res.values ).to eq( [["5"]] ) + 55 + end + + expect( bres ).to eq( 55 ) + end end