PostgreSQL Database Management System
=====================================
-
+
This directory contains the source code distribution of the PostgreSQL
database management system.
dnl @synopsis AC_FUNC_ACCEPT_ARGTYPES
dnl
dnl Checks the data types of the three arguments to accept(). Results are
-dnl placed into the symbols ACCEPT_TYPE_RETURN and ACCEPT_TYPE_ARG[123],
+dnl placed into the symbols ACCEPT_TYPE_RETURN and ACCEPT_TYPE_ARG[123],
dnl consistent with the following example:
dnl
dnl #define ACCEPT_TYPE_RETURN int
# which is *not* 'socklen_t *'). If we detect that, then we assume
# 'int' as the result, because that ought to work best.
#
-# On Win32, accept() returns 'unsigned int PASCAL'
+# On Win32, accept() returns 'unsigned int PASCAL'
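For reference, a minimal C sketch of how the detected symbols can be used; the function and variable names are illustrative only, and the ACCEPT_TYPE_* macros are assumed to have already been defined by configure (e.g. via pg_config.h):

#include <sys/types.h>
#include <sys/socket.h>

static int
server_accept(int sock)
{
    struct sockaddr_storage addr;
    ACCEPT_TYPE_ARG3 addrlen = sizeof(addr);
    ACCEPT_TYPE_RETURN rc;

    /* the casts show where each detected type slots into the call */
    rc = accept((ACCEPT_TYPE_ARG1) sock, (ACCEPT_TYPE_ARG2) &addr, &addrlen);
    return (int) rc;
}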
AC_DEFUN([AC_FUNC_ACCEPT_ARGTYPES],
[AC_MSG_CHECKING([types of arguments for accept()])
dnl that by making the help string look the same, which is why we need to
dnl save the default that was passed in previously.
m4_define([_pgac_helpdefault], m4_ifdef([pgac_defined_$1_$2_bool], [m4_defn([pgac_defined_$1_$2_bool])], [$3]))dnl
-PGAC_ARG([$1], [$2], [m4_if(_pgac_helpdefault, yes, -)], [$4], [$5], [$6],
+PGAC_ARG([$1], [$2], [m4_if(_pgac_helpdefault, yes, -)], [$4], [$5], [$6],
[AC_MSG_ERROR([no argument expected for --$1-$2 option])],
[m4_case([$3],
yes, [pgac_arg_to_variable([$1], [$2])=yes
#
PGAC_ARG_BOOL(enable, dtrace, no,
[build with DTrace support],
-[AC_DEFINE([ENABLE_DTRACE], 1,
+[AC_DEFINE([ENABLE_DTRACE], 1,
[Define to 1 to enable DTrace support. (--enable-dtrace)])
AC_CHECK_PROGS(DTRACE, dtrace)
if test -z "$DTRACE"; then
can set it bigger if you need bigger tuples (although TOAST should
reduce the need to have large tuples, since fields can be spread
across multiple tuples).
-
+
BLCKSZ must be a power of 2. The maximum possible value of BLCKSZ
is currently 2^15 (32768). This is determined by the 15-bit widths
of the lp_off and lp_len fields in ItemIdData (see
include/storage/itemid.h).
-
+
Changing BLCKSZ requires an initdb.
-])
+])
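The constraints above can be written down as a preprocessor guard. This is only a sketch of the documented rule, not the check actually used in the PostgreSQL headers:

/* BLCKSZ must be a power of 2 and no larger than 2^15 (32768),
 * because lp_off and lp_len in ItemIdData are 15 bits wide. */
#if BLCKSZ > 32768
#error "BLCKSZ must not exceed 32768"
#endif
#if (BLCKSZ & (BLCKSZ - 1)) != 0
#error "BLCKSZ must be a power of 2"
#endif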
#
# Relation segment size
RELSEG_SIZE is the maximum number of blocks allowed in one disk file.
Thus, the maximum size of a single file is RELSEG_SIZE * BLCKSZ;
relations bigger than that are divided into multiple files.
-
+
RELSEG_SIZE * BLCKSZ must be less than your OS' limit on file size.
This is often 2 GB or 4GB in a 32-bit operating system, unless you
have large file support enabled. By default, we make the limit 1 GB
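Worked numbers, assuming the stock defaults of BLCKSZ = 8192 and RELSEG_SIZE = 131072 (assumptions; check the generated headers for the values in your build):

#define BLCKSZ      8192        /* assumed default block size */
#define RELSEG_SIZE 131072      /* assumed default blocks per segment */

/* largest possible segment file:
 *   RELSEG_SIZE * BLCKSZ = 131072 * 8192 = 1073741824 bytes = 1 GB
 */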
buffers, else direct I/O may fail.
Changing XLOG_BLCKSZ requires an initdb.
-])
+])
#
# WAL segment size
# enable profiling if --enable-profiling
if test "$enable_profiling" = yes && test "$ac_cv_prog_cc_g" = yes; then
if test "$GCC" = yes; then
- AC_DEFINE([PROFILE_PID_DIR], 1,
+ AC_DEFINE([PROFILE_PID_DIR], 1,
[Define to 1 to allow profiling output to be saved separately for each process.])
CFLAGS="$CFLAGS -pg $PLATFORM_PROFILE_FLAGS"
else
AC_MSG_CHECKING(for krb5_free_unparsed_name)
AC_TRY_LINK([#include <krb5.h>],
[krb5_free_unparsed_name(NULL,NULL);],
- [AC_DEFINE(HAVE_KRB5_FREE_UNPARSED_NAME, 1, [Define to 1 if you have krb5_free_unparsed_name])
+ [AC_DEFINE(HAVE_KRB5_FREE_UNPARSED_NAME, 1, [Define to 1 if you have krb5_free_unparsed_name])
AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)])
fi
AC_CHECK_SIZEOF([off_t])
# If we don't have largefile support, can't handle segsize >= 2GB.
-if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then
- AC_MSG_ERROR([Large file support is not enabled. Segment size cannot be larger than 1GB.])
+if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then
+ AC_MSG_ERROR([Large file support is not enabled. Segment size cannot be larger than 1GB.])
fi
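A standalone C sketch of what this test is getting at: with a 4-byte off_t the file API cannot address offsets beyond 2 GB, so segment sizes above 1 GB are refused. This is an illustration, not the configure probe itself:

#include <sys/types.h>
#include <stdio.h>

int
main(void)
{
    /* 8 bytes here means large offsets are addressable and segsize > 1 is OK */
    printf("sizeof(off_t) = %zu bytes\n", sizeof(off_t));
    return 0;
}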
#
# To properly translate all NLS languages strings, we must support the
# *printf() %$ format, which allows *printf() arguments to be selected
- # by position in the translated string.
- #
+ # by position in the translated string.
+ #
# libintl versions < 0.13 use the native *printf() functions, and Win32
# *printf() doesn't understand %$, so we must use our /port versions,
# which do understand %$. libintl versions >= 0.13 include their own
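A minimal C illustration of the %n$ notation being discussed (a POSIX extension that native Win32 *printf() lacks); the strings and values are made up:

#include <stdio.h>

int
main(void)
{
    int         ntables = 3;
    const char *owner = "alice";

    /* %1$d and %2$s select arguments by position, so a translated format
     * string can reorder them without changing the call site */
    printf("%2$s owns %1$d tables\n", ntables, owner);
    return 0;
}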
AC_CHECK_SIZEOF([long])
# Decide whether float4 is passed by value: user-selectable, enabled by default
-AC_MSG_CHECKING([whether to build with float4 passed by value])
+AC_MSG_CHECKING([whether to build with float4 passed by value])
PGAC_ARG_BOOL(enable, float4-byval, yes, [disable float4 passed by value],
[AC_DEFINE([USE_FLOAT4_BYVAL], 1,
[Define to 1 if you want float4 values to be passed by value. (--enable-float4-byval)])
if test "$PORTNAME" = "win32"; then
AC_CONFIG_COMMANDS([check_win32_symlinks],[
-# Links sometimes fail undetected on Mingw -
+# Links sometimes fail undetected on Mingw -
# so here we detect it and warn the user
for FILE in $CONFIG_LINKS
do
lo -
Large Object maintenance
ltree -
Tree-like data structures
-oid2name -
+oid2name -
Maps numeric files to table names
Functions to get information about SSL certificates
-start-scripts -
+start-scripts -
Scripts for starting the server at boot time on various platforms.
tablefunc -
# contrib/btree_gin/Makefile
MODULE_big = btree_gin
-OBJS = btree_gin.o
+OBJS = btree_gin.o
DATA_built = btree_gin.sql
DATA = uninstall_btree_gin.sql
CREATE TABLE test_cidr (
i cidr
);
-INSERT INTO test_cidr VALUES
+INSERT INTO test_cidr VALUES
( '1.2.3.4' ),
( '1.2.4.4' ),
( '1.2.5.4' ),
CREATE TABLE test_date (
i date
);
-INSERT INTO test_date VALUES
+INSERT INTO test_date VALUES
( '2004-10-23' ),
( '2004-10-24' ),
( '2004-10-25' ),
CREATE TABLE test_inet (
i inet
);
-INSERT INTO test_inet VALUES
+INSERT INTO test_inet VALUES
( '1.2.3.4/16' ),
( '1.2.4.4/16' ),
( '1.2.5.4/16' ),
CREATE TABLE test_interval (
i interval
);
-INSERT INTO test_interval VALUES
+INSERT INTO test_interval VALUES
( '03:55:08' ),
( '04:55:08' ),
( '05:55:08' ),
CREATE TABLE test_macaddr (
i macaddr
);
-INSERT INTO test_macaddr VALUES
+INSERT INTO test_macaddr VALUES
( '22:00:5c:03:55:08' ),
( '22:00:5c:04:55:08' ),
( '22:00:5c:05:55:08' ),
CREATE TABLE test_time (
i time
);
-INSERT INTO test_time VALUES
+INSERT INTO test_time VALUES
( '03:55:08' ),
( '04:55:08' ),
( '05:55:08' ),
CREATE TABLE test_timestamp (
i timestamp
);
-INSERT INTO test_timestamp VALUES
+INSERT INTO test_timestamp VALUES
( '2004-10-26 03:55:08' ),
( '2004-10-26 04:55:08' ),
( '2004-10-26 05:55:08' ),
CREATE TABLE test_timestamptz (
i timestamptz
);
-INSERT INTO test_timestamptz VALUES
+INSERT INTO test_timestamptz VALUES
( '2004-10-26 03:55:08' ),
( '2004-10-26 04:55:08' ),
( '2004-10-26 05:55:08' ),
CREATE TABLE test_timetz (
i timetz
);
-INSERT INTO test_timetz VALUES
+INSERT INTO test_timetz VALUES
( '03:55:08 GMT+2' ),
( '04:55:08 GMT+2' ),
( '05:55:08 GMT+2' ),
i cidr
);
-INSERT INTO test_cidr VALUES
+INSERT INTO test_cidr VALUES
( '1.2.3.4' ),
( '1.2.4.4' ),
( '1.2.5.4' ),
i date
);
-INSERT INTO test_date VALUES
+INSERT INTO test_date VALUES
( '2004-10-23' ),
( '2004-10-24' ),
( '2004-10-25' ),
i inet
);
-INSERT INTO test_inet VALUES
+INSERT INTO test_inet VALUES
( '1.2.3.4/16' ),
( '1.2.4.4/16' ),
( '1.2.5.4/16' ),
i interval
);
-INSERT INTO test_interval VALUES
+INSERT INTO test_interval VALUES
( '03:55:08' ),
( '04:55:08' ),
( '05:55:08' ),
i macaddr
);
-INSERT INTO test_macaddr VALUES
+INSERT INTO test_macaddr VALUES
( '22:00:5c:03:55:08' ),
( '22:00:5c:04:55:08' ),
( '22:00:5c:05:55:08' ),
i time
);
-INSERT INTO test_time VALUES
+INSERT INTO test_time VALUES
( '03:55:08' ),
( '04:55:08' ),
( '05:55:08' ),
i timestamp
);
-INSERT INTO test_timestamp VALUES
+INSERT INTO test_timestamp VALUES
( '2004-10-26 03:55:08' ),
( '2004-10-26 04:55:08' ),
( '2004-10-26 05:55:08' ),
i timestamptz
);
-INSERT INTO test_timestamptz VALUES
+INSERT INTO test_timestamptz VALUES
( '2004-10-26 03:55:08' ),
( '2004-10-26 04:55:08' ),
( '2004-10-26 05:55:08' ),
i timetz
);
-INSERT INTO test_timetz VALUES
+INSERT INTO test_timetz VALUES
( '03:55:08 GMT+2' ),
( '04:55:08 GMT+2' ),
( '05:55:08 GMT+2' ),
-- Create the operator class
CREATE OPERATOR CLASS gist_oid_ops
-DEFAULT FOR TYPE oid USING gist
+DEFAULT FOR TYPE oid USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_int2_ops
-DEFAULT FOR TYPE int2 USING gist
+DEFAULT FOR TYPE int2 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_int4_ops
-DEFAULT FOR TYPE int4 USING gist
+DEFAULT FOR TYPE int4 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_int8_ops
-DEFAULT FOR TYPE int8 USING gist
+DEFAULT FOR TYPE int8 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_float4_ops
-DEFAULT FOR TYPE float4 USING gist
+DEFAULT FOR TYPE float4 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_float8_ops
-DEFAULT FOR TYPE float8 USING gist
+DEFAULT FOR TYPE float8 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
--
--
-- timestamp ops
---
+--
--
--
RETURNS bool
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_ts_compress(internal)
RETURNS internal
AS 'MODULE_PATHNAME'
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_ts_picksplit(internal, internal)
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_ts_union(bytea, internal)
RETURNS gbtreekey16
AS 'MODULE_PATHNAME'
-- Create the operator class
CREATE OPERATOR CLASS gist_timestamp_ops
-DEFAULT FOR TYPE timestamp USING gist
+DEFAULT FOR TYPE timestamp USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_timestamptz_ops
-DEFAULT FOR TYPE timestamptz USING gist
+DEFAULT FOR TYPE timestamptz USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
--
--
-- time ops
---
+--
--
--
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_time_picksplit(internal, internal)
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_time_union(bytea, internal)
RETURNS gbtreekey16
AS 'MODULE_PATHNAME'
-- Create the operator class
CREATE OPERATOR CLASS gist_time_ops
-DEFAULT FOR TYPE time USING gist
+DEFAULT FOR TYPE time USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
STORAGE gbtreekey16;
CREATE OPERATOR CLASS gist_timetz_ops
-DEFAULT FOR TYPE timetz USING gist
+DEFAULT FOR TYPE timetz USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
--
--
-- date ops
---
+--
--
--
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_date_picksplit(internal, internal)
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_date_union(bytea, internal)
RETURNS gbtreekey8
AS 'MODULE_PATHNAME'
-- Create the operator class
CREATE OPERATOR CLASS gist_date_ops
-DEFAULT FOR TYPE date USING gist
+DEFAULT FOR TYPE date USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
--
--
-- interval ops
---
+--
--
--
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_intv_picksplit(internal, internal)
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_intv_union(bytea, internal)
RETURNS gbtreekey32
AS 'MODULE_PATHNAME'
-- Create the operator class
CREATE OPERATOR CLASS gist_interval_ops
-DEFAULT FOR TYPE interval USING gist
+DEFAULT FOR TYPE interval USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_cash_ops
-DEFAULT FOR TYPE money USING gist
+DEFAULT FOR TYPE money USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_macaddr_ops
-DEFAULT FOR TYPE macaddr USING gist
+DEFAULT FOR TYPE macaddr USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_text_ops
-DEFAULT FOR TYPE text USING gist
+DEFAULT FOR TYPE text USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_text_penalty (internal, internal, internal),
FUNCTION 6 gbt_text_picksplit (internal, internal),
FUNCTION 7 gbt_text_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
---- Create the operator class
CREATE OPERATOR CLASS gist_bpchar_ops
-DEFAULT FOR TYPE bpchar USING gist
+DEFAULT FOR TYPE bpchar USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_text_penalty (internal, internal, internal),
FUNCTION 6 gbt_text_picksplit (internal, internal),
FUNCTION 7 gbt_text_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
-- Create the operator class
CREATE OPERATOR CLASS gist_bytea_ops
-DEFAULT FOR TYPE bytea USING gist
+DEFAULT FOR TYPE bytea USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_bytea_penalty (internal, internal, internal),
FUNCTION 6 gbt_bytea_picksplit (internal, internal),
FUNCTION 7 gbt_bytea_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
--
-- Create the operator class
CREATE OPERATOR CLASS gist_numeric_ops
-DEFAULT FOR TYPE numeric USING gist
+DEFAULT FOR TYPE numeric USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_numeric_penalty (internal, internal, internal),
FUNCTION 6 gbt_numeric_picksplit (internal, internal),
FUNCTION 7 gbt_numeric_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
--
--
-- Create the operator class
CREATE OPERATOR CLASS gist_bit_ops
-DEFAULT FOR TYPE bit USING gist
+DEFAULT FOR TYPE bit USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_bit_penalty (internal, internal, internal),
FUNCTION 6 gbt_bit_picksplit (internal, internal),
FUNCTION 7 gbt_bit_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
-- Create the operator class
CREATE OPERATOR CLASS gist_vbit_ops
-DEFAULT FOR TYPE varbit USING gist
+DEFAULT FOR TYPE varbit USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_bit_penalty (internal, internal, internal),
FUNCTION 6 gbt_bit_picksplit (internal, internal),
FUNCTION 7 gbt_bit_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
-- Create the operator class
CREATE OPERATOR CLASS gist_inet_ops
-DEFAULT FOR TYPE inet USING gist
+DEFAULT FOR TYPE inet USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_cidr_ops
-DEFAULT FOR TYPE cidr USING gist
+DEFAULT FOR TYPE cidr USING gist
AS
OPERATOR 1 < (inet, inet) ,
OPERATOR 2 <= (inet, inet) ,
OPERATOR 3 = (inet, inet) ,
OPERATOR 4 >= (inet, inet) ,
OPERATOR 5 > (inet, inet) ,
- OPERATOR 6 <> (inet, inet) ,
+ OPERATOR 6 <> (inet, inet) ,
FUNCTION 1 gbt_inet_consistent (internal, inet, int2, oid, internal),
FUNCTION 2 gbt_inet_union (bytea, internal),
FUNCTION 3 gbt_inet_compress (internal),
DROP FUNCTION gbt_intv_same(internal, internal, internal);
DROP FUNCTION gbt_intv_union(bytea, internal);
-
+
DROP FUNCTION gbt_intv_picksplit(internal, internal);
-
+
DROP FUNCTION gbt_intv_penalty(internal,internal,internal);
DROP FUNCTION gbt_intv_decompress(internal);
DROP FUNCTION gbt_date_same(internal, internal, internal);
DROP FUNCTION gbt_date_union(bytea, internal);
-
+
DROP FUNCTION gbt_date_picksplit(internal, internal);
-
+
DROP FUNCTION gbt_date_penalty(internal,internal,internal);
DROP FUNCTION gbt_date_compress(internal);
DROP FUNCTION gbt_time_same(internal, internal, internal);
DROP FUNCTION gbt_time_union(bytea, internal);
-
+
DROP FUNCTION gbt_time_picksplit(internal, internal);
-
+
DROP FUNCTION gbt_time_penalty(internal,internal,internal);
DROP FUNCTION gbt_timetz_compress(internal);
DROP FUNCTION gbt_ts_same(internal, internal, internal);
DROP FUNCTION gbt_ts_union(bytea, internal);
-
+
DROP FUNCTION gbt_ts_picksplit(internal, internal);
-
+
DROP FUNCTION gbt_ts_penalty(internal,internal,internal);
DROP FUNCTION gbt_tstz_compress(internal);
DROP FUNCTION gbt_ts_compress(internal);
-
+
DROP FUNCTION gbt_tstz_consistent(internal,timestamptz,int2,oid,internal);
DROP FUNCTION gbt_ts_consistent(internal,timestamp,int2,oid,internal);
);
--
--- Matching citext to text.
+-- Matching citext to text.
--
CREATE OR REPLACE FUNCTION texticlike(citext, text)
bpchar bpchar,
char char,
chr "char",
- name name,
+ name name,
bytea bytea,
boolean boolean,
float4 float4,
int8 int8,
int4 int4,
int2 int2,
- cidr cidr,
+ cidr cidr,
inet inet,
macaddr macaddr,
money money,
bpchar bpchar,
char char,
chr "char",
- name name,
+ name name,
bytea bytea,
boolean boolean,
float4 float4,
int8 int8,
int4 int4,
int2 int2,
- cidr cidr,
+ cidr cidr,
inet inet,
macaddr macaddr,
money money,
bpchar bpchar,
char char,
chr "char",
- name name,
+ name name,
bytea bytea,
boolean boolean,
float4 float4,
int8 int8,
int4 int4,
int2 int2,
- cidr cidr,
+ cidr cidr,
inet inet,
macaddr macaddr,
money money,
Update the calling convention for all external facing functions. By external
facing, I mean all functions that are directly referenced in cube.sql. Prior
-to my update, all functions used the older V0 calling convention. They now
+to my update, all functions used the older V0 calling convention. They now
use V1.
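For context, a generic sketch of what the V1 convention looks like; the function below is hypothetical and not one of the cube functions:

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;                    /* one of these per loadable module */

/* Under V1, functions are registered with PG_FUNCTION_INFO_V1 and receive
 * a FunctionCallInfo; arguments are fetched with PG_GETARG_* macros rather
 * than being passed directly as in the old V0 convention. */
PG_FUNCTION_INFO_V1(demo_add_one);

Datum
demo_add_one(PG_FUNCTION_ARGS)
{
    int32 arg = PG_GETARG_INT32(0);

    PG_RETURN_INT32(arg + 1);
}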
-New Functions:
+New Functions:
cube(float[]), which makes a zero volume cube from a float array
SET search_path = public;
-- Create the user-defined type for N-dimensional boxes
---
+--
CREATE OR REPLACE FUNCTION cube_in(cstring)
RETURNS cube
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION g_cube_compress(internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION g_cube_decompress(internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION g_cube_union(internal, internal)
-RETURNS cube
+RETURNS cube
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION g_cube_same(cube, cube, internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
O_BRACKET paren_list COMMA paren_list C_BRACKET {
int dim;
-
+
dim = delim_count($2, ',') + 1;
if ( (delim_count($4, ',') + 1) != dim ) {
ereport(ERROR,
CUBE_MAX_DIM)));
YYABORT;
}
-
+
*((void **)result) = write_box( dim, $2, $4 );
-
+
}
|
paren_list COMMA paren_list {
int dim;
dim = delim_count($1, ',') + 1;
-
+
if ( (delim_count($3, ',') + 1) != dim ) {
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
CUBE_MAX_DIM)));
YYABORT;
}
-
+
*((void **)result) = write_box( dim, $1, $3 );
}
|
$$ = palloc(scanbuflen + 1);
strcpy($$, $1);
}
- |
+ |
list COMMA CUBEFLOAT {
$$ = $1;
strcat($$, ",");
return (ndelim);
}
-static NDBOX *
+static NDBOX *
write_box(unsigned int dim, char *str1, char *str2)
{
NDBOX * bp;
char * s;
- int i;
+ int i;
int size = offsetof(NDBOX, x[0]) + sizeof(double) * dim * 2;
-
+
bp = palloc0(size);
SET_VARSIZE(bp, size);
bp->dim = dim;
-
+
s = str1;
bp->x[i=0] = strtod(s, NULL);
while ((s = strchr(s, ',')) != NULL) {
s++; i++;
bp->x[i] = strtod(s, NULL);
- }
-
+ }
+
s = str2;
bp->x[i=dim] = strtod(s, NULL);
while ((s = strchr(s, ',')) != NULL) {
s++; i++;
bp->x[i] = strtod(s, NULL);
- }
+ }
return(bp);
}
int i, size;
double x;
char * s = str;
-
+
size = offsetof(NDBOX, x[0]) + sizeof(double) * dim * 2;
bp = palloc0(size);
SET_VARSIZE(bp, size);
bp->dim = dim;
-
+
i = 0;
x = strtod(s, NULL);
bp->x[0] = x;
x = strtod(s, NULL);
bp->x[i] = x;
bp->x[i+dim] = x;
- }
+ }
return(bp);
}
%{
-/*
-** A scanner for EMP-style numeric ranges
+/*
+ * A scanner for EMP-style numeric ranges
* contrib/cube/cubescan.l
-*/
+ */
#include "postgres.h"
(0, 1, 2)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
cube_subset
---------------------------
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_cube (c cube);
\copy test_cube from 'data/test_cube.data'
CREATE INDEX test_cube_ix ON test_cube USING gist (c);
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(2424, 160),(2424, 81)
(5 rows)
--- Test sorting
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
+-- Test sorting
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(0, 1, 2)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
cube_subset
---------------------------
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_cube (c cube);
\copy test_cube from 'data/test_cube.data'
CREATE INDEX test_cube_ix ON test_cube USING gist (c);
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(2424, 160),(2424, 81)
(5 rows)
--- Test sorting
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
+-- Test sorting
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(0, 1, 2)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
cube_subset
---------------------------
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_cube (c cube);
\copy test_cube from 'data/test_cube.data'
CREATE INDEX test_cube_ix ON test_cube USING gist (c);
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(2424, 160),(2424, 81)
(5 rows)
--- Test sorting
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
+-- Test sorting
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
SELECT cube('{0,1,2}'::float[], '{3}'::float[]);
SELECT cube(NULL::float[], '{3}'::float[]);
SELECT cube('{0,1,2}'::float[]);
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);
-- Load some example data and build the index
---
+--
CREATE TABLE test_cube (c cube);
\copy test_cube from 'data/test_cube.data'
CREATE INDEX test_cube_ix ON test_cube USING gist (c);
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
--- Test sorting
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
+-- Test sorting
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
SHLIB_LINK = $(libpq)
SHLIB_PREREQS = submake-libpq
-DATA_built = dblink.sql
-DATA = uninstall_dblink.sql
+DATA_built = dblink.sql
+DATA = uninstall_dblink.sql
REGRESS = dblink
OUT notify_name TEXT,
OUT be_pid INT4,
OUT extra TEXT
-)
+)
RETURNS setof record
AS 'MODULE_PATHNAME', 'dblink_get_notify'
LANGUAGE C STRICT;
OUT notify_name TEXT,
OUT be_pid INT4,
OUT extra TEXT
-)
+)
RETURNS setof record
AS 'MODULE_PATHNAME', 'dblink_get_notify'
LANGUAGE C STRICT;
OK
(1 row)
-SELECT * from
+SELECT * from
dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1;
t1
----
OK
(1 row)
-SELECT * from
+SELECT * from
dblink_send_query('dtest2', 'select * from foo where f1 > 2 and f1 < 7') as t1;
t1
----
OK
(1 row)
-SELECT * from
+SELECT * from
dblink_send_query('dtest3', 'select * from foo where f1 > 6') as t1;
t1
----
OK
(1 row)
-SELECT * from
+SELECT * from
dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1;
t1
----
-- test asynchronous queries
SELECT dblink_connect('dtest1', 'dbname=contrib_regression');
-SELECT * from
+SELECT * from
dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1;
SELECT dblink_connect('dtest2', 'dbname=contrib_regression');
-SELECT * from
+SELECT * from
dblink_send_query('dtest2', 'select * from foo where f1 > 2 and f1 < 7') as t1;
SELECT dblink_connect('dtest3', 'dbname=contrib_regression');
-SELECT * from
+SELECT * from
dblink_send_query('dtest3', 'select * from foo where f1 > 6') as t1;
CREATE TEMPORARY TABLE result AS
SELECT * from result;
SELECT dblink_connect('dtest1', 'dbname=contrib_regression');
-SELECT * from
+SELECT * from
dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1;
SELECT dblink_cancel_query('dtest1');
CONSTRAINT on_surface check(abs(cube_distance(value, '(0)'::cube) /
earth() - 1) < '10e-7'::float8);
-CREATE OR REPLACE FUNCTION sec_to_gc(float8)
+CREATE OR REPLACE FUNCTION sec_to_gc(float8)
RETURNS float8
LANGUAGE SQL
IMMUTABLE STRICT
LANGUAGE SQL
IMMUTABLE STRICT
AS 'SELECT cube_enlarge($1, gc_to_sec($2), 3)';
-
+
--------------- geo_distance
CREATE OR REPLACE FUNCTION geo_distance (point, point)
AS 'MODULE_PATHNAME', 'difference'
LANGUAGE C IMMUTABLE STRICT;
-CREATE OR REPLACE FUNCTION dmetaphone (text) RETURNS text
+CREATE OR REPLACE FUNCTION dmetaphone (text) RETURNS text
AS 'MODULE_PATHNAME', 'dmetaphone'
LANGUAGE C IMMUTABLE STRICT;
-CREATE OR REPLACE FUNCTION dmetaphone_alt (text) RETURNS text
+CREATE OR REPLACE FUNCTION dmetaphone_alt (text) RETURNS text
AS 'MODULE_PATHNAME', 'dmetaphone_alt'
LANGUAGE C IMMUTABLE STRICT;
f
(1 row)
--- delete
+-- delete
select delete('a=>1 , b=>2, c=>3'::hstore, 'a');
delete
--------------------
select hstore 'a=>NULL, b=>qq' ?& ARRAY['c','d'];
select hstore 'a=>NULL, b=>qq' ?& '{}'::text[];
--- delete
+-- delete
select delete('a=>1 , b=>2, c=>3'::hstore, 'a');
select delete('a=>null , b=>2, c=>3'::hstore, 'a');
# contrib/intarray/Makefile
MODULE_big = _int
-OBJS = _int_bool.o _int_gist.o _int_op.o _int_tool.o _intbig_gist.o _int_gin.o
+OBJS = _int_bool.o _int_gist.o _int_op.o _int_tool.o _intbig_gist.o _int_gin.o
DATA_built = _int.sql
DATA = uninstall__int.sql
REGRESS = _int
-#!/usr/bin/perl
+#!/usr/bin/perl
use strict;
# make sure we are in a sane environment.
print <<EOT;
Usage:
$0 -d DATABASE -s SECTIONS [-b NUMBER] [-v] [-e] [-o] [-r] [-a] [-u]
--d DATABASE -DATABASE
--b NUMBER -number of repeats
--s SECTIONS -sections, format sid1[,sid2[,sid3[...]]]]
--v -verbose (show SQL)
+-d DATABASE -DATABASE
+-b NUMBER -number of repeats
+-s SECTIONS -sections, format sid1[,sid2[,sid3[...]]]]
+-v -verbose (show SQL)
-e -show explain
-r -use RD-tree index
-a -AND section
-o -show output
-u -unique
--c -count
+-c -count
EOT
exit;
foreach ( @a ) {
print "$_->{mid}\t$_->{sections}\n";
}
-}
+}
print sprintf("total: %.02f sec; number: %d; for one: %.03f sec; found %d docs\n", $elapsed, $b, $elapsed/$b, $count+1 );
$dbi -> disconnect;
sub exec_sql {
my ($dbi, $sql, @keys) = @_;
my $sth=$dbi->prepare($sql) || die;
- $sth->execute( @keys ) || die;
- my $r;
+ $sth->execute( @keys ) || die;
+ my $r;
my @row;
while ( defined ( $r=$sth->fetchrow_hashref ) ) {
push @row, $r;
- }
- $sth->finish;
+ }
+ $sth->finish;
return @row;
}
sections int[]
);
create table message_section_map (
- mid int not null,
+ mid int not null,
sid int not null
);
sub copytable {
my $t = shift;
-
+
print "COPY $t from stdin;\n";
open( FFF, "$t.tmp") || die;
while(<FFF>) { print; }
* For ISBN with prefix 978
* Range Table as of 2010-Jul-29
*/
-
+
/* where the digit set begins, and how many of them are in the table */
const unsigned ISBN_index[10][2] = {
{0, 6},
RETURNS ltree_gist
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
-
+
CREATE OR REPLACE FUNCTION ltree_gist_out(ltree_gist)
RETURNS cstring
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
-
+
CREATE TYPE ltree_gist (
internallength = -1,
input = ltree_gist_in,
output = ltree_gist_out,
storage = plain
-);
+);
CREATE OR REPLACE FUNCTION ltree_consistent(internal,internal,int2,oid,internal)
DROP FUNCTION ltree_consistent(internal,internal,int2,oid,internal);
DROP TYPE ltree_gist CASCADE;
-
+
DROP OPERATOR ^@ (ltxtquery, ltree);
DROP OPERATOR ^@ (ltree, ltxtquery);
# contrib/pg_buffercache/Makefile
MODULE_big = pg_buffercache
-OBJS = pg_buffercache_pages.o
+OBJS = pg_buffercache_pages.o
-DATA_built = pg_buffercache.sql
-DATA = uninstall_pg_buffercache.sql
+DATA_built = pg_buffercache.sql
+DATA = uninstall_pg_buffercache.sql
ifdef USE_PGXS
PG_CONFIG = pg_config
-- Create a view for convenient access.
CREATE VIEW pg_buffercache AS
SELECT P.* FROM pg_buffercache_pages() AS P
- (bufferid integer, relfilenode oid, reltablespace oid, reldatabase oid,
+ (bufferid integer, relfilenode oid, reltablespace oid, reldatabase oid,
relforknumber int2, relblocknumber int8, isdirty bool, usagecount int2);
-
+
-- Don't want these to be available at public.
REVOKE ALL ON FUNCTION pg_buffercache_pages() FROM PUBLIC;
REVOKE ALL ON pg_buffercache FROM PUBLIC;
# contrib/pg_freespacemap/Makefile
MODULE_big = pg_freespacemap
-OBJS = pg_freespacemap.o
+OBJS = pg_freespacemap.o
-DATA_built = pg_freespacemap.sql
-DATA = uninstall_pg_freespacemap.sql
+DATA_built = pg_freespacemap.sql
+DATA = uninstall_pg_freespacemap.sql
ifdef USE_PGXS
PG_CONFIG = pg_config
RETURNS bool
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gtrgm_compress(internal)
RETURNS internal
AS 'MODULE_PATHNAME'
DROP FUNCTION gtrgm_decompress(internal);
DROP FUNCTION gtrgm_compress(internal);
-
+
DROP FUNCTION gtrgm_consistent(internal,text,int,oid,internal);
DROP TYPE gtrgm CASCADE;
of time. If you have too much data, you may have to buy more storage
since you need enough room to hold the original data plus the exported
data. pg_upgrade can reduce the amount of time and disk space required
-for many upgrades.
+for many upgrades.
The URL http://momjian.us/main/writings/pgsql/pg_upgrade.pdf contains a
presentation about pg_upgrade internals that mirrors the text
b) For pre-9.0, remove 'regex_flavor'
f) For pre-9.0, adjust extra_float_digits
- Postgres 9.0 pg_dump uses extra_float_digits=-2 for pre-9.0
- databases, and extra_float_digits=-3 for >= 9.0 databases.
- It is necessary to modify 9.0 pg_dump to always use -3, and
- modify the pre-9.0 old server to accept extra_float_digits=-3.
+ Postgres 9.0 pg_dump uses extra_float_digits=-2 for pre-9.0
+ databases, and extra_float_digits=-3 for >= 9.0 databases.
+ It is necessary to modify 9.0 pg_dump to always use -3, and
+ modify the pre-9.0 old server to accept extra_float_digits=-3.
Once the dump is created, it can be repeatedly loaded into the old
database, upgraded, and dumped out of the new database, and then
3) Create the regression database in the old server.
-4) Load the dump file created above into the regression database;
+4) Load the dump file created above into the regression database;
check for errors while loading.
5) Upgrade the old database to the new major version, as outlined in
namelist[fileno]->d_name);
snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_dir,
maps[mapnum].new_relfilenode, strchr(namelist[fileno]->d_name, '_'));
-
+
unlink(new_file);
transfer_relfile(pageConverter, old_file, new_file,
maps[mapnum].old_nspname, maps[mapnum].old_relname,
3ea6357a0ee7fad6d0c4b63464f2aafa40c2e91b4b7e1bba8114932fd92b5c8f111e7e50e7b2e541
(1 row)
--- blowfish-448
+-- blowfish-448
SELECT encode(encrypt(
decode('fedcba9876543210', 'hex'),
decode('f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455667704689104c2fd3b2f584023641aba61761f1f1f1f0e0e0e0effffffffffffffff', 'hex'),
-- result: c04504012e4e1f53
-- empty data
-select encode( encrypt('', 'foo', 'bf'), 'hex');
+select encode(encrypt('', 'foo', 'bf'), 'hex');
encode
------------------
1871949bb2311c8e
(1 row)
-- 10 bytes key
-select encode( encrypt('foo', '0123456789', 'bf'), 'hex');
+select encode(encrypt('foo', '0123456789', 'bf'), 'hex');
encode
------------------
42f58af3b2c03f46
(1 row)
-- 22 bytes key
-select encode( encrypt('foo', '0123456789012345678901', 'bf'), 'hex');
+select encode(encrypt('foo', '0123456789012345678901', 'bf'), 'hex');
encode
------------------
86ab6f0bc72b5f22
INSERT INTO ctest VALUES ('password', '', '');
UPDATE ctest SET salt = gen_salt('bf', 8);
UPDATE ctest SET res = crypt(data, salt);
-SELECT res = crypt(data, res) AS "worked"
+SELECT res = crypt(data, res) AS "worked"
FROM ctest;
worked
--------
(1 row)
-- empty data
-select encode( encrypt('', 'foo', 'aes'), 'hex');
+select encode(encrypt('', 'foo', 'aes'), 'hex');
encode
----------------------------------
b48cc3338a2eb293b6007ef72c360d48
(1 row)
-- 10 bytes key
-select encode( encrypt('foo', '0123456789', 'aes'), 'hex');
+select encode(encrypt('foo', '0123456789', 'aes'), 'hex');
encode
----------------------------------
f397f03d2819b7172b68d0706fda4693
(1 row)
-- 22 bytes key
-select encode( encrypt('foo', '0123456789012345678901', 'aes'), 'hex');
+select encode(encrypt('foo', '0123456789012345678901', 'aes'), 'hex');
encode
----------------------------------
5c9db77af02b4678117bcd8a71ae7f53
(1 row)
select decrypt_iv(decode('2c24cb7da91d6d5699801268b0f5adad', 'hex'),
- '0123456', 'abcd', 'aes');
+ '0123456', 'abcd', 'aes');
decrypt_iv
------------
foo
};
static const u4byte rco_tab[10] = {
- 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
0x00000020, 0x00000040, 0x00000080, 0x0000001b, 0x00000036
};
decode('37363534333231204e6f77206973207468652074696d6520666f722000', 'hex'),
'bf-cbc'), 'hex');
--- blowfish-448
+-- blowfish-448
SELECT encode(encrypt(
decode('fedcba9876543210', 'hex'),
decode('f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455667704689104c2fd3b2f584023641aba61761f1f1f1f0e0e0e0effffffffffffffff', 'hex'),
-- result: c04504012e4e1f53
-- empty data
-select encode( encrypt('', 'foo', 'bf'), 'hex');
+select encode(encrypt('', 'foo', 'bf'), 'hex');
-- 10 bytes key
-select encode( encrypt('foo', '0123456789', 'bf'), 'hex');
+select encode(encrypt('foo', '0123456789', 'bf'), 'hex');
-- 22 bytes key
-select encode( encrypt('foo', '0123456789012345678901', 'bf'), 'hex');
+select encode(encrypt('foo', '0123456789012345678901', 'bf'), 'hex');
-- decrypt
select decrypt(encrypt('foo', '0123456', 'bf'), '0123456', 'bf');
UPDATE ctest SET salt = gen_salt('bf', 8);
UPDATE ctest SET res = crypt(data, salt);
-SELECT res = crypt(data, res) AS "worked"
+SELECT res = crypt(data, res) AS "worked"
FROM ctest;
DROP TABLE ctest;
'aes-cbc'), 'hex');
-- empty data
-select encode( encrypt('', 'foo', 'aes'), 'hex');
+select encode(encrypt('', 'foo', 'aes'), 'hex');
-- 10 bytes key
-select encode( encrypt('foo', '0123456789', 'aes'), 'hex');
+select encode(encrypt('foo', '0123456789', 'aes'), 'hex');
-- 22 bytes key
-select encode( encrypt('foo', '0123456789012345678901', 'aes'), 'hex');
+select encode(encrypt('foo', '0123456789012345678901', 'aes'), 'hex');
-- decrypt
select decrypt(encrypt('foo', '0123456', 'aes'), '0123456', 'aes');
-- iv
select encode(encrypt_iv('foo', '0123456', 'abcd', 'aes'), 'hex');
select decrypt_iv(decode('2c24cb7da91d6d5699801268b0f5adad', 'hex'),
- '0123456', 'abcd', 'aes');
+ '0123456', 'abcd', 'aes');
-- long message
select encode(encrypt('Lets try a longer message.', '0123456789', 'aes'), 'hex');
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_seg (s seg);
\copy test_seg from 'data/test_seg.data'
CREATE INDEX test_seg_ix ON test_seg USING gist (s);
143
(1 row)
--- Test sorting
+-- Test sorting
SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s;
s
-----------------
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_seg (s seg);
\copy test_seg from 'data/test_seg.data'
CREATE INDEX test_seg_ix ON test_seg USING gist (s);
143
(1 row)
--- Test sorting
+-- Test sorting
SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s;
s
-----------------
SET search_path = public;
-- Create the user-defined type for 1-D floating point intervals (seg)
---
+--
CREATE OR REPLACE FUNCTION seg_in(cstring)
RETURNS seg
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION gseg_compress(internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION gseg_decompress(internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION gseg_union(internal, internal)
-RETURNS seg
+RETURNS seg
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION gseg_same(seg, seg, internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
FUNCTION 1 seg_cmp(seg, seg);
CREATE OPERATOR CLASS gist_seg_ops
-DEFAULT FOR TYPE seg USING gist
+DEFAULT FOR TYPE seg USING gist
AS
OPERATOR 1 << ,
OPERATOR 2 &< ,
%{
#define YYPARSE_PARAM result /* need this to pass a pointer (void *) to yyparse */
-
+
#include "postgres.h"
#include <math.h>
extern int seg_yylex(void);
extern int significant_digits(char *str); /* defined in seg.c */
-
+
void seg_yyerror(const char *message);
int seg_yyparse(void *result);
$$.sigd = significant_digits($1);
$$.val = val;
}
- |
+ |
EXTENSION SEGFLOAT {
/* temp variable avoids a gcc 3.3.x bug on Sparc64 */
float val = seg_atof($2);
%{
-/*
-** A scanner for EMP-style numeric ranges
-*/
+/*
+ * A scanner for EMP-style numeric ranges
+ */
#include "postgres.h"
push @rows, $_;
}
-foreach ( sort {
+foreach ( sort {
@ar = split("\t", $a);
$valA = pop @ar;
$valA =~ s/[~<> ]+//g;
SELECT '1'::seg <@ '-1 .. 1'::seg AS bool;
-- Load some example data and build the index
---
+--
CREATE TABLE test_seg (s seg);
\copy test_seg from 'data/test_seg.data'
CREATE INDEX test_seg_ix ON test_seg USING gist (s);
SELECT count(*) FROM test_seg WHERE s @> '11..11.3';
--- Test sorting
+-- Test sorting
SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s;
-- Test functions
idesc text
);
-CREATE TRIGGER ids_nextid
+CREATE TRIGGER ids_nextid
BEFORE INSERT OR UPDATE ON ids
- FOR EACH ROW
+ FOR EACH ROW
EXECUTE PROCEDURE autoinc (id, next_id);
INSERT INTO ids VALUES (0, 'first (-2 ?)');
SELECT * FROM ids;
-UPDATE ids SET id = null, idesc = 'first: -2 --> 2'
+UPDATE ids SET id = null, idesc = 'first: -2 --> 2'
WHERE idesc = 'first (-2 ?)';
-UPDATE ids SET id = 0, idesc = 'second: -1 --> 3'
+UPDATE ids SET id = 0, idesc = 'second: -1 --> 3'
WHERE id = -1;
-UPDATE ids SET id = 4, idesc = 'third: 1 --> 4'
+UPDATE ids SET id = 4, idesc = 'third: 1 --> 4'
WHERE id = 1;
SELECT * FROM ids;
-- Adjust this setting to control where the objects get created.
SET search_path = public;
-CREATE OR REPLACE FUNCTION autoinc()
-RETURNS trigger
+CREATE OR REPLACE FUNCTION autoinc()
+RETURNS trigger
AS 'MODULE_PATHNAME'
LANGUAGE C;
CREATE TRIGGER insert_usernames
BEFORE INSERT OR UPDATE ON username_test
- FOR EACH ROW
+ FOR EACH ROW
EXECUTE PROCEDURE insert_username (username);
INSERT INTO username_test VALUES ('nothing');
-- Adjust this setting to control where the objects get created.
SET search_path = public;
-CREATE OR REPLACE FUNCTION insert_username()
-RETURNS trigger
+CREATE OR REPLACE FUNCTION insert_username()
+RETURNS trigger
AS 'MODULE_PATHNAME'
LANGUAGE C;
CREATE TRIGGER mdt_moddatetime
BEFORE UPDATE ON mdt
- FOR EACH ROW
+ FOR EACH ROW
EXECUTE PROCEDURE moddatetime (moddate);
INSERT INTO mdt VALUES (1, 'first');
--Trigger for table A:
CREATE TRIGGER AT BEFORE DELETE OR UPDATE ON A FOR EACH ROW
-EXECUTE PROCEDURE
+EXECUTE PROCEDURE
check_foreign_key (2, 'cascade', 'ID', 'B', 'REFB', 'C', 'REFC');
/*
2 - means that check must be performed for foreign keys of 2 tables.
-cascade - defines that corresponding keys must be deleted.
+cascade - defines that corresponding keys must be deleted.
ID - name of primary key column in triggered table (A). You may
use as many columns as you need.
B - name of (first) table with foreign keys.
--Trigger for table B:
CREATE TRIGGER BT BEFORE INSERT OR UPDATE ON B FOR EACH ROW
-EXECUTE PROCEDURE
+EXECUTE PROCEDURE
check_primary_key ('REFB', 'A', 'ID');
/*
-REFB - name of foreign key column in triggered (B) table. You may use as
+REFB - name of foreign key column in triggered (B) table. You may use as
many columns as you need, but number of key columns in referenced
table must be the same.
A - referenced table name.
--Trigger for table C:
CREATE TRIGGER CT BEFORE INSERT OR UPDATE ON C FOR EACH ROW
-EXECUTE PROCEDURE
+EXECUTE PROCEDURE
check_primary_key ('REFC', 'A', 'ID');
-- Now try
drop table tttest;
create table tttest (
- price_id int4,
- price_val int4,
+ price_id int4,
+ price_val int4,
price_on abstime,
price_off abstime
);
alter table tttest add column q2 int;
alter table tttest drop column q1;
-create trigger timetravel
+create trigger timetravel
before insert or delete or update on tttest
- for each row
- execute procedure
+ for each row
+ execute procedure
timetravel (price_on, price_off);
insert into tttest values (1, 1, null, null);
insert into tttest(price_id, price_val) values (2, 2);
insert into tttest(price_id, price_val,price_off) values (3, 3, 'infinity');
-insert into tttest(price_id, price_val,price_off) values (4, 4,
+insert into tttest(price_id, price_val,price_off) values (4, 4,
abstime('now'::timestamp - '100 days'::interval));
insert into tttest(price_id, price_val,price_on) values (3, 3, 'infinity'); -- duplicate key
select get_timetravel('tttest'); -- check status
-- we want to correct some date
-update tttest set price_on = 'Jan-01-1990 00:00:01' where price_id = 5 and
+update tttest set price_on = 'Jan-01-1990 00:00:01' where price_id = 5 and
price_off <> 'infinity';
-- but this doesn't work
select get_timetravel('tttest'); -- check status
-update tttest set price_on = '01-Jan-1990 00:00:01' where price_id = 5 and
+update tttest set price_on = '01-Jan-1990 00:00:01' where price_id = 5 and
price_off <> 'infinity';
select * from tttest;
-- isn't it what we need ?
-- get price for price_id == 5 as it was '10-Jan-1990'
-select * from tttest where price_id = 5 and
+select * from tttest where price_id = 5 and
price_on <= '10-Jan-1990' and price_off > '10-Jan-1990';
-- Adjust this setting to control where the objects get created.
SET search_path = public;
-CREATE OR REPLACE FUNCTION timetravel()
-RETURNS trigger
+CREATE OR REPLACE FUNCTION timetravel()
+RETURNS trigger
AS 'MODULE_PATHNAME'
LANGUAGE C;
-CREATE OR REPLACE FUNCTION set_timetravel(name, int4)
-RETURNS int4
+CREATE OR REPLACE FUNCTION set_timetravel(name, int4)
+RETURNS int4
AS 'MODULE_PATHNAME'
LANGUAGE C RETURNS NULL ON NULL INPUT;
-CREATE OR REPLACE FUNCTION get_timetravel(name)
-RETURNS int4
+CREATE OR REPLACE FUNCTION get_timetravel(name)
+RETURNS int4
AS 'MODULE_PATHNAME'
LANGUAGE C RETURNS NULL ON NULL INPUT;
#
# Created by David Wheeler, 2002.
-# modified by Ray Aspeitia 12-03-2003 :
+# modified by Ray Aspeitia 12-03-2003 :
# added log rotation script to db startup
-# modified StartupParameters.plist "Provides" parameter to make it easier to
+# modified StartupParameters.plist "Provides" parameter to make it easier to
# start and stop with the SystemStarter utility
# use the below command in order to correctly start/stop/restart PG with log rotation script:
'star'
(1 row)
-SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
+SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
to_tsquery('testcfg', 'stars'));
ts_headline
-----------------------------------------------------------------
SELECT to_tsquery('testcfg', 'star');
-SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
+SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
to_tsquery('testcfg', 'stars'));
53
(1 row)
-select to_tsquery('english', 'qwe & sKies ');
+select to_tsquery('english', 'qwe & sKies ');
to_tsquery
---------------
'qwe' & 'sky'
(1 row)
-select to_tsquery('simple', 'qwe & sKies ');
+select to_tsquery('simple', 'qwe & sKies ');
to_tsquery
-----------------
'qwe' & 'skies'
The granite features of this cliff
(1 row)
-
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
The <b>granite</b> features of this cliff
(1 row)
-
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
document.write(15);
</script>
</body>
-</html>',
+</html>',
to_tsquery('sea&foo'), 'HighlightAll=true');
headline
-----------------------------------------------------------------------------
53
(1 row)
-select to_tsquery('english', 'qwe & sKies ');
+select to_tsquery('english', 'qwe & sKies ');
to_tsquery
---------------
'qwe' & 'sky'
(1 row)
-select to_tsquery('simple', 'qwe & sKies ');
+select to_tsquery('simple', 'qwe & sKies ');
to_tsquery
-----------------
'qwe' & 'skies'
The granite features of this cliff
(1 row)
-
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
The <b>granite</b> features of this cliff
(1 row)
-
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
document.write(15);
</script>
</body>
-</html>',
+</html>',
to_tsquery('sea&foo'), 'HighlightAll=true');
headline
-----------------------------------------------------------------------------
<i <b> wow < jqw <> qwerty'));
-select to_tsquery('english', 'qwe & sKies ');
-select to_tsquery('simple', 'qwe & sKies ');
+select to_tsquery('english', 'qwe & sKies ');
+select to_tsquery('simple', 'qwe & sKies ');
select to_tsquery('english', '''the wether'':dc & '' sKies '':BC ');
select to_tsquery('english', 'asd&(and|fghj)');
select to_tsquery('english', '(asd&and)|fghj');
The sculpture of these granite seams,
Upon a woman s face. E. J. Pratt (1882 1964)
', to_tsquery('sea&thousand&years'));
-
+
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
The sculpture of these granite seams,
Upon a woman s face. E. J. Pratt (1882 1964)
', to_tsquery('granite&sea'));
-
+
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
document.write(15);
</script>
</body>
-</html>',
+</html>',
to_tsquery('sea&foo'), 'HighlightAll=true');
--check debug
select * from public.ts_debug('Tsearch module for PostgreSQL 7.3.3');
CREATE DOMAIN gtsq AS pg_catalog.text;
--dict interface
-CREATE FUNCTION lexize(oid, text)
+CREATE FUNCTION lexize(oid, text)
RETURNS _text
as 'ts_lexize'
LANGUAGE INTERNAL
--built-in dictionaries
CREATE FUNCTION dex_init(internal)
RETURNS internal
- as 'MODULE_PATHNAME', 'tsa_dex_init'
+ as 'MODULE_PATHNAME', 'tsa_dex_init'
LANGUAGE C;
CREATE FUNCTION dex_lexize(internal,internal,int4)
CREATE FUNCTION snb_ru_init_koi8(internal)
RETURNS internal
- as 'MODULE_PATHNAME', 'tsa_snb_ru_init_koi8'
+ as 'MODULE_PATHNAME', 'tsa_snb_ru_init_koi8'
LANGUAGE C;
CREATE FUNCTION snb_ru_init_utf8(internal)
CREATE FUNCTION spell_init(internal)
RETURNS internal
- as 'MODULE_PATHNAME', 'tsa_spell_init'
+ as 'MODULE_PATHNAME', 'tsa_spell_init'
LANGUAGE C;
CREATE FUNCTION spell_lexize(internal,internal,int4)
CREATE FUNCTION syn_init(internal)
RETURNS internal
- as 'MODULE_PATHNAME', 'tsa_syn_init'
+ as 'MODULE_PATHNAME', 'tsa_syn_init'
LANGUAGE C;
CREATE FUNCTION syn_lexize(internal,internal,int4)
RETURNS NULL ON NULL INPUT;
--sql-level interface
-CREATE TYPE tokentype
- as (tokid int4, alias text, descr text);
+CREATE TYPE tokentype
+ as (tokid int4, alias text, descr text);
CREATE FUNCTION token_type(int4)
RETURNS setof tokentype
LANGUAGE C
RETURNS NULL ON NULL INPUT;
-CREATE TYPE tokenout
+CREATE TYPE tokenout
as (tokid int4, token text);
CREATE FUNCTION parse(oid,text)
as 'ts_parse_byid'
LANGUAGE INTERNAL
RETURNS NULL ON NULL INPUT;
-
+
CREATE FUNCTION parse(text,text)
RETURNS setof tokenout
as 'ts_parse_byname'
LANGUAGE INTERNAL
RETURNS NULL ON NULL INPUT;
-
+
CREATE FUNCTION parse(text)
RETURNS setof tokenout
as 'MODULE_PATHNAME', 'tsa_parse_current'
LANGUAGE C
RETURNS NULL ON NULL INPUT;
-
+
--default parser
CREATE FUNCTION prsd_start(internal,int4)
RETURNS internal
STORAGE gtsvector;
--stat info
-CREATE TYPE statinfo
+CREATE TYPE statinfo
as (word text, ndoc int4, nentry int4);
CREATE FUNCTION stat(text)
CREATE OPERATOR CLASS tsvector_ops
FOR TYPE tsvector USING btree AS
OPERATOR 1 < ,
- OPERATOR 2 <= ,
+ OPERATOR 2 <= ,
OPERATOR 3 = ,
OPERATOR 4 >= ,
OPERATOR 5 > ,
REGRESS = unaccent
# Adjust REGRESS_OPTS because we need a UTF8 database
-REGRESS_OPTS = --dbname=$(CONTRIB_TESTDB) --multibyte=UTF8 --no-locale
+REGRESS_OPTS = --dbname=$(CONTRIB_TESTDB) --multibyte=UTF8 --no-locale
ifdef USE_PGXS
PG_CONFIG = pg_config
(1 row)
-select xslt_process( query_to_xml('select x from generate_series(1,5) as
+select xslt_process( query_to_xml('select x from generate_series(1,5) as
x',true,false,'')::text,
$$<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
(1 row)
-select xslt_process( query_to_xml('select x from generate_series(1,5) as
+select xslt_process( query_to_xml('select x from generate_series(1,5) as
x',true,false,'')::text,
$$<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
select query_to_xml('select 1 as x',true,false,'');
-select xslt_process( query_to_xml('select x from generate_series(1,5) as
+select xslt_process( query_to_xml('select x from generate_series(1,5) as
x',true,false,'')::text,
$$<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
Please describe a way to repeat the problem. Please try to provide a
-concise reproducible example, if at all possible:
+concise reproducible example, if at all possible:
----------------------------------------------------------------------
endif
# Enable some extra warnings
-# -wfully-tagged needed to throw a warning on missing tags
+# -wfully-tagged needed to throw a warning on missing tags
# for older tool chains, 2007-08-31
# Note: try "make SPFLAGS=-wxml" to catch a lot of other dubious constructs,
# in particular < and & that haven't been made into entities. It's far too
<listitem>
<para>
<varname>auto_explain.log_buffers</varname> causes <command>EXPLAIN
- (ANALYZE, BUFFERS)</> output, rather than just <command>EXPLAIN</>
- output, to be printed when an execution plan is logged. This parameter is
+ (ANALYZE, BUFFERS)</> output, rather than just <command>EXPLAIN</>
+ output, to be printed when an execution plan is logged. This parameter is
off by default. Only superusers can change this setting. This
parameter has no effect unless <varname>auto_explain.log_analyze</>
parameter is set.
<bibliodiv>
<title>Proceedings and Articles</title>
<para>This section is for articles and newsletters.</para>
-
+
<biblioentry id="OLSON93">
<title>Partial indexing in POSTGRES: research project</title>
<titleabbrev>Olson, 1993</titleabbrev>
<biblioset relation="article">
<title>Generalized Partial Indexes
<ulink url="http://citeseer.ist.psu.edu/seshadri95generalized.html">(cached version)
-<!--
+<!--
Original URL: http://citeseer.ist.psu.edu/seshadri95generalized.html
-->
</ulink>
locale then the specifications can take the form
<replaceable>language_territory.codeset</>. For example,
<literal>fr_BE.UTF-8</> represents the French language (fr) as
- spoken in Belgium (BE), with a <acronym>UTF-8</> character set
+ spoken in Belgium (BE), with a <acronym>UTF-8</> character set
encoding.
</para>
<listitem>
<para>
Sets the location of the Kerberos server key file. See
- <xref linkend="kerberos-auth"> or <xref linkend="gssapi-auth">
+ <xref linkend="kerberos-auth"> or <xref linkend="gssapi-auth">
for details. This parameter can only be set in the
<filename>postgresql.conf</> file or on the server command line.
</para>
<para>
Sets whether Kerberos and GSSAPI user names should be treated
case-insensitively.
- The default is <literal>off</> (case sensitive). This parameter can only be
+ The default is <literal>off</> (case sensitive). This parameter can only be
set in the <filename>postgresql.conf</> file or on the server command line.
</para>
</listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-shared-preload-libraries" xreflabel="shared_preload_libraries">
<term><varname>shared_preload_libraries</varname> (<type>string</type>)</term>
<indexterm>
when the library is first used. However, the time to start each new
server process might increase slightly, even if that process never
uses the library. So this parameter is recommended only for
- libraries that will be used in most sessions.
+ libraries that will be used in most sessions.
</para>
<note>
On Windows hosts, preloading a library at server start will not reduce
the time required to start each new server process; each server process
will re-load all preload libraries. However, <varname>shared_preload_libraries
- </varname> is still useful on Windows hosts because some shared libraries may
+ </varname> is still useful on Windows hosts because some shared libraries may
need to perform certain operations that only take place at postmaster start
(for example, a shared library may need to reserve lightweight locks
or shared memory and you can't do that after the postmaster has started).
<para>
Every PostgreSQL-supported library has a <quote>magic
- block</> that is checked to guarantee compatibility.
- For this reason, non-PostgreSQL libraries cannot be
+ block</> that is checked to guarantee compatibility.
+ For this reason, non-PostgreSQL libraries cannot be
loaded in this way.
</para>
</listitem>
<para>
<varname>fsync</varname> can only be set in the <filename>postgresql.conf</>
file or on the server command line.
- If you turn this parameter off, also consider turning off
+ If you turn this parameter off, also consider turning off
<xref linkend="guc-full-page-writes">.
</para>
</listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-wal-sync-method" xreflabel="wal_sync_method">
<term><varname>wal_sync_method</varname> (<type>enum</type>)</term>
<indexterm>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-full-page-writes" xreflabel="full_page_writes">
<indexterm>
<primary><varname>full_page_writes</> configuration parameter</primary>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-archive-timeout" xreflabel="archive_timeout">
<term><varname>archive_timeout</varname> (<type>integer</type>)</term>
<indexterm>
</para>
</listitem>
</varlistentry>
-
+
</variablelist>
</sect2>
<sect2 id="runtime-config-query-constants">
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-cpu-operator-cost" xreflabel="cpu_operator_cost">
<term><varname>cpu_operator_cost</varname> (<type>floating point</type>)</term>
<indexterm>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-effective-cache-size" xreflabel="effective_cache_size">
<term><varname>effective_cache_size</varname> (<type>integer</type>)</term>
<indexterm>
<productname>PostgreSQL</productname> supports several methods
for logging server messages, including
<systemitem>stderr</systemitem>, <systemitem>csvlog</systemitem> and
- <systemitem>syslog</systemitem>. On Windows,
+ <systemitem>syslog</systemitem>. On Windows,
<systemitem>eventlog</systemitem> is also supported. Set this
parameter to a list of desired log destinations separated by
- commas. The default is to log to <systemitem>stderr</systemitem>
+ commas. The default is to log to <systemitem>stderr</systemitem>
only.
This parameter can only be set in the <filename>postgresql.conf</>
file or on the server command line.
value</> (<acronym>CSV</>) format, which is convenient for
loading logs into programs.
See <xref linkend="runtime-config-logging-csvlog"> for details.
- <varname>logging_collector</varname> must be enabled to generate
+ <varname>logging_collector</varname> must be enabled to generate
CSV-format log output.
</para>
</indexterm>
<listitem>
<para>
- When <varname>logging_collector</> is enabled,
+ When <varname>logging_collector</> is enabled,
this parameter determines the directory in which log files will be created.
It can be specified as an absolute path, or relative to the
cluster data directory.
</para>
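   <para>
    For example (the directory names shown are only illustrations):
<programlisting>
log_directory = 'pg_log'                  # relative to the cluster data directory
#log_directory = '/var/log/postgresql'    # or an absolute path
</programlisting>
   </para>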
<para>
If CSV-format output is enabled in <varname>log_destination</>,
- <literal>.csv</> will be appended to the timestamped
+ <literal>.csv</> will be appended to the timestamped
log file name to create the file name for CSV-format output.
(If <varname>log_filename</> ends in <literal>.log</>, the suffix is
replaced instead.)
</para>
<para>
Example: To keep 7 days of logs, one log file per day named
- <literal>server_log.Mon</literal>, <literal>server_log.Tue</literal>,
+ <literal>server_log.Mon</literal>, <literal>server_log.Tue</literal>,
 etc., and automatically overwrite last week's log with this week's log,
- set <varname>log_filename</varname> to <literal>server_log.%a</literal>,
- <varname>log_truncate_on_rotation</varname> to <literal>on</literal>, and
+ set <varname>log_filename</varname> to <literal>server_log.%a</literal>,
+ <varname>log_truncate_on_rotation</varname> to <literal>on</literal>, and
<varname>log_rotation_age</varname> to <literal>1440</literal>.
</para>
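   <para>
    Written as <filename>postgresql.conf</> entries, that example is:
<programlisting>
log_filename = 'server_log.%a'
log_truncate_on_rotation = on
log_rotation_age = 1440                   # minutes, i.e. one day
</programlisting>
   </para>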
<para>
- Example: To keep 24 hours of logs, one log file per hour, but
- also rotate sooner if the log file size exceeds 1GB, set
- <varname>log_filename</varname> to <literal>server_log.%H%M</literal>,
- <varname>log_truncate_on_rotation</varname> to <literal>on</literal>,
- <varname>log_rotation_age</varname> to <literal>60</literal>, and
+ Example: To keep 24 hours of logs, one log file per hour, but
+ also rotate sooner if the log file size exceeds 1GB, set
+ <varname>log_filename</varname> to <literal>server_log.%H%M</literal>,
+ <varname>log_truncate_on_rotation</varname> to <literal>on</literal>,
+ <varname>log_rotation_age</varname> to <literal>60</literal>, and
<varname>log_rotation_size</varname> to <literal>1000000</literal>.
Including <literal>%M</> in <varname>log_filename</varname> allows
 any size-driven rotations that might occur to select a file name
 different from the hour's initial file name.
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-syslog-ident" xreflabel="syslog_ident">
<term><varname>syslog_ident</varname> (<type>string</type>)</term>
<indexterm>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-log-min-duration-statement" xreflabel="log_min_duration_statement">
<term><varname>log_min_duration_statement</varname> (<type>integer</type>)</term>
<indexterm>
the text of statements that are logged because of
<varname>log_statement</> will not be repeated in the
duration log message.
- If you are not using <application>syslog</>, it is recommended
+ If you are not using <application>syslog</>, it is recommended
that you log the PID or session ID using
<xref linkend="guc-log-line-prefix">
so that you can link the statement message to the later
<note>
<para>
- Some client programs, like <application>psql</>, attempt
- to connect twice while determining if a password is required, so
+ Some client programs, like <application>psql</>, attempt
+ to connect twice while determining if a password is required, so
duplicate <quote>connection received</> messages do not
necessarily indicate a problem.
</para>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-log-line-prefix" xreflabel="log_line_prefix">
<term><varname>log_line_prefix</varname> (<type>string</type>)</term>
<indexterm>
<tip>
<para>
- <application>Syslog</> produces its own
+ <application>Syslog</> produces its own
time stamp and process ID information, so you probably do not want to
include those escapes if you are logging to <application>syslog</>.
</para>
<listitem>
<para>
- Set <varname>log_rotation_size</varname> to 0 to disable
- size-based log rotation, as it makes the log file name difficult
- to predict.
+ Set <varname>log_rotation_size</varname> to 0 to disable
+ size-based log rotation, since size-based rotation makes the log
+ file name difficult to predict.
</para>
</listitem>
<para>
Every PostgreSQL-supported library has a <quote>magic
- block</> that is checked to guarantee compatibility.
+ block</> that is checked to guarantee compatibility.
For this reason, non-PostgreSQL libraries cannot be
loaded in this way.
</para>
<para>
Refer to the introduction in this manual or to the
-<productname>PostgreSQL</productname>
+<productname>PostgreSQL</productname>
<ulink url="http://www.postgresql.org">web page</ulink>
for information about subscribing to the no-cost mailing lists.
</para>
<para>
When building from the source distribution, these modules are not built
- automatically, unless you build the "world" target
+ automatically, unless you build the "world" target
(see <xref linkend="build">).
You can build and install all of them by running:
<screen>
<para>
<xref linkend="datatype-table"> shows all the built-in general-purpose data
- types. Most of the alternative names listed in the
+ types. Most of the alternative names listed in the
<quote>Aliases</quote> column are the names used internally by
<productname>PostgreSQL</productname> for historical reasons. In
addition, some internally used or deprecated types are available,
<para>
In addition to ordinary numeric values, the <type>numeric</type>
- type allows the special value <literal>NaN</>, meaning
+ type allows the special value <literal>NaN</>, meaning
<quote>not-a-number</quote>. Any operation on <literal>NaN</>
yields another <literal>NaN</>. When writing this value
as a constant in an SQL command, you must put quotes around it,
<type>float(<replaceable>p</replaceable>)</type> for specifying
inexact numeric types. Here, <replaceable>p</replaceable> specifies
the minimum acceptable precision in <emphasis>binary</> digits.
- <productname>PostgreSQL</productname> accepts
+ <productname>PostgreSQL</productname> accepts
<type>float(1)</type> to <type>float(24)</type> as selecting the
- <type>real</type> type, while
+ <type>real</type> type, while
<type>float(25)</type> to <type>float(53)</type> select
<type>double precision</type>. Values of <replaceable>p</replaceable>
outside the allowed range draw an error.
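  <para>
   For example, the following sketch (the table and column names are
   arbitrary) illustrates the mapping just described:
<programlisting>
CREATE TABLE float_demo (
    a float(20),   -- precision 1..24: stored as real
    b float(40)    -- precision 25..53: stored as double precision
);
</programlisting>
  </para>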
<para>
Date and time input is accepted in almost any reasonable format, including
- ISO 8601, <acronym>SQL</acronym>-compatible,
+ ISO 8601, <acronym>SQL</acronym>-compatible,
traditional <productname>POSTGRES</productname>, and others.
For some formats, ordering of day, month, and year in date input is
ambiguous and there is support for specifying the expected
See <xref linkend="datetime-appendix">
for the exact parsing rules of date/time input and for the
recognized text fields including months, days of the week, and
- time zones.
+ time zones.
</para>
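  <para>
   For example, the following date literals (a small sample of the accepted
   formats) all denote January 8, 1999:
<programlisting>
SELECT DATE '1999-01-08', DATE 'January 8, 1999', DATE '19990108';
</programlisting>
  </para>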
<para>
Remember that any date or time literal input needs to be enclosed
- in single quotes, like text strings. Refer to
+ in single quotes, like text strings. Refer to
<xref linkend="sql-syntax-constants-generic"> for more
information.
<acronym>SQL</acronym> requires the following syntax
<indexterm>
<primary>date</primary>
</indexterm>
-
+
<para>
<xref linkend="datatype-datetime-date-table"> shows some possible
inputs for the <type>date</type> type.
<para>
Valid input for these types consists of a time of day followed
by an optional time zone. (See <xref
- linkend="datatype-datetime-time-table">
+ linkend="datatype-datetime-time-table">
and <xref linkend="datatype-timezone-table">.) If a time zone is
specified in the input for <type>time without time zone</type>,
it is silently ignored. You can also specify a date but it will
<para>
The <acronym>SQL</acronym> standard differentiates
- <type>timestamp without time zone</type>
- and <type>timestamp with time zone</type> literals by the presence of a
+ <type>timestamp without time zone</type>
+ and <type>timestamp with time zone</type> literals by the presence of a
<quote>+</quote> or <quote>-</quote> symbol and time zone offset after
the time. Hence, according to the standard,
The following <acronym>SQL</acronym>-compatible functions can also
be used to obtain the current time value for the corresponding data
type:
- <literal>CURRENT_DATE</literal>, <literal>CURRENT_TIME</literal>,
- <literal>CURRENT_TIMESTAMP</literal>, <literal>LOCALTIME</literal>,
- <literal>LOCALTIMESTAMP</literal>. The latter four accept an
- optional subsecond precision specification. (See <xref
+ <literal>CURRENT_DATE</literal>, <literal>CURRENT_TIME</literal>,
+ <literal>CURRENT_TIMESTAMP</literal>, <literal>LOCALTIME</literal>,
+ <literal>LOCALTIMESTAMP</literal>. The latter four accept an
+ optional subsecond precision specification. (See <xref
linkend="functions-datetime-current">.) Note that these are
SQL functions and are <emphasis>not</> recognized in data input strings.
</para>
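  <para>
   For example:
<programlisting>
SELECT CURRENT_DATE, CURRENT_TIMESTAMP, LOCALTIMESTAMP(0);
</programlisting>
   Here <literal>LOCALTIMESTAMP(0)</literal> uses the optional precision
   argument to round the result to whole seconds.
  </para>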
<itemizedlist>
<listitem>
<para>
- Although the <type>date</type> type
+ Although the <type>date</type> type
cannot have an associated time zone, the
<type>time</type> type can.
- Time zones in the real world have little meaning unless
+ Time zones in the real world have little meaning unless
associated with a date as well as a time,
since the offset can vary through the year with daylight-saving
time boundaries.
<listitem>
<para>
- The default time zone is specified as a constant numeric offset
+ The default time zone is specified as a constant numeric offset
from <acronym>UTC</>. It is therefore impossible to adapt to
daylight-saving time when doing date/time arithmetic across
<acronym>DST</acronym> boundaries.
order in which the values were listed when the type was created.
All standard comparison operators and related
aggregate functions are supported for enums. For example:
-
+
<programlisting>
INSERT INTO person VALUES ('Larry', 'sad');
INSERT INTO person VALUES ('Curly', 'ok');
Moe | happy
(2 rows)
-SELECT name
+SELECT name
FROM person
WHERE current_mood = (SELECT MIN(current_mood) FROM person);
name
<sect2>
<title>Implementation Details</title>
-
+
<para>
An enum value occupies four bytes on disk. The length of an enum
value's textual label is limited by the <symbol>NAMEDATALEN</symbol>
<table id="datatype-net-cidr-table">
<title><type>cidr</> Type Input Examples</title>
<tgroup cols="3">
- <thead>
- <row>
+ <thead>
+ <row>
<entry><type>cidr</type> Input</entry>
<entry><type>cidr</type> Output</entry>
<entry><literal><function>abbrev(<type>cidr</type>)</function></literal></entry>
for searching:
<programlisting>
-SELECT to_tsvector('english', 'The Fat Rats');
+SELECT to_tsvector('english', 'The Fat Rats');
to_tsvector
-----------------
'fat':2 'rat':3
functions for UUIDs, but the core database does not include any
function for generating UUIDs, because no single algorithm is well
suited for every application. The contrib module
- <filename>contrib/uuid-ossp</filename> provides functions that implement
+ <filename>contrib/uuid-ossp</filename> provides functions that implement
several standard algorithms.
Alternatively, UUIDs could be generated by client applications or
other libraries invoked through a server-side function.
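  <para>
   For example, with <filename>contrib/uuid-ossp</filename> installed, a
   random (version 4) UUID can be generated like this (a sketch; the exact
   set of generation functions available depends on the module build):
<programlisting>
SELECT uuid_generate_v4();
</programlisting>
  </para>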
checks the input values for well-formedness, and there are support
functions to perform type-safe operations on it; see <xref
linkend="functions-xml">. Use of this data type requires the
- installation to have been built with <command>configure
+ installation to have been built with <command>configure
--with-libxml</>.
</para>
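  <para>
   For example, assuming a build with <command>configure --with-libxml</>,
   an <type>xml</type> value can be produced from character data like this:
<programlisting>
SELECT XMLPARSE (CONTENT 'abc&lt;foo&gt;bar&lt;/foo&gt;&lt;bar&gt;foo&lt;/bar&gt;');
</programlisting>
  </para>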
<para>
If the token is a text string, match up with possible strings:
</para>
-
+
<substeps>
<step>
<para>
abbreviation.
</para>
</step>
-
+
<step>
<para>
If not found, do a similar binary-search table lookup to match
</step>
</substeps>
</step>
-
+
<step>
<para>
When the token is a number or number field:
<step>
<para>
If there are eight or six digits,
- and if no other date fields have been previously read, then interpret
+ and if no other date fields have been previously read, then interpret
as a <quote>concatenated date</quote> (e.g.,
<literal>19990118</literal> or <literal>990118</literal>).
The interpretation is <literal>YYYYMMDD</> or <literal>YYMMDD</>.
and a year has already been read, then interpret as day of year.
</para>
</step>
-
+
<step>
<para>
If four or six digits and a year has already been read, then
about 1 day in 128 years.
</para>
- <para>
+ <para>
The accumulating calendar error prompted
Pope Gregory XIII to reform the calendar in accordance with
instructions from the Council of Trent.
the beginnings of the Chinese calendar can be traced back to the 14th
century BC. Legend has it that the Emperor Huangdi invented that
calendar in 2637 BC.
-
+
The People's Republic of China uses the Gregorian calendar
for civil purposes. The Chinese calendar is used for determining
festivals.
<para>
The <quote>Julian Date</quote> is unrelated to the <quote>Julian
- calendar</quote>.
+ calendar</quote>.
The Julian Date system was invented by the French scholar
Joseph Justus Scaliger (1540-1609)
and probably takes its name from Scaliger's father,
<para>
Here is an example. It assumes the developer tools are installed.
<programlisting>
-cc -c foo.c
+cc -c foo.c
cc -bundle -flat_namespace -undefined suppress -o foo.so foo.o
</programlisting>
</para>
</varlistentry>
<varlistentry>
- <term><systemitem class="osname">Tru64 UNIX</></term>
+ <term><systemitem class="osname">Tru64 UNIX</></term>
<indexterm><primary>Tru64 UNIX</><secondary>shared library</></>
<indexterm><primary>Digital UNIX</><see>Tru64 UNIX</></>
<listitem>
<tip>
<para>
- If this is too complicated for you, you should consider using
+ If this is too complicated for you, you should consider using
<ulink url="http://www.gnu.org/software/libtool/">
<productname>GNU Libtool</productname></ulink>,
which hides the platform differences behind a uniform interface.
<para>
It's possible that the ports do not update the main catalog file
- in <filename>/usr/local/share/sgml/catalog.ports</filename> or order
+ in <filename>/usr/local/share/sgml/catalog.ports</filename> or order
 isn't correct. Be sure to have the following lines at the beginning of the file:
<programlisting>
CATALOG "openjade/catalog"
</screen>
</para>
</listitem>
-
+
<listitem>
<para>
To make a <acronym>PDF</acronym>:
<para>
Norm Walsh offers a
<ulink url="http://nwalsh.com/emacs/docbookide/index.html">major mode</ulink>
- specifically for DocBook which also has font-lock and a number of features to
+ specifically for DocBook which also has font-lock and a number of features to
reduce typing.
</para>
</sect2>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Description</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Options</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Exit Status</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Usage</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Environment</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Files</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Diagnostics</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Notes</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Examples</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>History</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>See Also</term>
<listitem>
<literal>unix:postgresql://<replaceable>hostname</><optional>:<replaceable>port</></optional><optional>/<replaceable>dbname</></optional><optional>?<replaceable>options</></optional></literal>
</simpara>
</listitem>
-
+
<listitem>
<simpara>
an SQL string literal containing one of the above forms
a reference to a character variable containing one of the above forms (see examples)
</simpara>
</listitem>
-
+
<listitem>
<simpara>
<literal>DEFAULT</literal>
The function returns the parsed timestamp on success. On error,
<literal>PGTYPESInvalidTimestamp</literal> is returned and <varname>errno</> is
set to <literal>PGTYPES_TS_BAD_TIMESTAMP</>. See <xref linkend="PGTYPESInvalidTimestamp"> for important notes on this value.
-
</para>
<para>
In general, the input string can contain any combination of an allowed
You can use the following format specifiers for the format mask. The
format specifiers are the same ones that are used in the
<function>strftime</> function in <productname>libc</productname>. Any
- non-format specifier will be copied into the output buffer.
+ non-format specifier will be copied into the output buffer.
<!-- This is from the FreeBSD man page:
http://www.freebsd.org/cgi/man.cgi?query=strftime&apropos=0&sektion=3&manpath=FreeBSD+7.0-current&format=html
-->
<literal>%E*</literal> <literal>%O*</literal> - POSIX locale
extensions. The sequences
<literal>%Ec</literal>
- <literal>%EC</literal>
- <literal>%Ex</literal>
- <literal>%EX</literal>
- <literal>%Ey</literal>
- <literal>%EY</literal>
- <literal>%Od</literal>
+ <literal>%EC</literal>
+ <literal>%Ex</literal>
+ <literal>%EX</literal>
+ <literal>%Ey</literal>
+ <literal>%EY</literal>
+ <literal>%Od</literal>
<literal>%Oe</literal>
- <literal>%OH</literal>
- <literal>%OI</literal>
- <literal>%Om</literal>
- <literal>%OM</literal>
- <literal>%OS</literal>
- <literal>%Ou</literal>
- <literal>%OU</literal>
- <literal>%OV</literal>
- <literal>%Ow</literal>
- <literal>%OW</literal>
- <literal>%Oy</literal>
+ <literal>%OH</literal>
+ <literal>%OI</literal>
+ <literal>%Om</literal>
+ <literal>%OM</literal>
+ <literal>%OS</literal>
+ <literal>%Ou</literal>
+ <literal>%OU</literal>
+ <literal>%OV</literal>
+ <literal>%Ow</literal>
+ <literal>%OW</literal>
+ <literal>%Oy</literal>
are supposed to provide alternative representations.
</para>
<para>
<note>
<para>
On Windows, if the <application>ecpg</> libraries and an application are
- compiled with different flags, this function call will crash the
- application because the internal representation of the
+ compiled with different flags, this function call will crash the
+ application because the internal representation of the
 <literal>FILE</> pointers differs. Specifically,
- multithreaded/single-threaded, release/debug, and static/dynamic
+ multithreaded/single-threaded, release/debug, and static/dynamic
flags should be the same for the library and all applications using
that library.
</para>
<function>ECPGget_PGconn(const char *<replaceable>connection_name</replaceable>)
</function> returns the library database connection handle identified by the given name.
If <replaceable>connection_name</replaceable> is set to <literal>NULL</literal>, the current
- connection handle is returned. If no connection handle can be identified, the function returns
+ connection handle is returned. If no connection handle can be identified, the function returns
<literal>NULL</literal>. The returned connection handle can be used to call any other functions
from <application>libpq</application>, if necessary.
</para>