From bc499687641a021e0dac3e146611b5a553cf0c5b Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Sat, 21 Sep 2002 18:32:54 +0000 Subject: [PATCH] Add more appropriate markup. --- doc/src/sgml/array.sgml | 12 +- doc/src/sgml/charset.sgml | 32 +- doc/src/sgml/client-auth.sgml | 12 +- doc/src/sgml/datatype.sgml | 26 +- doc/src/sgml/ddl.sgml | 12 +- doc/src/sgml/developer.sgml | 5 +- doc/src/sgml/dfunc.sgml | 62 +- doc/src/sgml/diskusage.sgml | 4 +- doc/src/sgml/ecpg.sgml | 21 +- doc/src/sgml/extend.sgml | 26 +- doc/src/sgml/features.sgml | 6 +- doc/src/sgml/func.sgml | 758 ++++++++++++------------ doc/src/sgml/indices.sgml | 8 +- doc/src/sgml/installation.sgml | 13 +- doc/src/sgml/jdbc.sgml | 38 +- doc/src/sgml/libpgtcl.sgml | 6 +- doc/src/sgml/libpq.sgml | 26 +- doc/src/sgml/maintenance.sgml | 10 +- doc/src/sgml/monitoring.sgml | 24 +- doc/src/sgml/mvcc.sgml | 4 +- doc/src/sgml/perform.sgml | 6 +- doc/src/sgml/plperl.sgml | 8 +- doc/src/sgml/plpgsql.sgml | 96 +-- doc/src/sgml/plpython.sgml | 11 +- doc/src/sgml/pltcl.sgml | 32 +- doc/src/sgml/programmer.sgml | 3 +- doc/src/sgml/ref/alter_table.sgml | 6 +- doc/src/sgml/ref/alter_user.sgml | 4 +- doc/src/sgml/ref/begin.sgml | 4 +- doc/src/sgml/ref/cluster.sgml | 4 +- doc/src/sgml/ref/clusterdb.sgml | 38 +- doc/src/sgml/ref/copy.sgml | 20 +- doc/src/sgml/ref/create_aggregate.sgml | 4 +- doc/src/sgml/ref/create_conversion.sgml | 4 +- doc/src/sgml/ref/create_database.sgml | 6 +- doc/src/sgml/ref/create_function.sgml | 10 +- doc/src/sgml/ref/create_index.sgml | 6 +- doc/src/sgml/ref/create_opclass.sgml | 28 +- doc/src/sgml/ref/create_operator.sgml | 89 +-- doc/src/sgml/ref/create_schema.sgml | 6 +- doc/src/sgml/ref/create_type.sgml | 18 +- doc/src/sgml/ref/create_view.sgml | 4 +- doc/src/sgml/ref/createdb.sgml | 32 +- doc/src/sgml/ref/createlang.sgml | 25 +- doc/src/sgml/ref/createuser.sgml | 40 +- doc/src/sgml/ref/drop_conversion.sgml | 4 +- doc/src/sgml/ref/drop_opclass.sgml | 4 +- doc/src/sgml/ref/dropdb.sgml | 23 +- doc/src/sgml/ref/droplang.sgml | 23 +- doc/src/sgml/ref/dropuser.sgml | 17 +- doc/src/sgml/ref/initdb.sgml | 44 +- doc/src/sgml/ref/insert.sgml | 6 +- doc/src/sgml/ref/listen.sgml | 8 +- doc/src/sgml/ref/lock.sgml | 8 +- doc/src/sgml/ref/notify.sgml | 12 +- doc/src/sgml/ref/pg_ctl-ref.sgml | 4 +- doc/src/sgml/ref/postmaster.sgml | 19 +- doc/src/sgml/ref/psql-ref.sgml | 191 +++--- doc/src/sgml/ref/reindex.sgml | 7 +- doc/src/sgml/ref/reset.sgml | 6 +- doc/src/sgml/ref/select.sgml | 24 +- doc/src/sgml/ref/set.sgml | 30 +- doc/src/sgml/ref/set_session_auth.sgml | 4 +- doc/src/sgml/ref/unlisten.sgml | 8 +- doc/src/sgml/ref/vacuum.sgml | 4 +- doc/src/sgml/ref/vacuumdb.sgml | 50 +- doc/src/sgml/regress.sgml | 8 +- doc/src/sgml/release.sgml | 33 +- doc/src/sgml/rules.sgml | 29 +- doc/src/sgml/runtime.sgml | 14 +- doc/src/sgml/syntax.sgml | 24 +- doc/src/sgml/trigger.sgml | 288 ++++----- doc/src/sgml/xaggr.sgml | 4 +- doc/src/sgml/xfunc.sgml | 257 ++++---- doc/src/sgml/xindex.sgml | 30 +- doc/src/sgml/xoper.sgml | 42 +- doc/src/sgml/xplang.sgml | 10 +- 77 files changed, 1489 insertions(+), 1355 deletions(-) diff --git a/doc/src/sgml/array.sgml b/doc/src/sgml/array.sgml index aeb238d63ba..deca39f0ae2 100644 --- a/doc/src/sgml/array.sgml +++ b/doc/src/sgml/array.sgml @@ -1,4 +1,4 @@ - + Arrays @@ -251,8 +251,8 @@ SELECT * FROM sal_emp WHERE pay_by_quarter **= 10000; A limitation of the present array implementation is that individual - elements of an array cannot be SQL NULLs. 
The entire array can be set - to NULL, but you can't have an array with some elements NULL and some + elements of an array cannot be SQL null values. The entire array can be set + to null, but you can't have an array with some elements null and some not. Fixing this is on the to-do list. @@ -267,7 +267,7 @@ SELECT * FROM sal_emp WHERE pay_by_quarter **= 10000; around the array value plus delimiter characters between adjacent items. The delimiter character is usually a comma (,) but can be something else: it is determined by the typdelim setting - for the array's element type. (Among the standard datatypes provided + for the array's element type. (Among the standard data types provided in the PostgreSQL distribution, type box uses a semicolon (;) but all the others use comma.) In a multidimensional array, each dimension (row, plane, @@ -300,8 +300,8 @@ SELECT * FROM sal_emp WHERE pay_by_quarter **= 10000; if they are empty strings or contain curly braces, delimiter characters, double quotes, backslashes, or white space. Double quotes and backslashes embedded in element values will be backslash-escaped. For numeric - datatypes it is safe to assume that double quotes will never appear, but - for textual datatypes one should be prepared to cope with either presence + data types it is safe to assume that double quotes will never appear, but + for textual data types one should be prepared to cope with either presence or absence of quotes. (This is a change in behavior from pre-7.2 PostgreSQL releases.) diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml index febe4d37326..a3d8a52aef2 100644 --- a/doc/src/sgml/charset.sgml +++ b/doc/src/sgml/charset.sgml @@ -1,4 +1,4 @@ - + Localization</> @@ -405,27 +405,27 @@ perl: warning: Falling back to the standard locale ("C"). </row> <row> <entry><literal>LATIN1</literal></entry> - <entry>ISO 8859-1 ECMA-94 Latin Alphabet No.1</entry> + <entry>ISO 8859-1 <acronym>ECMA</>-94 Latin Alphabet No.1</entry> </row> <row> <entry><literal>LATIN2</literal></entry> - <entry>ISO 8859-2 ECMA-94 Latin Alphabet No.2</entry> + <entry>ISO 8859-2 <acronym>ECMA</>-94 Latin Alphabet No.2</entry> </row> <row> <entry><literal>LATIN3</literal></entry> - <entry>ISO 8859-3 ECMA-94 Latin Alphabet No.3</entry> + <entry>ISO 8859-3 <acronym>ECMA</>-94 Latin Alphabet No.3</entry> </row> <row> <entry><literal>LATIN4</literal></entry> - <entry>ISO 8859-4 ECMA-94 Latin Alphabet No.4</entry> + <entry>ISO 8859-4 <acronym>ECMA</>-94 Latin Alphabet No.4</entry> </row> <row> <entry><literal>LATIN5</literal></entry> - <entry>ISO 8859-9 ECMA-128 Latin Alphabet No.5</entry> + <entry>ISO 8859-9 <acronym>ECMA</>-128 Latin Alphabet No.5</entry> </row> <row> <entry><literal>LATIN6</literal></entry> - <entry>ISO 8859-10 ECMA-144 Latin Alphabet No.6</entry> + <entry>ISO 8859-10 <acronym>ECMA</>-144 Latin Alphabet No.6</entry> </row> <row> <entry><literal>LATIN7</literal></entry> @@ -441,23 +441,23 @@ perl: warning: Falling back to the standard locale ("C"). 
</row> <row> <entry><literal>LATIN10</literal></entry> - <entry>ISO 8859-16 ASRO SR 14111 Latin Alphabet No.10</entry> + <entry>ISO 8859-16 <acronym>ASRO</> SR 14111 Latin Alphabet No.10</entry> </row> <row> <entry><literal>ISO-8859-5</literal></entry> - <entry>ECMA-113 Latin/Cyrillic</entry> + <entry><acronym>ECMA</>-113 Latin/Cyrillic</entry> </row> <row> <entry><literal>ISO-8859-6</literal></entry> - <entry>ECMA-114 Latin/Arabic</entry> + <entry><acronym>ECMA</>-114 Latin/Arabic</entry> </row> <row> <entry><literal>ISO-8859-7</literal></entry> - <entry>ECMA-118 Latin/Greek</entry> + <entry><acronym>ECMA</>-118 Latin/Greek</entry> </row> <row> <entry><literal>ISO-8859-8</literal></entry> - <entry>ECMA-121 Latin/Hebrew</entry> + <entry><acronym>ECMA</>-121 Latin/Hebrew</entry> </row> <row> <entry><literal>KOI8</literal></entry> @@ -477,7 +477,7 @@ perl: warning: Falling back to the standard locale ("C"). </row> <row> <entry><literal>TCVN</literal></entry> - <entry>Vietnamese TCVN-5712(Windows CP1258)</entry> + <entry>Vietnamese <acronym>TCVN</>-5712 (Windows CP1258)</entry> </row> <row> <entry><literal>WIN874</literal></entry> @@ -500,7 +500,7 @@ perl: warning: Falling back to the standard locale ("C"). <important> <para> - Not all APIs supports all the encodings listed above. For example, the + Not all <acronym>API</>s supports all the encodings listed above. For example, the <productname>PostgreSQL</> JDBC driver does not support <literal>MULE_INTERNAL</>, <literal>LATIN6</>, <literal>LATIN8</>, and <literal>LATIN10</>. @@ -572,7 +572,7 @@ $ <userinput>psql -l</userinput> <para> <productname>PostgreSQL</productname> supports an automatic encoding conversion between server and client for some - encodings. The conversion info is stored in pg_converson system + encodings. The conversion info is stored in <literal>pg_conversion</> system catalog. You can create a new conversion by using <command>CREATE CONVERSION</command>. PostgreSQL comes with some predefined conversions. They are listed in <xref @@ -878,7 +878,7 @@ RESET CLIENT_ENCODING; <para> Using client_encoding variable. - If client_encoding variable in postgresql.conf is set, that + If the <varname>client_encoding</> variable in <filename>postgresql.conf</> is set, that client encoding is automatically selected when a connection to the server is made. (This can subsequently be overridden using any of the other methods mentioned above.) diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml index 6e1d76522a1..750885c29b6 100644 --- a/doc/src/sgml/client-auth.sgml +++ b/doc/src/sgml/client-auth.sgml @@ -1,5 +1,5 @@ <!-- -$Header: /cvsroot/pgsql/doc/src/sgml/client-auth.sgml,v 1.38 2002/09/18 20:09:31 petere Exp $ +$Header: /cvsroot/pgsql/doc/src/sgml/client-auth.sgml,v 1.39 2002/09/21 18:32:52 petere Exp $ --> <chapter id="client-authentication"> @@ -261,7 +261,7 @@ hostssl <replaceable>database</replaceable> <replaceable>user</replaceable> < <term><literal>password</></term> <listitem> <para> - Same as "md5", but the password is sent in cleartext over the + Same as "md5", but the password is sent in clear text over the network. This should not be used on untrusted networks. See <xref linkend="auth-password"> for details. </para> @@ -314,8 +314,10 @@ hostssl <replaceable>database</replaceable> <replaceable>user</replaceable> < <para> For local connections, this only works on machines that - support Unix-domain socket credentials (currently Linux, - FreeBSD, NetBSD, and BSD/OS). 
+ support Unix-domain socket credentials (currently + <systemitem class=osname>Linux</>, <systemitem + class=osname>FreeBSD</>, <systemitem class=osname>NetBSD</>, + and <systemitem class=osname>BSD/OS</>). </para> <para> @@ -504,7 +506,7 @@ local db1,db2,@demodbs all md5 if you trust every user on every machine that is allowed to connect to the postmaster by the <filename>pg_hba.conf</> lines that specify <literal>trust</>. It is seldom reasonable to use <literal>trust</> - for any TCP connections other than those from localhost (127.0.0.1). + for any TCP connections other than those from <systemitem>localhost</> (127.0.0.1). </para> </sect2> diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 28d3fcb7ede..c162669131a 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -1,5 +1,5 @@ <!-- -$Header: /cvsroot/pgsql/doc/src/sgml/datatype.sgml,v 1.103 2002/09/18 21:35:20 tgl Exp $ +$Header: /cvsroot/pgsql/doc/src/sgml/datatype.sgml,v 1.104 2002/09/21 18:32:52 petere Exp $ --> <chapter id="datatype"> @@ -703,7 +703,7 @@ CREATE TABLE <replaceable class="parameter">tablename</replaceable> ( implied <literal>UNIQUE</literal>. This is no longer automatic. If you wish a serial column to be <literal>UNIQUE</literal> or a <literal>PRIMARY KEY</literal> it must now be specified, same as with - any other datatype. + any other data type. </para></note> </sect2> </sect1> @@ -2028,7 +2028,7 @@ January 8 04:05:06 1999 PST <listitem> <para> The default time zone is specified as a constant integer offset - from GMT/UTC. It is not possible to adapt to daylight-saving + from <acronym>GMT</>/<acronym>UTC</>. It is not possible to adapt to daylight-saving time when doing date/time arithmetic across <acronym>DST</acronym> boundaries. </para> @@ -3025,14 +3025,14 @@ SELECT * FROM test; <row> <entry><type>regproc</></entry> - <entry>pg_proc</entry> + <entry><structname>pg_proc</></entry> <entry>Function name</entry> <entry>sum</entry> </row> <row> <entry><type>regprocedure</></entry> - <entry>pg_proc</entry> + <entry><structname>pg_proc</></entry> <entry>Function with argument types</entry> <entry>sum(int4)</entry> </row> @@ -3098,7 +3098,7 @@ SELECT * FROM test; <para> Another identifier type used by the system is <type>xid</>, or transaction - (abbreviated xact) identifier. This is the datatype of the system columns + (abbreviated <abbrev>xact</>) identifier. This is the data type of the system columns <structfield>xmin</> and <structfield>xmax</>. Transaction identifiers are 32-bit quantities. In a long-lived database it is possible for transaction IDs to wrap around. This @@ -3110,7 +3110,7 @@ SELECT * FROM test; <para> A third identifier type used by the system is <type>cid</>, or command - identifier. This is the datatype of the system columns + identifier. This is the data type of the system columns <structfield>cmin</> and <structfield>cmax</>. Command identifiers are also 32-bit quantities. This creates a hard limit of 2<superscript>32</> (4 billion) SQL commands within a single @@ -3121,7 +3121,7 @@ SELECT * FROM test; <para> A final identifier type used by the system is <type>tid</>, or tuple - identifier. This is the datatype of the system column + identifier. This is the data type of the system column <structfield>ctid</>. A tuple ID is a pair (block number, tuple index within block) that identifies the physical location of the tuple within its table. 
@@ -3172,10 +3172,10 @@ SELECT * FROM test; The <productname>PostgreSQL</productname> type system contains a number of special-purpose entries that are collectively called <firstterm>pseudo-types</>. A pseudo-type cannot be used as a column - datatype, but it can be used to declare a function's argument or result + data type, but it can be used to declare a function's argument or result type. Each of the available pseudo-types is useful in situations where a function's behavior does not correspond to simply taking or returning - a value of a specific SQL datatype. + a value of a specific SQL data type. </para> <para> @@ -3198,12 +3198,12 @@ SELECT * FROM test; <row> <entry><type>any</></entry> - <entry>Indicates that a function accepts any input datatype whatever</entry> + <entry>Indicates that a function accepts any input data type whatever</entry> </row> <row> <entry><type>anyarray</></entry> - <entry>Indicates that a function accepts any array datatype</entry> + <entry>Indicates that a function accepts any array data type</entry> </row> <row> @@ -3243,7 +3243,7 @@ SELECT * FROM test; <para> Functions coded in C (whether built-in or dynamically loaded) may be - declared to accept or return any of these pseudo datatypes. It is up to + declared to accept or return any of these pseudo data types. It is up to the function author to ensure that the function will behave safely when a pseudo-type is used as an argument type. </para> diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml index ff40feeac92..0f10263df0f 100644 --- a/doc/src/sgml/ddl.sgml +++ b/doc/src/sgml/ddl.sgml @@ -1,4 +1,4 @@ -<!-- $Header: /cvsroot/pgsql/doc/src/sgml/ddl.sgml,v 1.4 2002/09/12 22:05:35 petere Exp $ --> +<!-- $Header: /cvsroot/pgsql/doc/src/sgml/ddl.sgml,v 1.5 2002/09/21 18:32:52 petere Exp $ --> <chapter id="ddl"> <title>Data Definition @@ -765,7 +765,7 @@ CREATE TABLE order_items ( Note that these do not excuse you from observing any constraints. For example, if an action specifies SET DEFAULT but the default value would not satisfy the foreign key, the - deletion of the primary key wil fail. + deletion of the primary key will fail. @@ -1425,7 +1425,7 @@ SHOW search_path; - The first schema in the seach path that exists is the default + The first schema in the search path that exists is the default location for creating new objects. That is the reason that by default objects are created in the public schema. When objects are referenced in any other context without schema qualification @@ -1466,8 +1466,8 @@ SET search_path TO myschema; - The search path works in the same way for datatype names, function names, - and operator names as it does for table names. Datatype and function + The search path works in the same way for data type names, function names, + and operator names as it does for table names. Data type and function names can be qualified in exactly the same way as table names. If you need to write a qualified operator name in an expression, there is a special provision: you must write @@ -1519,7 +1519,7 @@ REVOKE CREATE ON public FROM PUBLIC; In addition to public and user-created schemas, each database contains a pg_catalog schema, which contains - the system tables and all the built-in datatypes, functions, and + the system tables and all the built-in data types, functions, and operators. pg_catalog is always effectively part of the search path. 
If it is not named explicitly in the path then it is implicitly searched before searching the path's diff --git a/doc/src/sgml/developer.sgml b/doc/src/sgml/developer.sgml index be1591e033d..d853a03ba2f 100644 --- a/doc/src/sgml/developer.sgml +++ b/doc/src/sgml/developer.sgml @@ -1,4 +1,4 @@ - + @@ -17,13 +17,14 @@ &sources; - &arch-dev; + &arch-dev; &catalogs; &protocol; &compiler; &bki; &page; &geqo; + &gist; &nls; &cvs; diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml index 1067651dacb..20393a8664e 100644 --- a/doc/src/sgml/dfunc.sgml +++ b/doc/src/sgml/dfunc.sgml @@ -1,5 +1,5 @@ @@ -58,7 +58,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/dfunc.sgml,v 1.21 2002/08/26 23:22:47 momji - BSD/OS + BSD/OS BSD/OS @@ -70,13 +70,13 @@ gcc -fpic -c foo.c ld -shared -o foo.so foo.o This is applicable as of version 4.0 of - BSD/OS. + BSD/OS. - FreeBSD + FreeBSD FreeBSD @@ -88,19 +88,19 @@ gcc -fpic -c foo.c gcc -shared -o foo.so foo.o This is applicable as of version 3.0 of - FreeBSD. + FreeBSD. - HP-UX + HP-UX HP-UX The compiler flag of the system compiler to create PIC is . When using - GCC it's . The + GCC it's . The linker flag for shared libraries is . So cc +z -c foo.c @@ -113,7 +113,7 @@ gcc -fpic -c foo.c ld -b -o foo.sl foo.o - HP-UX uses the extension + HP-UX uses the extension .sl for shared libraries, unlike most other systems. @@ -121,7 +121,7 @@ ld -b -o foo.sl foo.o - IRIX + IRIX IRIX @@ -137,7 +137,7 @@ ld -shared -o foo.so foo.o - Linux + Linux Linux @@ -156,7 +156,21 @@ cc -shared -o foo.so foo.o - NetBSD + MacOS X + MacOS X + + + Here is a sample. It assumes the developer tools are installed. + +cc -c foo.c +cc -bundle -flat_namespace -undefined suppress -o foo.so foo.o + + + + + + + NetBSD NetBSD @@ -174,7 +188,7 @@ gcc -shared -o foo.so foo.o - OpenBSD + OpenBSD OpenBSD @@ -190,30 +204,16 @@ ld -Bshareable -o foo.so foo.o - OS X - OS X - - - Here is a sample. It assumes the developer tools are installed. - -cc -c foo.c -cc -bundle -flat_namespace -undefined suppress -o foo.so foo.o - - - - - - - Solaris + Solaris Solaris The compiler flag to create PIC is with the Sun compiler and - with GCC. To + with GCC. To link shared libraries, the compiler option is with either compiler or alternatively - with GCC. + with GCC. cc -KPIC -c foo.c cc -G -o foo.so foo.o @@ -228,7 +228,7 @@ gcc -G -o foo.so foo.o - Tru64 UNIX + Tru64 UNIX Tru64 UNIX Digital UNIXTru64 UNIX @@ -247,7 +247,7 @@ ld -shared -expect_unresolved '*' -o foo.so foo.o - UnixWare + UnixWare UnixWare diff --git a/doc/src/sgml/diskusage.sgml b/doc/src/sgml/diskusage.sgml index d6897e58ee6..d7e5cb27945 100644 --- a/doc/src/sgml/diskusage.sgml +++ b/doc/src/sgml/diskusage.sgml @@ -1,5 +1,5 @@ @@ -16,7 +16,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/diskusage.sgml,v 1.4 2002/07/05 00:14:16 mo Each table has a primary heap disk file where most of the data is stored. To store long column values, there is also a TOAST file associated with the table, named based on the - table's oid (actually pg_class.relfilenode), and an index on the + table's OID (actually pg_class.relfilenode), and an index on the TOAST table. There also may be indexes associated with the base table. diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index 6dc7399198b..d0700b78a6b 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -1,5 +1,5 @@ @@ -158,8 +158,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/ecpg.sgml,v 1.36 2002/01/20 22:19:55 petere exec sql include sqlca; - in the include section of your file. 
This will define a struct and - a variable with the name sqlca as follows: + in the include section of your file. This will define a struct and + a variable with the name sqlca as follows: struct sqlca { @@ -198,7 +198,7 @@ struct sqlca If an no error occurred in the last SQL statement. - sqlca.sqlcode will be 0 (ECPG_NO_ERROR). If + sqlca.sqlcode will be 0 (ECPG_NO_ERROR). If sqlca.sqlcode is less that zero, this is a serious error, like the database definition does not match the query. If it is greater than zero, it is a normal error like the @@ -620,8 +620,9 @@ exec sql end declare section; - The special types VARCHAR and VARCHAR2 are converted into a named struct - for every variable. A declaration like: + The special types VARCHAR and + VARCHAR2 are converted into a named struct for + every variable. A declaration like: VARCHAR var[180]; @@ -1023,10 +1024,10 @@ ECPGdo(__LINE__, NULL, "select res from mytable where index = ? ", - ECPGt_EOIT + ECPGt_EOIT - An enum telling that there are no more input variables. + An enum telling that there are no more input variables. @@ -1042,10 +1043,10 @@ ECPGdo(__LINE__, NULL, "select res from mytable where index = ? ", - ECPGt_EORT + ECPGt_EORT - An enum telling that there are no more variables. + An enum telling that there are no more variables. diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml index 87245378fa0..60cfeb2e34f 100644 --- a/doc/src/sgml/extend.sgml +++ b/doc/src/sgml/extend.sgml @@ -1,5 +1,5 @@ @@ -135,51 +135,51 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.16 2002/07/30 17:34:37 tgl - pg_database + pg_database databases - pg_class + pg_class tables - pg_attribute + pg_attribute table columns - pg_index + pg_index indexes - pg_proc + pg_proc procedures/functions - pg_type + pg_type data types (both base and complex) - pg_operator + pg_operator operators - pg_aggregate + pg_aggregate aggregate functions - pg_am + pg_am access methods - pg_amop + pg_amop access method operators - pg_amproc + pg_amproc access method support functions - pg_opclass + pg_opclass access method operator classes diff --git a/doc/src/sgml/features.sgml b/doc/src/sgml/features.sgml index 9bb1bbf0bb9..c125c50aad9 100644 --- a/doc/src/sgml/features.sgml +++ b/doc/src/sgml/features.sgml @@ -1,5 +1,5 @@ @@ -1089,12 +1089,12 @@ $Header: /cvsroot/pgsql/doc/src/sgml/features.sgml,v 2.6 2002/08/04 06:17:29 tho F831-01 - Updateable scrollable cursors + Updatable scrollable cursors F831-02 - Updateable ordered cursors + Updatable ordered cursors diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 5eaa1d3c30b..544d070a85c 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -1,5 +1,5 @@ @@ -74,7 +74,7 @@ PostgreSQL documentation NOT - SQL uses a three-valued Boolean logic where NULL represents + SQL uses a three-valued Boolean logic where the null value represents unknown. Observe the following truth tables: @@ -261,7 +261,7 @@ PostgreSQL documentation - To check whether a value is or is not NULL, use the constructs + To check whether a value is or is not null, use the constructs expression IS NULL expression IS NOT NULL @@ -276,16 +276,16 @@ PostgreSQL documentation Do not write expression = NULL - because NULL is not equal to NULL. (NULL represents - an unknown value, and it is not known whether two unknown values are - equal.) + because NULL is not equal to + NULL. (The null value represents an unknown value, + and it is not known whether two unknown values are equal.) 
Some applications may (incorrectly) require that expression = NULL returns true if expression evaluates to - the NULL value. To support these applications, the run-time option + the null value. To support these applications, the run-time option transform_null_equals can be turned on (e.g., SET transform_null_equals TO ON;). PostgreSQL will then convert @@ -305,8 +305,8 @@ PostgreSQL documentation expression IS NOT UNKNOWN These are similar to IS NULL in that they will - always return TRUE or FALSE, never NULL, even when the operand is NULL. - A NULL input is treated as the logical value UNKNOWN. + always return true or false, never a null value, even when the operand is null. + A null input is treated as the logical value unknown. @@ -973,687 +973,687 @@ PostgreSQL documentation - ascii_to_mic - SQL_ASCII - MULE_INTERNAL + ascii_to_mic + SQL_ASCII + MULE_INTERNAL - ascii_to_utf_8 - SQL_ASCII - UNICODE + ascii_to_utf_8 + SQL_ASCII + UNICODE - big5_to_euc_tw - BIG5 - EUC_TW + big5_to_euc_tw + BIG5 + EUC_TW - big5_to_mic - BIG5 - MULE_INTERNAL + big5_to_mic + BIG5 + MULE_INTERNAL - big5_to_utf_8 - BIG5 - UNICODE + big5_to_utf_8 + BIG5 + UNICODE - euc_cn_to_mic - EUC_CN - MULE_INTERNAL + euc_cn_to_mic + EUC_CN + MULE_INTERNAL - euc_cn_to_utf_8 - EUC_CN - UNICODE + euc_cn_to_utf_8 + EUC_CN + UNICODE - euc_jp_to_mic - EUC_JP - MULE_INTERNAL + euc_jp_to_mic + EUC_JP + MULE_INTERNAL - euc_jp_to_sjis - EUC_JP - SJIS + euc_jp_to_sjis + EUC_JP + SJIS - euc_jp_to_utf_8 - EUC_JP - UNICODE + euc_jp_to_utf_8 + EUC_JP + UNICODE - euc_kr_to_mic - EUC_KR - MULE_INTERNAL + euc_kr_to_mic + EUC_KR + MULE_INTERNAL - euc_kr_to_utf_8 - EUC_KR - UNICODE + euc_kr_to_utf_8 + EUC_KR + UNICODE - euc_tw_to_big5 - EUC_TW - BIG5 + euc_tw_to_big5 + EUC_TW + BIG5 - euc_tw_to_mic - EUC_TW - MULE_INTERNAL + euc_tw_to_mic + EUC_TW + MULE_INTERNAL - euc_tw_to_utf_8 - EUC_TW - UNICODE + euc_tw_to_utf_8 + EUC_TW + UNICODE - gb18030_to_utf_8 - GB18030 - UNICODE + gb18030_to_utf_8 + GB18030 + UNICODE - gbk_to_utf_8 - GBK - UNICODE + gbk_to_utf_8 + GBK + UNICODE - iso_8859_10_to_utf_8 - LATIN6 - UNICODE + iso_8859_10_to_utf_8 + LATIN6 + UNICODE - iso_8859_13_to_utf_8 - LATIN7 - UNICODE + iso_8859_13_to_utf_8 + LATIN7 + UNICODE - iso_8859_14_to_utf_8 - LATIN8 - UNICODE + iso_8859_14_to_utf_8 + LATIN8 + UNICODE - iso_8859_15_to_utf_8 - LATIN9 - UNICODE + iso_8859_15_to_utf_8 + LATIN9 + UNICODE - iso_8859_16_to_utf_8 - LATIN10 - UNICODE + iso_8859_16_to_utf_8 + LATIN10 + UNICODE - iso_8859_1_to_mic - LATIN1 - MULE_INTERNAL + iso_8859_1_to_mic + LATIN1 + MULE_INTERNAL - iso_8859_1_to_utf_8 - LATIN1 - UNICODE + iso_8859_1_to_utf_8 + LATIN1 + UNICODE - iso_8859_2_to_mic - LATIN2 - MULE_INTERNAL + iso_8859_2_to_mic + LATIN2 + MULE_INTERNAL - iso_8859_2_to_utf_8 - LATIN2 - UNICODE + iso_8859_2_to_utf_8 + LATIN2 + UNICODE - iso_8859_2_to_win1250 - LATIN2 - WIN1250 + iso_8859_2_to_win1250 + LATIN2 + WIN1250 - iso_8859_3_to_mic - LATIN3 - MULE_INTERNAL + iso_8859_3_to_mic + LATIN3 + MULE_INTERNAL - iso_8859_3_to_utf_8 - LATIN3 - UNICODE + iso_8859_3_to_utf_8 + LATIN3 + UNICODE - iso_8859_4_to_mic - LATIN4 - MULE_INTERNAL + iso_8859_4_to_mic + LATIN4 + MULE_INTERNAL - iso_8859_4_to_utf_8 - LATIN4 - UNICODE + iso_8859_4_to_utf_8 + LATIN4 + UNICODE - iso_8859_5_to_koi8r - ISO_8859_5 - KOI8 + iso_8859_5_to_koi8r + ISO_8859_5 + KOI8 - iso_8859_5_to_mic - ISO_8859_5 - MULE_INTERNAL + iso_8859_5_to_mic + ISO_8859_5 + MULE_INTERNAL - iso_8859_5_to_utf_8 - ISO_8859_5 - UNICODE + iso_8859_5_to_utf_8 + ISO_8859_5 + UNICODE - iso_8859_5_to_win1251 - 
ISO_8859_5 - WIN + iso_8859_5_to_win1251 + ISO_8859_5 + WIN - iso_8859_5_to_win866 - ISO_8859_5 - ALT + iso_8859_5_to_win866 + ISO_8859_5 + ALT - iso_8859_6_to_utf_8 - ISO_8859_6 - UNICODE + iso_8859_6_to_utf_8 + ISO_8859_6 + UNICODE - iso_8859_7_to_utf_8 - ISO_8859_7 - UNICODE + iso_8859_7_to_utf_8 + ISO_8859_7 + UNICODE - iso_8859_8_to_utf_8 - ISO_8859_8 - UNICODE + iso_8859_8_to_utf_8 + ISO_8859_8 + UNICODE - iso_8859_9_to_utf_8 - LATIN5 - UNICODE + iso_8859_9_to_utf_8 + LATIN5 + UNICODE - johab_to_utf_8 - JOHAB - UNICODE + johab_to_utf_8 + JOHAB + UNICODE - koi8r_to_iso_8859_5 - KOI8 - ISO_8859_5 + koi8r_to_iso_8859_5 + KOI8 + ISO_8859_5 - koi8r_to_mic - KOI8 - MULE_INTERNAL + koi8r_to_mic + KOI8 + MULE_INTERNAL - koi8r_to_utf_8 - KOI8 - UNICODE + koi8r_to_utf_8 + KOI8 + UNICODE - koi8r_to_win1251 - KOI8 - WIN + koi8r_to_win1251 + KOI8 + WIN - koi8r_to_win866 - KOI8 - ALT + koi8r_to_win866 + KOI8 + ALT - mic_to_ascii - MULE_INTERNAL - SQL_ASCII + mic_to_ascii + MULE_INTERNAL + SQL_ASCII - mic_to_big5 - MULE_INTERNAL - BIG5 + mic_to_big5 + MULE_INTERNAL + BIG5 - mic_to_euc_cn - MULE_INTERNAL - EUC_CN + mic_to_euc_cn + MULE_INTERNAL + EUC_CN - mic_to_euc_jp - MULE_INTERNAL - EUC_JP + mic_to_euc_jp + MULE_INTERNAL + EUC_JP - mic_to_euc_kr - MULE_INTERNAL - EUC_KR + mic_to_euc_kr + MULE_INTERNAL + EUC_KR - mic_to_euc_tw - MULE_INTERNAL - EUC_TW + mic_to_euc_tw + MULE_INTERNAL + EUC_TW - mic_to_iso_8859_1 - MULE_INTERNAL - LATIN1 + mic_to_iso_8859_1 + MULE_INTERNAL + LATIN1 - mic_to_iso_8859_2 - MULE_INTERNAL - LATIN2 + mic_to_iso_8859_2 + MULE_INTERNAL + LATIN2 - mic_to_iso_8859_3 - MULE_INTERNAL - LATIN3 + mic_to_iso_8859_3 + MULE_INTERNAL + LATIN3 - mic_to_iso_8859_4 - MULE_INTERNAL - LATIN4 + mic_to_iso_8859_4 + MULE_INTERNAL + LATIN4 - mic_to_iso_8859_5 - MULE_INTERNAL - ISO_8859_5 + mic_to_iso_8859_5 + MULE_INTERNAL + ISO_8859_5 - mic_to_koi8r - MULE_INTERNAL - KOI8 + mic_to_koi8r + MULE_INTERNAL + KOI8 - mic_to_sjis - MULE_INTERNAL - SJIS + mic_to_sjis + MULE_INTERNAL + SJIS - mic_to_win1250 - MULE_INTERNAL - WIN1250 + mic_to_win1250 + MULE_INTERNAL + WIN1250 - mic_to_win1251 - MULE_INTERNAL - WIN + mic_to_win1251 + MULE_INTERNAL + WIN - mic_to_win866 - MULE_INTERNAL - ALT + mic_to_win866 + MULE_INTERNAL + ALT - sjis_to_euc_jp - SJIS - EUC_JP + sjis_to_euc_jp + SJIS + EUC_JP - sjis_to_mic - SJIS - MULE_INTERNAL + sjis_to_mic + SJIS + MULE_INTERNAL - sjis_to_utf_8 - SJIS - UNICODE + sjis_to_utf_8 + SJIS + UNICODE - tcvn_to_utf_8 - TCVN - UNICODE + tcvn_to_utf_8 + TCVN + UNICODE - uhc_to_utf_8 - UHC - UNICODE + uhc_to_utf_8 + UHC + UNICODE - utf_8_to_ascii - UNICODE - SQL_ASCII + utf_8_to_ascii + UNICODE + SQL_ASCII - utf_8_to_big5 - UNICODE - BIG5 + utf_8_to_big5 + UNICODE + BIG5 - utf_8_to_euc_cn - UNICODE - EUC_CN + utf_8_to_euc_cn + UNICODE + EUC_CN - utf_8_to_euc_jp - UNICODE - EUC_JP + utf_8_to_euc_jp + UNICODE + EUC_JP - utf_8_to_euc_kr - UNICODE - EUC_KR + utf_8_to_euc_kr + UNICODE + EUC_KR - utf_8_to_euc_tw - UNICODE - EUC_TW + utf_8_to_euc_tw + UNICODE + EUC_TW - utf_8_to_gb18030 - UNICODE - GB18030 + utf_8_to_gb18030 + UNICODE + GB18030 - utf_8_to_gbk - UNICODE - GBK + utf_8_to_gbk + UNICODE + GBK - utf_8_to_iso_8859_1 - UNICODE - LATIN1 + utf_8_to_iso_8859_1 + UNICODE + LATIN1 - utf_8_to_iso_8859_10 - UNICODE - LATIN6 + utf_8_to_iso_8859_10 + UNICODE + LATIN6 - utf_8_to_iso_8859_13 - UNICODE - LATIN7 + utf_8_to_iso_8859_13 + UNICODE + LATIN7 - utf_8_to_iso_8859_14 - UNICODE - LATIN8 + utf_8_to_iso_8859_14 + UNICODE + LATIN8 - utf_8_to_iso_8859_15 - UNICODE - LATIN9 + 
utf_8_to_iso_8859_15 + UNICODE + LATIN9 - utf_8_to_iso_8859_16 - UNICODE - LATIN10 + utf_8_to_iso_8859_16 + UNICODE + LATIN10 - utf_8_to_iso_8859_2 - UNICODE - LATIN2 + utf_8_to_iso_8859_2 + UNICODE + LATIN2 - utf_8_to_iso_8859_3 - UNICODE - LATIN3 + utf_8_to_iso_8859_3 + UNICODE + LATIN3 - utf_8_to_iso_8859_4 - UNICODE - LATIN4 + utf_8_to_iso_8859_4 + UNICODE + LATIN4 - utf_8_to_iso_8859_5 - UNICODE - ISO_8859_5 + utf_8_to_iso_8859_5 + UNICODE + ISO_8859_5 - utf_8_to_iso_8859_6 - UNICODE - ISO_8859_6 + utf_8_to_iso_8859_6 + UNICODE + ISO_8859_6 - utf_8_to_iso_8859_7 - UNICODE - ISO_8859_7 + utf_8_to_iso_8859_7 + UNICODE + ISO_8859_7 - utf_8_to_iso_8859_8 - UNICODE - ISO_8859_8 + utf_8_to_iso_8859_8 + UNICODE + ISO_8859_8 - utf_8_to_iso_8859_9 - UNICODE - LATIN5 + utf_8_to_iso_8859_9 + UNICODE + LATIN5 - utf_8_to_johab - UNICODE - JOHAB + utf_8_to_johab + UNICODE + JOHAB - utf_8_to_koi8r - UNICODE - KOI8 + utf_8_to_koi8r + UNICODE + KOI8 - utf_8_to_sjis - UNICODE - SJIS + utf_8_to_sjis + UNICODE + SJIS - utf_8_to_tcvn - UNICODE - TCVN + utf_8_to_tcvn + UNICODE + TCVN - utf_8_to_uhc - UNICODE - UHC + utf_8_to_uhc + UNICODE + UHC - utf_8_to_win1250 - UNICODE - WIN1250 + utf_8_to_win1250 + UNICODE + WIN1250 - utf_8_to_win1251 - UNICODE - WIN + utf_8_to_win1251 + UNICODE + WIN - utf_8_to_win1256 - UNICODE - WIN1256 + utf_8_to_win1256 + UNICODE + WIN1256 - utf_8_to_win866 - UNICODE - ALT + utf_8_to_win866 + UNICODE + ALT - utf_8_to_win874 - UNICODE - WIN874 + utf_8_to_win874 + UNICODE + WIN874 - win1250_to_iso_8859_2 - WIN1250 - LATIN2 + win1250_to_iso_8859_2 + WIN1250 + LATIN2 - win1250_to_mic - WIN1250 - MULE_INTERNAL + win1250_to_mic + WIN1250 + MULE_INTERNAL - win1250_to_utf_8 - WIN1250 - UNICODE + win1250_to_utf_8 + WIN1250 + UNICODE - win1251_to_iso_8859_5 - WIN - ISO_8859_5 + win1251_to_iso_8859_5 + WIN + ISO_8859_5 - win1251_to_koi8r - WIN - KOI8 + win1251_to_koi8r + WIN + KOI8 - win1251_to_mic - WIN - MULE_INTERNAL + win1251_to_mic + WIN + MULE_INTERNAL - win1251_to_utf_8 - WIN - UNICODE + win1251_to_utf_8 + WIN + UNICODE - win1251_to_win866 - WIN - ALT + win1251_to_win866 + WIN + ALT - win1256_to_utf_8 - WIN1256 - UNICODE + win1256_to_utf_8 + WIN1256 + UNICODE - win866_to_iso_8859_5 - ALT - ISO_8859_5 + win866_to_iso_8859_5 + ALT + ISO_8859_5 - win866_to_koi8r - ALT - KOI8 + win866_to_koi8r + ALT + KOI8 - win866_to_mic - ALT - MULE_INTERNAL + win866_to_mic + ALT + MULE_INTERNAL - win866_to_utf_8 - ALT - UNICODE + win866_to_utf_8 + ALT + UNICODE - win866_to_win1251 - ALT - WIN + win866_to_win1251 + ALT + WIN - win874_to_utf_8 - WIN874 - UNICODE + win874_to_utf_8 + WIN874 + UNICODE @@ -2389,12 +2389,12 @@ PostgreSQL documentation - Regular expressions (REs), as defined in + Regular expressions (REs), as defined in POSIX - 1003.2, come in two forms: modern REs (roughly those of + 1003.2, come in two forms: modern REs (roughly those of egrep; 1003.2 calls these - extended REs) and obsolete REs (roughly those of - ed; 1003.2 basic REs). + extended REs) and obsolete REs (roughly those of + ed; 1003.2 basic REs). PostgreSQL implements the modern form. @@ -2606,7 +2606,7 @@ PostgreSQL documentation - There is no particular limit on the length of REs, except insofar + There is no particular limit on the length of REs, except insofar as memory is limited. Memory usage is approximately linear in RE size, and largely insensitive to RE complexity, except for bounded repetitions. 
Bounded repetitions are implemented by macro @@ -4923,7 +4923,7 @@ SELECT setval('foo', 42, false); Next nextval() will return 4 - CASE + CASE CASE WHEN condition THEN result @@ -4945,7 +4945,7 @@ END condition is true then the value of the case expression is the result in the ELSE clause. If the ELSE clause is - omitted and no condition matches, the result is NULL. + omitted and no condition matches, the result is null. @@ -4999,7 +4999,7 @@ END all the values in the WHEN clauses until one is found that is equal. If no match is found, the result in the - ELSE clause (or NULL) is returned. This is similar + ELSE clause (or a null value) is returned. This is similar to the switch statement in C. @@ -5025,7 +5025,7 @@ END - COALESCE + COALESCE COALESCE(value @@ -5034,15 +5034,15 @@ END The COALESCE function returns the first of its - arguments that is not NULL. This is often useful to substitute a - default value for NULL values when data is retrieved for display, + arguments that is not null. This is often useful to substitute a + default value for null values when data is retrieved for display, for example: SELECT COALESCE(description, short_description, '(none)') ... - NULLIF + NULLIF nullif @@ -5054,7 +5054,7 @@ SELECT COALESCE(description, short_description, '(none)') ... - The NULLIF function returns NULL if and only + The NULLIF function returns a null value if and only if value1 and value2 are equal. Otherwise it returns value1. This can be used to perform the @@ -5169,7 +5169,7 @@ SELECT NULLIF(value, '(none)') ... current_schema returns the name of the schema that is - at the front of the search path (or NULL if the search path is + at the front of the search path (or a null value if the search path is empty). This is the schema that will be used for any tables or other named objects that are created without specifying a target schema. current_schemas(boolean) returns an array of the names of all @@ -5183,7 +5183,7 @@ SELECT NULLIF(value, '(none)') ... search path changing at runtime - The search path may be altered by a runtime-alterable GUC setting. The + The search path may be altered by a run-time setting. The command to use is SET SEARCH_PATH 'schema'[,'schema']... @@ -5425,7 +5425,7 @@ SELECT has_table_privilege('myschema.mytable', 'select'); can access a function in a particular way. The possibilities for its arguments are analogous to has_table_privilege. When specifying a function by a text string rather than by OID, - the allowed input is the same as for the regprocedure datatype. + the allowed input is the same as for the regprocedure data type. The desired access type must currently evaluate to EXECUTE. @@ -5527,7 +5527,7 @@ SELECT relname FROM pg_class WHERE pg_table_is_visible(oid); visibility check for types, functions, operators, and operator classes, respectively. For functions and operators, an object in the search path is visible if there is no object of the same name and argument - datatype(s) earlier in the path. For operator classes, + data type(s) earlier in the path. For operator classes, both name and associated index access method are considered. @@ -5657,7 +5657,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); These functions extract comments previously stored with the - COMMENT command. NULL is returned if + COMMENT command. A null value is returned if no comment can be found matching the specified parameters. 
@@ -5742,7 +5742,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); count(expression) Counts the input values for which the value of expression is not NULL. + class="parameter">expression is not null. The return value is of type bigint. @@ -5818,10 +5818,10 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); It should be noted that except for COUNT, - these functions return NULL when no rows are selected. In - particular, SUM of no rows returns NULL, not + these functions return a null value when no rows are selected. In + particular, SUM of no rows returns null, not zero as one might expect. COALESCE may be - used to substitute zero for NULL when necessary. + used to substitute zero for null when necessary. @@ -5992,7 +5992,7 @@ OR - As usual, NULLs in the expressions or subquery rows are combined per + As usual, null values in the expressions or subquery rows are combined per the normal rules of SQL Boolean expressions. Two rows are considered equal if all their corresponding members are non-null and equal; the rows are unequal if any corresponding members are non-null and unequal; @@ -6033,7 +6033,7 @@ AND x NOT IN y is equivalent to NOT (x IN y) in all - cases. However, NULLs are much more likely to trip up the novice when + cases. However, null values are much more likely to trip up the novice when working with NOT IN than when working with IN. It's best to express your condition positively if possible. @@ -6084,7 +6084,7 @@ AND - As usual, NULLs in the expressions or subquery rows are combined per + As usual, null values in the expressions or subquery rows are combined per the normal rules of SQL Boolean expressions. Two rows are considered equal if all their corresponding members are non-null and equal; the rows are unequal if any corresponding members are non-null and unequal; @@ -6153,7 +6153,7 @@ AND - As usual, NULLs in the expressions or subquery rows are combined per + As usual, null values in the expressions or subquery rows are combined per the normal rules of SQL Boolean expressions. Two rows are considered equal if all their corresponding members are non-null and equal; the rows are unequal if any corresponding members are non-null and unequal; @@ -6217,7 +6217,7 @@ AND - As usual, NULLs in the expressions or subquery rows are combined per + As usual, null values in the expressions or subquery rows are combined per the normal rules of SQL Boolean expressions. Two rows are considered equal if all their corresponding members are non-null and equal; the rows are unequal if any corresponding members are non-null and unequal; @@ -6247,7 +6247,7 @@ AND - As usual, NULLs in the expressions or subquery rows are combined per + As usual, null values in the expressions or subquery rows are combined per the normal rules of SQL Boolean expressions. Two rows are considered equal if all their corresponding members are non-null and equal; the rows are unequal if any corresponding members are non-null and unequal; diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml index e4812e70281..c70dc27dac9 100644 --- a/doc/src/sgml/indices.sgml +++ b/doc/src/sgml/indices.sgml @@ -1,4 +1,4 @@ - + Indexes @@ -182,7 +182,7 @@ CREATE INDEX name ON table Testing has shown PostgreSQL's hash indexes to be similar or slower - than btree indexes, and the index size and build time for hash + than B-tree indexes, and the index size and build time for hash indexes is much worse. Hash indexes also suffer poor performance under high concurrency. 
For these reasons, hash index use is discouraged. @@ -517,7 +517,7 @@ Subject: Re: [QUESTIONS] PRIMARY KEY | UNIQUE - Should not allow NULLs. + Should not allow null values. @@ -540,7 +540,7 @@ Subject: Re: [QUESTIONS] PRIMARY KEY | UNIQUE - NULLs are acceptable. + Null values are acceptable. diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index be27b9dddf8..be38c2f5801 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -1,4 +1,4 @@ - + <![%standalone-include[<productname>PostgreSQL</>]]> @@ -146,7 +146,7 @@ su - postgres *** You might have to rebuild your Perl installation. Refer to *** the documentation for details. </screen> - (If you don't follow the onscreen output you will merely notice + (If you don't follow the on-screen output you will merely notice the the PL/Perl library object will not be installed.) If you see this, you will have to re-build and install <productname>Perl</productname> manually to be able to build @@ -239,7 +239,7 @@ JAVACMD=$JAVA_HOME/bin/java <para> To enable Native Language Support (<acronym>NLS</acronym>), that is, the ability to display a program's messages in a language - other than English, you need an implementation of the Gettext + other than English, you need an implementation of the <application>Gettext</> <acronym>API</acronym>. Some operating systems have this built-in (e.g., <systemitem class="osname">Linux</>, <systemitem class="osname">NetBSD</>, <systemitem @@ -256,7 +256,8 @@ JAVACMD=$JAVA_HOME/bin/java <listitem> <para> - Kerberos, OpenSSL, or PAM, if you want to support + <application>Kerberos</>, <application>OpenSSL</>, or <application>PAM</>, + if you want to support authentication using these services. </para> </listitem> @@ -885,7 +886,7 @@ JAVACMD=$JAVA_HOME/bin/java <term><option>--without-readline</option></term> <listitem> <para> - Prevents the use of the Readline library. This disables + Prevents the use of the <application>Readline</> library. This disables command-line editing and history in <application>psql</application>, so it is not recommended. </para> @@ -896,7 +897,7 @@ JAVACMD=$JAVA_HOME/bin/java <term><option>--without-zlib</option></term> <listitem> <para> - Prevents the use of the Zlib library. This disables + Prevents the use of the <application>Zlib</> library. This disables compression support in <application>pg_dump</application>. This option is only intended for those rare systems where this library is not available. diff --git a/doc/src/sgml/jdbc.sgml b/doc/src/sgml/jdbc.sgml index 69220dd59c9..b784d5ea4d0 100644 --- a/doc/src/sgml/jdbc.sgml +++ b/doc/src/sgml/jdbc.sgml @@ -1,5 +1,5 @@ <!-- -$Header: /cvsroot/pgsql/doc/src/sgml/Attic/jdbc.sgml,v 1.37 2002/09/18 20:09:31 petere Exp $ +$Header: /cvsroot/pgsql/doc/src/sgml/Attic/jdbc.sgml,v 1.38 2002/09/21 18:32:53 petere Exp $ --> <chapter id="jdbc"> @@ -57,9 +57,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/jdbc.sgml,v 1.37 2002/09/18 20:09:31 instructions. After installation, the driver should be found in <filename><replaceable>PREFIX</>/share/java/postgresql.jar</filename>. The resulting driver will be built for the version of Java you are - running. If you build with a 1.1 JDK you will build a version + running. 
If you build with a 1.1 <acronym>JDK</> you will build a version that supports the JDBC 1 specification, if you build with a Java 2 - JDK (e.g., JDK 1.2 or JDK 1.3) you will build a version that + <acronym>JDK</> (e.g., <acronym>JDK</> 1.2 or <acronym>JDK</> 1.3) you will build a version that supports the JDBC 2 specification. </para> </sect2> @@ -545,7 +545,7 @@ st.close(); need to understand the limitations of each method. The <type>bytea</type> data type is not well suited for storing very large amounts of binary data. While a column of type - <type>bytea</type> can hold upto 1Gig of binary data, it would + <type>bytea</type> can hold up to 1 GB of binary data, it would require a huge amount of memory (<acronym>RAM</acronym>) to process such a large value. The Large Object method for storing binary data is better suited to storing very large values, @@ -838,7 +838,7 @@ Fastpath fp = ((org.postgresql.Connection)myconn).getFastpathAPI(); <formalpara> <title>Throws: - SQLException by Fastpath when initializing for first time + SQLException by Fastpath when initializing for first time @@ -1106,7 +1106,7 @@ public void addFunction(String name, This adds a function to our look-up table. User code should use the addFunctions method, which is based upon a query, - rather than hard coding the oid. The oid for a function is not + rather than hard coding the OID. The OID for a function is not guaranteed to remain static, even on different servers of the same version. @@ -1118,7 +1118,7 @@ public void addFunctions(ResultSet rs) throws SQLException This takes a ResultSet containing two columns. Column 1 - contains the function name, Column 2 the oid. It reads the + contains the function name, Column 2 the OID. It reads the entire ResultSet, loading the values into the function table. @@ -2154,21 +2154,21 @@ public int read(byte buf[], - buf + buf destination array - off + off offset within array - len + len number of bytes to read @@ -2202,21 +2202,21 @@ public void write(byte buf[], - buf + buf destination array - off + off offset within array - len + len number of bytes to write @@ -2350,21 +2350,21 @@ lobj = ((org.postgresql.Connection)myconn).getLargeObjectAPI(); - public static final int WRITE + public static final int WRITE This mode indicates we want to write to an object. - public static final int READ + public static final int READ This mode indicates we want to read an object. - public static final int READWRITE + public static final int READWRITE This mode is the default. It indicates we want read and write access to a large object. @@ -2382,8 +2382,8 @@ public LargeObject open(int oid) throws SQLException This opens an existing large object, based on its OID. This - method assumes that READ and WRITE access is required (the - default). + method assumes that READ and + WRITE access is required (the default). @@ -2404,7 +2404,7 @@ public int create() throws SQLException This creates a large object, returning its OID. - It defaults to READWRITE for the new object's attributes. + It defaults to READWRITE for the new object's attributes. diff --git a/doc/src/sgml/libpgtcl.sgml b/doc/src/sgml/libpgtcl.sgml index 3fa6c57055b..6ff9e6bebcf 100644 --- a/doc/src/sgml/libpgtcl.sgml +++ b/doc/src/sgml/libpgtcl.sgml @@ -1038,7 +1038,7 @@ pg_execute -array arrayVar Specifies the name of an array variable where result tuples are stored, indexed by the field names. -This is ignored if queryString is not a SELECT statement. For SELECT +This is ignored if queryString is not a SELECT statement. 
For SELECT statements, if this option is not used, result tuples values are stored in individual variables named according to the field names in the result. @@ -1118,7 +1118,7 @@ The number of tuples affected or returned by the query. If the query is not a SELECT statement, the query is executed and the number of tuples affected by the query is returned. If the query is an INSERT and a single tuple is inserted, the OID of the inserted tuple is -stored in the oidVar variable if the optional -oid +stored in the oidVar variable if the optional -oid argument is supplied. @@ -1166,7 +1166,7 @@ In the following examples, error checking with catch has been omitted for clarity. -Insert a row and save the OID in result_oid: +Insert a row and save the OID in result_oid: pg_execute -oid result_oid $pgconn "insert into mytable values (1)" diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index 03d76649908..480b08d2cf2 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -1,5 +1,5 @@ @@ -214,7 +214,7 @@ PGconn *PQconnectdb(const char *conninfo) environment variable (see ) is checked. If the environment variable is not set either, then hardwired defaults are used. - The return value is a pointer to an abstract struct + The return value is a pointer to an abstract struct representing the connection to the backend. @@ -457,7 +457,7 @@ struct PQconninfoOption Returns a connection options array. This may be used to determine all possible PQconnectdb options and their current default values. The return value points to an array of - PQconninfoOption structs, which ends with an entry having a NULL + PQconninfoOption structs, which ends with an entry having a NULL keyword pointer. Note that the default values (val fields) will depend on environment variables and other context. Callers must treat the connection options data as read-only. @@ -542,7 +542,7 @@ maintain the PGconn abstraction. Use the accessor func at the contents of PGconn. Avoid directly referencing the fields of the PGconn structure because they are subject to change in the future. (Beginning in PostgreSQL release 6.4, the -definition of struct PGconn is not even provided in libpq-fe.h. +definition of struct PGconn is not even provided in libpq-fe.h. If you have old code that accesses PGconn fields directly, you can keep using it by including libpq-int.h too, but you are encouraged to fix the code soon.) @@ -757,7 +757,7 @@ maintain the PGresult abstraction. Use the accessor fu at the contents of PGresult. Avoid directly referencing the fields of the PGresult structure because they are subject to change in the future. (Beginning in PostgreSQL 6.4, the -definition of struct PGresult is not even provided in libpq-fe.h. If you +definition of struct PGresult is not even provided in libpq-fe.h. If you have old code that accesses PGresult fields directly, you can keep using it by including libpq-int.h too, but you are encouraged to fix the code soon.) @@ -813,7 +813,7 @@ exposes a bug in the client software. PQresStatus - Converts the enumerated type returned by PQresultStatus into + Converts the enumerated type returned by PQresultStatus into a string constant describing the status code. char *PQresStatus(ExecStatusType status); @@ -868,7 +868,7 @@ This is libpq's internal routine to allocate and initialize an e PGresult object. It is exported because some applications find it useful to generate result objects (particularly objects with error status) themselves. 
If conn is not NULL and status indicates an error, -the connection's current errorMessage is copied into the PGresult. +the connection's current error message is copied into the PGresult. Note that PQclear should eventually be called on the object, just as with a PGresult returned by libpq itself. @@ -976,7 +976,7 @@ strings overlap. The from parameter points to an escaped string such as might be returned by PQgetvalue of a BYTEA column. PQunescapeBytea converts - this NUL terminated string representation into binary, filling a buffer. + this string representation into its binary representation, filling the supplied buffer. It returns a pointer to the buffer which is NULL on error, and the size of the buffer in to_length. The pointer may subsequently be used as an argument to the function @@ -1092,7 +1092,7 @@ int PQfsize(const PGresult *res, PQbinaryTuples - Returns 1 if the PGresult contains binary tuple data, + Returns 1 if the PGresult contains binary tuple data, 0 if it contains ASCII data. int PQbinaryTuples(const PGresult *res); @@ -1392,7 +1392,7 @@ PGresult *PQgetResult(PGconn *conn); indicating that the command is done. (If called when no command is active, PQgetResult will just return NULL at once.) Each non-NULL result from PQgetResult should be processed using - the same PGresult accessor functions previously described. + the same PGresult accessor functions previously described. Don't forget to free each result object with PQclear when done with it. Note that PQgetResult will block only if a query is active and the necessary response data has not yet been read by PQconsumeInput. @@ -1606,7 +1606,7 @@ typedef struct { } u; } PQArgBlock; - PQfn always returns a valid PGresult*. The resultStatus + PQfn always returns a valid PGresult*. The result status should be checked before the result is used. The caller is responsible for freeing the PGresult with PQclear when it is no longer needed. @@ -1647,7 +1647,7 @@ messages can be detected by calling PQnotifies. Returns the next notification from a list of unhandled notification messages received from the backend. Returns NULL if there are no pending notifications. Once a notification is - returned from PQnotifies, it is considered handled and will be + returned from PQnotifies, it is considered handled and will be removed from the list of notifications. PGnotify* PQnotifies(PGconn *conn); @@ -1932,7 +1932,7 @@ void PQuntrace(PGconn *conn) PQsetNoticeProcessor notice processor -Control reporting of notice and warning messages generated by libpq. +Control reporting of notice and warning messages generated by libpq. typedef void (*PQnoticeProcessor) (void *arg, const char *message); diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml index 939eba14237..f762c6ad44f 100644 --- a/doc/src/sgml/maintenance.sgml +++ b/doc/src/sgml/maintenance.sgml @@ -1,5 +1,5 @@ @@ -380,14 +380,14 @@ VACUUM - PostgreSQL is unable to reuse btree index + PostgreSQL is unable to reuse B-tree index pages in certain cases. The problem is that if indexed rows are deleted, those index pages can only be reused by rows with similar values. For example, if indexed rows are deleted and newly inserted/updated rows have much higher values, the new rows can't use the index space made available by the deleted rows. Instead, such new rows must be placed on new index pages. In such cases, disk - space used by the index will grow indefinately, even if + space used by the index will grow indefinitely, even if VACUUM is run frequently. 
@@ -426,7 +426,7 @@ VACUUM The simplest production-grade approach to managing log output is to send it all to syslog and let syslog deal with file rotation. To do this, set - syslog to 2 (log to syslog only) in + syslog to 2 (log to syslog only) in postgresql.conf. Then you can send a SIGHUP signal to the syslog daemon whenever you want to force it to start writing a new log @@ -434,7 +434,7 @@ VACUUM - On many systems, however, syslog is not very reliable, particularly + On many systems, however, syslog is not very reliable, particularly with large log messages; it may truncate or drop messages just when you need them the most. You may find it more useful to pipe the postmaster's stderr to some type of diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 6e801174e88..b20d0bf9e52 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -1,5 +1,5 @@ @@ -172,7 +172,7 @@ postgres: user database host pgstat_stat_interval (500 milliseconds by default). So the displayed totals lag behind actual activity. @@ -204,7 +204,7 @@ postgres: user database host pg_stat_activity - One row per server process, showing process PID, database, + One row per server process, showing process PID, database, user, and current query. The current query column is only available to superusers; for others it reads as NULL. (Note that because of the collector's reporting delay, current query will only be up-to-date @@ -274,13 +274,13 @@ postgres: user database host pg_statio_sys_tables - Same as pg_statio_all_tables, except that only system tables + Same as pg_statio_all_tables, except that only system tables are shown. pg_statio_user_tables - Same as pg_statio_all_tables, except that only user tables + Same as pg_statio_all_tables, except that only user tables are shown. @@ -293,13 +293,13 @@ postgres: user database host pg_statio_sys_indexes - Same as pg_statio_all_indexes, except that only indexes on + Same as pg_statio_all_indexes, except that only indexes on system tables are shown. pg_statio_user_indexes - Same as pg_statio_all_indexes, except that only indexes on + Same as pg_statio_all_indexes, except that only indexes on user tables are shown. @@ -312,14 +312,14 @@ postgres: user database host pg_statio_sys_sequences - Same as pg_statio_all_sequences, except that only system + Same as pg_statio_all_sequences, except that only system sequences are shown. (Presently, no system sequences are defined, so this view is always empty.) pg_statio_user_sequences - Same as pg_statio_all_sequences, except that only user + Same as pg_statio_all_sequences, except that only user sequences are shown. @@ -540,7 +540,7 @@ postgres: user database host The function pg_stat_get_backend_idset provides a convenient way to generate one row for each active backend. For - example, to show the PIDs and current queries of all backends: + example, to show the PIDs and current queries of all backends: SELECT pg_stat_get_backend_pid(S.backendid) AS procpid, @@ -657,10 +657,10 @@ FROM (SELECT pg_stat_get_backend_idset() AS backendid) AS S; transaction xid The ID of a transaction, or NULL if the lockable object - is a relation. Every transaction holds ExclusiveLock on its + is a relation. Every transaction holds an exclusive lock on its transaction ID for its entire duration. 
If one transaction finds it necessary to wait specifically for another transaction, it - does so by attempting to acquire ShareLock on the other transaction + does so by attempting to acquire share lock on the other transaction ID. That will succeed only when the other transaction terminates and releases its locks. diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml index 0a7f2f8fa94..5d27af7e8a8 100644 --- a/doc/src/sgml/mvcc.sgml +++ b/doc/src/sgml/mvcc.sgml @@ -1,5 +1,5 @@ @@ -498,7 +498,7 @@ ERROR: Can't serialize access due to concurrent update EXCLUSIVE, EXCLUSIVE, and ACCESS EXCLUSIVE lock modes. This mode protects a table against - concurrent schema changes and VACUUMs. + concurrent schema changes and VACUUM runs. diff --git a/doc/src/sgml/perform.sgml b/doc/src/sgml/perform.sgml index 17c8a348d0f..05727523f00 100644 --- a/doc/src/sgml/perform.sgml +++ b/doc/src/sgml/perform.sgml @@ -1,5 +1,5 @@ @@ -527,11 +527,11 @@ regression=# - Controlling the Planner with Explicit JOINs + Controlling the Planner with Explicit <literal>JOIN</> Clauses Beginning with PostgreSQL 7.1 it is possible - to control the query planner to some extent by using explicit JOIN + to control the query planner to some extent by using explicit JOIN syntax. To see why this matters, we first need some background. diff --git a/doc/src/sgml/plperl.sgml b/doc/src/sgml/plperl.sgml index 97981bf603e..b17892a4f66 100644 --- a/doc/src/sgml/plperl.sgml +++ b/doc/src/sgml/plperl.sgml @@ -1,5 +1,5 @@ @@ -162,7 +162,7 @@ SELECT name, empcomp(employee) FROM employee; Access to the database itself from your Perl function can be done via an experimental module DBD::PgSPI - (also available at CPAN + (also available at CPAN mirror sites). This module makes available a DBI-compliant database-handle named $pg_dbh that can be used to perform queries @@ -230,14 +230,14 @@ CREATE FUNCTION badfunc() RETURNS integer AS ' restricted --- for example, one might want a Perl function that sends mail. To handle these cases, PL/Perl can also be installed as an untrusted language (usually called - PL/PerlU). In this case the full Perl language is + PL/PerlU). In this case the full Perl language is available. If the createlang program is used to install the language, the language name plperlu will select the untrusted PL/Perl variant. - The writer of a PL/PerlU function must take care that the function + The writer of a PL/PerlU function must take care that the function cannot be used to do anything unwanted, since it will be able to do anything that could be done by a user logged in as the database administrator. Note that the database system allows only database diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml index a4cff1d3af5..2e7f2a9e583 100644 --- a/doc/src/sgml/plpgsql.sgml +++ b/doc/src/sgml/plpgsql.sgml @@ -1,5 +1,5 @@ @@ -126,7 +126,7 @@ END; The PL/pgSQL EXECUTE statement is not related to the EXECUTE statement supported by the PostgreSQL backend. The backend - EXECUTE statement cannot be used within PL/PgSQL functions (and + EXECUTE statement cannot be used within PL/pgSQL functions (and is not needed). @@ -849,7 +849,7 @@ SELECT INTO target expressions - If the SELECT query returns zero rows, NULLs are assigned to the + If the SELECT query returns zero rows, null values are assigned to the target(s). If the SELECT query returns multiple rows, the first row is assigned to the target(s) and the rest are discarded. 
(Note that the first row is not well-defined unless you've @@ -877,8 +877,8 @@ IF NOT FOUND THEN END IF; - Alternatively, you can use the IS NULL (or ISNULL) conditional to - test for NULLity of a RECORD/ROW result. Note that there is no + Alternatively, you can use the IS NULL (or ISNULL) conditional to + test for whether a RECORD/ROW result is null. Note that there is no way to tell whether any additional rows might have been discarded. @@ -1205,7 +1205,7 @@ RETURN NEXT expression; RETURN NEXT does not actually return from the function; it simply saves away the value of the expression (or record or row variable, - as appropriate for the datatype being returned). + as appropriate for the data type being returned). Execution then continues with the next statement in the PL/pgSQL function. As successive RETURN NEXT commands are executed, the result set is built up. A final @@ -1213,37 +1213,50 @@ RETURN NEXT expression; the function. - - - The current implementation of RETURN NEXT for PL/PgSQL stores - the entire result set before returning from the function, as - discussed above. That means that if a PL/PgSQL function - produces a very large result set, performance may be poor: data - will be written to disk to avoid memory exhaustion, but the - function itself will not return until the entire - result set has been generated. A future version of PL/PgSQL may - allow users to allow users to define set-returning functions - that do not have this limitation. Currently, the point at which - data begins being written to disk is controlled by the - - - + + + The current implementation of RETURN NEXT for + PL/pgSQL stores the entire result set before + returning from the function, as discussed above. That means that + if a PL/pgSQL function produces a very large result set, + performance may be poor: data will be written to disk to avoid + memory exhaustion, but the function itself will not return until + the entire result set has been generated. A future version of + PL/pgSQL may allow users to allow users to define set-returning + functions that do not have this limitation. Currently, the point + at which data begins being written to disk is controlled by the + SORT_MEM configuration variable. Administrators who + have sufficient memory to store larger result sets in memory + should consider increasing this parameter. + + + Conditionals - IF statements let you execute commands based on - certain conditions. - PL/pgSQL has four forms of IF: IF-THEN, IF-THEN-ELSE, - IF-THEN-ELSE IF, and IF-THEN-ELSIF-THEN-ELSE. + IF statements let you execute commands based on + certain conditions. PL/pgSQL has four forms of + IF: + + + IF ... THEN + + + IF ... THEN ... ELSE + + + IF ... THEN ... ELSE IF and + + + IF ... THEN ... ELSIF ... THEN ... ELSE + + - IF-THEN + <literal>IF-THEN</> @@ -1265,7 +1278,7 @@ END IF; - IF-THEN-ELSE + <literal>IF-THEN-ELSE</> @@ -1300,7 +1313,7 @@ END IF; - IF-THEN-ELSE IF + <literal>IF-THEN-ELSE IF</> IF statements can be nested, as in the following example: @@ -1326,7 +1339,7 @@ END IF; - IF-THEN-ELSIF-ELSE + <literal>IF-THEN-ELSIF-ELSE</> @@ -1344,10 +1357,11 @@ IF boolean-expression THEN END IF; - IF-THEN-ELSIF-ELSE provides a more convenient method of checking - many alternatives in one statement. Formally it is equivalent - to nested IF-THEN-ELSE-IF-THEN commands, but only one END IF - is needed. + IF-THEN-ELSIF-ELSE provides a more convenient + method of checking many alternatives in one statement. 
+ Formally it is equivalent to nested + IF-THEN-ELSE-IF-THEN commands, but only one + END IF is needed. @@ -1673,7 +1687,7 @@ OPEN unbound-cursor FOR SELECT ...; to execute. The cursor cannot be open already, and it must have been declared as an unbound cursor (that is, as a simple refcursor variable). The SELECT query is treated - in the same way as other SELECTs in PL/pgSQL: + in the same way as other SELECT statements in PL/pgSQL: PL/pgSQL variable names are substituted, and the query plan is cached for possible re-use. @@ -1706,7 +1720,7 @@ OPEN curs1 FOR EXECUTE ''SELECT * FROM '' || quote_ident($1); - OPENing a bound cursor + Opening a bound cursor @@ -1802,7 +1816,7 @@ CLOSE curs1; caller. This is used to return multiple rows or columns from the function. The function opens the cursor and returns the cursor name to the caller. The caller can then FETCH rows from the - cursor. The cursor can be CLOSEd by the caller, or it will be + cursor. The cursor can be closed by the caller, or it will be closed automatically when the transaction closes. @@ -2067,7 +2081,7 @@ RAISE EXCEPTION ''Inexistent ID --> %'',user_id; Data type array of text; the arguments from the CREATE TRIGGER statement. The index counts from 0 and can be given as an expression. Invalid - indices (< 0 or >= tg_nargs) result in a NULL value. + indices (< 0 or >= tg_nargs) result in a null value. @@ -2387,7 +2401,7 @@ LANGUAGE 'plpgsql'; 2 - In assignments, SELECTs, to delimit strings, etc. + In assignments, SELECT statements, to delimit strings, etc. a_output := ''Blah''; SELECT * FROM users WHERE f_name=''foobar''; diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml index 110c2a826e5..e22d292ccb9 100644 --- a/doc/src/sgml/plpython.sgml +++ b/doc/src/sgml/plpython.sgml @@ -1,4 +1,4 @@ - + PL/Python - Python Procedural Language @@ -221,9 +221,12 @@ plan = plpy.prepare("SOME OTHER QUERY") execution environment, further restricts it to prevent the use of the file open call, and allows only modules from a specific list to be imported. Presently, that list includes: - array, bisect, binascii, calendar, cmath, codecs, errno, marshal, - math, md5, mpz, operator, pcre, pickle, random, re, regex, sre, - sha, string, StringIO, struct, time, whrandom, and zlib. + array, <>bisect, <>binascii, <>calendar, + <>cmath, <>codecs, <>errno, <>marshal, <>math, + <>md5, <>mpz, <>operator, <>pcre, <>pickle, + <>random, <>re, <>regex, <>sre, <>sha, <>string, + <>StringIO, <>struct, <>time, <>whrandom, and + <>zlib. diff --git a/doc/src/sgml/pltcl.sgml b/doc/src/sgml/pltcl.sgml index a81e5ce52e9..ed724153bb4 100644 --- a/doc/src/sgml/pltcl.sgml +++ b/doc/src/sgml/pltcl.sgml @@ -1,5 +1,5 @@ @@ -50,22 +50,22 @@ $Header: /cvsroot/pgsql/doc/src/sgml/pltcl.sgml,v 2.21 2002/08/22 00:01:40 tgl E Sometimes it is desirable to write Tcl functions that are not restricted to safe Tcl --- for example, one might want a Tcl function that sends - mail. To handle these cases, there is a variant of PL/Tcl called PL/TclU + mail. To handle these cases, there is a variant of PL/Tcl called PL/TclU (for untrusted Tcl). This is the exact same language except that a full - Tcl interpreter is used. If PL/TclU is used, it must be + Tcl interpreter is used. If PL/TclU is used, it must be installed as an untrusted procedural language so that only - database superusers can create functions in it. The writer of a PL/TclU + database superusers can create functions in it. 
The writer of a PL/TclU function must take care that the function cannot be used to do anything unwanted, since it will be able to do anything that could be done by a user logged in as the database administrator. - The shared object for the PL/Tcl and PL/TclU call handlers is + The shared object for the PL/Tcl and PL/TclU call handlers is automatically built and installed in the PostgreSQL library directory if Tcl/Tk support is specified in the configuration step of the installation procedure. To install - PL/Tcl and/or PL/TclU in a particular database, use the + PL/Tcl and/or PL/TclU in a particular database, use the createlang script, for example createlang pltcl dbname or createlang pltclu dbname. @@ -81,7 +81,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/pltcl.sgml,v 2.21 2002/08/22 00:01:40 tgl E PL/Tcl Functions and Arguments - To create a function in the PL/Tcl language, use the standard syntax + To create a function in the PL/Tcl language, use the standard syntax CREATE FUNCTION funcname (argument-types) RETURNS return-type AS ' @@ -89,8 +89,8 @@ CREATE FUNCTION funcname (argument-types ' LANGUAGE 'pltcl'; - PL/TclU is the same, except that the language should be specified as - 'pltclu'. + PL/TclU is the same, except that the language should be specified as + pltclu. @@ -197,7 +197,7 @@ CREATE FUNCTION overpaid_2 (EMP) RETURNS bool AS ' all PL/Tcl procedures executed in one backend share the same safe Tcl interpreter. So, any global Tcl variable is accessible to all PL/Tcl procedure calls, and will persist for the duration of the - SQL client connection. (Note that PL/TclU functions likewise share + SQL client connection. (Note that PL/TclU functions likewise share global data, but they are in a different Tcl interpreter and cannot communicate with PL/Tcl functions.) @@ -248,7 +248,7 @@ CREATE FUNCTION overpaid_2 (EMP) RETURNS bool AS ' setting up the query as a cursor and then saying FETCH n. - If the query is a SELECT statement, the values of the SELECT's + If the query is a SELECT statement, the values of the statement's result columns are placed into Tcl variables named after the columns. If the -array option is given, the column values are instead stored into the named associative array, with the SELECT @@ -267,7 +267,7 @@ spi_exec "SELECT count(*) AS cnt FROM pg_proc" will set the Tcl variable $cnt to the number of rows in - the pg_proc system catalog. + the pg_proc system catalog. If the optional loop-body argument is given, it is @@ -337,7 +337,7 @@ spi_exec -array C "SELECT * FROM pg_class" { The optional value for -nulls is a string of spaces and 'n' characters telling spi_execp - which of the arguments are NULLs. If given, it must have exactly the + which of the arguments are null values. If given, it must have exactly the same length as the value-list. If it is not given, all the argument values are non-NULL. @@ -504,7 +504,7 @@ SELECT 'doesn''t' AS ret A Tcl list of the table field names, prefixed with an empty list - element. So looking up an element name in the list with Tcl's + element. So looking up an element name in the list with Tcl's lsearch command returns the element's number starting with 1 for the first column, the same way the fields are customarily numbered in PostgreSQL. @@ -639,7 +639,7 @@ CREATE TRIGGER trig_mytab_modcount BEFORE INSERT OR UPDATE ON mytab While the unknown module could actually contain any initialization script you need, it normally defines a Tcl unknown procedure that is invoked whenever Tcl does - not recognize an invoked procedure name. 
PL/Tcl's standard version + not recognize an invoked procedure name. PL/Tcl's standard version of this procedure tries to find a module in pltcl_modules that will define the required procedure. If one is found, it is loaded into the interpreter, and then execution is allowed to @@ -674,7 +674,7 @@ CREATE TRIGGER trig_mytab_modcount BEFORE INSERT OR UPDATE ON mytab differ. Tcl, however, requires all procedure names to be distinct. PL/Tcl deals with this by making the internal Tcl procedure names contain the object - ID of the procedure's pg_proc row as part of their name. Thus, + ID of the procedure's pg_proc row as part of their name. Thus, PostgreSQL functions with the same name and different argument types will be different Tcl procedures too. This is not normally a concern for a PL/Tcl programmer, but it might be visible diff --git a/doc/src/sgml/programmer.sgml b/doc/src/sgml/programmer.sgml index de0960f894e..5494ce2788c 100644 --- a/doc/src/sgml/programmer.sgml +++ b/doc/src/sgml/programmer.sgml @@ -1,5 +1,5 @@ @@ -60,7 +60,6 @@ PostgreSQL Programmer's Guide. &rules; &xindex; &indexcost; - &gist; &trigger; &spi; diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml index 61409861cf6..7326bbe01c1 100644 --- a/doc/src/sgml/ref/alter_table.sgml +++ b/doc/src/sgml/ref/alter_table.sgml @@ -1,5 +1,5 @@ @@ -249,7 +249,7 @@ ALTER TABLE table These forms change whether a column is marked to allow NULL values or to reject NULL values. You may only SET NOT NULL - when the table contains no NULLs in the column. + when the table contains no null values in the column. @@ -279,7 +279,7 @@ ALTER TABLE table compressible data. EXTERNAL is for external, uncompressed data and EXTENDED is for external, compressed data. EXTENDED is the default for all - datatypes that support it. The use of EXTERNAL will + data types that support it. The use of EXTERNAL will make substring operations on a TEXT column faster, at the penalty of increased storage space. diff --git a/doc/src/sgml/ref/alter_user.sgml b/doc/src/sgml/ref/alter_user.sgml index 3cc82371aa9..345379cd3a9 100644 --- a/doc/src/sgml/ref/alter_user.sgml +++ b/doc/src/sgml/ref/alter_user.sgml @@ -1,5 +1,5 @@ @@ -226,7 +226,7 @@ ALTER USER manuel VALID UNTIL 'Jan 31 2030'; Change a user's valid until date, specifying that his authorization should expire at midday on 4th May 1998 using - the time zone which is one hour ahead of UTC: + the time zone which is one hour ahead of UTC: ALTER USER chris VALID UNTIL 'May 4 12:00:00 1998 +1'; diff --git a/doc/src/sgml/ref/begin.sgml b/doc/src/sgml/ref/begin.sgml index 11ca82e6d49..6a02672e7d0 100644 --- a/doc/src/sgml/ref/begin.sgml +++ b/doc/src/sgml/ref/begin.sgml @@ -1,5 +1,5 @@ @@ -125,7 +125,7 @@ WARNING: BEGIN: already a transaction in progress Guide for details.) In SERIALIZABLE mode queries will see only changes committed before the entire - transaction began (actually, before execution of the first DML statement + transaction began (actually, before execution of the first DML statement in the transaction). diff --git a/doc/src/sgml/ref/cluster.sgml b/doc/src/sgml/ref/cluster.sgml index ff489fe4c90..e7aaa522811 100644 --- a/doc/src/sgml/ref/cluster.sgml +++ b/doc/src/sgml/ref/cluster.sgml @@ -1,5 +1,5 @@ @@ -169,7 +169,7 @@ SELECT columnlist INTO TABLE PostgreSQL sorting code in the ORDER BY clause to create the desired order; this is usually much - faster than an indexscan for + faster than an index scan for unordered data. 
You then drop the old table, use ALTER TABLE...RENAME to rename newtable to the old name, and diff --git a/doc/src/sgml/ref/clusterdb.sgml b/doc/src/sgml/ref/clusterdb.sgml index 1eae14e9672..9ffcb2ac340 100644 --- a/doc/src/sgml/ref/clusterdb.sgml +++ b/doc/src/sgml/ref/clusterdb.sgml @@ -1,5 +1,5 @@ @@ -71,8 +71,8 @@ PostgreSQL documentation - -d dbname - --dbname dbname + + Specifies the name of the database to be clustered. @@ -86,8 +86,8 @@ PostgreSQL documentation - -a - --all + + Cluster all databases. @@ -96,8 +96,8 @@ PostgreSQL documentation - -t table - --table table + + Clusters table only. @@ -114,8 +114,8 @@ PostgreSQL documentation - -h host - --host host + + Specifies the host name of the machine on which the @@ -127,8 +127,8 @@ PostgreSQL documentation - -p port - --port port + + Specifies the Internet TCP/IP port or local Unix domain socket file @@ -139,8 +139,8 @@ PostgreSQL documentation - -U username - --username username + + User name to connect as @@ -149,8 +149,8 @@ PostgreSQL documentation - -W - --password + + Force password prompt. @@ -159,8 +159,8 @@ PostgreSQL documentation - -e - --echo + + Echo the commands that clusterdb generates @@ -170,8 +170,8 @@ PostgreSQL documentation - -q - --quiet + + Do not display a response. diff --git a/doc/src/sgml/ref/copy.sgml b/doc/src/sgml/ref/copy.sgml index 677ec5792a9..8aa5b90a9ed 100644 --- a/doc/src/sgml/ref/copy.sgml +++ b/doc/src/sgml/ref/copy.sgml @@ -1,5 +1,5 @@ @@ -477,7 +477,7 @@ order is detected here. int32 bit mask to denote important aspects of the file format. Bits are -numbered from 0 (LSB) to 31 (MSB) --- note that this field is stored +numbered from 0 (LSB) to 31 (MSB) --- note that this field is stored with source's endianness, as are all subsequent integer fields. Bits 16-31 are reserved to denote critical file format issues; a reader should abort if it finds an unexpected bit set in this range. Bits 0-15 @@ -539,8 +539,8 @@ is left for a later release. Each tuple begins with an int16 count of the number of fields in the tuple. (Presently, all tuples in a table will have the same count, but that might not always be true.) Then, repeated for each field in the -tuple, there is an int16 typlen word possibly followed by field data. -The typlen field is interpreted thus: +tuple, there is an int16 typlen word possibly followed by field data. +The typlen field is interpreted thus: @@ -557,7 +557,7 @@ The typlen field is interpreted thus: Field is a fixed-length data type. Exactly N - bytes of data follow the typlen word. + bytes of data follow the typlen word. @@ -566,8 +566,8 @@ The typlen field is interpreted thus: -1 - Field is a varlena data type. The next four - bytes are the varlena header, which contains + Field is a varlena data type. The next four + bytes are the varlena header, which contains the total value length including itself. @@ -585,8 +585,8 @@ The typlen field is interpreted thus: -For non-NULL fields, the reader can check that the typlen matches the -expected typlen for the destination column. This provides a simple +For non-NULL fields, the reader can check that the typlen matches the +expected typlen for the destination column. This provides a simple but very useful check that the data is as expected. @@ -602,7 +602,7 @@ you from moving a binary file across machines). If OIDs are included in the dump, the OID field immediately follows the field-count word. It is a normal field except that it's not included -in the field-count. 
In particular it has a typlen --- this will allow +in the field-count. In particular it has a typlen --- this will allow handling of 4-byte vs 8-byte OIDs without too much pain, and will allow OIDs to be shown as NULL if that ever proves desirable. diff --git a/doc/src/sgml/ref/create_aggregate.sgml b/doc/src/sgml/ref/create_aggregate.sgml index 21c547f6b67..93a459b40a4 100644 --- a/doc/src/sgml/ref/create_aggregate.sgml +++ b/doc/src/sgml/ref/create_aggregate.sgml @@ -1,5 +1,5 @@ @@ -233,7 +233,7 @@ CREATE AGGREGATE If the state transition function is not strict, then it will be called unconditionally at each input value, and must deal with NULL inputs and NULL transition values for itself. This allows the aggregate - author to have full control over the aggregate's handling of NULLs. + author to have full control over the aggregate's handling of null values. diff --git a/doc/src/sgml/ref/create_conversion.sgml b/doc/src/sgml/ref/create_conversion.sgml index ce45c2d4d43..20abdf3bd48 100644 --- a/doc/src/sgml/ref/create_conversion.sgml +++ b/doc/src/sgml/ref/create_conversion.sgml @@ -1,4 +1,4 @@ - + @@ -139,7 +139,7 @@ CREATE [DEFAULT] CONVERSION conversion_name Examples - To create a conversion from encoding UNICODE to LATIN1 using myfunc: + To create a conversion from encoding UNICODE to LATIN1 using myfunc: CREATE CONVERSION myconv FOR 'UNICODE' TO 'LATIN1' FROM myfunc; diff --git a/doc/src/sgml/ref/create_database.sgml b/doc/src/sgml/ref/create_database.sgml index b7879c3a0bb..990207b3fc9 100644 --- a/doc/src/sgml/ref/create_database.sgml +++ b/doc/src/sgml/ref/create_database.sgml @@ -1,5 +1,5 @@ @@ -114,7 +114,7 @@ CREATE DATABASE name ERROR: user 'username' is not allowed to create/drop databases - You must have the special CREATEDB privilege to create databases. + You must have the special CREATEDB privilege to create databases. See . @@ -186,7 +186,7 @@ CREATE DATABASE name Normally, the creator becomes the owner of the new database. Superusers can create databases owned by other users using the diff --git a/doc/src/sgml/ref/create_function.sgml b/doc/src/sgml/ref/create_function.sgml index 105a4a16103..6c737f26141 100644 --- a/doc/src/sgml/ref/create_function.sgml +++ b/doc/src/sgml/ref/create_function.sgml @@ -1,5 +1,5 @@ @@ -77,7 +77,7 @@ CREATE [ OR REPLACE ] FUNCTION name Depending on the implementation language it may also be allowed to specify pseudo-types such as cstring. Pseudo-types indicate that the actual argument type is either - incompletely specified, or outside the set of ordinary SQL datatypes. + incompletely specified, or outside the set of ordinary SQL data types. @@ -171,7 +171,7 @@ CREATE [ OR REPLACE ] FUNCTION name CALLED ON NULL INPUT (the default) indicates that the function will be called normally when some of its arguments are null. It is then the function author's - responsibility to check for NULLs if necessary and respond + responsibility to check for null values if necessary and respond appropriately. @@ -247,7 +247,7 @@ CREATE [ OR REPLACE ] FUNCTION name - isStrict + isStrict Equivalent to STRICT or RETURNS NULL ON NULL INPUT @@ -256,7 +256,7 @@ CREATE [ OR REPLACE ] FUNCTION name - isCachable + isCachable isCachable is an obsolete equivalent of diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml index 2144eaa2dd3..bfca7dfee82 100644 --- a/doc/src/sgml/ref/create_index.sgml +++ b/doc/src/sgml/ref/create_index.sgml @@ -1,5 +1,5 @@ @@ -266,7 +266,7 @@ ERROR: Cannot create index: 'index_name' already exists. 
The expression used in the WHERE clause may refer only to columns of the underlying table (but it can use all columns, - not only the one(s) being indexed). Presently, sub-SELECTs and + not only the one(s) being indexed). Presently, subqueries and aggregate expressions are also forbidden in WHERE. @@ -331,7 +331,7 @@ ERROR: Cannot create index: 'index_name' already exists. Testing has shown PostgreSQL's hash indexes to be similar or slower - than btree indexes, and the index size and build time for hash + than B-tree indexes, and the index size and build time for hash indexes is much worse. Hash indexes also suffer poor performance under high concurrency. For these reasons, hash index use is discouraged. diff --git a/doc/src/sgml/ref/create_opclass.sgml b/doc/src/sgml/ref/create_opclass.sgml index 6c5c0fdb194..74ac1a8d202 100644 --- a/doc/src/sgml/ref/create_opclass.sgml +++ b/doc/src/sgml/ref/create_opclass.sgml @@ -1,5 +1,5 @@ @@ -52,8 +52,8 @@ CREATE OPERATOR CLASS name [ DEFAUL If present, the operator class will become the default index - operator class for its datatype. At most one operator class - can be the default for a specific datatype and access method. + operator class for its data type. At most one operator class + can be the default for a specific data type and access method. @@ -61,7 +61,7 @@ CREATE OPERATOR CLASS name [ DEFAUL data_type - The column datatype that this operator class is for. + The column data type that this operator class is for. @@ -95,10 +95,10 @@ CREATE OPERATOR CLASS name [ DEFAUL type - The input datatype(s) of an operator, or NONE to - signify a left-unary or right-unary operator. The input datatypes + The input data type(s) of an operator, or NONE to + signify a left-unary or right-unary operator. The input data types may be omitted in the normal case where they are the same as the - operator class's datatype. + operator class's data type. @@ -135,7 +135,7 @@ CREATE OPERATOR CLASS name [ DEFAUL parameter_types - The parameter datatype(s) of the function. + The parameter data type(s) of the function. @@ -143,8 +143,8 @@ CREATE OPERATOR CLASS name [ DEFAUL storage_type - The datatype actually stored in the index. Normally this is the - same as the column datatype, but some index access methods (only + The data type actually stored in the index. Normally this is the + same as the column data type, but some index access methods (only GIST at this writing) allow it to be different. The STORAGE clause must be omitted unless the index access method allows a different type to be used. @@ -191,9 +191,9 @@ CREATE OPERATOR CLASS name. - An operator class defines how a particular datatype can be used with + An operator class defines how a particular data type can be used with an index. The operator class specifies that certain operators will fill - particular roles or strategies for this datatype and this + particular roles or strategies for this data type and this access method. The operator class also specifies the support procedures to be used by the index access method when the operator class is selected for an @@ -210,7 +210,7 @@ CREATE OPERATOR CLASS The user who defines an operator class becomes its owner. The user - must own the datatype for which the operator class is being defined, + must own the data type for which the operator class is being defined, and must have execute permission for all referenced operators and functions. 
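As a hedged illustration of the strategy and support-procedure slots described above, assuming a user-defined type complex whose comparison operators and complex_abs_cmp comparison function already exist (none of these names come from the patch):

    CREATE OPERATOR CLASS complex_abs_ops
        DEFAULT FOR TYPE complex USING btree AS
            -- strategy numbers 1-5 are the standard B-tree comparison slots
            OPERATOR        1       < ,
            OPERATOR        2       <= ,
            OPERATOR        3       = ,
            OPERATOR        4       >= ,
            OPERATOR        5       > ,
            -- support procedure 1 is the three-way comparison function
            FUNCTION        1       complex_abs_cmp(complex, complex);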
@@ -248,7 +248,7 @@ CREATE OPERATOR CLASS The following example command defines a GiST index operator class - for datatype _int4 (array of int4). See + for data type _int4 (array of int4). See contrib/intarray/ for the complete example. diff --git a/doc/src/sgml/ref/create_operator.sgml b/doc/src/sgml/ref/create_operator.sgml index f23644c2950..da547718281 100644 --- a/doc/src/sgml/ref/create_operator.sgml +++ b/doc/src/sgml/ref/create_operator.sgml @@ -1,5 +1,5 @@ @@ -268,10 +268,10 @@ CREATE OPERATOR are always equivalent. - At least one of LEFTARG and RIGHTARG must be defined. For + At least one of LEFTARG and RIGHTARG must be defined. For binary operators, both should be defined. For right unary - operators, only LEFTARG should be defined, while for left - unary operators only RIGHTARG should be defined. + operators, only LEFTARG should be defined, while for left + unary operators only RIGHTARG should be defined. The @@ -335,39 +335,43 @@ MYBOXES.description !== box '((0,0), (1,1))' it also works to just have both operators refer to each other.) - The HASHES, MERGES, SORT1, SORT2, LTCMP, and GTCMP options are present to - support the query optimizer in performing joins. - PostgreSQL can always evaluate a join (i.e., - processing a clause with two tuple variables separated by an operator that - returns a boolean) by iterative substitution [WONG76]. In - addition, PostgreSQL can use a hash-join - algorithm along the lines of [SHAP86]; however, it must know whether this - strategy is applicable. The current hash-join algorithm is only correct - for operators that represent equality tests; furthermore, equality of the - data type must mean bitwise equality of the representation of the type. - (For example, a data type that contains unused bits that don't matter for - equality tests could not be hash-joined.) The HASHES flag indicates to the - query optimizer that a hash join may safely be used with this - operator. + The HASHES, MERGES, SORT1, + SORT2, LTCMP, and GTCMP options + are present to support the query optimizer in performing joins. + PostgreSQL can always evaluate a join + (i.e., processing a clause with two tuple variables separated by an + operator that returns a boolean) by iterative + substitution . In addition, + PostgreSQL can use a hash-join algorithm + ; however, it must know whether this + strategy is applicable. The current hash-join algorithm is only + correct for operators that represent equality tests; furthermore, + equality of the data type must mean bitwise equality of the + representation of the type. (For example, a data type that + contains unused bits that don't matter for equality tests could not + be hash-joined.) The HASHES flag indicates to the query optimizer + that a hash join may safely be used with this operator. - Similarly, the MERGES flag indicates whether merge-sort is a usable join - strategy for this operator. A merge join requires that the two input - datatypes have consistent orderings, and that the mergejoin operator - behave like equality with respect to that ordering. For example, it is - possible to merge-join equality between an integer and a float variable by - sorting both inputs in ordinary - numeric order. 
Execution of a merge join requires that the system be - able to identify four operators related to the mergejoin equality operator: - less-than comparison for the left input datatype, - less-than comparison for the right input datatype, - less-than comparison between the two datatypes, and - greater-than comparison between the two datatypes. It is possible to - specify these by name, as the SORT1, SORT2, LTCMP, and GTCMP options - respectively. The system will fill in the default names <, - <, <, > respectively if - any of these are omitted when MERGES is specified. Also, MERGES will - be assumed to be implied if any of these four operator options appear. + Similarly, the MERGES flag indicates whether merge-sort + is a usable join strategy for this operator. A merge join requires + that the two input data types have consistent orderings, and that + the merge-join operator behave like equality with respect to that + ordering. For example, it is possible to merge-join equality + between an integer and a float variable by sorting both inputs in + ordinary numeric order. Execution of a merge join requires that + the system be able to identify four operators related to the + merge-join equality operator: less-than comparison for the left + input data type, less-than comparison for the right input data + type, less-than comparison between the two data types, and + greater-than comparison between the two data types. It is possible + to specify these by name, as the SORT1, + SORT2, LTCMP, and GTCMP options + respectively. The system will fill in the default names + <, <, <, + > respectively if any of these are omitted when + MERGES is specified. Also, MERGES will be + assumed to be implied if any of these four operator options appear. If other join strategies are found to be practical, @@ -380,14 +384,15 @@ MYBOXES.description !== box '((0,0), (1,1))' be worth the complexity involved. - The RESTRICT and JOIN options assist the query optimizer in estimating - result sizes. If a clause of the form: - -MYBOXES.description <<< box '((0,0), (1,1))' - + The RESTRICT and JOIN options assist the + query optimizer in estimating result sizes. If a clause of the + form: + +myboxes.description <<< box '((0,0), (1,1))' + is present in the qualification, then PostgreSQL may have to - estimate the fraction of the instances in MYBOXES that + estimate the fraction of the instances in myboxes that satisfy the clause. The function res_proc must be a registered function (meaning it is already defined using @@ -400,7 +405,7 @@ MYBOXES.description <<< box '((0,0), (1,1))' Similarly, when the operands of the operator both contain instance variables, the query optimizer must estimate the - size of the resulting join. The function join_proc will + size of the resulting join. The function join_proc will return another floating-point number which will be multiplied by the cardinalities of the two tables involved to compute the expected result size. diff --git a/doc/src/sgml/ref/create_schema.sgml b/doc/src/sgml/ref/create_schema.sgml index daecc3b0d40..067d6c8d2cf 100644 --- a/doc/src/sgml/ref/create_schema.sgml +++ b/doc/src/sgml/ref/create_schema.sgml @@ -1,5 +1,5 @@ @@ -33,7 +33,7 @@ CREATE SCHEMA AUTHORIZATION usernameschemaname - The name of a schema to be created. If this is omitted, the username + The name of a schema to be created. If this is omitted, the user name is used as the schema name. 
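A short hedged sketch of the two spellings just described (the schema and user names are hypothetical):

    CREATE SCHEMA sales;                  -- explicit schema name
    CREATE SCHEMA AUTHORIZATION joe;      -- schema named after, and owned by, the user joe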
@@ -111,7 +111,7 @@ ERROR: namespace "schemaname" alre A schema is essentially a namespace: - it contains named objects (tables, datatypes, functions, and operators) + it contains named objects (tables, data types, functions, and operators) whose names may duplicate those of other objects existing in other schemas. Named objects are accessed either by qualifying their names with the schema name as a prefix, or by setting a search diff --git a/doc/src/sgml/ref/create_type.sgml b/doc/src/sgml/ref/create_type.sgml index b2d454f129b..26c134303fa 100644 --- a/doc/src/sgml/ref/create_type.sgml +++ b/doc/src/sgml/ref/create_type.sgml @@ -1,5 +1,5 @@ @@ -208,7 +208,7 @@ CREATE TYPE specified schema. Otherwise it is created in the current schema (the one at the front of the search path; see CURRENT_SCHEMA()). The type name must be distinct from the name of any existing type or - domain in the same schema. (Because tables have associated datatypes, + domain in the same schema. (Because tables have associated data types, type names also must not conflict with table names in the same schema.) @@ -231,10 +231,10 @@ CREATE TYPE cstring, OID, int4. (The first argument is the input text as a C string, the second argument is the element type in case this is an array type, - and the third is the typmod of the destination column, if known.) - It should return a value of the datatype itself. + and the third is the typmod of the destination column, if known.) + It should return a value of the data type itself. The output function may be - declared as taking one argument of the new datatype, or as taking + declared as taking one argument of the new data type, or as taking two arguments of which the second is type OID. (The second argument is again the array element type for array types.) The output function should return type cstring. @@ -245,9 +245,9 @@ CREATE TYPE can be declared to have results or inputs of the new type, when they have to be created before the new type can be created. The answer is that the input function must be created first, then the output function, then the - datatype. + data type. PostgreSQL will first see the name of the new - datatype as the return type of the input function. It will create a + data type as the return type of the input function. It will create a shell type, which is simply a placeholder entry in pg_type, and link the input function definition to the shell type. Similarly the output function will be linked to the (now already @@ -273,7 +273,7 @@ CREATE TYPE positive integer, or variable length, indicated by setting internallength to . (Internally, this is represented - by setting typlen to -1.) The internal representation of all + by setting typlen to -1.) The internal representation of all variable-length types must start with an integer giving the total length of this value of the type. @@ -346,7 +346,7 @@ CREATE TYPE The second form of CREATE TYPE creates a composite type. - The composite type is specified by a list of column names and datatypes. + The composite type is specified by a list of column names and data types. This is essentially the same as the row type of a table, but using CREATE TYPE avoids the need to create an actual table when all that is wanted is to define a type. 
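For example, a hedged sketch of the composite form just described (the type and column names are hypothetical):

    CREATE TYPE inventory_item AS (
        name        text,
        supplier_id integer,
        price       numeric
    );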
diff --git a/doc/src/sgml/ref/create_view.sgml b/doc/src/sgml/ref/create_view.sgml index 1692f9f0f77..9977cb9ae0b 100644 --- a/doc/src/sgml/ref/create_view.sgml +++ b/doc/src/sgml/ref/create_view.sgml @@ -1,5 +1,5 @@ @@ -235,7 +235,7 @@ CREATE VIEW view [ This option is to do with updatable views. - All INSERTs and UPDATEs on the view will be + All INSERT and UPDATE commands on the view will be checked to ensure data satisfy the view-defining condition. If they do not, the update will be rejected. diff --git a/doc/src/sgml/ref/createdb.sgml b/doc/src/sgml/ref/createdb.sgml index 9e0de5572c3..99d16855ba2 100644 --- a/doc/src/sgml/ref/createdb.sgml +++ b/doc/src/sgml/ref/createdb.sgml @@ -1,5 +1,5 @@ @@ -61,7 +61,8 @@ PostgreSQL documentation - -h, --host host + + Specifies the host name of the machine on which the @@ -72,7 +73,8 @@ PostgreSQL documentation - -p, --port port + + Specifies the Internet TCP/IP port or the local Unix domain socket file @@ -82,7 +84,8 @@ PostgreSQL documentation - -U, --username username + + User name to connect as @@ -91,7 +94,8 @@ PostgreSQL documentation - -W, --password + + Force password prompt. @@ -100,7 +104,8 @@ PostgreSQL documentation - -e, --echo + + Echo the queries that createdb generates @@ -110,7 +115,8 @@ PostgreSQL documentation - -q, --quiet + + Do not display a response. @@ -119,7 +125,8 @@ PostgreSQL documentation - -O, --owner owner + + Specifies the database user who will own the new database. @@ -128,7 +135,8 @@ PostgreSQL documentation - -D, --location datadir + + Specifies the alternative location for the database. See also - -T, --template template + + Specifies the template database from which to build this database. @@ -147,7 +156,8 @@ PostgreSQL documentation - -E, --encoding encoding + + Specifies the character encoding scheme to be used in this database. diff --git a/doc/src/sgml/ref/createlang.sgml b/doc/src/sgml/ref/createlang.sgml index b454374deb2..febcaf72f3c 100644 --- a/doc/src/sgml/ref/createlang.sgml +++ b/doc/src/sgml/ref/createlang.sgml @@ -1,5 +1,5 @@ @@ -69,7 +69,8 @@ PostgreSQL documentation - -d, --dbname dbname + + Specifies to which database the language should be added. @@ -80,7 +81,8 @@ PostgreSQL documentation - -e, --echo + + Displays SQL commands as they are executed. @@ -89,7 +91,8 @@ PostgreSQL documentation - -l, --list + + Shows a list of already installed languages in the target database @@ -99,7 +102,7 @@ PostgreSQL documentation - --L directory + Specifies the directory in which the language interpreter is @@ -118,7 +121,8 @@ PostgreSQL documentation - -h, --host host + + Specifies the host name of the machine on which the @@ -130,7 +134,8 @@ PostgreSQL documentation - -p, --port port + + Specifies the Internet TCP/IP port or local Unix domain socket file @@ -141,7 +146,8 @@ PostgreSQL documentation - -U, --username username + + User name to connect as @@ -150,7 +156,8 @@ PostgreSQL documentation - -W, --password + + Force password prompt. diff --git a/doc/src/sgml/ref/createuser.sgml b/doc/src/sgml/ref/createuser.sgml index 1576062aff0..6197465d752 100644 --- a/doc/src/sgml/ref/createuser.sgml +++ b/doc/src/sgml/ref/createuser.sgml @@ -1,5 +1,5 @@ @@ -39,7 +39,7 @@ PostgreSQL documentation Being a superuser also implies the ability to bypass access permission - checks within the database, so superuser-dom should not be granted lightly. + checks within the database, so superuserdom should not be granted lightly. 
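Since createuser ultimately issues the SQL command CREATE USER, creating an ordinary user with neither of the privileges discussed above corresponds roughly to the following statement (a hedged sketch; the user name and password are hypothetical):

    CREATE USER joe WITH PASSWORD 'secret' NOCREATEDB NOCREATEUSER;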
@@ -64,7 +64,8 @@ PostgreSQL documentation - -h, --host host + + Specifies the host name of the machine on which the @@ -76,7 +77,8 @@ PostgreSQL documentation - -p, --port port + + Specifies the Internet TCP/IP port or local Unix domain socket file @@ -87,7 +89,8 @@ PostgreSQL documentation - -e, --echo + + Echo the queries that createuser generates @@ -97,7 +100,8 @@ PostgreSQL documentation - -q, --quiet + + Do not display a response. @@ -106,7 +110,8 @@ PostgreSQL documentation - -d, --createdb + + The new user is allowed to create databases. @@ -115,7 +120,8 @@ PostgreSQL documentation - -D, --no-createdb + + The new user is not allowed to create databases. @@ -124,7 +130,8 @@ PostgreSQL documentation - -a, --adduser + + The new user is allowed to create other users. @@ -135,7 +142,8 @@ PostgreSQL documentation - -A, --no-adduser + + The new user is not allowed to create other users (i.e., @@ -145,7 +153,8 @@ PostgreSQL documentation - -P, --pwprompt + + If given, createuser will issue a prompt for @@ -156,7 +165,8 @@ PostgreSQL documentation - -i, --sysid uid + + Allows you to pick a non-default user id for the new user. This is not @@ -166,7 +176,8 @@ PostgreSQL documentation - -E, --encrypted + + Encrypts the user's password stored in the database. If not @@ -176,7 +187,8 @@ PostgreSQL documentation - -N, --unencrypted + + Does not encrypt the user's password stored in the database. If diff --git a/doc/src/sgml/ref/drop_conversion.sgml b/doc/src/sgml/ref/drop_conversion.sgml index 96f0f74edfc..535f1aa159e 100644 --- a/doc/src/sgml/ref/drop_conversion.sgml +++ b/doc/src/sgml/ref/drop_conversion.sgml @@ -1,4 +1,4 @@ - + @@ -76,7 +76,7 @@ DROP CONVERSION conversion_name Examples - To drop the conversion named myname: + To drop the conversion named myname: DROP CONVERSION myname; diff --git a/doc/src/sgml/ref/drop_opclass.sgml b/doc/src/sgml/ref/drop_opclass.sgml index 631a45aaeba..3f54baaccdc 100644 --- a/doc/src/sgml/ref/drop_opclass.sgml +++ b/doc/src/sgml/ref/drop_opclass.sgml @@ -1,5 +1,5 @@ @@ -134,7 +134,7 @@ DROP OPERATOR CLASS Usage - Remove btree operator class widget_ops: + Remove B-tree operator class widget_ops: DROP OPERATOR CLASS widget_ops USING btree; diff --git a/doc/src/sgml/ref/dropdb.sgml b/doc/src/sgml/ref/dropdb.sgml index aaecbf821ce..574c0b98834 100644 --- a/doc/src/sgml/ref/dropdb.sgml +++ b/doc/src/sgml/ref/dropdb.sgml @@ -1,5 +1,5 @@ @@ -54,7 +54,8 @@ PostgreSQL documentation - -h, --host host + + Specifies the host name of the machine on which the @@ -66,7 +67,8 @@ PostgreSQL documentation - -p, --port port + + Specifies the Internet TCP/IP port or local Unix domain socket file @@ -77,7 +79,8 @@ PostgreSQL documentation - -U, --username username + + User name to connect as @@ -86,7 +89,8 @@ PostgreSQL documentation - -W, --password + + Force password prompt. @@ -95,7 +99,8 @@ PostgreSQL documentation - -e, --echo + + Echo the queries that dropdb generates @@ -105,7 +110,8 @@ PostgreSQL documentation - -q, --quiet + + Do not display a response. @@ -114,7 +120,8 @@ PostgreSQL documentation - -i, --interactive + + Issues a verification prompt before doing anything destructive. diff --git a/doc/src/sgml/ref/droplang.sgml b/doc/src/sgml/ref/droplang.sgml index d6f14cfbcae..7b4b7f3a477 100644 --- a/doc/src/sgml/ref/droplang.sgml +++ b/doc/src/sgml/ref/droplang.sgml @@ -1,5 +1,5 @@ @@ -69,7 +69,8 @@ PostgreSQL documentation - [-d, --dbname] dbname + + Specifies from which database the language should be removed. 
@@ -80,7 +81,8 @@ PostgreSQL documentation - -e, --echo + + Displays SQL commands as they are executed. @@ -89,7 +91,8 @@ PostgreSQL documentation - -l, --list + + Shows a list of already installed languages in the target database @@ -107,7 +110,8 @@ PostgreSQL documentation - -h, --host host + + Specifies the host name of the machine on which the @@ -119,7 +123,8 @@ PostgreSQL documentation - -p, --port port + + Specifies the Internet TCP/IP port or local Unix domain socket file @@ -130,7 +135,8 @@ PostgreSQL documentation - -U, --username username + + User name to connect as @@ -139,7 +145,8 @@ PostgreSQL documentation - -W, --password + + Force password prompt. diff --git a/doc/src/sgml/ref/dropuser.sgml b/doc/src/sgml/ref/dropuser.sgml index 9fdd3c72d3b..b533c3a22aa 100644 --- a/doc/src/sgml/ref/dropuser.sgml +++ b/doc/src/sgml/ref/dropuser.sgml @@ -1,5 +1,5 @@ @@ -56,7 +56,8 @@ PostgreSQL documentation - -h, --host host + + Specifies the host name of the machine on which the @@ -68,7 +69,8 @@ PostgreSQL documentation - -p, --port port + + Specifies the Internet TCP/IP port or local Unix domain socket file @@ -79,7 +81,8 @@ PostgreSQL documentation - -e, --echo + + Echo the queries that dropuser generates @@ -89,7 +92,8 @@ PostgreSQL documentation - -q, --quiet + + Do not display a response. @@ -98,7 +102,8 @@ PostgreSQL documentation - -i, --interactive + + Prompt for confirmation before actually removing the user. diff --git a/doc/src/sgml/ref/initdb.sgml b/doc/src/sgml/ref/initdb.sgml index 657fee3d381..f42295dc6f1 100644 --- a/doc/src/sgml/ref/initdb.sgml +++ b/doc/src/sgml/ref/initdb.sgml @@ -1,5 +1,5 @@ @@ -114,8 +114,8 @@ PostgreSQL documentation - --pgdata=directory - -D directory + + This option specifies the directory where the database system @@ -130,23 +130,23 @@ PostgreSQL documentation - --username=username - -U username + + Selects the user name of the database superuser. This defaults to the name of the effective user running initdb. It is really not important what the superuser's name is, but one might choose to keep the - customary name postgres, even if the operating + customary name postgres, even if the operating system user's name is different. - --pwprompt - -W + + Makes initdb prompt for a password @@ -159,8 +159,8 @@ PostgreSQL documentation - --encoding=encoding - -E encoding + + Selects the encoding of the template database. This will also @@ -173,7 +173,7 @@ PostgreSQL documentation - --locale=locale + Sets the default locale for the database cluster. If this @@ -184,12 +184,12 @@ PostgreSQL documentation - --lc-collate=locale - --lc-ctype=locale - --lc-messages=locale - --lc-monetary=locale - --lc-numeric=locale - --lc-time=locale + + + + + + @@ -206,7 +206,7 @@ PostgreSQL documentation - -L directory + Specifies where initdb should find @@ -218,8 +218,8 @@ PostgreSQL documentation - --noclean - -n + + By default, when initdb @@ -232,8 +232,8 @@ PostgreSQL documentation - --debug - -d + + Print debugging output from the bootstrap backend and a few other diff --git a/doc/src/sgml/ref/insert.sgml b/doc/src/sgml/ref/insert.sgml index d3bb16c191a..054570c9ce7 100644 --- a/doc/src/sgml/ref/insert.sgml +++ b/doc/src/sgml/ref/insert.sgml @@ -1,5 +1,5 @@ @@ -55,8 +55,8 @@ INSERT INTO table [ ( DEFAULT VALUES - All columns will be filled by NULLs or by values specified - when the table was created using DEFAULT clauses. + All columns will be filled by null values or by values specified + when the table was created using DEFAULT clauses. 
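A minimal hedged sketch of the DEFAULT VALUES form described above (the table is hypothetical): columns that carry a DEFAULT clause receive their defaults, and the remaining columns become null.

    CREATE TABLE orders (
        id        serial,                    -- filled from the implicit sequence default
        placed_at timestamp DEFAULT now(),   -- filled by its DEFAULT clause
        note      text                       -- no default, so this column becomes null
    );
    INSERT INTO orders DEFAULT VALUES;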
diff --git a/doc/src/sgml/ref/listen.sgml b/doc/src/sgml/ref/listen.sgml index ac610585ca6..8bcb7309912 100644 --- a/doc/src/sgml/ref/listen.sgml +++ b/doc/src/sgml/ref/listen.sgml @@ -1,5 +1,5 @@ @@ -115,12 +115,12 @@ WARNING: Async_Listen: We are already listening on lockmode is one of: Conflicts with SHARE UPDATE EXCLUSIVE, SHARE, SHARE ROW EXCLUSIVE, EXCLUSIVE and ACCESS EXCLUSIVE modes. This mode protects a table against - concurrent schema changes and VACUUMs. + concurrent schema changes and VACUUM runs. @@ -289,8 +289,8 @@ ERROR name: Table does not exist. To achieve a similar effect when running a transaction at the SERIALIZABLE isolation level, you have to execute the LOCK TABLE - statement before executing any DML statement. A serializable - transaction's view of data will be frozen when its first DML statement + statement before executing any DML statement. A serializable + transaction's view of data will be frozen when its first DML statement begins. A later LOCK will still prevent concurrent writes --- but it won't ensure that what the transaction reads corresponds to the latest diff --git a/doc/src/sgml/ref/notify.sgml b/doc/src/sgml/ref/notify.sgml index a7a7df058de..e24420e2297 100644 --- a/doc/src/sgml/ref/notify.sgml +++ b/doc/src/sgml/ref/notify.sgml @@ -1,5 +1,5 @@ @@ -95,7 +95,7 @@ NOTIFY The information passed to the frontend for a notify event includes the notify - condition name and the notifying backend process's PID. It is up to the + condition name and the notifying backend process's PID. It is up to the database designer to define the condition names that will be used in a given database and what each one means. @@ -109,7 +109,7 @@ NOTIFY NOTIFY provides a simple form of signal or - IPC (interprocess communication) mechanism for a collection of processes + IPC (interprocess communication) mechanism for a collection of processes accessing the same PostgreSQL database. Higher-level mechanisms can be built by using tables in the database to pass additional data (beyond a mere condition name) from notifier to @@ -158,8 +158,8 @@ NOTIFY re-reading a database table to find the same updates that that frontend just wrote out. In PostgreSQL 6.4 and later, it is possible to avoid such extra work by noticing whether the notifying backend - process's PID (supplied in the notify event message) is the same as one's own - backend's PID (available from libpq). When they are the same, the notify + process's PID (supplied in the notify event message) is the same as one's own + backend's PID (available from libpq). When they are the same, the notify event is one's own work bouncing back, and can be ignored. (Despite what was said in the preceding paragraph, this is a safe technique. PostgreSQL keeps self-notifies separate from notifies @@ -191,7 +191,7 @@ NOTIFY In PostgreSQL releases prior to 6.4, the backend - PID delivered in a notify message was always the PID of the frontend's own + PID delivered in a notify message was always the PID of the frontend's own backend. So it was not possible to distinguish one's own notifies from other clients' notifies in those earlier releases. diff --git a/doc/src/sgml/ref/pg_ctl-ref.sgml b/doc/src/sgml/ref/pg_ctl-ref.sgml index 397098302ed..6659ac914c5 100644 --- a/doc/src/sgml/ref/pg_ctl-ref.sgml +++ b/doc/src/sgml/ref/pg_ctl-ref.sgml @@ -1,5 +1,5 @@ @@ -148,7 +148,7 @@ PostgreSQL documentation Append the server log output to filename. If the file does not - exist, it is created. The umask is set to 077, so access to + exist, it is created. 
The umask is set to 077, so access to the log file from other users is disallowed by default. diff --git a/doc/src/sgml/ref/postmaster.sgml b/doc/src/sgml/ref/postmaster.sgml index f052e5af6ee..51910ee1a1e 100644 --- a/doc/src/sgml/ref/postmaster.sgml +++ b/doc/src/sgml/ref/postmaster.sgml @@ -1,5 +1,5 @@ @@ -146,7 +146,7 @@ PostgreSQL documentation Disables fsync calls for performance improvement, at the risk of data corruption in event of a system crash. This parameter corresponds to setting - fsync=false in postgresql.conf. Read the detailed + fsync=false in postgresql.conf. Read the detailed documentation before using this! @@ -176,7 +176,7 @@ PostgreSQL documentation Allows clients to connect via TCP/IP (Internet domain) connections. Without this option, only local Unix domain socket connections are accepted. This option corresponds - to setting tcpip_socket=true in postgresql.conf. + to setting tcpip_socket=true in postgresql.conf. has the opposite @@ -350,7 +350,7 @@ PostgreSQL documentation Default character encoding used by clients. (The clients may - override this invidiually.) This value can also be set in the + override this individually.) This value can also be set in the configuration file. @@ -382,7 +382,7 @@ PostgreSQL documentation - Default port (preferrably set in the configuration file) + Default port (preferably set in the configuration file) @@ -520,10 +520,11 @@ StreamServerPort: cannot bind to port - The @@ -547,7 +548,7 @@ StreamServerPort: cannot bind to port This command will start up postmaster communicating through the port 1234. In order to connect to this - postmaster using psql, you would need to + postmaster using psql, you would need to run it as $ psql -p 1234 diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml index c4a605683b9..e10cdd9aa16 100644 --- a/doc/src/sgml/ref/psql-ref.sgml +++ b/doc/src/sgml/ref/psql-ref.sgml @@ -1,5 +1,5 @@ @@ -51,19 +51,21 @@ PostgreSQL documentation - -a, --echo-all + + Print all the lines to the screen as they are read. This is more useful for script processing rather than interactive mode. This is - equivalent to setting the variable ECHO to + equivalent to setting the variable ECHO to all. - -A, --no-align + + Switches to unaligned output mode. (The default output mode is @@ -73,7 +75,8 @@ PostgreSQL documentation - -c, --command query + + Specifies that psql is to execute one @@ -82,7 +85,7 @@ PostgreSQL documentation query must be either - a query string that is completely parseable by the backend (i.e., + a query string that is completely parsable by the backend (i.e., it contains no psql specific features), or it is a single backslash command. Thus you cannot mix SQL and psql @@ -94,7 +97,8 @@ PostgreSQL documentation - -d, --dbname dbname + + Specifies the name of the database to connect to. This is @@ -106,31 +110,34 @@ PostgreSQL documentation - -e, --echo-queries + + Show all queries that are sent to the backend. This is equivalent - to setting the variable ECHO to + to setting the variable ECHO to queries. - -E, --echo-hidden + + Echoes the actual queries generated by \d and other backslash commands. You can use this if you wish to include similar functionality into your own programs. This is equivalent to - setting the variable ECHO_HIDDEN from within + setting the variable ECHO_HIDDEN from within psql. 
- -f, --file filename + + Use the file filename @@ -161,7 +168,8 @@ PostgreSQL documentation - -F, --field-separator separator + + Use separator as the @@ -172,19 +180,21 @@ PostgreSQL documentation - -h, --host hostname + + Specifies the host name of the machine on which the postmaster is running. If host begins - with a slash, it is used as the directory for the unix domain + with a slash, it is used as the directory for the Unix-domain socket. - -H, --html + + Turns on HTML tabular output. This is @@ -195,7 +205,8 @@ PostgreSQL documentation - -l, --list + + Lists all available databases, then exits. Other non-connection @@ -206,7 +217,8 @@ PostgreSQL documentation - -o, --output filename + + Put all query output into file - -p, --port port + + Specifies the TCP/IP port or, by omission, the local Unix domain @@ -231,7 +244,8 @@ PostgreSQL documentation - -P, --pset assignment + + Allows you to specify printing options in the style of @@ -244,7 +258,8 @@ PostgreSQL documentation - -q + + Specifies that psql should do its work @@ -252,13 +267,14 @@ PostgreSQL documentation informational output. If this option is used, none of this happens. This is useful with the option. Within psql you can also set the - QUIET variable to achieve the same effect. + QUIET variable to achieve the same effect. - -R, --record-separator separator + + Use separator as the @@ -269,7 +285,8 @@ PostgreSQL documentation - -s, --single-step + + Run in single-step mode. That means the user is prompted before @@ -280,7 +297,8 @@ PostgreSQL documentation - -S, --single-line + + Runs in single-line mode where a newline terminates a query, as a @@ -299,7 +317,8 @@ PostgreSQL documentation - -t, --tuples-only + + Turn off printing of column names and result row count footers, @@ -310,7 +329,8 @@ PostgreSQL documentation - -T, --table-attr table_options + + Allows you to specify options to be placed within the @@ -321,7 +341,7 @@ PostgreSQL documentation - -u + Makes psql prompt for the user name and @@ -339,7 +359,8 @@ PostgreSQL documentation - -U, --username username + + Connects to the database as the user - -v, --variable, --set assignment + + + Performs a variable assignment, like the \set @@ -365,7 +388,8 @@ PostgreSQL documentation - -V, --version + + Shows the psql version. @@ -374,7 +398,8 @@ PostgreSQL documentation - -W, --password + + Requests that psql should prompt for a @@ -396,7 +421,8 @@ PostgreSQL documentation - -x, --expanded + + Turns on extended row format mode. This is equivalent to the @@ -406,7 +432,8 @@ PostgreSQL documentation - -X, --no-psqlrc + + Do not read the start-up file ~/.psqlrc. @@ -415,7 +442,8 @@ PostgreSQL documentation - -?, --help + + Shows help about psql command line @@ -435,7 +463,7 @@ PostgreSQL documentation finished normally, 1 if a fatal error of its own (out of memory, file not found) occurs, 2 if the connection to the backend went bad and the session is not interactive, and 3 if an error occurred in a - script and the variable ON_ERROR_STOP was set. + script and the variable ON_ERROR_STOP was set. @@ -458,7 +486,7 @@ PostgreSQL documentation not belong to any option it will be interpreted as the database name (or the user name, if the database name is also given). Not all these options are required, defaults do apply. If you omit the host - name psql will connect via a Unix domain socket to a server on the + name, psql will connect via a Unix domain socket to a server on the local host. The default port number is compile-time determined. 
Since the database server uses the same default, you will not have to specify the port in most cases. The default user name is your @@ -556,11 +584,11 @@ testdb=> - Arguments that are quoted in backticks - (`) are taken as a command line that is passed to - the shell. The output of the command (with any trailing newline - removed) is taken as the argument value. The above escape sequences - also apply in backticks. + Arguments that are enclosed in backquotes (`) + are taken as a command line that is passed to the shell. The + output of the command (with any trailing newline removed) is taken + as the argument value. The above escape sequences also apply in + backquotes. @@ -665,13 +693,13 @@ testdb=> - \copy table + \copy table { from | to } filename | stdin | stdout [ with ] [ oids ] [ delimiter [as] 'character' ] - [ null [as] 'string' ] + [ null [as] 'string' ] @@ -838,7 +866,7 @@ testdb=> - To reduce clutter, \df does not show datatype I/O + To reduce clutter, \df does not show data type I/O functions. This is implemented by ignoring functions that accept or return type cstring. @@ -1087,7 +1115,7 @@ Tue Oct 26 21:40:57 CEST 1999 If you want to see the lines on the screen as they are read you - must set the variable ECHO to + must set the variable ECHO to all. @@ -1132,7 +1160,7 @@ Tue Oct 26 21:40:57 CEST 1999 - See the description of the LO_TRANSACTION + See the description of the LO_TRANSACTION variable for important information concerning all large object operations. @@ -1169,7 +1197,7 @@ lo_import 152801 - See the description of the LO_TRANSACTION + See the description of the LO_TRANSACTION variable for important information concerning all large object operations. @@ -1206,7 +1234,7 @@ lo_import 152801 - See the description of the LO_TRANSACTION + See the description of the LO_TRANSACTION variable for important information concerning all large object operations. @@ -1426,7 +1454,7 @@ lo_import 152801 pager - Toggles the use of a pager for query and psql help output. If the + Toggles the use of a pager for query and psql help output. If the environment variable PAGER is set, the output is piped to the specified program. Otherwise a platform-dependent default (such as more) is used. @@ -1775,7 +1803,7 @@ bar such variables. A list of all specially treated variables follows. - DBNAME + DBNAME The name of the database you are currently connected to. This is @@ -1786,7 +1814,7 @@ bar - ECHO + ECHO If set to all, all lines @@ -1802,14 +1830,14 @@ bar - ECHO_HIDDEN + ECHO_HIDDEN When this variable is set and a backslash command queries the database, the query is first shown. This way you can study the PostgreSQL internals and provide similar functionality in your own programs. If you set the - variable to the value noexec, the queries are + variable to the value noexec, the queries are just shown but are not actually sent to the backend and executed. @@ -1817,7 +1845,7 @@ bar - ENCODING + ENCODING The current client multibyte encoding. If you are not set up to @@ -1828,7 +1856,7 @@ bar - HISTCONTROL + HISTCONTROL If this variable is set to ignorespace, @@ -1849,7 +1877,7 @@ bar - HISTSIZE + HISTSIZE The number of commands to store in the command history. The @@ -1865,7 +1893,7 @@ bar - HOST + HOST The database server host you are currently connected to. This is @@ -1876,15 +1904,16 @@ bar - IGNOREEOF + IGNOREEOF - If unset, sending an EOF character (usually Control-D) to an - interactive session of psql will - terminate the application. 
If set to a numeric value, that many - EOF characters are ignored before the application terminates. - If the variable is set but has no numeric value, the default is - 10. + If unset, sending an EOF character (usually + ControlD) + to an interactive session of psql + will terminate the application. If set to a numeric value, + that many EOF characters are ignored before the + application terminates. If the variable is set but has no + numeric value, the default is 10. @@ -1896,10 +1925,10 @@ bar - LASTOID + LASTOID - The value of the last affected oid, as returned from an + The value of the last affected OID, as returned from an INSERT or lo_insert command. This variable is only guaranteed to be valid until after the result of the next SQL command has @@ -1909,7 +1938,7 @@ bar - LO_TRANSACTION + LO_TRANSACTION If you use the PostgreSQL large @@ -1943,7 +1972,7 @@ bar - ON_ERROR_STOP + ON_ERROR_STOP By default, if non-interactive scripts encounter an error, such @@ -1963,7 +1992,7 @@ bar - PORT + PORT The database server port to which you are currently connected. @@ -1974,7 +2003,9 @@ bar - PROMPT1, PROMPT2, PROMPT3 + PROMPT1 + PROMPT2 + PROMPT3 These specify what the prompt psql @@ -1986,7 +2017,7 @@ bar - QUIET + QUIET This variable is equivalent to the command line option @@ -1997,7 +2028,7 @@ bar - SINGLELINE + SINGLELINE This variable is set by the command line option @@ -2007,7 +2038,7 @@ bar - SINGLESTEP + SINGLESTEP This variable is equivalent to the command line option @@ -2017,7 +2048,7 @@ bar - USER + USER The database user you are currently connected as. This is set @@ -2105,8 +2136,8 @@ testdb=> \set content '\'' `sed -e "s/'/\\\\\\'/g" < my_file.txt` '\' The prompts psql issues can be customized - to your preference. The three variables PROMPT1, - PROMPT2, and PROMPT3 contain strings + to your preference. The three variables PROMPT1, + PROMPT2, and PROMPT3 contain strings and special escape sequences that describe the appearance of the prompt. Prompt 1 is the normal prompt that is issued when psql requests a new query. Prompt 2 is @@ -2251,11 +2282,11 @@ testdb=> \set content '\'' `sed -e "s/'/\\\\\\'/g" < my_file.txt` '\' - Readline + Command-Line Editing - psql supports the readline and history - libraries for convenient line editing and retrieval. The command + psql supports the Readline + library for convenient line editing and retrieval. The command history is stored in a file named .psql_history in your home directory and is reloaded when psql starts up. Tab-completion is also diff --git a/doc/src/sgml/ref/reindex.sgml b/doc/src/sgml/ref/reindex.sgml index 70dbf636b61..6a927bbbd41 100644 --- a/doc/src/sgml/ref/reindex.sgml +++ b/doc/src/sgml/ref/reindex.sgml @@ -1,5 +1,5 @@ @@ -193,14 +193,15 @@ REINDEX as there is in psql. To continue a command across multiple lines, you must type backslash just before each newline except the last one. - Also, you won't have any of the conveniences of readline processing + Also, you won't have any of the conveniences of command-line editing (no command history, for example). - To quit the backend, type EOF (control-D, usually). + To quit the backend, type EOF (ControlD, usually). 
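The LASTOID variable described above exposes the OID returned by the last INSERT. A libpq client can read the same value directly from the command result; the sketch below assumes an already-open connection, and the table name ttest is a placeholder borrowed from the trigger example elsewhere in this patch.

<programlisting>
/*
 * Sketch: retrieving the OID of a freshly inserted row in libpq,
 * the same value psql stores in its LASTOID variable.
 * "conn" is assumed to be an open PGconn; "ttest" is a placeholder table.
 */
#include &lt;stdio.h&gt;
#include &lt;libpq-fe.h&gt;

static void
show_last_oid(PGconn *conn)
{
    PGresult   *res = PQexec(conn, "INSERT INTO ttest VALUES (1)");

    if (PQresultStatus(res) == PGRES_COMMAND_OK)
    {
        /* PQoidValue yields InvalidOid when no OID is available */
        Oid         oid = PQoidValue(res);

        printf("last OID: %u\n", (unsigned int) oid);
    }
    else
        fprintf(stderr, "INSERT failed: %s", PQerrorMessage(conn));

    PQclear(res);
}
</programlisting>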
diff --git a/doc/src/sgml/ref/reset.sgml b/doc/src/sgml/ref/reset.sgml index 9f78d9df6c1..1af55703cd3 100644 --- a/doc/src/sgml/ref/reset.sgml +++ b/doc/src/sgml/ref/reset.sgml @@ -1,5 +1,5 @@ @@ -79,7 +79,7 @@ SET variable TO DEFAULT Examples - Set DateStyle to its default value: + Set DateStyle to its default value: RESET DateStyle; @@ -87,7 +87,7 @@ RESET DateStyle; - Set Geqo to its default value: + Set geqo to its default value: RESET GEQO; diff --git a/doc/src/sgml/ref/select.sgml b/doc/src/sgml/ref/select.sgml index 7192de53e0d..cad427da3c6 100644 --- a/doc/src/sgml/ref/select.sgml +++ b/doc/src/sgml/ref/select.sgml @@ -1,5 +1,5 @@ @@ -451,8 +451,8 @@ where from_item can be: (i.e., all combined rows that pass its ON condition), plus one copy of each row in the left-hand table for which there was no right-hand row that passed the ON condition. This left-hand row is extended to the full - width of the joined table by inserting NULLs for the right-hand columns. - Note that only the JOIN's own ON or USING condition is considered while + width of the joined table by inserting null values for the right-hand columns. + Note that only the JOIN's own ON or USING condition is considered while deciding which rows have matches. Outer ON or WHERE conditions are applied afterwards. @@ -647,11 +647,13 @@ SELECT name FROM distributors ORDER BY code; - Optionally one may add the keyword DESC (descending) - or ASC (ascending) after each column name in the ORDER BY clause. - If not specified, ASC is assumed by default. Alternatively, a - specific ordering operator name may be specified. ASC is equivalent - to USING < and DESC is equivalent to USING >. + Optionally one may add the key word DESC (descending) + or ASC (ascending) after each column name in the + ORDER BY clause. If not specified, ASC is + assumed by default. Alternatively, a specific ordering operator + name may be specified. ASC is equivalent to + USING < and DESC is equivalent to + USING >. @@ -689,7 +691,7 @@ SELECT name FROM distributors ORDER BY code; The UNION operator computes the collection (set union) of the rows returned by the queries involved. - The two SELECTs that represent the direct operands of the UNION must + The two SELECT statements that represent the direct operands of the UNION must produce the same number of columns, and corresponding columns must be of compatible data types. @@ -1064,7 +1066,7 @@ SELECT * FROM distributors_2(111) AS (f1 int, f2 text); PostgreSQL allows one to omit the FROM clause from a query. This feature -was retained from the original PostQuel query language. It has +was retained from the original PostQUEL query language. It has a straightforward use to compute the results of simple expressions: @@ -1075,7 +1077,7 @@ SELECT 2+2; 4 -Some other DBMSes cannot do this except by introducing a dummy one-row +Some other SQL databases cannot do this except by introducing a dummy one-row table to do the select from. A less obvious use is to abbreviate a normal select from one or more tables: diff --git a/doc/src/sgml/ref/set.sgml b/doc/src/sgml/ref/set.sgml index 02fe51f6951..bc3688a5652 100644 --- a/doc/src/sgml/ref/set.sgml +++ b/doc/src/sgml/ref/set.sgml @@ -1,5 +1,5 @@ @@ -113,7 +113,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - DATESTYLE + DATESTYLE Choose the date/time representation style. 
Two separate @@ -126,7 +126,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - ISO + ISO Use ISO 8601-style dates and times (YYYY-MM-DD @@ -136,7 +136,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - SQL + SQL Use Oracle/Ingres-style dates and times. Note that this @@ -147,7 +147,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - PostgreSQL + PostgreSQL Use traditional PostgreSQL format. @@ -156,7 +156,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - German + German Use dd.mm.yyyy for numeric date representations. @@ -173,7 +173,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - European + European Use dd/mm/yyyy for numeric date representations. @@ -182,8 +182,8 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - NonEuropean - US + NonEuropean + US Use mm/dd/yyyy for numeric date representations. @@ -209,7 +209,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - There are several now-deprecated means for setting the datestyle + There are several now-deprecated means for setting the date style in addition to the normal methods of setting it via SET or a configuration-file entry: @@ -225,9 +225,9 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone Setting the client's PGDATESTYLE environment variable. - If PGDATESTYLE is set in the frontend environment of a client - based on libpq, libpq will automatically set DATESTYLE to the - value of PGDATESTYLE during connection start-up. This is + If PGDATESTYLE is set in the frontend environment of a client + based on libpq, libpq will automatically set DATESTYLE to the + value of PGDATESTYLE during connection start-up. This is equivalent to a manually issued SET DATESTYLE. @@ -283,7 +283,7 @@ SELECT setseed(value); Shows the server-side multibyte encoding. (At present, this parameter can be shown but not set, because the encoding is - determined at initdb time.) + determined at initdb time.) @@ -373,7 +373,7 @@ SELECT setseed(value); If the PGTZ environment variable is set in the frontend - environment of a client based on libpq, libpq will automatically + environment of a client based on libpq, libpq will automatically SET TIMEZONE to the value of PGTZ during connection start-up. diff --git a/doc/src/sgml/ref/set_session_auth.sgml b/doc/src/sgml/ref/set_session_auth.sgml index dfb20357005..eb9d760f10e 100644 --- a/doc/src/sgml/ref/set_session_auth.sgml +++ b/doc/src/sgml/ref/set_session_auth.sgml @@ -1,4 +1,4 @@ - + 2001-04-21 @@ -48,7 +48,7 @@ RESET SESSION AUTHORIZATION The session user identifier may be changed only if the initial session user (the authenticated user) had the superuser privilege. Otherwise, the command is accepted only if it - specifies the authenticated username. + specifies the authenticated user name. diff --git a/doc/src/sgml/ref/unlisten.sgml b/doc/src/sgml/ref/unlisten.sgml index 1abe7c3b418..599b6bfcf21 100644 --- a/doc/src/sgml/ref/unlisten.sgml +++ b/doc/src/sgml/ref/unlisten.sgml @@ -1,5 +1,5 @@ @@ -90,7 +90,7 @@ UNLISTEN { notifyname | * } UNLISTEN is used to remove an existing NOTIFY registration. - UNLISTEN cancels any existing registration of the current + UNLISTEN cancels any existing registration of the current PostgreSQL session as a listener on the notify condition notifyname. The special condition wildcard * cancels all listener registrations @@ -117,7 +117,7 @@ UNLISTEN { notifyname | * } as a name up to 64 characters long. - The backend does not complain if you UNLISTEN something you were not + The backend does not complain if you unlisten something you were not listening for. 
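The UNLISTEN notes above describe cancelling a notification registration. For a libpq-based client the full listen/notify/unlisten cycle looks roughly like the following sketch; the connection is assumed to be open already and the condition name "virtual" is just the placeholder used in the surrounding examples.

<programlisting>
/*
 * Sketch of the listen/unlisten cycle from a libpq client: register for
 * the condition "virtual", pick up any pending notification, then cancel
 * the registration.  "conn" is assumed to be an open connection.
 */
#include &lt;stdio.h&gt;
#include &lt;libpq-fe.h&gt;

static void
listen_once(PGconn *conn)
{
    PGnotify   *note;

    PQclear(PQexec(conn, "LISTEN virtual"));

    /* ... later, after other traffic on the connection ... */
    PQconsumeInput(conn);
    while ((note = PQnotifies(conn)) != NULL)
    {
        printf("notification \"%s\" from backend pid %d\n",
               note->relname, note->be_pid);
        PQfreemem(note);
    }

    /* further NOTIFY "virtual" events will now be ignored */
    PQclear(PQexec(conn, "UNLISTEN virtual"));
}
</programlisting>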
Each backend will automatically execute UNLISTEN * when exiting. @@ -143,7 +143,7 @@ Asynchronous NOTIFY 'virtual' from backend with pid '8448' received - Once UNLISTEN has been executed, further NOTIFY commands will be + Once UNLISTEN has been executed, further NOTIFY commands will be ignored: diff --git a/doc/src/sgml/ref/vacuum.sgml b/doc/src/sgml/ref/vacuum.sgml index 310dd4004f7..f45aaf2f13d 100644 --- a/doc/src/sgml/ref/vacuum.sgml +++ b/doc/src/sgml/ref/vacuum.sgml @@ -1,5 +1,5 @@ @@ -162,7 +162,7 @@ INFO: Index index: Pages 28; VACUUM reclaims storage occupied by deleted tuples. In normal PostgreSQL operation, tuples that - are DELETEd or obsoleted by UPDATE are not physically removed from + are deleted or obsoleted by UPDATE are not physically removed from their table; they remain present until a VACUUM is done. Therefore it's necessary to do VACUUM periodically, especially on frequently-updated tables. diff --git a/doc/src/sgml/ref/vacuumdb.sgml b/doc/src/sgml/ref/vacuumdb.sgml index 40e6e074319..8fada3eac0f 100644 --- a/doc/src/sgml/ref/vacuumdb.sgml +++ b/doc/src/sgml/ref/vacuumdb.sgml @@ -1,5 +1,5 @@ @@ -79,8 +79,8 @@ PostgreSQL documentation - -d dbname - --dbname dbname + + Specifies the name of the database to be cleaned or analyzed. @@ -94,8 +94,8 @@ PostgreSQL documentation - -a - --all + + Vacuum all databases. @@ -104,8 +104,8 @@ PostgreSQL documentation - -f - --full + + Perform full vacuuming. @@ -114,8 +114,8 @@ PostgreSQL documentation - -v - --verbose + + Print detailed information during processing. @@ -124,8 +124,8 @@ PostgreSQL documentation - -z - --analyze + + Calculate statistics for use by the optimizer. @@ -134,8 +134,8 @@ PostgreSQL documentation - -t table [ (column [,...]) ] - --table table [ (column [,...]) ] + + Clean or analyze table only. @@ -160,8 +160,8 @@ PostgreSQL documentation - -h host - --host host + + Specifies the host name of the machine on which the @@ -173,8 +173,8 @@ PostgreSQL documentation - -p port - --port port + + Specifies the Internet TCP/IP port or local Unix domain socket file @@ -185,8 +185,8 @@ PostgreSQL documentation - -U username - --username username + + User name to connect as @@ -195,8 +195,8 @@ PostgreSQL documentation - -W - --password + + Force password prompt. @@ -205,8 +205,8 @@ PostgreSQL documentation - -e - --echo + + Echo the commands that vacuumdb generates @@ -216,8 +216,8 @@ PostgreSQL documentation - -q - --quiet + + Do not display a response. diff --git a/doc/src/sgml/regress.sgml b/doc/src/sgml/regress.sgml index 64c5abdf8ce..c095be5317e 100644 --- a/doc/src/sgml/regress.sgml +++ b/doc/src/sgml/regress.sgml @@ -1,4 +1,4 @@ - + Regression Tests @@ -87,8 +87,8 @@ The parallel regression test starts quite a few processes under your user ID. Presently, the maximum concurrency is twenty parallel test - scripts, which means sixty processes --- there's a backend, a psql, - and usually a shell parent process for the psql for each test script. + scripts, which means sixty processes --- there's a backend, a psql, + and usually a shell parent process for the psql for each test script. So if your system enforces a per-user limit on the number of processes, make sure this limit is at least seventy-five or so, else you may get random-seeming failures in the parallel test. If you are not in @@ -184,7 +184,7 @@ problem by providing alternative result files that together are known to handle a large number of locales. 
For example, for the char test, the expected file - char.out handles the C and POSIX locales, + char.out handles the C and POSIX locales, and the file char_1.out handles many other locales. The regression test driver will automatically pick the best file to match against when checking for success and for diff --git a/doc/src/sgml/release.sgml b/doc/src/sgml/release.sgml index f373cc6e25d..549e58e32e8 100644 --- a/doc/src/sgml/release.sgml +++ b/doc/src/sgml/release.sgml @@ -1,5 +1,5 @@ @@ -598,7 +598,7 @@ Reject invalid multibyte character sequences (Tatsuo) - PL/pgSQL + <application>PL/pgSQL</> Now uses portals for SELECT loops, allowing huge result sets (Jan) CURSOR and REFCURSOR support (Jan) @@ -638,7 +638,7 @@ Add spi_lastoid function (bob@redivi.com) - Psql + <application>Psql</> \d displays indexes in unique, primary groupings (Christopher Kings-Lynne) Allow trailing semicolons in backslash commands (Greg Sabino Mullane) @@ -649,7 +649,7 @@ Format the correct number of columns for Unicode (Patrice) - Libpq + <application>Libpq</> New function PQescapeString() to escape quotes in command strings (Florian Weimer) New function PQescapeBytea() escapes binary strings for use as SQL string literals @@ -711,7 +711,7 @@ Add more compatibility functions to odbc.sql (Peter E) - ECPG + <application>ECPG</> EXECUTE ... INTO implemented (Christof Petig) Multiple row descriptor support (e.g. CARDINALITY) (Christof Petig) @@ -981,7 +981,7 @@ outer join syntax. The previous C function manager did not -handle NULLs properly, nor did it support 64-bit CPU's (Alpha). The new +handle null values properly, nor did it support 64-bit CPU's (Alpha). The new function manager does. You can continue using your old custom functions, but you may want to rewrite them in the future to use the new function manager call interface. @@ -1438,9 +1438,11 @@ ecpg changes (Michael) - SQL92 join syntax is now supported, though only as INNER JOINs - for this release. JOIN, NATURAL JOIN, JOIN/USING, JOIN/ON are - available, as are column correlation names. + SQL92 join syntax is now supported, though only as + INNER JOIN for this release. JOIN, + NATURAL JOIN, JOIN/USING, + and JOIN/ON are available, as are + column correlation names. @@ -2530,7 +2532,7 @@ original PL/pgTCL procedural language he contributed -We have optional multiple-byte character set support from Tatsuo Iishi +We have optional multiple-byte character set support from Tatsuo Ishii to complement our existing locale support. @@ -3592,10 +3594,11 @@ optimizer which uses genetic - The "random" results in the random test should cause the "random" test - to be "failed", since the regression tests are evaluated using a simple - diff. However, "random" does not seem to produce random results on my - test machine (Linux/gcc/i686). + The random results in the random test should cause the + random test to be failed, since the + regression tests are evaluated using a simple diff. However, + random does not seem to produce random results on my test + machine (Linux/gcc/i686). @@ -3946,7 +3949,7 @@ than NULL. See the copy manual page for full details. -If you are loading an older binary copy or non-stdout copy, there is no +If you are loading an older binary copy or non-stdout copy, there is no end-of-data character, and hence no conversion necessary. 
diff --git a/doc/src/sgml/rules.sgml b/doc/src/sgml/rules.sgml index f26319c463a..493f2f05b82 100644 --- a/doc/src/sgml/rules.sgml +++ b/doc/src/sgml/rules.sgml @@ -1,4 +1,4 @@ - + The Rule System @@ -179,7 +179,7 @@ DELETE queries don't need a target list because they don't - produce any result. In fact the planner will add a special CTID + produce any result. In fact the planner will add a special CTID entry to the empty target list. But this is after the rule system and will be discussed later. For the rule system the target list is empty. @@ -199,7 +199,7 @@ expressions from the SET attribute = expression part of the query. The planner will add missing columns by inserting expressions that copy the values from the old row into the new one. And it will add - the special CTID entry just as for DELETE too. + the special CTID entry just as for DELETE too. @@ -781,24 +781,25 @@ SELECT t1.a, t2.b FROM t1, t2 WHERE t1.a = t2.a; To resolve this problem, another entry is added to the target list - in UPDATE (and also in DELETE) statements: the current tuple ID (CTID). + in UPDATE (and also in DELETE) statements: the current tuple ID (CTID). This is a system attribute containing the file block number and position in the block for the row. Knowing the table, - the CTID can be used to retrieve the original t1 row to be updated. - After adding the CTID to the target list, the query actually looks like + the CTID can be used to retrieve the original t1 row to be updated. + After adding the CTID to the target list, the query actually looks like SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; - Now another detail of PostgreSQL enters the - stage. At this moment, table rows aren't overwritten and this is why - ABORT TRANSACTION is fast. In an UPDATE, the new result row is inserted - into the table (after stripping CTID) and in the tuple header of the row - that CTID pointed to the cmax and xmax entries are set to the current - command counter and current transaction ID. Thus the old row is hidden - and after the transaction committed the vacuum cleaner can really move - it out. + Now another detail of PostgreSQL enters + the stage. At this moment, table rows aren't overwritten and this + is why ABORT TRANSACTION is fast. In an UPDATE, the new result row + is inserted into the table (after stripping CTID) and + in the tuple header of the row that CTID pointed to + the cmax and xmax entries are set to the + current command counter and current transaction ID. Thus the old + row is hidden and after the transaction committed the vacuum + cleaner can really move it out. diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml index 3ae2750fd0c..7265896eaba 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -1,5 +1,5 @@ @@ -528,7 +528,7 @@ postmaster -c log_connections=yes -c syslog=2 env PGOPTIONS='-c geqo=off' psql - (This works for any libpq-based client application, not just + (This works for any libpq-based client application, not just psql.) Note that this won't work for options that are fixed when the server is started, such as the port number. @@ -1002,7 +1002,7 @@ env PGOPTIONS='-c geqo=off' psql Determines whether EXPLAIN VERBOSE uses the indented - or non-indented format for displaying detailed querytree dumps. + or non-indented format for displaying detailed query-tree dumps. 
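The runtime section above notes that the PGOPTIONS trick works for any libpq-based client, not just psql, because libpq reads that environment variable at connection time. A minimal sketch of the same idea in C follows; the database name is a placeholder assumption.

<programlisting>
/*
 * Sketch: libpq reads PGOPTIONS when the connection is established, so
 * a C client can pass per-session settings the same way the
 * "env PGOPTIONS='-c geqo=off' psql" example does.
 */
#include &lt;stdio.h&gt;
#include &lt;stdlib.h&gt;
#include &lt;libpq-fe.h&gt;

int
main(void)
{
    PGconn     *conn;

    /* must be set before the connection is established */
    setenv("PGOPTIONS", "-c geqo=off", 1);

    conn = PQconnectdb("dbname=test");
    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* the backend for this session now runs with geqo disabled */
    PQfinish(conn);
    return 0;
}
</programlisting>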
@@ -1058,10 +1058,10 @@ env PGOPTIONS='-c geqo=off' psql LOG_PID (boolean) - Prefixes each server message in the logfile with the process ID of + Prefixes each server message in the log file with the process ID of the backend process. This is useful to sort out which messages pertain to which connection. The default is off. This parameter - does not affect messages logged via syslog(), which always contain + does not affect messages logged via syslog, which always contain the process ID. @@ -1669,7 +1669,7 @@ dynamic_library_path = '/usr/local/lib/postgresql:/home/my_project/lib:$libdir' This variable specifies the order in which namespaces are searched - when an object (table, datatype, function, etc) is referenced by a + when an object (table, data type, function, etc) is referenced by a simple name with no schema component. When there are objects of identical names in different namespaces, the one found first in the search path is used. An object that is not in any of the @@ -1865,7 +1865,7 @@ dynamic_library_path = '/usr/local/lib/postgresql:/home/my_project/lib:$libdir' However, filtered forms in Microsoft Access generate queries that appear to use expr = NULL to test for - NULLs, so if you use that interface to access the database you + null values, so if you use that interface to access the database you might want to turn this option on. Since expressions of the form expr = NULL always return NULL (using the correct interpretation) they are not diff --git a/doc/src/sgml/syntax.sgml b/doc/src/sgml/syntax.sgml index 72ce0880fba..3c606b098ad 100644 --- a/doc/src/sgml/syntax.sgml +++ b/doc/src/sgml/syntax.sgml @@ -1,5 +1,5 @@ @@ -715,19 +715,19 @@ SELECT (5 !) - 6; IS - test for TRUE, FALSE, UNKNOWN, NULL + IS TRUE, IS FALSE, IS UNKNOWN, IS NULL ISNULL - test for NULL + test for null NOTNULL - test for NOT NULL + test for not null @@ -1042,13 +1042,13 @@ sqrt(2) The first form of aggregate expression invokes the aggregate across all input rows for which the given expression yields a - non-NULL value. (Actually, it is up to the aggregate function - whether to ignore NULLs or not --- but all the standard ones do.) + non-null value. (Actually, it is up to the aggregate function + whether to ignore null values or not --- but all the standard ones do.) The second form is the same as the first, since ALL is the default. The third form invokes the - aggregate for all distinct non-NULL values of the expression found + aggregate for all distinct non-null values of the expression found in the input rows. The last form invokes the aggregate once for - each input row regardless of NULL or non-NULL values; since no + each input row regardless of null or non-null values; since no particular input value is specified, it is generally only useful for the count() aggregate function. @@ -1056,9 +1056,9 @@ sqrt(2) For example, count(*) yields the total number of input rows; count(f1) yields the number of - input rows in which f1 is non-NULL; + input rows in which f1 is non-null; count(distinct f1) yields the number of - distinct non-NULL values of f1. + distinct non-null values of f1. @@ -1106,7 +1106,7 @@ CAST ( expression AS type to the type that a value expression must produce (for example, when it is assigned to a table column); the system will automatically apply a type cast in such cases. However, automatic casting is only done for - cast functions that are marked okay to apply implicitly + cast functions that are marked OK to apply implicitly in the system catalogs. 
Other cast functions must be invoked with explicit casting syntax. This restriction is intended to prevent surprising conversions from being applied silently. @@ -1140,7 +1140,7 @@ CAST ( expression AS type It is an error to use a query that returns more than one row or more than one column as a scalar subquery. (But if, during a particular execution, the subquery returns no rows, - there is no error; the scalar result is taken to be NULL.) + there is no error; the scalar result is taken to be null.) The subquery can refer to variables from the surrounding query, which will act as constants during any one evaluation of the subquery. See also . diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml index f94c2b7d8a7..fa3e149accc 100644 --- a/doc/src/sgml/trigger.sgml +++ b/doc/src/sgml/trigger.sgml @@ -1,17 +1,18 @@ Triggers - PostgreSQL has various server-side function - interfaces. Server-side functions can be written in SQL, PL/pgSQL, - Tcl, or C. Trigger functions can be written in any of these - languages except SQL. Note that statement-level trigger events are not - supported in the current version. You can currently specify BEFORE or - AFTER on INSERT, DELETE or UPDATE of a tuple as a trigger event. + PostgreSQL has various server-side + function interfaces. Server-side functions can be written in SQL, + C, or any defined procedural language. Trigger functions can be + written in C and most procedural languages, but not in SQL. Note that + statement-level trigger events are not supported in the current + version. You can currently specify BEFORE or AFTER on INSERT, + DELETE or UPDATE of a tuple as a trigger event. @@ -19,7 +20,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/trigger.sgml,v 1.24 2002/08/22 00:01:40 tgl If a trigger event occurs, the trigger manager (called by the Executor) - sets up a TriggerData information structure (described below) and calls + sets up a TriggerData information structure (described below) and calls the trigger function to handle the event. @@ -27,7 +28,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/trigger.sgml,v 1.24 2002/08/22 00:01:40 tgl The trigger function must be defined before the trigger itself can be created. The trigger function must be declared as a function taking no arguments and returning type trigger. - (The trigger function receives its input through a TriggerData + (The trigger function receives its input through a TriggerData structure, not in the form of ordinary function arguments.) If the function is written in C, it must use the version 1 function manager interface. @@ -36,12 +37,12 @@ $Header: /cvsroot/pgsql/doc/src/sgml/trigger.sgml,v 1.24 2002/08/22 00:01:40 tgl The syntax for creating triggers is: - + CREATE TRIGGER trigger [ BEFORE | AFTER ] [ INSERT | DELETE | UPDATE [ OR ... ] ] ON relation FOR EACH [ ROW | STATEMENT ] EXECUTE PROCEDURE procedure (args); - + where the arguments are: @@ -116,7 +117,7 @@ CREATE TRIGGER trigger [ BEFORE | AFTER ] [ INSERT | args - The arguments passed to the function in the TriggerData structure. + The arguments passed to the function in the TriggerData structure. This is either empty or a list of one or more simple literal constants (which will be passed to the function as strings). @@ -127,7 +128,7 @@ CREATE TRIGGER trigger [ BEFORE | AFTER ] [ INSERT | triggers with similar requirements to call the same function. 
As an example, there could be a generalized trigger function that takes as its arguments two field names and puts the - current user in one and the current timestamp in the other. + current user in one and the current time stamp in the other. Properly written, this trigger function would be independent of the specific table it is triggering on. So the same function could be used for INSERT events on any table with suitable fields, @@ -141,15 +142,16 @@ CREATE TRIGGER trigger [ BEFORE | AFTER ] [ INSERT | - Trigger functions return a HeapTuple to the calling Executor. The return + Trigger functions return a HeapTuple to the calling executor. The return value is ignored for triggers fired AFTER an operation, but it allows BEFORE triggers to: - Return NULL to skip the operation for the current tuple (and so the - tuple will not be inserted/updated/deleted). + Return a NULL pointer to skip the operation for the + current tuple (and so the tuple will not be + inserted/updated/deleted). @@ -173,12 +175,12 @@ CREATE TRIGGER trigger [ BEFORE | AFTER ] [ INSERT | - If more than one trigger - is defined for the same event on the same relation, the triggers will - be fired in alphabetical order by name. In the case of BEFORE triggers, - the possibly-modified tuple returned by each trigger becomes the input - to the next trigger. If any BEFORE trigger returns NULL, the operation - is abandoned and subsequent triggers are not fired. + If more than one trigger is defined for the same event on the same + relation, the triggers will be fired in alphabetical order by + name. In the case of BEFORE triggers, the possibly-modified tuple + returned by each trigger becomes the input to the next trigger. + If any BEFORE trigger returns NULL, the operation is + abandoned and subsequent triggers are not fired. @@ -207,24 +209,24 @@ CREATE TRIGGER trigger [ BEFORE | AFTER ] [ INSERT | The interface described here applies for PostgreSQL 7.1 and later. - Earlier versions passed the TriggerData pointer in a global - variable CurrentTriggerData. + Earlier versions passed the TriggerData pointer in a global + variable CurrentTriggerData. When a function is called by the trigger manager, it is not passed any normal parameters, but it is passed a context pointer pointing to a - TriggerData structure. C functions can check whether they were called + TriggerData structure. C functions can check whether they were called from the trigger manager or not by executing the macro CALLED_AS_TRIGGER(fcinfo), which expands to - - ((fcinfo)->context != NULL && IsA((fcinfo)->context, TriggerData)) - - If this returns TRUE, then it is safe to cast fcinfo->context to type + +((fcinfo)->context != NULL && IsA((fcinfo)->context, TriggerData)) + + If this returns true, then it is safe to cast fcinfo->context to type TriggerData * and make use of the pointed-to - TriggerData structure. - The function must not alter the TriggerData + TriggerData structure. + The function must not alter the TriggerData structure or any of the data it points to. @@ -248,7 +250,7 @@ typedef struct TriggerData - type + type Always T_TriggerData if this is a trigger event. @@ -257,7 +259,7 @@ typedef struct TriggerData - tg_event + tg_event describes the event for which the function is called. You may use the @@ -334,21 +336,24 @@ typedef struct TriggerData - tg_relation + tg_relation - is a pointer to structure describing the triggered relation. Look at - src/include/utils/rel.h for details about this structure. 
The most - interesting things are tg_relation->rd_att (descriptor of the relation - tuples) and tg_relation->rd_rel->relname (relation's name. This is not - char*, but NameData. Use SPI_getrelname(tg_relation) to get char* if - you need a copy of name). + is a pointer to structure describing the triggered + relation. Look at utils/rel.h for details about + this structure. The most interesting things are + tg_relation->rd_att (descriptor of the relation + tuples) and tg_relation->rd_rel->relname + (relation's name. This is not char*, but + NameData. Use + SPI_getrelname(tg_relation) to get char* if you + need a copy of the name). - tg_trigtuple + tg_trigtuple is a pointer to the tuple for which the trigger is fired. This is the tuple @@ -361,10 +366,10 @@ typedef struct TriggerData - tg_newtuple + tg_newtuple - is a pointer to the new version of tuple if UPDATE and NULL if this is + is a pointer to the new version of tuple if UPDATE and NULL if this is for an INSERT or a DELETE. This is what you are to return to Executor if UPDATE and you don't want to replace this tuple with another one or skip the operation. @@ -373,12 +378,12 @@ typedef struct TriggerData - tg_trigger + tg_trigger - is pointer to structure Trigger defined in src/include/utils/rel.h: + is pointer to structure Trigger defined in utils/rel.h: - + typedef struct Trigger { Oid tgoid; @@ -394,12 +399,13 @@ typedef struct Trigger int16 tgattr[FUNC_MAX_ARGS]; char **tgargs; } Trigger; - + - where - tgname is the trigger's name, tgnargs is number of arguments in tgargs, - tgargs is an array of pointers to the arguments specified in the CREATE - TRIGGER statement. Other members are for internal use only. + where tgname is the trigger's name, + tgnargs is number of arguments in + tgargs, tgargs is an array of + pointers to the arguments specified in the CREATE TRIGGER + statement. Other members are for internal use only. @@ -415,9 +421,9 @@ typedef struct Trigger changes made by the query itself (via SQL-function, SPI-function, triggers) are invisible to the query scan. For example, in query - + INSERT INTO a SELECT * FROM a; - + tuples inserted are invisible for SELECT scan. In effect, this duplicates the database table within itself (subject to unique index @@ -438,7 +444,7 @@ execution of Q) or after Q is done. This is true for triggers as well so, though a tuple being inserted - (tg_trigtuple) is not visible to queries in a BEFORE trigger, this tuple + (tg_trigtuple) is not visible to queries in a BEFORE trigger, this tuple (just inserted) is visible to queries in an AFTER trigger, and to queries in BEFORE/AFTER triggers fired after this! @@ -454,14 +460,14 @@ execution of Q) or after Q is done. - Here is a very simple example of trigger usage. Function trigf reports - the number of tuples in the triggered relation ttest and skips the - operation if the query attempts to insert NULL into x (i.e - it acts as a - NOT NULL constraint but doesn't abort the transaction). + Here is a very simple example of trigger usage. Function trigf reports + the number of tuples in the triggered relation ttest and skips the + operation if the query attempts to insert a null value into x (i.e - it acts as a + not-null constraint but doesn't abort the transaction). 
- -#include "executor/spi.h" /* this is what you need to work with SPI */ -#include "commands/trigger.h" /* -"- and triggers */ + +#include "executor/spi.h" /* this is what you need to work with SPI */ +#include "commands/trigger.h" /* -"- and triggers */ extern Datum trigf(PG_FUNCTION_ARGS); @@ -470,79 +476,79 @@ PG_FUNCTION_INFO_V1(trigf); Datum trigf(PG_FUNCTION_ARGS) { - TriggerData *trigdata = (TriggerData *) fcinfo->context; - TupleDesc tupdesc; - HeapTuple rettuple; - char *when; - bool checknull = false; - bool isnull; - int ret, i; - - /* Make sure trigdata is pointing at what I expect */ - if (!CALLED_AS_TRIGGER(fcinfo)) - elog(ERROR, "trigf: not fired by trigger manager"); - - /* tuple to return to Executor */ - if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) - rettuple = trigdata->tg_newtuple; - else - rettuple = trigdata->tg_trigtuple; - - /* check for NULLs ? */ - if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event) && - TRIGGER_FIRED_BEFORE(trigdata->tg_event)) - checknull = true; - - if (TRIGGER_FIRED_BEFORE(trigdata->tg_event)) - when = "before"; - else - when = "after "; - - tupdesc = trigdata->tg_relation->rd_att; - - /* Connect to SPI manager */ - if ((ret = SPI_connect()) < 0) - elog(INFO, "trigf (fired %s): SPI_connect returned %d", when, ret); - - /* Get number of tuples in relation */ - ret = SPI_exec("SELECT count(*) FROM ttest", 0); - - if (ret < 0) - elog(NOTICE, "trigf (fired %s): SPI_exec returned %d", when, ret); - - /* count(*) returns int8 as of PG 7.2, so be careful to convert */ - i = (int) DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0], - SPI_tuptable->tupdesc, - 1, - &isnull)); - - elog (NOTICE, "trigf (fired %s): there are %d tuples in ttest", when, i); - - SPI_finish(); - - if (checknull) - { - (void) SPI_getbinval(rettuple, tupdesc, 1, &isnull); - if (isnull) - rettuple = NULL; - } - - return PointerGetDatum(rettuple); + TriggerData *trigdata = (TriggerData *) fcinfo->context; + TupleDesc tupdesc; + HeapTuple rettuple; + char *when; + bool checknull = false; + bool isnull; + int ret, i; + + /* Make sure trigdata is pointing at what I expect */ + if (!CALLED_AS_TRIGGER(fcinfo)) + elog(ERROR, "trigf: not fired by trigger manager"); + + /* tuple to return to Executor */ + if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) + rettuple = trigdata->tg_newtuple; + else + rettuple = trigdata->tg_trigtuple; + + /* check for null values */ + if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event) + && TRIGGER_FIRED_BEFORE(trigdata->tg_event)) + checknull = true; + + if (TRIGGER_FIRED_BEFORE(trigdata->tg_event)) + when = "before"; + else + when = "after "; + + tupdesc = trigdata->tg_relation->rd_att; + + /* Connect to SPI manager */ + if ((ret = SPI_connect()) < 0) + elog(INFO, "trigf (fired %s): SPI_connect returned %d", when, ret); + + /* Get number of tuples in relation */ + ret = SPI_exec("SELECT count(*) FROM ttest", 0); + + if (ret < 0) + elog(NOTICE, "trigf (fired %s): SPI_exec returned %d", when, ret); + + /* count(*) returns int8 as of PG 7.2, so be careful to convert */ + i = (int) DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0], + SPI_tuptable->tupdesc, + 1, + &isnull)); + + elog (NOTICE, "trigf (fired %s): there are %d tuples in ttest", when, i); + + SPI_finish(); + + if (checknull) + { + (void) SPI_getbinval(rettuple, tupdesc, 1, &isnull); + if (isnull) + rettuple = NULL; + } + + return PointerGetDatum(rettuple); } - + Now, compile and create the trigger function: - + CREATE FUNCTION trigf () RETURNS TRIGGER AS -'...path_to_so' LANGUAGE 'C'; 
+'...path_to_so' LANGUAGE C; CREATE TABLE ttest (x int4); - + - + vac=> CREATE TRIGGER tbefore BEFORE INSERT OR UPDATE OR DELETE ON ttest FOR EACH ROW EXECUTE PROCEDURE trigf(); CREATE @@ -556,8 +562,8 @@ INSERT 0 0 -- Insertion skipped and AFTER trigger is not fired vac=> SELECT * FROM ttest; -x -- + x +--- (0 rows) vac=> INSERT INTO ttest VALUES (1); @@ -567,9 +573,9 @@ INFO: trigf (fired after ): there are 1 tuples in ttest remember what we said about visibility. INSERT 167793 1 vac=> SELECT * FROM ttest; -x -- -1 + x +--- + 1 (1 row) vac=> INSERT INTO ttest SELECT x * 2 FROM ttest; @@ -579,13 +585,13 @@ INFO: trigf (fired after ): there are 2 tuples in ttest remember what we said about visibility. INSERT 167794 1 vac=> SELECT * FROM ttest; -x -- -1 -2 + x +--- + 1 + 2 (2 rows) -vac=> UPDATE ttest SET x = null WHERE x = 2; +vac=> UPDATE ttest SET x = NULL WHERE x = 2; INFO: trigf (fired before): there are 2 tuples in ttest UPDATE 0 vac=> UPDATE ttest SET x = 4 WHERE x = 2; @@ -593,10 +599,10 @@ INFO: trigf (fired before): there are 2 tuples in ttest INFO: trigf (fired after ): there are 2 tuples in ttest UPDATE 1 vac=> SELECT * FROM ttest; -x -- -1 -4 + x +--- + 1 + 4 (2 rows) vac=> DELETE FROM ttest; @@ -608,10 +614,10 @@ INFO: trigf (fired after ): there are 0 tuples in ttest remember what we said about visibility. DELETE 2 vac=> SELECT * FROM ttest; -x -- + x +--- (0 rows) - + diff --git a/doc/src/sgml/xaggr.sgml b/doc/src/sgml/xaggr.sgml index e82406ec022..4496b45c5f6 100644 --- a/doc/src/sgml/xaggr.sgml +++ b/doc/src/sgml/xaggr.sgml @@ -1,5 +1,5 @@ @@ -86,7 +86,7 @@ SELECT complex_sum(a) FROM test_complex; Another bit of default behavior for a strict transition function is that the previous state value is retained unchanged whenever a - NULL input value is encountered. Thus, NULLs are ignored. If you + NULL input value is encountered. Thus, null values are ignored. If you need some other behavior for NULL inputs, just define your transition function as non-strict, and code it to test for NULL inputs and do whatever is needed. diff --git a/doc/src/sgml/xfunc.sgml b/doc/src/sgml/xfunc.sgml index 3999bf81769..fba17d13bc3 100644 --- a/doc/src/sgml/xfunc.sgml +++ b/doc/src/sgml/xfunc.sgml @@ -1,5 +1,5 @@ @@ -1183,7 +1183,7 @@ CREATE FUNCTION concat_text(text, text) RETURNS text meaning that the system should automatically assume a NULL result if any input value is NULL. By doing this, we avoid having to check for NULL inputs - in the function code. Without this, we'd have to check for NULLs + in the function code. Without this, we'd have to check for null values explicitly, for example by checking for a null pointer for each pass-by-reference argument. (For pass-by-value arguments, we don't even have a way to check!) @@ -1497,7 +1497,7 @@ LANGUAGE C; either base (scalar) data types, or composite (multi-column) data types. The API is split into two main components: support for returning composite data types, and support for returning multiple rows - (set returning functions or SRFs). + (set returning functions or SRFs). @@ -1511,17 +1511,19 @@ LANGUAGE C; - Returning Tuples (Composite Types) + Returning Rows (Composite Types) The Table Function API support for returning composite data types - (or tuples) starts with the AttInMetadata struct. This struct holds - arrays of individual attribute information needed to create a tuple from - raw C strings. It also saves a pointer to the TupleDesc. 
The information - carried here is derived from the TupleDesc, but it is stored here to - avoid redundant CPU cycles on each call to a Table Function. In the - case of a function returning a set, the AttInMetadata struct should be - computed once during the first call and saved for re-use in later calls. + (or rows) starts with the AttInMetadata + structure. This structure holds arrays of individual attribute + information needed to create a row from raw C strings. It also + saves a pointer to the TupleDesc. The information + carried here is derived from the TupleDesc, but it + is stored here to avoid redundant CPU cycles on each call to a + table function. In the case of a function returning a set, the + AttInMetadata structure should be computed + once during the first call and saved for re-use in later calls. typedef struct AttInMetadata { @@ -1538,70 +1540,80 @@ typedef struct AttInMetadata int32 *atttypmods; } AttInMetadata; - To assist you in populating this struct, several functions and a macro + + + + To assist you in populating this structure, several functions and a macro are available. Use TupleDesc RelationNameGetTupleDesc(const char *relname) - to get a TupleDesc based on a specified relation, or + to get a TupleDesc based on a specified relation, or TupleDesc TypeGetTupleDesc(Oid typeoid, List *colaliases) - to get a TupleDesc based on a type OID. This can be used to - get a TupleDesc for a base (scalar) or composite (relation) type. Then + to get a TupleDesc based on a type OID. This can + be used to get a TupleDesc for a base (scalar) or + composite (relation) type. Then AttInMetadata *TupleDescGetAttInMetadata(TupleDesc tupdesc) - will return a pointer to an AttInMetadata struct, initialized based on - the given TupleDesc. AttInMetadata can be used in conjunction with - C strings to produce a properly formed tuple. The metadata is stored here - to avoid redundant work across multiple calls. + will return a pointer to an AttInMetadata, + initialized based on the given + TupleDesc. AttInMetadata can be + used in conjunction with C strings to produce a properly formed + tuple. The metadata is stored here to avoid redundant work across + multiple calls. To return a tuple you must create a tuple slot based on the - TupleDesc. You can use + TupleDesc. You can use TupleTableSlot *TupleDescGetSlot(TupleDesc tupdesc) to initialize this tuple slot, or obtain one through other (user provided) - means. The tuple slot is needed to create a Datum for return by the + means. The tuple slot is needed to create a Datum for return by the function. The same slot can (and should) be re-used on each call. - After constructing an AttInMetadata structure, + After constructing an AttInMetadata structure, HeapTuple BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values) - can be used to build a HeapTuple given user data in C string form. - "values" is an array of C strings, one for each attribute of the return - tuple. Each C string should be in the form expected by the input function - of the attribute data type. In order to return a NULL value for - one of the attributes, the corresponding pointer in the "values" array - should be set to NULL. This function will need to be called again - for each tuple you return. + can be used to build a HeapTuple given user data + in C string form. "values" is an array of C strings, one for + each attribute of the return tuple. Each C string should be in + the form expected by the input function of the attribute data + type. 
In order to return a null value for one of the attributes, + the corresponding pointer in the values array + should be set to NULL. This function will need to + be called again for each tuple you return. - Building a tuple via TupleDescGetAttInMetadata and BuildTupleFromCStrings - is only convenient if your function naturally computes the values to - be returned as text strings. If your code naturally computes the - values as a set of Datums, you should instead use the underlying - heap_formtuple routine to convert the Datums directly into a tuple. - You will still need the TupleDesc and a TupleTableSlot, but not - AttInMetadata. + Building a tuple via TupleDescGetAttInMetadata and + BuildTupleFromCStrings is only convenient if your + function naturally computes the values to be returned as text + strings. If your code naturally computes the values as a set of + Datums, you should instead use the underlying + heap_formtuple routine to convert the + Datums directly into a tuple. You will still need + the TupleDesc and a TupleTableSlot, + but not AttInMetadata. Once you have built a tuple to return from your function, the tuple must - be converted into a Datum. Use + be converted into a Datum. Use TupleGetDatum(TupleTableSlot *slot, HeapTuple tuple) - to get a Datum given a tuple and a slot. This Datum can be returned - directly if you intend to return just a single row, or it can be used - as the current return value in a set-returning function. + to get a Datum given a tuple and a slot. This + Datum can be returned directly if you intend to return + just a single row, or it can be used as the current return value + in a set-returning function. @@ -1614,74 +1626,75 @@ TupleGetDatum(TupleTableSlot *slot, HeapTuple tuple) Returning Sets - A set-returning function (SRF) is normally called once for each item it - returns. The SRF must therefore save enough state to remember what it - was doing and return the next item on each call. The Table Function API - provides the FuncCallContext struct to help control this process. - fcinfo->flinfo->fn_extra is used to - hold a pointer to FuncCallContext across calls. + A set-returning function (SRF) is normally called + once for each item it returns. The SRF must + therefore save enough state to remember what it was doing and + return the next item on each call. The Table Function API + provides the FuncCallContext structure to help + control this process. fcinfo->flinfo->fn_extra + is used to hold a pointer to FuncCallContext + across calls. typedef struct { - /* - * Number of times we've been called before. - * - * call_cntr is initialized to 0 for you by SRF_FIRSTCALL_INIT(), and - * incremented for you every time SRF_RETURN_NEXT() is called. - */ - uint32 call_cntr; - - /* - * OPTIONAL maximum number of calls - * - * max_calls is here for convenience ONLY and setting it is OPTIONAL. - * If not set, you must provide alternative means to know when the - * function is done. - */ - uint32 max_calls; - - /* - * OPTIONAL pointer to result slot - * - * slot is for use when returning tuples (i.e. composite data types) - * and is not needed when returning base (i.e. scalar) data types. - */ - TupleTableSlot *slot; - - /* - * OPTIONAL pointer to misc user provided context info - * - * user_fctx is for use as a pointer to your own struct to retain - * arbitrary context information between calls for your function. 
- */ - void *user_fctx; - - /* - * OPTIONAL pointer to struct containing arrays of attribute type input - * metainfo - * - * attinmeta is for use when returning tuples (i.e. composite data types) - * and is not needed when returning base (i.e. scalar) data types. It - * is ONLY needed if you intend to use BuildTupleFromCStrings() to create - * the return tuple. - */ - AttInMetadata *attinmeta; - - /* - * memory context used for structures which must live for multiple calls - * - * multi_call_memory_ctx is set by SRF_FIRSTCALL_INIT() for you, and used - * by SRF_RETURN_DONE() for cleanup. It is the most appropriate memory - * context for any memory that is to be re-used across multiple calls - * of the SRF. - */ - MemoryContext multi_call_memory_ctx; - -} FuncCallContext; + /* + * Number of times we've been called before. + * + * call_cntr is initialized to 0 for you by SRF_FIRSTCALL_INIT(), and + * incremented for you every time SRF_RETURN_NEXT() is called. + */ + uint32 call_cntr; + + /* + * OPTIONAL maximum number of calls + * + * max_calls is here for convenience ONLY and setting it is OPTIONAL. + * If not set, you must provide alternative means to know when the + * function is done. + */ + uint32 max_calls; + + /* + * OPTIONAL pointer to result slot + * + * slot is for use when returning tuples (i.e. composite data types) + * and is not needed when returning base (i.e. scalar) data types. + */ + TupleTableSlot *slot; + + /* + * OPTIONAL pointer to misc user provided context info + * + * user_fctx is for use as a pointer to your own struct to retain + * arbitrary context information between calls for your function. + */ + void *user_fctx; + + /* + * OPTIONAL pointer to struct containing arrays of attribute type input + * metainfo + * + * attinmeta is for use when returning tuples (i.e. composite data types) + * and is not needed when returning base (i.e. scalar) data types. It + * is ONLY needed if you intend to use BuildTupleFromCStrings() to create + * the return tuple. + */ + AttInMetadata *attinmeta; + + /* + * memory context used for structures which must live for multiple calls + * + * multi_call_memory_ctx is set by SRF_FIRSTCALL_INIT() for you, and used + * by SRF_RETURN_DONE() for cleanup. It is the most appropriate memory + * context for any memory that is to be re-used across multiple calls + * of the SRF. + */ + MemoryContext multi_call_memory_ctx; +} FuncCallContext; - An SRF uses several functions and macros that automatically manipulate - the FuncCallContext struct (and expect to find it via - fn_extra). Use + An SRF uses several functions and macros that + automatically manipulate the FuncCallContext + structure (and expect to find it via fn_extra). Use SRF_IS_FIRSTCALL() @@ -1690,13 +1703,14 @@ SRF_IS_FIRSTCALL() SRF_FIRSTCALL_INIT() - to initialize the FuncCallContext struct. On every function call, + to initialize the FuncCallContext. On every function call, including the first, use SRF_PERCALL_SETUP() - to properly set up for using the FuncCallContext struct and clearing - any previously returned data left over from the previous pass. + to properly set up for using the FuncCallContext + and clearing any previously returned data left over from the + previous pass. @@ -1704,24 +1718,25 @@ SRF_PERCALL_SETUP() SRF_RETURN_NEXT(funcctx, result) - to return it to the caller. (The result - must be a Datum, either a single value or a tuple prepared as described - earlier.) Finally, when your function is finished returning data, use + to return it to the caller. 
(The result must be a + Datum, either a single value or a tuple prepared as + described earlier.) Finally, when your function is finished + returning data, use SRF_RETURN_DONE(funcctx) - to clean up and end the SRF. + to clean up and end the SRF. - The palloc memory context that is current when the SRF is called is + The memory context that is current when the SRF is called is a transient context that will be cleared between calls. This means - that you do not need to be careful about pfree'ing everything - you palloc; it will go away anyway. However, if you want to allocate + that you do not need to pfree everything + you palloc; it will go away anyway. However, if you want to allocate any data structures to live across calls, you need to put them somewhere else. The memory context referenced by multi_call_memory_ctx is a suitable location for any - data that needs to survive until the SRF is finished running. In most + data that needs to survive until the SRF is finished running. In most cases, this means that you should switch into multi_call_memory_ctx while doing the first-call setup. @@ -1776,7 +1791,7 @@ my_Set_Returning_Function(PG_FUNCTION_ARGS) - A complete example of a simple SRF returning a composite type looks like: + A complete example of a simple SRF returning a composite type looks like: PG_FUNCTION_INFO_V1(testpassbyval); Datum @@ -1882,7 +1897,7 @@ CREATE OR REPLACE FUNCTION testpassbyval(int4, int4) RETURNS setof __testpassbyv - See contrib/tablefunc for more examples of Table Functions. + See contrib/tablefunc for more examples of table functions. @@ -1897,7 +1912,7 @@ CREATE OR REPLACE FUNCTION testpassbyval(int4, int4) RETURNS setof __testpassbyv programming language functions. Be warned: this section of the manual will not make you a programmer. You must have a good understanding of C - (including the use of pointers and the malloc memory manager) + (including the use of pointers) before trying to write C functions for use with PostgreSQL. While it may be possible to load functions written in languages other @@ -2183,15 +2198,15 @@ WHERE proname LIKE 'bytea%'; The call handler is called in the same way as any other function: It receives a pointer to a - FunctionCallInfoData struct containing + FunctionCallInfoData struct containing argument values and information about the called function, and it is expected to return a Datum result (and possibly set the isnull field of the - FunctionCallInfoData struct, if it wishes + FunctionCallInfoData structure, if it wishes to return an SQL NULL result). The difference between a call handler and an ordinary callee function is that the flinfo->fn_oid field of the - FunctionCallInfoData struct will contain + FunctionCallInfoData structure will contain the OID of the actual function to be called, not of the call handler itself. The call handler must use this field to determine which function to execute. Also, the passed argument list has diff --git a/doc/src/sgml/xindex.sgml b/doc/src/sgml/xindex.sgml index 42b4167cab0..62467dca57e 100644 --- a/doc/src/sgml/xindex.sgml +++ b/doc/src/sgml/xindex.sgml @@ -1,5 +1,5 @@ @@ -16,7 +16,7 @@ PostgreSQL documentation over a new type, nor associate operators of a new type with secondary indexes. To do these things, we must define an operator class - for the new datatype. We will describe operator classes in the + for the new data type. 
We will describe operator classes in the context of a running example: a new operator class for the B-tree access method that stores and sorts complex numbers in ascending absolute value order. @@ -25,7 +25,7 @@ PostgreSQL documentation Prior to PostgreSQL release 7.3, it was - necesssary to make manual additions to + necessary to make manual additions to pg_amop, pg_amproc, and pg_opclass in order to create a user-defined operator class. That approach is now deprecated in favor of @@ -55,7 +55,7 @@ PostgreSQL documentation access method needs to be able to use to work with a particular data type. Operator classes are so called because one thing they specify is the set of WHERE-clause operators that can be used with an index (ie, can be - converted into an indexscan qualification). An operator class may also + converted into an index scan qualification). An operator class may also specify some support procedures that are needed by the internal operations of the index access method, but do not directly correspond to any WHERE-clause operator that can be used with the index. @@ -63,16 +63,16 @@ PostgreSQL documentation It is possible to define multiple operator classes for the same - input datatype and index access method. By doing this, multiple - sets of indexing semantics can be defined for a single datatype. + input data type and index access method. By doing this, multiple + sets of indexing semantics can be defined for a single data type. For example, a B-tree index requires a sort ordering to be defined - for each datatype it works on. - It might be useful for a complex-number datatype + for each data type it works on. + It might be useful for a complex-number data type to have one B-tree operator class that sorts the data by complex absolute value, another that sorts by real part, and so on. Typically one of the operator classes will be deemed most commonly useful and will be marked as the default operator class for that - datatype and index access method. + data type and index access method. @@ -101,7 +101,7 @@ PostgreSQL documentation comparison it is. Instead, the index access method defines a set of strategies, which can be thought of as generalized operators. Each operator class shows which actual operator corresponds to each - strategy for a particular datatype and interpretation of the index + strategy for a particular data type and interpretation of the index semantics. @@ -240,7 +240,7 @@ PostgreSQL documentation In short, an operator class must specify a set of operators that express - each of these semantic ideas for the operator class's datatype. + each of these semantic ideas for the operator class's data type. @@ -262,7 +262,7 @@ PostgreSQL documentation Just as with operators, the operator class identifies which specific - functions should play each of these roles for a given datatype and + functions should play each of these roles for a given data type and semantic interpretation. The index access method specifies the set of functions it needs, and the operator class identifies the correct functions to use by assigning support function numbers to them. @@ -572,7 +572,7 @@ CREATE OPERATOR CLASS complex_abs_ops OPERATOR 1 < (complex, complex) , - but there is no need to do so when the operators take the same datatype + but there is no need to do so when the operators take the same data type we are defining the operator class for. 
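As a sketch of what stands behind such an operator class entry, the B-tree support procedure for the <literal>complex_abs_ops</literal> running example could look roughly as follows. It assumes the <structname>Complex</structname> structure used as the example data type elsewhere in this documentation (two <type>double</type> fields), and it compares squared magnitudes, which orders values the same way as comparing true absolute values; it is meant only to illustrate the support-function contract (return negative, zero, or positive), not to be a definitive implementation.

<programlisting>
#include "postgres.h"
#include "fmgr.h"

/* assumed layout of the example data type */
typedef struct Complex
{
    double      x;
    double      y;
} Complex;

/* squared magnitude; monotonic in the true absolute value */
#define Mag(c)  ((c)->x * (c)->x + (c)->y * (c)->y)

PG_FUNCTION_INFO_V1(complex_abs_cmp);

Datum
complex_abs_cmp(PG_FUNCTION_ARGS)
{
    Complex    *a = (Complex *) PG_GETARG_POINTER(0);
    Complex    *b = (Complex *) PG_GETARG_POINTER(1);
    double      amag = Mag(a);
    double      bmag = Mag(b);

    if (amag < bmag)
        PG_RETURN_INT32(-1);
    if (amag > bmag)
        PG_RETURN_INT32(1);
    PG_RETURN_INT32(0);
}
</programlisting>

In the <command>CREATE OPERATOR CLASS</command> statement this comparison routine would be listed as <literal>FUNCTION 1</literal>, alongside the five strategy operators.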
@@ -631,9 +631,9 @@ CREATE OPERATOR CLASS polygon_ops At present, only the GiST access method supports a - STORAGE type that's different from the column datatype. + STORAGE type that's different from the column data type. The GiST compress and decompress support - routines must deal with datatype conversion when STORAGE + routines must deal with data-type conversion when STORAGE is used. diff --git a/doc/src/sgml/xoper.sgml b/doc/src/sgml/xoper.sgml index 079caad5cbd..23db99705f7 100644 --- a/doc/src/sgml/xoper.sgml +++ b/doc/src/sgml/xoper.sgml @@ -1,5 +1,5 @@ @@ -200,9 +200,9 @@ SELECT (a + b) AS c FROM test_complex; Providing a negator is very helpful to the query optimizer since - it allows expressions like NOT (x = y) to be simplified into + it allows expressions like NOT (x = y) to be simplified into x <> y. This comes up more often than you might think, because - NOTs can be inserted as a consequence of other rearrangements. + NOT operations can be inserted as a consequence of other rearrangements. @@ -378,14 +378,14 @@ table1.column1 OP table2.column2 - MERGES (SORT1, SORT2, LTCMP, GTCMP) + <literal>MERGES</> (<literal>SORT1</>, <literal>SORT2</>, <literal>LTCMP</>, <literal>GTCMP</>) The MERGES clause, if present, tells the system that it is permissible to use the merge join method for a join based on this operator. MERGES only makes sense for binary operators that return boolean, and in practice the operator must represent - equality for some datatype or pair of datatypes. + equality for some data type or pair of data types. @@ -400,19 +400,19 @@ table1.column1 OP table2.column2 it is possible to merge-join two distinct data types so long as they are logically compatible. For example, the int2-versus-int4 equality operator - is mergejoinable. + is merge-joinable. We only need sorting operators that will bring both data types into a logically compatible sequence. Execution of a merge join requires that the system be able to identify - four operators related to the mergejoin equality operator: less-than - comparison for the left input datatype, less-than comparison for the - right input datatype, less-than comparison between the two datatypes, and - greater-than comparison between the two datatypes. (These are actually - four distinct operators if the mergejoinable operator has two different - input datatypes; but when the input types are the same the three + four operators related to the merge-join equality operator: less-than + comparison for the left input data type, less-than comparison for the + right input data type, less-than comparison between the two data types, and + greater-than comparison between the two data types. (These are actually + four distinct operators if the merge-joinable operator has two different + input data types; but when the input types are the same the three less-than operators are all the same operator.) It is possible to specify these operators individually by name, as the SORT1, @@ -426,8 +426,8 @@ table1.column1 OP table2.column2 - The input datatypes of the four comparison operators can be deduced - from the input types of the mergejoinable operator, so just as with + The input data types of the four comparison operators can be deduced + from the input types of the merge-joinable operator, so just as with COMMUTATOR, only the operator names need be given in these clauses. 
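To make the int2-versus-int4 case concrete, the cross-type comparison functions underlying such a merge-joinable equality operator have essentially the following shape at the C level. The <literal>my_</literal> prefix marks these as illustrative copies rather than the built-in routines; the same-type orderings for the left and right inputs would be supplied by the ordinary <type>int2</type> and <type>int4</type> less-than operators.

<programlisting>
#include "postgres.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(my_int24eq);
PG_FUNCTION_INFO_V1(my_int24lt);
PG_FUNCTION_INFO_V1(my_int24gt);

/* cross-type equality: the merge-joinable operator itself */
Datum
my_int24eq(PG_FUNCTION_ARGS)
{
    int16       arg1 = PG_GETARG_INT16(0);
    int32       arg2 = PG_GETARG_INT32(1);

    PG_RETURN_BOOL((int32) arg1 == arg2);
}

/* cross-type less-than: the operator LTCMP would name */
Datum
my_int24lt(PG_FUNCTION_ARGS)
{
    int16       arg1 = PG_GETARG_INT16(0);
    int32       arg2 = PG_GETARG_INT32(1);

    PG_RETURN_BOOL((int32) arg1 < arg2);
}

/* cross-type greater-than: the operator GTCMP would name */
Datum
my_int24gt(PG_FUNCTION_ARGS)
{
    int16       arg1 = PG_GETARG_INT16(0);
    int32       arg2 = PG_GETARG_INT32(1);

    PG_RETURN_BOOL((int32) arg1 > arg2);
}
</programlisting>

Each of these would then be exposed through <command>CREATE FUNCTION</command> and <command>CREATE OPERATOR</command> declarations, with the equality operator marked <literal>MERGES</literal>.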
Unless you are using peculiar choices of operator names, it's sufficient to write MERGES and let the system fill in @@ -440,14 +440,14 @@ table1.column1 OP table2.column2 There are additional restrictions on operators that you mark - mergejoinable. These restrictions are not currently checked by + merge-joinable. These restrictions are not currently checked by CREATE OPERATOR, but errors may occur when the operator is used if any are not true: - A mergejoinable equality operator must have a mergejoinable + A merge-joinable equality operator must have a merge-joinable commutator (itself if the two data types are the same, or a related equality operator if they are different). @@ -455,10 +455,10 @@ table1.column1 OP table2.column2 - If there is a mergejoinable operator relating any two data types - A and B, and another mergejoinable operator relating B to any - third data type C, then A and C must also have a mergejoinable - operator; in other words, having a mergejoinable operator must + If there is a merge-joinable operator relating any two data types + A and B, and another merge-joinable operator relating B to any + third data type C, then A and C must also have a merge-joinable + operator; in other words, having a merge-joinable operator must be transitive. @@ -476,7 +476,7 @@ table1.column1 OP table2.column2 In PostgreSQL versions before 7.3, the MERGES shorthand was not available: to make a - mergejoinable operator one had to write both SORT1 and + merge-joinable operator one had to write both SORT1 and SORT2 explicitly. Also, the LTCMP and GTCMP options did not exist; the names of those operators were hardwired as diff --git a/doc/src/sgml/xplang.sgml b/doc/src/sgml/xplang.sgml index 34bdcf664f8..5207cf84373 100644 --- a/doc/src/sgml/xplang.sgml +++ b/doc/src/sgml/xplang.sgml @@ -1,5 +1,5 @@ @@ -122,10 +122,10 @@ CREATE TRUSTED PROCEDURAL LANGUAGE PostgreSQL installation, the handler for the PL/pgSQL language is built and installed into the library - directory. If Tcl/Tk support is configured in, the handlers for - PL/Tcl and PL/TclU are also built and installed in the same - location. Likewise, the PL/Perl and PL/PerlU handlers are built - and installed if Perl support is configured, and PL/Python is + directory. If Tcl/Tk support is configured in, the handlers for + PL/Tcl and PL/TclU are also built and installed in the same + location. Likewise, the PL/Perl and PL/PerlU handlers are built + and installed if Perl support is configured, and PL/Python is installed if Python support is configured. The createlang script automates and