* For the raison d'etre of this file, check the comment above the definition
* of the PGAC_C_INLINE macro in config/c-compiler.m4.
*/
-static inline int fun () { return 0; }
+static inline int
+fun()
+{
+ return 0;
+}
DefineCustomBoolVariable("auto_explain.log_triggers",
"Include trigger statistics in plans.",
- "This has no effect unless log_analyze is also set.",
+ "This has no effect unless log_analyze is also set.",
&auto_explain_log_triggers,
false,
PGC_SUSET,
/*
* INTERVALSIZE should be the actual size-on-disk of an Interval, as shown
- * in pg_type. This might be less than sizeof(Interval) if the compiler
+ * in pg_type. This might be less than sizeof(Interval) if the compiler
* insists on adding alignment padding at the end of the struct.
*/
#define INTERVALSIZE 16
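The comment above distinguishes a type's on-disk size from sizeof(), which can be larger when the compiler pads the struct tail for alignment. A minimal standalone sketch (hypothetical struct, not the real Interval) that makes the difference visible:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical example type: the double forces 8-byte alignment, so most
 * ABIs pad the struct from 12 useful bytes up to sizeof == 16. */
typedef struct PaddedExample
{
	double		t;				/* 8 bytes */
	int			d;				/* 4 bytes, then 4 bytes of tail padding */
} PaddedExample;

int
main(void)
{
	size_t		payload = offsetof(PaddedExample, d) + sizeof(int);

	printf("payload bytes: %zu, sizeof: %zu\n", payload, sizeof(PaddedExample));
	/* An on-disk size constant (like INTERVALSIZE) would use the payload
	 * figure, not sizeof(). */
	return 0;
}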
** Auxiliary functions
*/
static double distance_1D(double a1, double a2, double b1, double b2);
-static bool cube_is_point_internal(NDBOX *cube);
+static bool cube_is_point_internal(NDBOX *cube);
/*****************************************************************************
rt_cube_size(datum_r, &size_r);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
{
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/
SET_VARSIZE(result, size);
SET_DIM(result, dim);
- /* First compute the union of the dimensions present in both args */
+ /* First compute the union of the dimensions present in both args */
for (i = 0; i < DIM(b); i++)
{
result->x[i] = Min(
- Min(LL_COORD(a, i), UR_COORD(a, i)),
- Min(LL_COORD(b, i), UR_COORD(b, i))
- );
+ Min(LL_COORD(a, i), UR_COORD(a, i)),
+ Min(LL_COORD(b, i), UR_COORD(b, i))
+ );
result->x[i + DIM(a)] = Max(
- Max(LL_COORD(a, i), UR_COORD(a, i)),
- Max(LL_COORD(b, i), UR_COORD(b, i))
- );
+ Max(LL_COORD(a, i), UR_COORD(a, i)),
+ Max(LL_COORD(b, i), UR_COORD(b, i))
+ );
}
/* continue on the higher dimensions only present in 'a' */
for (; i < DIM(a); i++)
{
result->x[i] = Min(0,
- Min(LL_COORD(a, i), UR_COORD(a, i))
- );
+ Min(LL_COORD(a, i), UR_COORD(a, i))
+ );
result->x[i + dim] = Max(0,
- Max(LL_COORD(a, i), UR_COORD(a, i))
- );
+ Max(LL_COORD(a, i), UR_COORD(a, i))
+ );
}
/*
if (DIM(a) < DIM(b))
{
NDBOX *tmp = b;
+
b = a;
a = tmp;
swapped = true;
SET_VARSIZE(result, size);
SET_DIM(result, dim);
- /* First compute intersection of the dimensions present in both args */
+ /* First compute intersection of the dimensions present in both args */
for (i = 0; i < DIM(b); i++)
{
result->x[i] = Max(
- Min(LL_COORD(a, i), UR_COORD(a, i)),
- Min(LL_COORD(b, i), UR_COORD(b, i))
- );
+ Min(LL_COORD(a, i), UR_COORD(a, i)),
+ Min(LL_COORD(b, i), UR_COORD(b, i))
+ );
result->x[i + DIM(a)] = Min(
- Max(LL_COORD(a, i), UR_COORD(a, i)),
- Max(LL_COORD(b, i), UR_COORD(b, i))
- );
+ Max(LL_COORD(a, i), UR_COORD(a, i)),
+ Max(LL_COORD(b, i), UR_COORD(b, i))
+ );
}
/* continue on the higher dimensions only present in 'a' */
for (; i < DIM(a); i++)
{
result->x[i] = Max(0,
- Min(LL_COORD(a, i), UR_COORD(a, i))
- );
+ Min(LL_COORD(a, i), UR_COORD(a, i))
+ );
result->x[i + DIM(a)] = Min(0,
- Max(LL_COORD(a, i), UR_COORD(a, i))
- );
+ Max(LL_COORD(a, i), UR_COORD(a, i))
+ );
}
/*
/* compute within the dimensions of (b) */
for (i = 0; i < DIM(b); i++)
{
- d = distance_1D(LL_COORD(a,i), UR_COORD(a,i), LL_COORD(b,i), UR_COORD(b,i));
+ d = distance_1D(LL_COORD(a, i), UR_COORD(a, i), LL_COORD(b, i), UR_COORD(b, i));
distance += d * d;
}
/* compute distance to zero for those dimensions in (a) absent in (b) */
for (i = DIM(b); i < DIM(a); i++)
{
- d = distance_1D(LL_COORD(a,i), UR_COORD(a,i), 0.0, 0.0);
+ d = distance_1D(LL_COORD(a, i), UR_COORD(a, i), 0.0, 0.0);
distance += d * d;
}
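The loops above sum squared per-dimension distances, calling distance_1D() once per axis. As a rough illustration of what such a helper computes (assumed semantics: zero when the segments overlap, otherwise the gap between their nearest ends; this is a sketch, not the module's definition):

static double
distance_1d_sketch(double a1, double a2, double b1, double b2)
{
	double		amin = (a1 < a2) ? a1 : a2;
	double		amax = (a1 < a2) ? a2 : a1;
	double		bmin = (b1 < b2) ? b1 : b2;
	double		bmax = (b1 < b2) ? b2 : b1;

	if (amax < bmin)
		return bmin - amax;		/* segment a lies entirely below b */
	if (bmax < amin)
		return amin - bmax;		/* segment b lies entirely below a */
	return 0.0;					/* segments overlap */
}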
return true;
/*
- * Even if the point-flag is not set, all the lower-left coordinates
- * might match the upper-right coordinates, so that the value is in
- * fact a point. Such values don't arise with current code - the point
- * flag is always set if appropriate - but they might be present on-disk
- * in clusters upgraded from pre-9.4 versions.
+ * Even if the point-flag is not set, all the lower-left coordinates might
+ * match the upper-right coordinates, so that the value is in fact a
+ * point. Such values don't arise with current code - the point flag is
+ * always set if appropriate - but they might be present on-disk in
+ * clusters upgraded from pre-9.4 versions.
*/
for (i = 0; i < DIM(cube); i++)
{
{
NDBOX *c = PG_GETARG_NDBOX(0);
int dim = DIM(c);
+
PG_FREE_IF_COPY(c, 0);
PG_RETURN_INT32(dim);
}
double result;
if (DIM(c) >= n && n > 0)
- result = Min(LL_COORD(c, n-1), UR_COORD(c, n-1));
+ result = Min(LL_COORD(c, n - 1), UR_COORD(c, n - 1));
else
result = 0;
double result;
if (DIM(c) >= n && n > 0)
- result = Max(LL_COORD(c, n-1), UR_COORD(c, n-1));
+ result = Max(LL_COORD(c, n - 1), UR_COORD(c, n - 1));
else
result = 0;
for (i = 0, j = dim; i < DIM(a); i++, j++)
{
- if (LL_COORD(a,i) >= UR_COORD(a,i))
+ if (LL_COORD(a, i) >= UR_COORD(a, i))
{
- result->x[i] = UR_COORD(a,i) - r;
- result->x[j] = LL_COORD(a,i) + r;
+ result->x[i] = UR_COORD(a, i) - r;
+ result->x[j] = LL_COORD(a, i) + r;
}
else
{
- result->x[i] = LL_COORD(a,i) - r;
- result->x[j] = UR_COORD(a,i) + r;
+ result->x[i] = LL_COORD(a, i) - r;
+ result->x[j] = UR_COORD(a, i) + r;
}
if (result->x[i] > result->x[j])
{
result->x[DIM(result) + i] = cube->x[DIM(cube) + i];
}
result->x[DIM(result) - 1] = x;
- result->x[2*DIM(result) - 1] = x;
+ result->x[2 * DIM(result) - 1] = x;
}
PG_FREE_IF_COPY(cube, 0);
int size;
int i;
- if (IS_POINT(cube) && (x1 == x2)){
+ if (IS_POINT(cube) && (x1 == x2))
+ {
size = POINT_SIZE((DIM(cube) + 1));
result = (NDBOX *) palloc0(size);
SET_VARSIZE(result, size);
*
* Following information is stored:
*
- * bits 0-7 : number of cube dimensions;
- * bits 8-30 : unused, initialize to zero;
- * bit 31 : point flag. If set, the upper right coordinates are not
+ * bits 0-7 : number of cube dimensions;
+ * bits 8-30 : unused, initialize to zero;
+ * bit 31 : point flag. If set, the upper right coordinates are not
* stored, and are implicitly the same as the lower left
* coordinates.
*----------
} NDBOX;
#define POINT_BIT 0x80000000
-#define DIM_MASK 0x7fffffff
+#define DIM_MASK 0x7fffffff
#define IS_POINT(cube) ( ((cube)->header & POINT_BIT) != 0 )
-#define SET_POINT_BIT(cube) ( (cube)->header |= POINT_BIT )
+#define SET_POINT_BIT(cube) ( (cube)->header |= POINT_BIT )
#define DIM(cube) ( (cube)->header & DIM_MASK )
-#define SET_DIM(cube, _dim) ( (cube)->header = ((cube)->header & ~DIM_MASK) | (_dim) )
+#define SET_DIM(cube, _dim) ( (cube)->header = ((cube)->header & ~DIM_MASK) | (_dim) )
#define LL_COORD(cube, i) ( (cube)->x[i] )
#define UR_COORD(cube, i) ( IS_POINT(cube) ? (cube)->x[i] : (cube)->x[(i) + DIM(cube)] )
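The macros above pack the dimension count and the point flag into a single 32-bit header word. A small self-contained sketch (simplified box with just a header and coordinate array, not the actual varlena layout) showing the flag and dimension being set and read back:

#include <stdio.h>
#include <stdint.h>

typedef struct MiniBox
{
	uint32_t	header;
	double		x[6];			/* enough room for a 3-D box */
} MiniBox;

#define SK_POINT_BIT	0x80000000
#define SK_DIM_MASK		0x7fffffff
#define SK_IS_POINT(c)	(((c)->header & SK_POINT_BIT) != 0)
#define SK_DIM(c)		((c)->header & SK_DIM_MASK)

int
main(void)
{
	MiniBox		b = {0};

	b.header = 3;				/* three dimensions */
	b.header |= SK_POINT_BIT;	/* upper-right coords omitted: a point */

	printf("dims=%u point=%d\n", SK_DIM(&b), SK_IS_POINT(&b));
	return 0;
}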
* Build sql statement to look up tuple of interest, ie, the one matching
* src_pkattvals. We used to use "SELECT *" here, but it's simpler to
* generate a result tuple that matches the table's physical structure,
- * with NULLs for any dropped columns. Otherwise we have to deal with two
+ * with NULLs for any dropped columns. Otherwise we have to deal with two
* different tupdescs and everything's very confusing.
*/
appendStringInfoString(&buf, "SELECT ");
}
/*
- * For non-superusers, insist that the connstr specify a password. This
+ * For non-superusers, insist that the connstr specify a password. This
* prevents a password from being picked up from .pgpass, a service file,
* the environment, etc. We don't want the postgres user's passwords
* to be accessible to non-superusers.
* distance between the points in miles on earth's surface
*
* If float8 is passed-by-value, the oldstyle version-0 calling convention
- * is unportable, so we use version-1. However, if it's passed-by-reference,
+ * is unportable, so we use version-1. However, if it's passed-by-reference,
* continue to use oldstyle. This is just because we'd like earthdistance
* to serve as a canary for any unintentional breakage of version-0 functions
* with float8 results.
{"encoding", ForeignTableRelationId},
{"force_not_null", AttributeRelationId},
{"force_null", AttributeRelationId},
+
/*
* force_quote is not supported by file_fdw because it's for COPY TO.
*/
errmsg("conflicting or redundant options")));
filename = defGetString(def);
}
+
/*
* force_not_null is a boolean option; after validation we can discard
* it - it will be retrieved later in get_file_fdw_attribute_options()
List *fnncolumns = NIL;
List *fncolumns = NIL;
- List *options = NIL;
+ List *options = NIL;
rel = heap_open(relid, AccessShareLock);
tupleDesc = RelationGetDescr(rel);
heap_close(rel, AccessShareLock);
- /* Return DefElem only when some column(s) have force_not_null / force_null options set */
+ /*
+ * Return DefElem only when some column(s) have force_not_null /
+ * force_null options set
+ */
if (fnncolumns != NIL)
options = lappend(options, makeDefElem("force_not_null", (Node *) fnncolumns));
if (fncolumns != NIL)
- options = lappend(options,makeDefElem("force_null", (Node *) fncolumns));
+ options = lappend(options, makeDefElem("force_null", (Node *) fncolumns));
return options;
}
&startup_cost, &total_cost);
/*
- * Create a ForeignPath node and add it as only possible path. We use the
+ * Create a ForeignPath node and add it as only possible path. We use the
* fdw_private list of the path to carry the convert_selectively option;
* it will be propagated into the fdw_private list of the Plan node.
*/
* planner's idea of the relation width; which is bogus if not all
* columns are being read, not to mention that the text representation
* of a row probably isn't the same size as its internal
- * representation. Possibly we could do something better, but the
+ * representation. Possibly we could do something better, but the
* real answer to anyone who complains is "ANALYZE" ...
*/
int tuple_width;
* which must have at least targrows entries.
* The actual number of rows selected is returned as the function result.
* We also count the total number of rows in the file and return it into
- * *totalrows. Note that *totaldeadrows is always set to 0.
+ * *totalrows. Note that *totaldeadrows is always set to 0.
*
* Note that the returned list of rows is not always in order by physical
* position in the file. Therefore, correlation estimates derived later
* array.
*
* If max_d >= 0, we only need to provide an accurate answer when that answer
- * is less than or equal to the bound. From any cell in the matrix, there is
+ * is less than or equal to the bound. From any cell in the matrix, there is
* theoretical "minimum residual distance" from that cell to the last column
* of the final row. This minimum residual distance is zero when the
* untransformed portions of the strings are of equal length (because we might
stop_column = m + 1;
/*
- * If max_d >= 0, determine whether the bound is impossibly tight. If so,
+ * If max_d >= 0, determine whether the bound is impossibly tight. If so,
* return max_d + 1 immediately. Otherwise, determine whether it's tight
* enough to limit the computation we must perform. If so, figure out
* initial stop column.
* need to fill in. If the string is growing, the theoretical
* minimum distance already incorporates the cost of deleting the
* number of characters necessary to make the two strings equal in
- * length. Each additional deletion forces another insertion, so
+ * length. Each additional deletion forces another insertion, so
* the best-case total cost increases by ins_c + del_c. If the
* string is shrinking, the minimum theoretical cost assumes no
* excess deletions; that is, we're starting no further right than
/*
* The main loop fills in curr, but curr[0] needs a special case: to
* transform the first 0 characters of s into the first j characters
- * of t, we must perform j insertions. However, if start_column > 0,
+ * of t, we must perform j insertions. However, if start_column > 0,
* this special case does not apply.
*/
if (start_column == 0)
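The comments above describe bounding the work by max_d and by a column window derived from the "minimum residual distance". Below is a deliberately simplified sketch of bounded edit distance (unit costs, whole-row early exit only; it does not implement the column-window optimization or the start_column/stop_column bookkeeping described above):

#include <stdlib.h>
#include <string.h>

static int
levenshtein_bounded_sketch(const char *s, const char *t, int max_d)
{
	int			m = (int) strlen(s);
	int			n = (int) strlen(t);
	int		   *prev = malloc((n + 1) * sizeof(int));
	int		   *curr = malloc((n + 1) * sizeof(int));
	int			i,
				j,
				result;

	for (j = 0; j <= n; j++)
		prev[j] = j;			/* turn "" into first j chars of t: j inserts */

	for (i = 1; i <= m; i++)
	{
		int			row_min;

		curr[0] = i;			/* delete the first i chars of s */
		row_min = curr[0];
		for (j = 1; j <= n; j++)
		{
			int			ins = curr[j - 1] + 1;
			int			del = prev[j] + 1;
			int			sub = prev[j - 1] + (s[i - 1] != t[j - 1]);
			int			best = ins < del ? ins : del;

			if (sub < best)
				best = sub;
			curr[j] = best;
			if (best < row_min)
				row_min = best;
		}
		if (max_d >= 0 && row_min > max_d)
		{
			free(prev);
			free(curr);
			return max_d + 1;	/* bound exceeded; exact value not needed */
		}
		{
			int		   *tmp = prev; /* swap the two rows */

			prev = curr;
			curr = tmp;
		}
	}
	result = prev[n];
	free(prev);
	free(curr);
	return result;
}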
* HEntry: there is one of these for each key _and_ value in an hstore
*
* the position offset points to the _end_ so that we can get the length
- * by subtraction from the previous entry. the ISFIRST flag lets us tell
+ * by subtraction from the previous entry. the ISFIRST flag lets us tell
* whether there is a previous entry.
*/
typedef struct
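As the HEntry comment explains, each entry records the end offset of its string, so an item's length is recovered by subtracting the previous entry's end offset (or zero for the first item). A tiny standalone sketch of that convention (plain arrays, not the real HEntry bit layout):

#include <stdio.h>

/* End offsets of three strings "key", "value", "x" packed back-to-back
 * in one buffer: they end at 3, 8, 9. */
static int
entry_length_sketch(const int *end_offsets, int i)
{
	int			start = (i == 0) ? 0 : end_offsets[i - 1];

	return end_offsets[i] - start;
}

int
main(void)
{
	int			ends[3] = {3, 8, 9};
	int			i;

	for (i = 0; i < 3; i++)
		printf("item %d: length %d\n", i, entry_length_sketch(ends, i));
	return 0;
}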
/*
* When using a GIN index for hstore, we choose to index both keys and values.
* The storage format is "text" values, with K, V, or N prepended to the string
- * to indicate key, value, or null values. (As of 9.1 it might be better to
+ * to indicate key, value, or null values. (As of 9.1 it might be better to
* store null values as nulls, but we'll keep it this way for on-disk
* compatibility.)
*/
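The storage format described above prefixes each indexed datum with a single flag byte: 'K' for a key, 'V' for a value, 'N' for a null value. A minimal sketch of building such a flagged key as a plain C string (helper name invented; the real code builds a text datum):

#include <stdlib.h>
#include <string.h>

/* Returns a newly allocated string: flag byte followed by the payload. */
static char *
make_gin_key_sketch(char flag, const char *payload)
{
	size_t		len = payload ? strlen(payload) : 0;
	char	   *key = malloc(len + 2);

	key[0] = flag;				/* 'K', 'V', or 'N' */
	if (payload)
		memcpy(key + 1, payload, len);
	key[len + 1] = '\0';
	return key;
}

/* Usage: make_gin_key_sketch('K', "color"), make_gin_key_sketch('N', NULL) */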
{
/*
* Index doesn't have information about correspondence of keys and
- * values, so we need recheck. However, if not all the keys are
+ * values, so we need recheck. However, if not all the keys are
* present, we can fail at once.
*/
*recheck = true;
dst;
if (count == 0)
- PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2));
+ PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
initStringInfo(&tmp);
initStringInfo(&dst);
dst;
if (count == 0)
- PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2));
+ PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
initStringInfo(&tmp);
initStringInfo(&dst);
for (i = 0; i < count; i++)
{
- JsonbValue key, val;
+ JsonbValue key,
+ val;
key.estSize = sizeof(JEntry);
key.type = jbvString;
JsonbParseState *state = NULL;
JsonbValue *res;
StringInfoData tmp;
- bool is_number;
+ bool is_number;
initStringInfo(&tmp);
for (i = 0; i < count; i++)
{
- JsonbValue key, val;
+ JsonbValue key,
+ val;
key.estSize = sizeof(JEntry);
key.type = jbvString;
{
val.type = jbvNumeric;
val.val.numeric = DatumGetNumeric(
- DirectFunctionCall3(numeric_in, CStringGetDatum(tmp.data), 0, -1));
+ DirectFunctionCall3(numeric_in, CStringGetDatum(tmp.data), 0, -1));
+
val.estSize += VARSIZE_ANY(val.val.numeric) + sizeof(JEntry);
}
else
return FALSE;
/*
- * Set up data for checkcondition_gin. This must agree with the query
+ * Set up data for checkcondition_gin. This must agree with the query
* extraction code in ginint4_queryextract.
*/
gcv.first = items;
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/
*size = (float) ARRNELEMS(a);
}
-/* Sort the given data (len >= 2). Return true if any duplicates found */
+/* Sort the given data (len >= 2). Return true if any duplicates found */
bool
isort(int32 *a, int len)
{
bool r = FALSE;
/*
- * We use a simple insertion sort. While this is O(N^2) in the worst
+ * We use a simple insertion sort. While this is O(N^2) in the worst
* case, it's quite fast if the input is already sorted or nearly so.
* Also, for not-too-large inputs it's faster than more complex methods
* anyhow.
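A minimal sketch of the approach the comment describes: a simple insertion sort over an int32 array that also reports whether any duplicates were encountered (worst case O(N^2), but cheap for already-sorted input). This is an illustration, not the module's isort().

#include <stdbool.h>
#include <stdint.h>

static bool
isort_sketch(int32_t *a, int len)
{
	bool		found_dup = false;
	int			i;

	for (i = 1; i < len; i++)
	{
		int32_t		v = a[i];
		int			j = i - 1;

		/* shift larger elements right to open a slot for v */
		while (j >= 0 && a[j] > v)
		{
			a[j + 1] = a[j];
			j--;
		}
		a[j + 1] = v;
		/* v landed immediately after an equal element? */
		if (j >= 0 && a[j] == v)
			found_dup = true;
	}
	return found_dup;
}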
$outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
}
my $sql =
- "select $outf from "
+ "select $outf from "
. join(', ', keys %table)
. " where "
. join(' AND ', @where) . ';';
/*
* If the histogram is large enough, see what fraction of it the
* constant is "<@" to, and assume that's representative of the
- * non-MCV population. Otherwise use the default selectivity for the
+ * non-MCV population. Otherwise use the default selectivity for the
* non-MCV population.
*/
selec = histogram_selectivity(&vardata, &contproc,
}
/*
- * Dump all databases. There are no system objects to worry about.
+ * Dump all databases. There are no system objects to worry about.
*/
void
sql_exec_dumpalldbs(PGconn *conn, struct options * opts)
/* now build the query */
todo = psprintf(
- "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
- "FROM pg_catalog.pg_class c \n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
- " LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
- " pg_catalog.pg_tablespace t \n"
- "WHERE relkind IN ('r', 'm', 'i', 'S', 't') AND \n"
- " t.oid = CASE\n"
- " WHEN reltablespace <> 0 THEN reltablespace\n"
- " ELSE dattablespace\n"
- " END AND \n"
- " (%s) \n"
- "ORDER BY relname\n",
- opts->extended ? addfields : "",
- qualifiers);
+ "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
+ "FROM pg_catalog.pg_class c \n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
+ " LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
+ " pg_catalog.pg_tablespace t \n"
+ "WHERE relkind IN ('r', 'm', 'i', 'S', 't') AND \n"
+ " t.oid = CASE\n"
+ " WHEN reltablespace <> 0 THEN reltablespace\n"
+ " ELSE dattablespace\n"
+ " END AND \n"
+ " (%s) \n"
+ "ORDER BY relname\n",
+ opts->extended ? addfields : "",
+ qualifiers);
free(qualifiers);
/* pageinspect >= 1.2 uses pg_lsn instead of text for the LSN field. */
if (tupdesc->attrs[0]->atttypid == TEXTOID)
{
- char lsnchar[64];
+ char lsnchar[64];
+
snprintf(lsnchar, sizeof(lsnchar), "%X/%X",
(uint32) (lsn >> 32), (uint32) lsn);
values[0] = CStringGetTextDatum(lsnchar);
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that
+ * deciding whether a segment is still needed. This ensures that
* we won't prematurely remove a segment from a parent timeline.
* We could probably be a little more proactive about removing
* segments of non-parent timelines, but that would be a whole lot
{
/*
* Prints the name of the file to be removed and skips the
- * actual removal. The regular printout is so that the
+ * actual removal. The regular printout is so that the
* user can pipe the output into some other program.
*/
printf("%s\n", WALFilePath);
*
* The first argument is the relation to be prewarmed; the second controls
* how prewarming is done; legal options are 'prefetch', 'read', and 'buffer'.
- * The third is the name of the relation fork to be prewarmed. The fourth
+ * The third is the name of the relation fork to be prewarmed. The fourth
* and fifth arguments specify the first and last block to be prewarmed.
* If the fourth argument is NULL, it will be taken as 0; if the fifth argument
* is NULL, it will be taken as the number of blocks in the relation. The
* Track statement execution times across a whole database cluster.
*
* Execution costs are totalled for each distinct source query, and kept in
- * a shared hashtable. (We track only as many distinct queries as will fit
+ * a shared hashtable. (We track only as many distinct queries as will fit
* in the designated amount of shared memory.)
*
* As of Postgres 9.2, this module normalizes query entries. Normalization
*
* Normalization is implemented by fingerprinting queries, selectively
* serializing those fields of each query tree's nodes that are judged to be
- * essential to the query. This is referred to as a query jumble. This is
+ * essential to the query. This is referred to as a query jumble. This is
* distinct from a regular serialization in that various extraneous
* information is ignored as irrelevant or not essential to the query, such
* as the collations of Vars and, most notably, the values of constants.
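To make the normalization idea concrete, here is a toy illustration (not the pg_stat_statements jumble code) of selective fingerprinting: hash the fields that define a query's shape, such as node tags and operator names, while deliberately skipping constant values, so queries that differ only in their literals collapse to one entry.

#include <stdint.h>

typedef struct ToyNode
{
	int			tag;			/* node type */
	const char *opname;			/* operator name, or NULL */
	long		const_value;	/* literal value: intentionally NOT hashed */
} ToyNode;

static uint64_t
fnv1a_byte(uint64_t h, unsigned char b)
{
	return (h ^ b) * 1099511628211ULL;
}

static uint64_t
jumble_sketch(const ToyNode *nodes, int n)
{
	uint64_t	h = 1469598103934665603ULL; /* FNV-1a offset basis */
	int			i;

	for (i = 0; i < n; i++)
	{
		const unsigned char *p;

		/* fold in the node tag byte by byte */
		for (p = (const unsigned char *) &nodes[i].tag;
			 p < (const unsigned char *) (&nodes[i].tag + 1); p++)
			h = fnv1a_byte(h, *p);
		/* fold in the operator name, but never const_value */
		if (nodes[i].opname)
			for (p = (const unsigned char *) nodes[i].opname; *p; p++)
				h = fnv1a_byte(h, *p);
	}
	return h;
}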
* because we remove that file on startup; it acts inversely to
* PGSS_DUMP_FILE, in that it is only supposed to be around when the
* server is running, whereas PGSS_DUMP_FILE is only supposed to be around
- * when the server is not running. Leaving the file creates no danger of
+ * when the server is not running. Leaving the file creates no danger of
* a newly restored database having a spurious record of execution costs,
* which is what we're really concerned about here.
*/
/*
* When serializing to disk, we store query texts immediately after their
- * entry data. Any orphaned query texts are thereby excluded.
+ * entry data. Any orphaned query texts are thereby excluded.
*/
hash_seq_init(&hash_seq, pgss_hash);
while ((entry = hash_seq_search(&hash_seq)) != NULL)
/*
* We'd like to load the query text file (if needed) while not holding any
- * lock on pgss->lock. In the worst case we'll have to do this again
+ * lock on pgss->lock. In the worst case we'll have to do this again
* after we have the lock, but it's unlikely enough to make this a win
- * despite occasional duplicated work. We need to reload if anybody
+ * despite occasional duplicated work. We need to reload if anybody
* writes to the file (either a retail qtext_store(), or a garbage
* collection) between this point and where we've gotten shared lock. If
* a qtext_store is actually in progress when we look, we might as well
* would be difficult to demonstrate this even under artificial conditions.)
*
* Note: despite needing exclusive lock, it's not an error for the target
- * entry to already exist. This is because pgss_store releases and
+ * entry to already exist. This is because pgss_store releases and
* reacquires lock after failing to find a match; so someone else could
* have made the entry while we waited to get exclusive lock.
*/
* have it handy, so we require them to pass it too.
*
* If successful, returns true, and stores the new entry's offset in the file
- * into *query_offset. Also, if gc_count isn't NULL, *gc_count is set to the
+ * into *query_offset. Also, if gc_count isn't NULL, *gc_count is set to the
* number of garbage collections that have occurred so far.
*
* On failure, returns false.
*
* At least a shared lock on pgss->lock must be held by the caller, so as
- * to prevent a concurrent garbage collection. Share-lock-holding callers
+ * to prevent a concurrent garbage collection. Share-lock-holding callers
* should pass a gc_count pointer to obtain the number of garbage collections,
* so that they can recheck the count after obtaining exclusive lock to
* detect whether a garbage collection occurred (and removed this entry).
/*
* When called from pgss_store, some other session might have proceeded
* with garbage collection in the no-lock-held interim of lock strength
- * escalation. Check once more that this is actually necessary.
+ * escalation. Check once more that this is actually necessary.
*/
if (!need_gc_qtexts())
return;
}
/*
- * Truncate away any now-unused space. If this fails for some odd reason,
+ * Truncate away any now-unused space. If this fails for some odd reason,
* we log it, but there's no need to fail.
*/
if (ftruncate(fileno(qfile), extent) != 0)
*
* Note: the reason we don't simply use expression_tree_walker() is that the
* point of that function is to support tree walkers that don't care about
- * most tree node types, but here we care about all types. We should complain
+ * most tree node types, but here we care about all types. We should complain
* about any unrecognized node type.
*/
static void
* a problem.
*
* Duplicate constant pointers are possible, and will have their lengths
- * marked as '-1', so that they are later ignored. (Actually, we assume the
+ * marked as '-1', so that they are later ignored. (Actually, we assume the
* lengths were initialized as -1 to start with, and don't change them here.)
*
* N.B. There is an assumption that a '-' character at a Const location begins
* adjustment of location to that of the leading '-'
* operator in the event of a negative constant. It is
* also useful for our purposes to start from the minus
- * symbol. In this way, queries like "select * from foo
+ * symbol. In this way, queries like "select * from foo
* where bar = 1" and "select * from foo where bar = -2"
* will have identical normalized query strings.
*/
{
for (writes = 0; writes < writes_per_op; writes++)
if (write(tmpfile, buf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
- /*
- * This can generate write failures if the filesystem
- * has a large block size, e.g. 4k, and there is no
- * support for O_DIRECT writes smaller than the
- * file system block size, e.g. XFS.
- */
+
+ /*
+ * This can generate write failures if the filesystem has
+ * a large block size, e.g. 4k, and there is no support
+ * for O_DIRECT writes smaller than the file system block
+ * size, e.g. XFS.
+ */
die("write failed");
if (lseek(tmpfile, 0, SEEK_SET) == -1)
die("seek failed");
{
/*
* Successful regex processing: store NFA-like graph as
- * extra_data. GIN API requires an array of nentries
+ * extra_data. GIN API requires an array of nentries
* Pointers, but we just put the same value in each element.
*/
trglen = ARRNELEM(trg);
/*
* GETBIT() tests may give false positives, due to limited
- * size of the sign array. But since trigramsMatchGraph()
+ * size of the sign array. But since trigramsMatchGraph()
* implements a monotone boolean function, false positives
* in the check array can't lead to false negative answer.
* So we can apply trigramsMatchGraph despite uncertainty,
* In the 2nd stage, the automaton is transformed into a graph based on the
* original NFA. Each state in the expanded graph represents a state from
* the original NFA, plus a prefix identifying the last two characters
- * (colors, to be precise) seen before entering the state. There can be
+ * (colors, to be precise) seen before entering the state. There can be
* multiple states in the expanded graph for each state in the original NFA,
* depending on what characters can precede it. A prefix position can be
* "unknown" if it's uncertain what the preceding character was, or "blank"
* "enter key".
*
* Each arc of the expanded graph is labelled with a trigram that must be
- * present in the string to match. We can construct this from an out-arc of
+ * present in the string to match. We can construct this from an out-arc of
* the underlying NFA state by combining the expanded state's prefix with the
* color label of the underlying out-arc, if neither prefix position is
* "unknown". But note that some of the colors in the trigram might be
*
* When building the graph, if the number of states or arcs exceed pre-defined
* limits, we give up and simply mark any states not yet processed as final
- * states. Roughly speaking, that means that we make use of some portion from
+ * states. Roughly speaking, that means that we make use of some portion from
* the beginning of the regexp. Also, any colors that have too many member
* characters are treated as "unknown", so that we can't derive trigrams
* from them.
* 1) Create state 1 with enter key (UNKNOWN, UNKNOWN, 1).
* 2) Add key (UNKNOWN, "a", 2) to state 1.
* 3) Add key ("a", "b", 3) to state 1.
- * 4) Create new state 2 with enter key ("b", "c", 4). Add an arc
+ * 4) Create new state 2 with enter key ("b", "c", 4). Add an arc
* from state 1 to state 2 with label trigram "abc".
* 5) Mark state 2 final because state 4 of source NFA is marked as final.
- * 6) Create new state 3 with enter key ("b", "d", 5). Add an arc
+ * 6) Create new state 3 with enter key ("b", "d", 5). Add an arc
* from state 1 to state 3 with label trigram "abd".
* 7) Mark state 3 final because state 5 of source NFA is marked as final.
*
*
* We call a prefix ambiguous if at least one of its colors is unknown. It's
* fully ambiguous if both are unknown, partially ambiguous if only the first
- * is unknown. (The case of first color known, second unknown is not valid.)
+ * is unknown. (The case of first color known, second unknown is not valid.)
*
* Wholly- or partly-blank prefixes are mostly handled the same as regular
- * color prefixes. This allows us to generate appropriate partly-blank
+ * color prefixes. This allows us to generate appropriate partly-blank
* trigrams when the NFA requires word character(s) to appear adjacent to
* non-word character(s).
*/
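A small sketch of the rule the preceding comments describe: an expanded-graph state carries the last two colors seen, and an out-arc yields a color trigram only when neither prefix position is unknown (type and constant names here are illustrative, not the pg_trgm ones).

#include <stdbool.h>

typedef int SketchColor;
#define SKETCH_COLOR_UNKNOWN	(-1)

typedef struct SketchPrefix
{
	SketchColor colors[2];		/* last two colors seen before this state */
} SketchPrefix;

typedef struct SketchColorTrgm
{
	SketchColor c[3];
} SketchColorTrgm;

/*
 * Try to build a trigram from a state's prefix plus the color of an
 * out-arc.  Returns false when the prefix is (partly) ambiguous; the
 * caller would then fall back to adding an "enter key" only.
 */
static bool
make_color_trgm_sketch(const SketchPrefix *prefix, SketchColor arc_color,
					   SketchColorTrgm *out)
{
	if (prefix->colors[0] == SKETCH_COLOR_UNKNOWN ||
		prefix->colors[1] == SKETCH_COLOR_UNKNOWN)
		return false;
	out->c[0] = prefix->colors[0];
	out->c[1] = prefix->colors[1];
	out->c[2] = arc_color;
	return true;
}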
/*
* Key identifying a state of our expanded graph: color prefix, and number
- * of the corresponding state in the underlying regex NFA. The color prefix
+ * of the corresponding state in the underlying regex NFA. The color prefix
* shows how we reached the regex state (to the extent that we know it).
*/
typedef struct
* colorTrigramsCount and colorTrigramsGroups contain information about
* how trigrams are grouped into color trigrams. "colorTrigramsCount" is
* the count of color trigrams and "colorTrigramGroups" contains number of
- * simple trigrams for each color trigram. The array of simple trigrams
+ * simple trigrams for each color trigram. The array of simple trigrams
* (stored separately from this struct) is ordered so that the simple
* trigrams for each color trigram are consecutive, and they're in order
* by color trigram number.
/*
* This processing generates a great deal of cruft, which we'd like to
* clean up before returning (since this function may be called in a
- * query-lifespan memory context). Make a temp context we can work in so
+ * query-lifespan memory context). Make a temp context we can work in so
* that cleanup is easy.
*/
tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
/*
* We can ignore the NUL character, since it can never appear in a PG text
- * string. This avoids the need for various special cases when
+ * string. This avoids the need for various special cases when
* reconstructing trigrams.
*/
if (c == 0)
pg_wchar2mb_with_len(&c, s, 1);
/*
- * In IGNORECASE mode, we can ignore uppercase characters. We assume that
+ * In IGNORECASE mode, we can ignore uppercase characters. We assume that
* the regex engine generated both uppercase and lowercase equivalents
* within each color, since we used the REG_ICASE option; so there's no
* need to process the uppercase version.
/*
* Recursively build the expanded graph by processing queue of states
- * (breadth-first search). getState already put initstate in the queue.
+ * (breadth-first search). getState already put initstate in the queue.
*/
while (trgmNFA->queue != NIL)
{
trgmNFA->queue = list_delete_first(trgmNFA->queue);
/*
- * If we overflowed then just mark state as final. Otherwise do
+ * If we overflowed then just mark state as final. Otherwise do
* actual processing.
*/
if (trgmNFA->overflowed)
/*
* Add state's own key, and then process all keys added to keysQueue until
- * queue is empty. But we can quit if the state gets marked final.
+ * queue is empty. But we can quit if the state gets marked final.
*/
addKey(trgmNFA, state, &state->stateKey);
while (trgmNFA->keysQueue != NIL && !state->fin)
/*
* Compare key to each existing enter key of the state to check for
- * redundancy. We can drop either old key(s) or the new key if we find
+ * redundancy. We can drop either old key(s) or the new key if we find
* redundancy.
*/
prev = NULL;
else if (pg_reg_colorisend(trgmNFA->regex, arc->co))
{
/*
- * End of line/string ($). We must consider this arc as a
+ * End of line/string ($). We must consider this arc as a
* transition that doesn't read anything. The reason for adding
* this enter key to the state is that if the arc leads to the
* NFA's final state, we must mark this expanded state as final.
* We can reach the arc destination after reading a word
* character, but the prefix is not something that addArc
* will accept, so no trigram arc can get made for this
- * transition. We must make an enter key to show that the
+ * transition. We must make an enter key to show that the
* arc destination is reachable. The prefix for the enter
* key should reflect the info we have for this arc.
*/
else
{
/*
- * Unexpandable color. Add enter key with ambiguous prefix,
+ * Unexpandable color. Add enter key with ambiguous prefix,
* showing we can reach the destination from this state, but
- * the preceding colors will be uncertain. (We do not set the
+ * the preceding colors will be uncertain. (We do not set the
* first prefix color to key->prefix.colors[1], because a
* prefix of known followed by unknown is invalid.)
*/
return false;
/*
- * We also reject nonblank-blank-anything. The nonblank-blank-nonblank
+ * We also reject nonblank-blank-anything. The nonblank-blank-nonblank
* case doesn't correspond to any trigram the trigram extraction code
- * would make. The nonblank-blank-blank case is also not possible with
+ * would make. The nonblank-blank-blank case is also not possible with
* RPADDING = 1. (Note that in many cases we'd fail to generate such a
* trigram even if it were valid, for example processing "foo bar" will
* not result in considering the trigram "o ". So if you want to support
/*
* Remove color trigrams from the graph so long as total penalty of color
- * trigrams exceeds WISH_TRGM_PENALTY. (If we fail to get down to
+ * trigrams exceeds WISH_TRGM_PENALTY. (If we fail to get down to
* WISH_TRGM_PENALTY, it's OK so long as total count is no more than
* MAX_TRGM_COUNT.) We prefer to remove color trigrams with higher
* penalty, since those are the most promising for reducing the total
/* Only current PG version is supported as a target */
if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
pg_fatal("This utility can only upgrade to PostgreSQL version %s.\n",
- PG_MAJORVERSION);
+ PG_MAJORVERSION);
/*
* We can't allow downgrading because we use the target pg_dumpall, and
if (GET_MAJOR_VERSION(new_cluster.major_version) == 900 &&
new_cluster.controldata.cat_ver < TABLE_SPACE_SUBDIRS_CAT_VER)
pg_fatal("This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n"
- "because of backend API changes made during development.\n");
+ "because of backend API changes made during development.\n");
/* We read the real port number for PG >= 9.1 */
if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
old_cluster.port == DEF_PGUPORT)
pg_fatal("When checking a pre-PG 9.1 live old server, "
- "you must specify the old server's port number.\n");
+ "you must specify the old server's port number.\n");
if (live_check && old_cluster.port == new_cluster.port)
pg_fatal("When checking a live server, "
- "the old and new port numbers must be different.\n");
+ "the old and new port numbers must be different.\n");
}
return (pg_strcasecmp(loca, locb) == 0);
/*
- * Compare the encoding parts. Windows tends to use code page numbers for
+ * Compare the encoding parts. Windows tends to use code page numbers for
* the encoding part, which equivalent_encoding() won't like, so accept if
* the strings are case-insensitive equal; otherwise use
* equivalent_encoding() to compare.
/* pg_largeobject and its index should be skipped */
if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
pg_fatal("New cluster database \"%s\" is not empty\n",
- new_cluster.dbarr.dbs[dbnum].db_name);
+ new_cluster.dbarr.dbs[dbnum].db_name);
}
}
if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- *analyze_script_file_name, getErrorText(errno));
+ *analyze_script_file_name, getErrorText(errno));
#ifndef WIN32
/* add shebang header */
#ifndef WIN32
if (chmod(*analyze_script_file_name, S_IRWXU) != 0)
pg_fatal("Could not add execute permission to file \"%s\": %s\n",
- *analyze_script_file_name, getErrorText(errno));
+ *analyze_script_file_name, getErrorText(errno));
#endif
if (os_info.user_specified)
if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- *deletion_script_file_name, getErrorText(errno));
+ *deletion_script_file_name, getErrorText(errno));
#ifndef WIN32
/* add shebang header */
}
else
{
- char *suffix_path = pg_strdup(old_cluster.tablespace_suffix);
+ char *suffix_path = pg_strdup(old_cluster.tablespace_suffix);
/*
* Simply delete the tablespace directory, which might be ".old"
#ifndef WIN32
if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
pg_fatal("Could not add execute permission to file \"%s\": %s\n",
- *deletion_script_file_name, getErrorText(errno));
+ *deletion_script_file_name, getErrorText(errno));
#endif
check_ok();
if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0)
pg_fatal("database user \"%s\" is not a superuser\n",
- os_info.user);
+ os_info.user);
cluster->install_role_oid = atooid(PQgetvalue(res, 0, 1));
if (PQntuples(res) != 0)
pg_fatal("The %s cluster contains prepared transactions\n",
- CLUSTER_NAME(cluster));
+ CLUSTER_NAME(cluster));
PQclear(res);
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
"bigint data type. Your old and new clusters pass bigint values\n"
"differently so this cluster cannot currently be upgraded. You can\n"
- "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
- "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n"
- "the problem functions is in the file:\n"
- " %s\n\n", output_path);
+ "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
+ "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n"
+ "the problem functions is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
pg_fatal("Your installation contains one of the reg* data types in user tables.\n"
"These data types reference system OIDs that are not preserved by\n"
"pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
- "remove the problem tables and restart the upgrade. A list of the problem\n"
- "columns is in the file:\n"
- " %s\n\n", output_path);
+ "remove the problem tables and restart the upgrade. A list of the problem\n"
+ "columns is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
if ((output = popen(cmd, "r")) == NULL ||
fgets(cmd_output, sizeof(cmd_output), output) == NULL)
pg_fatal("Could not get pg_ctl version data using %s: %s\n",
- cmd, getErrorText(errno));
+ cmd, getErrorText(errno));
pclose(output);
* pg_control data. pg_resetxlog cannot be run while the server is running
* so we use pg_controldata; pg_controldata doesn't provide all the fields
* we need to actually perform the upgrade, but it provides enough for
- * check mode. We do not implement pg_resetxlog -n because it is hard to
+ * check mode. We do not implement pg_resetxlog -n because it is hard to
* return valid xid data for a running server.
*/
void
if ((output = popen(cmd, "r")) == NULL)
pg_fatal("Could not get control data using %s: %s\n",
- cmd, getErrorText(errno));
+ cmd, getErrorText(errno));
/* Only pre-8.4 has these so if they are not set below we will check later */
cluster->controldata.lc_collate = NULL;
for (p = bufin; *p; p++)
if (!isascii(*p))
pg_fatal("The 8.3 cluster's pg_controldata is incapable of outputting ASCII, even\n"
- "with LANG=C. You must upgrade this cluster to a newer version of PostgreSQL\n"
- "8.3 to fix this bug. PostgreSQL 8.3.7 and later are known to work properly.\n");
+ "with LANG=C. You must upgrade this cluster to a newer version of PostgreSQL\n"
+ "8.3 to fix this bug. PostgreSQL 8.3.7 and later are known to work properly.\n");
}
#endif
* This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
*/
pg_fatal("You will need to rebuild the new server with configure option\n"
- "--disable-integer-datetimes or get server binaries built with those\n"
- "options.\n");
+ "--disable-integer-datetimes or get server binaries built with those\n"
+ "options.\n");
}
/*
/*
* Set umask for this function, all functions it calls, and all
- * subprocesses/threads it creates. We can't use fopen_priv()
- * as Windows uses threads and umask is process-global.
+ * subprocesses/threads it creates. We can't use fopen_priv() as Windows
+ * uses threads and umask is process-global.
*/
old_umask = umask(S_IRWXG | S_IRWXO);
va_list ap;
#ifdef WIN32
-static DWORD mainThreadId = 0;
+ static DWORD mainThreadId = 0;
/* We assume we are called from the primary thread first */
if (mainThreadId == 0)
pg_log(PG_VERBOSE, "%s\n", cmd);
#ifdef WIN32
+
/*
- * For some reason, Windows issues a file-in-use error if we write data
- * to the log file from a non-primary thread just before we create a
- * subprocess that also writes to the same log file. One fix is to
- * sleep for 100ms. A cleaner fix is to write to the log file _after_
- * the subprocess has completed, so we do this only when writing from
- * a non-primary thread. fflush(), running system() twice, and
- * pre-creating the file do not see to help.
+ * For some reason, Windows issues a file-in-use error if we write data to
+ * the log file from a non-primary thread just before we create a
+ * subprocess that also writes to the same log file. One fix is to sleep
+ * for 100ms. A cleaner fix is to write to the log file _after_ the
+ * subprocess has completed, so we do this only when writing from a
+ * non-primary thread. fflush(), running system() twice, and pre-creating
+ * the file do not seem to help.
*/
if (mainThreadId != GetCurrentThreadId())
result = system(cmd);
for (iter = 0; iter < 4 && log == NULL; iter++)
{
- pg_usleep(1000000); /* 1 sec */
+ pg_usleep(1000000); /* 1 sec */
log = fopen(log_file, "a");
}
}
}
#ifndef WIN32
+
/*
* We can't do this on Windows because it will keep the "pg_ctl start"
* output filename open until the server stops, so we do the \n\n above on
* that platform. We use a unique filename for "pg_ctl start" that is
- * never reused while the server is running, so it works fine. We could
+ * never reused while the server is running, so it works fine. We could
* log these commands to a third file, but that just adds complexity.
*/
if ((log = fopen(log_file, "a")) == NULL)
/* ENOTDIR means we will throw a more useful error later */
if (errno != ENOENT && errno != ENOTDIR)
pg_fatal("could not open file \"%s\" for reading: %s\n",
- path, getErrorText(errno));
+ path, getErrorText(errno));
return false;
}
int fd;
/*
- * We open a file we would normally create anyway. We do this even in
+ * We open a file we would normally create anyway. We do this even in
* 'check' mode, which isn't ideal, but this is the best we can do.
*/
if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0)
*
* This function validates the given cluster directory - we search for a
* small set of subdirectories that we expect to find in a valid $PGDATA
- * directory. If any of the subdirectories are missing (or secured against
+ * directory. If any of the subdirectories are missing (or secured against
* us) we display an error message and exit()
*
*/
* check_bin_dir()
*
* This function searches for the executables that we expect to find
- * in the binaries directory. If we find that a required executable
+ * in the binaries directory. If we find that a required executable
* is missing (or secured against us), we display an error message and
* exit().
*/
*/
if (stat(path, &buf) < 0)
pg_fatal("check for \"%s\" failed: %s\n",
- path, getErrorText(errno));
+ path, getErrorText(errno));
else if (!S_ISREG(buf.st_mode))
pg_fatal("check for \"%s\" failed: not an executable file\n",
- path);
+ path);
/*
* Ensure that the file is both executable and readable (required for
if ((buf.st_mode & S_IRUSR) == 0)
#endif
pg_fatal("check for \"%s\" failed: cannot read file (permission denied)\n",
- path);
+ path);
#ifndef WIN32
if (access(path, X_OK) != 0)
if ((buf.st_mode & S_IXUSR) == 0)
#endif
pg_fatal("check for \"%s\" failed: cannot execute (permission denied)\n",
- path);
+ path);
}
if (pg_link_file(existing_file, new_link_file) == -1)
{
pg_fatal("Could not create hard link between old and new data directories: %s\n"
- "In link mode the old and new data directories must be on the same file system volume.\n",
- getErrorText(errno));
+ "In link mode the old and new data directories must be on the same file system volume.\n",
+ getErrorText(errno));
}
unlink(new_link_file);
}
* plpython2u language was created with library name plpython2.so as a
* symbolic link to plpython.so. In Postgres 9.1, only the
* plpython2.so library was created, and both plpythonu and plpython2u
- * pointing to it. For this reason, any reference to library name
+ * point to it. For this reason, any reference to library name
* "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
* the new cluster.
*
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ output_path, getErrorText(errno));
fprintf(script, "Could not load library \"%s\"\n%s\n",
lib,
PQerrorMessage(conn));
fclose(script);
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation references loadable libraries that are missing from the\n"
- "new installation. You can add these libraries to the new installation,\n"
- "or remove the functions using them from the old installation. A list of\n"
- "problem libraries is in the file:\n"
- " %s\n\n", output_path);
+ "new installation. You can add these libraries to the new installation,\n"
+ "or remove the functions using them from the old installation. A list of\n"
+ "problem libraries is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
if (old_rel->reloid != new_rel->reloid)
pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n",
- old_db->db_name, old_rel->reloid, new_rel->reloid);
+ old_db->db_name, old_rel->reloid, new_rel->reloid);
/*
* TOAST table names initially match the heap pg_class oid. In
strcmp(old_rel->nspname, "pg_toast") != 0) &&
strcmp(old_rel->relname, new_rel->relname) != 0))
pg_fatal("Mismatch of relation names in database \"%s\": "
- "old name \"%s.%s\", new name \"%s.%s\"\n",
- old_db->db_name, old_rel->nspname, old_rel->relname,
- new_rel->nspname, new_rel->relname);
+ "old name \"%s.%s\", new name \"%s.%s\"\n",
+ old_db->db_name, old_rel->nspname, old_rel->relname,
+ new_rel->nspname, new_rel->relname);
create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db,
old_rel, new_rel, maps + num_maps);
*/
if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
pg_fatal("old and new databases \"%s\" have a different number of relations\n",
- old_db->db_name);
+ old_db->db_name);
*nmaps = num_maps;
return maps;
i_relfilenode,
i_reltablespace;
char query[QUERY_ALLOC];
- char *last_namespace = NULL, *last_tablespace = NULL;
+ char *last_namespace = NULL,
+ *last_tablespace = NULL;
/*
* pg_largeobject contains user data that does not appear in pg_dumpall
"SELECT reltoastrelid "
"FROM info_rels i JOIN pg_catalog.pg_class c "
" ON i.reloid = c.oid "
- " AND c.reltoastrelid != %u", InvalidOid));
+ " AND c.reltoastrelid != %u", InvalidOid));
PQclear(executeQueryOrDie(conn,
"INSERT INTO info_rels "
"SELECT indexrelid "
curr->nsp_alloc = false;
/*
- * Many of the namespace and tablespace strings are identical,
- * so we try to reuse the allocated string pointers where possible
- * to reduce memory consumption.
+ * Many of the namespace and tablespace strings are identical, so we
+ * try to reuse the allocated string pointers where possible to reduce
+ * memory consumption.
*/
/* Can we reuse the previous string allocation? */
if (last_namespace && strcmp(nspname, last_namespace) == 0)
default:
pg_fatal("Try \"%s --help\" for more information.\n",
- os_info.progname);
+ os_info.progname);
break;
}
}
/* Turn off read-only mode; add prefix to PGOPTIONS? */
if (getenv("PGOPTIONS"))
{
- char *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY,
- getenv("PGOPTIONS"));
+ char *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY,
+ getenv("PGOPTIONS"));
+
pg_putenv("PGOPTIONS", pgoptions);
pfree(pgoptions);
}
}
else
pg_fatal("You must identify the directory where the %s.\n"
- "Please use the %s command-line option or the %s environment variable.\n",
- description, cmdLineOption, envVarName);
+ "Please use the %s command-line option or the %s environment variable.\n",
+ description, cmdLineOption, envVarName);
}
/*
/*
* We don't have a data directory yet, so we can't check the PG version,
- * so this might fail --- only works for PG 9.2+. If this fails,
+ * so this might fail --- only works for PG 9.2+. If this fails,
* pg_upgrade will fail anyway because the data files will not be found.
*/
snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
if ((output = popen(cmd, "r")) == NULL ||
fgets(cmd_output, sizeof(cmd_output), output) == NULL)
pg_fatal("Could not get data directory using %s: %s\n",
- cmd, getErrorText(errno));
+ cmd, getErrorText(errno));
pclose(output);
* the PageLayoutVersion of the new cluster. If the versions differ, this
* function loads a converter plugin and returns a pointer to a pageCnvCtx
* object (in *result) that knows how to convert pages from the old format
- * to the new format. If the versions are identical, this function just
+ * to the new format. If the versions are identical, this function just
* returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion
* is not required.
*/
* This function loads a page-converter plugin library and grabs a
* pointer to each of the (interesting) functions provided by that
* plugin. The name of the plugin library is derived from the given
- * newPageVersion and oldPageVersion. If a plugin is found, this
+ * newPageVersion and oldPageVersion. If a plugin is found, this
* function returns a pointer to a pageCnvCtx object (which will contain
* a collection of plugin function pointers). If the required plugin
* is not found, this function returns NULL.
thread_handles[thread_num] = thread_handles[parallel_jobs - 1];
/*
- * Move last active thead arg struct into the now-dead slot,
- * and the now-dead slot to the end for reuse by the next thread.
- * Though the thread struct is in use by another thread, we can
- * safely swap the struct pointers within the array.
+ * Move last active thread arg struct into the now-dead slot, and the
+ * now-dead slot to the end for reuse by the next thread. Though the
+ * thread struct is in use by another thread, we can safely swap the
+ * struct pointers within the array.
*/
tmp_args = cur_thread_args[thread_num];
cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1];
/*
* Most failures happen in create_new_objects(), which has completed at
- * this point. We do this here because it is just before linking, which
+ * this point. We do this here because it is just before linking, which
* will link the old and new cluster data files, preventing the old
* cluster from being safely started once the new cluster is started.
*/
{
/*
* If we have a postmaster.pid file, try to start the server. If it
- * starts, the pid file was stale, so stop the server. If it doesn't
+ * starts, the pid file was stale, so stop the server. If it doesn't
* start, assume the server is running. If the pid file is left over
* from a server crash, this also allows any committed transactions
* stored in the WAL to be replayed so they are not lost, because WAL
{
if (!user_opts.check)
pg_fatal("There seems to be a postmaster servicing the old cluster.\n"
- "Please shutdown that postmaster and try again.\n");
+ "Please shutdown that postmaster and try again.\n");
else
*live_check = true;
}
stop_postmaster(false);
else
pg_fatal("There seems to be a postmaster servicing the new cluster.\n"
- "Please shutdown that postmaster and try again.\n");
+ "Please shutdown that postmaster and try again.\n");
}
/* get path to pg_upgrade executable */
/*
* Install support functions in the global-object restore database to
- * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template
- * database so objects we add into 'template1' are not propogated. They
+ * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template
+ * database so objects we add into 'template1' are not propagated. They
* are removed on pg_upgrade exit.
*/
install_support_functions_in_new_db("template1");
*/
typedef struct
{
- const char *old_tablespace;
- const char *new_tablespace;
- const char *old_tablespace_suffix;
- const char *new_tablespace_suffix;
+ const char *old_tablespace;
+ const char *new_tablespace;
+ const char *old_tablespace_suffix;
+ const char *new_tablespace_suffix;
Oid old_db_oid;
Oid new_db_oid;
{
Oid db_oid; /* oid of the database */
char *db_name; /* database name */
- char db_tablespace[MAXPGPATH]; /* database default tablespace path */
+ char db_tablespace[MAXPGPATH]; /* database default tablespace
+ * path */
RelInfoArr rel_arr; /* array of all user relinfos */
} DbInfo;
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void
pg_fatal(const char *fmt,...)
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2),noreturn));
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2), noreturn));
void end_progress_output(void);
void
prep_status(const char *fmt,...)
/*
* Transferring files by tablespace is tricky because a single database can
* use multiple tablespaces. For non-parallel mode, we just pass a NULL
- * tablespace path, which matches all tablespaces. In parallel mode, we
+ * tablespace path, which matches all tablespaces. In parallel mode, we
* pass the default tablespace and all user-created tablespaces and let
* those operations happen in parallel.
*/
if (new_dbnum >= new_db_arr->ndbs)
pg_fatal("old database \"%s\" not found in the new cluster\n",
- old_db->db_name);
+ old_db->db_name);
n_maps = 0;
mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata,
/*
* get_pg_database_relfilenode()
*
- * Retrieves the relfilenode for a few system-catalog tables. We need these
+ * Retrieves the relfilenode for a few system-catalog tables. We need these
* relfilenodes later in the upgrade process.
*/
void
return;
else
pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
- map->nspname, map->relname, old_file, new_file,
- getErrorText(errno));
+ map->nspname, map->relname, old_file, new_file,
+ getErrorText(errno));
}
close(fd);
}
if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
pg_fatal("This upgrade requires page-by-page conversion, "
- "you must use copy mode instead of link mode.\n");
+ "you must use copy mode instead of link mode.\n");
if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
{
if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL)
pg_fatal("error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
- map->nspname, map->relname, old_file, new_file, msg);
+ map->nspname, map->relname, old_file, new_file, msg);
}
else
{
if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
pg_fatal("error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
- map->nspname, map->relname, old_file, new_file, msg);
+ map->nspname, map->relname, old_file, new_file, msg);
}
}
return false;
/*
- * We set this here to make sure atexit() shuts down the server,
- * but only if we started the server successfully. We do it
- * before checking for connectivity in case the server started but
- * there is a connectivity failure. If pg_ctl did not return success,
- * we will exit below.
+ * We set this here to make sure atexit() shuts down the server, but only
+ * if we started the server successfully. We do it before checking for
+ * connectivity in case the server started but there is a connectivity
+ * failure. If pg_ctl did not return success, we will exit below.
*
* Pre-9.1 servers do not have PQping(), so we could be leaving the server
- * running if authentication was misconfigured, so someday we might went to
- * be more aggressive about doing server shutdowns even if pg_ctl fails,
- * but now (2013-08-14) it seems prudent to be cautious. We don't want to
- * shutdown a server that might have been accidentally started during the
- * upgrade.
+ * running if authentication was misconfigured, so someday we might want
+ * to be more aggressive about doing server shutdowns even if pg_ctl
+ * fails, but now (2013-08-14) it seems prudent to be cautious. We don't
+ * want to shut down a server that might have been accidentally started
+ * during the upgrade.
*/
if (pg_ctl_return)
os_info.running_cluster = cluster;
/*
- * pg_ctl -w might have failed because the server couldn't be started,
- * or there might have been a connection problem in _checking_ if the
- * server has started. Therefore, even if pg_ctl failed, we continue
- * and test for connectivity in case we get a connection reason for the
- * failure.
+ * pg_ctl -w might have failed because the server couldn't be started, or
+ * there might have been a connection problem in _checking_ if the server
+ * has started. Therefore, even if pg_ctl failed, we continue and test
+ * for connectivity in case we get a connection reason for the failure.
*/
if ((conn = get_db_conn(cluster, "template1")) == NULL ||
PQstatus(conn) != CONNECTION_OK)
if (conn)
PQfinish(conn);
pg_fatal("could not connect to %s postmaster started with the command:\n"
- "%s\n",
- CLUSTER_NAME(cluster), cmd);
+ "%s\n",
+ CLUSTER_NAME(cluster), cmd);
}
PQfinish(conn);
/*
* If pg_ctl failed, and the connection didn't fail, and throw_error is
- * enabled, fail now. This could happen if the server was already running.
+ * enabled, fail now. This could happen if the server was already
+ * running.
*/
if (!pg_ctl_return)
pg_fatal("pg_ctl failed to start the %s server, or connection failed\n",
- CLUSTER_NAME(cluster));
+ CLUSTER_NAME(cluster));
return true;
}
(strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 &&
strcmp(value, "::1") != 0 && value[0] != '/'))
pg_fatal("libpq environment variable %s has a non-local server value: %s\n",
- option->envvar, value);
+ option->envvar, value);
}
}
if (os_info.num_old_tablespaces > 0 &&
strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0)
pg_fatal("Cannot upgrade to/from the same system catalog version when\n"
- "using tablespaces.\n");
+ "using tablespaces.\n");
}
* Effectively, this is checking only for tables/indexes in
* non-existent tablespace directories. Databases located in
* non-existent tablespaces already throw a backend error.
- * Non-existent tablespace directories can occur when a data
- * directory that contains user tablespaces is moved as part
- * of pg_upgrade preparation and the symbolic links are not
- * updated.
+ * Non-existent tablespace directories can occur when a data directory
+ * that contains user tablespaces is moved as part of pg_upgrade
+ * preparation and the symbolic links are not updated.
*/
if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0)
{
os_info.old_tablespaces[tblnum]);
else
report_status(PG_FATAL,
- "cannot stat() tablespace directory \"%s\": %s\n",
- os_info.old_tablespaces[tblnum], getErrorText(errno));
+ "cannot stat() tablespace directory \"%s\": %s\n",
+ os_info.old_tablespaces[tblnum], getErrorText(errno));
}
if (!S_ISDIR(statBuf.st_mode))
- report_status(PG_FATAL,
- "tablespace path \"%s\" is not a directory\n",
- os_info.old_tablespaces[tblnum]);
+ report_status(PG_FATAL,
+ "tablespace path \"%s\" is not a directory\n",
+ os_info.old_tablespaces[tblnum]);
}
PQclear(res);
static
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)))
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)))
void
pg_log_v(eLogType type, const char *fmt, va_list ap)
{
/*
* Do not free envstr because it becomes part of the environment on
- * some operating systems. See port/unsetenv.c::unsetenv.
+ * some operating systems. See port/unsetenv.c::unsetenv.
*/
#else
SetEnvironmentVariableA(var, val);
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation contains the \"name\" data type in user tables. This\n"
"data type changed its internal alignment between your old and new\n"
- "clusters so this cluster cannot currently be upgraded. You can remove\n"
+ "clusters so this cluster cannot currently be upgraded. You can remove\n"
"the problem tables and restart the upgrade. A list of the problem\n"
- "columns is in the file:\n"
- " %s\n\n", output_path);
+ "columns is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
{
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation contains the \"tsquery\" data type. This data type\n"
- "added a new internal field between your old and new clusters so this\n"
+ "added a new internal field between your old and new clusters so this\n"
"cluster cannot currently be upgraded. You can remove the problem\n"
- "columns and restart the upgrade. A list of the problem columns is in the\n"
- "file:\n"
- " %s\n\n", output_path);
+ "columns and restart the upgrade. A list of the problem columns is in the\n"
+ "file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
{
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation contains the \"ltree\" data type. This data type\n"
- "changed its internal storage format between your old and new clusters so this\n"
- "cluster cannot currently be upgraded. You can manually upgrade databases\n"
- "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
- "cluster and restart the upgrade. A list of the problem functions is in the\n"
- "file:\n"
- " %s\n\n", output_path);
+ "changed its internal storage format between your old and new clusters so this\n"
+ "cluster cannot currently be upgraded. You can manually upgrade databases\n"
+ "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
+ "cluster and restart the upgrade. A list of the problem functions is in the\n"
+ "file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
/*
* Stopgap implementation of timestamptz_to_str that doesn't depend on backend
- * infrastructure. This will work for timestamps that are within the range
+ * infrastructure. This will work for timestamps that are within the range
* of the platform time_t type. (pg_time_t is compatible except for possibly
* being wider.)
*
break;
else
{
- pg_usleep(1000000L); /* 1 second */
+ pg_usleep(1000000L); /* 1 second */
continue;
}
}
bool use_quiet; /* quiet logging onto stderr */
int agg_interval; /* log aggregates instead of individual
* transactions */
-int progress = 0; /* thread progress report every this seconds */
-int progress_nclients = 0; /* number of clients for progress report */
-int progress_nthreads = 0; /* number of threads for progress report */
+int progress = 0; /* thread progress report every this many seconds */
+int progress_nclients = 0; /* number of clients for progress
+ * report */
+int progress_nthreads = 0; /* number of threads for progress
+ * report */
bool is_connect; /* establish connection for each transaction */
bool is_latencies; /* report per-command latencies */
int main_pid; /* main process id used in log filename */
int listen; /* 0 indicates that an async query has been
* sent */
int sleeping; /* 1 indicates that the client is napping */
- bool throttling; /* whether nap is for throttling */
+ bool throttling; /* whether nap is for throttling */
int64 until; /* napping until (usec) */
Variable *variables; /* array of variable definitions */
int nvariables;
instr_time *exec_elapsed; /* time spent executing cmds (per Command) */
int *exec_count; /* number of cmd executions (per Command) */
unsigned short random_state[3]; /* separate randomness for each thread */
- int64 throttle_trigger; /* previous/next throttling (us) */
- int64 throttle_lag; /* total transaction lag behind throttling */
- int64 throttle_lag_max; /* max transaction lag */
+ int64 throttle_trigger; /* previous/next throttling (us) */
+ int64 throttle_lag; /* total transaction lag behind throttling */
+ int64 throttle_lag_max; /* max transaction lag */
} TState;
#define INVALID_THREAD ((pthread_t) 0)
int xacts;
int64 latencies;
int64 sqlats;
- int64 throttle_lag;
- int64 throttle_lag_max;
+ int64 throttle_lag;
+ int64 throttle_lag_max;
} TResult;
/*
"\nInitialization options:\n"
" -i, --initialize invokes initialization mode\n"
" -F, --fillfactor=NUM set fill factor\n"
- " -n, --no-vacuum do not run VACUUM after initialization\n"
- " -q, --quiet quiet logging (one message each 5 seconds)\n"
+ " -n, --no-vacuum do not run VACUUM after initialization\n"
+ " -q, --quiet quiet logging (one message each 5 seconds)\n"
" -s, --scale=NUM scaling factor\n"
" --foreign-keys create foreign key constraints between tables\n"
" --index-tablespace=TABLESPACE\n"
- " create indexes in the specified tablespace\n"
- " --tablespace=TABLESPACE create tables in the specified tablespace\n"
+ " create indexes in the specified tablespace\n"
+ " --tablespace=TABLESPACE create tables in the specified tablespace\n"
" --unlogged-tables create tables as unlogged tables\n"
"\nBenchmarking options:\n"
" -c, --client=NUM number of concurrent database clients (default: 1)\n"
" -C, --connect establish new connection for each transaction\n"
" -D, --define=VARNAME=VALUE\n"
- " define variable for use by custom script\n"
- " -f, --file=FILENAME read transaction script from FILENAME\n"
+ " define variable for use by custom script\n"
+ " -f, --file=FILENAME read transaction script from FILENAME\n"
" -j, --jobs=NUM number of threads (default: 1)\n"
" -l, --log write transaction times to log file\n"
" -M, --protocol=simple|extended|prepared\n"
" -N, --skip-some-updates skip updates of pgbench_tellers and pgbench_branches\n"
" -P, --progress=NUM show thread progress report every NUM seconds\n"
" -r, --report-latencies report average latency per command\n"
- " -R, --rate=NUM target rate in transactions per second\n"
+ " -R, --rate=NUM target rate in transactions per second\n"
" -s, --scale=NUM report this scale factor in output\n"
" -S, --select-only perform SELECT-only transactions\n"
" -t, --transactions=NUM number of transactions each client runs (default: 10)\n"
- " -T, --time=NUM duration of benchmark test in seconds\n"
+ " -T, --time=NUM duration of benchmark test in seconds\n"
" -v, --vacuum-all vacuum all four standard tables before tests\n"
" --aggregate-interval=NUM aggregate data over NUM seconds\n"
" --sampling-rate=NUM fraction of transactions to log (e.g. 0.01 for 1%%)\n"
"\nCommon options:\n"
" -d, --debug print debugging output\n"
- " -h, --host=HOSTNAME database server host or socket directory\n"
+ " -h, --host=HOSTNAME database server host or socket directory\n"
" -p, --port=PORT database server port number\n"
" -U, --username=USERNAME connect as specified database user\n"
- " -V, --version output version information, then exit\n"
+ " -V, --version output version information, then exit\n"
" -?, --help show this help, then exit\n"
"\n"
ptr++;
/*
- * Do an explicit check for INT64_MIN. Ugly though this is, it's
+ * Do an explicit check for INT64_MIN. Ugly though this is, it's
* cleaner than trying to get the loop below to handle it portably.
*/
if (strncmp(ptr, "9223372036854775808", 19) == 0)
{
PGresult *res;
Command **commands;
- bool trans_needs_throttle = false;
+ bool trans_needs_throttle = false;
top:
commands = sql_files[st->use_file];
/*
- * Handle throttling once per transaction by sleeping. It is simpler
- * to do this here rather than at the end, because so much complicated
- * logic happens below when statements finish.
+ * Handle throttling once per transaction by sleeping. It is simpler to
+ * do this here rather than at the end, because so much complicated logic
+ * happens below when statements finish.
*/
- if (throttle_delay && ! st->is_throttled)
+ if (throttle_delay && !st->is_throttled)
{
/*
* Use inverse transform sampling to randomly generate a delay, such
* that the series of delays will approximate a Poisson distribution
* centered on the throttle_delay time.
*
- * 10000 implies a 9.2 (-log(1/10000)) to 0.0 (log 1) delay multiplier,
- * and results in a 0.055 % target underestimation bias:
+ * 10000 implies a 9.2 (-log(1/10000)) to 0.0 (log 1) delay
+ * multiplier, and results in a 0.055 % target underestimation bias:
*
* SELECT 1.0/AVG(-LN(i/10000.0)) FROM generate_series(1,10000) AS i;
* = 1.000552717032611116335474
*
- * If transactions are too slow or a given wait is shorter than
- * a transaction, the next transaction will start right away.
+ * If transactions are too slow or a given wait is shorter than a
+ * transaction, the next transaction will start right away.
*/
- int64 wait = (int64) (throttle_delay *
- 1.00055271703 * -log(getrand(thread, 1, 10000)/10000.0));
+ int64 wait = (int64) (throttle_delay *
+ 1.00055271703 * -log(getrand(thread, 1, 10000) / 10000.0));
thread->throttle_trigger += wait;
st->throttling = true;
st->is_throttled = true;
if (debug)
- fprintf(stderr, "client %d throttling "INT64_FORMAT" us\n",
+ fprintf(stderr, "client %d throttling " INT64_FORMAT " us\n",
st->id, wait);
}
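As a standalone sanity check of the figures quoted in the comment above (a sketch, not pgbench code; the 5000 us delay is an assumed example corresponding to --rate=200): averaging the -log(i/10000) multiplier over all 10000 possible draws reproduces the 1.000552717... correction factor, and scaling the assumed throttle_delay by that multiplier yields the exponentially distributed waits that approximate Poisson arrivals.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	double sum = 0.0;
	int64_t throttle_delay = 5000;	/* assumed: --rate=200 -> 5000 us */

	/* average delay multiplier over the 10000 equally likely draws */
	for (int i = 1; i <= 10000; i++)
		sum += -log(i / 10000.0);
	printf("correction factor: %.12f\n", 10000.0 / sum);

	/* a few sample waits, mirroring the computation in the hunk above */
	for (int i = 1; i <= 10000; i += 2499)
	{
		int64_t wait = (int64_t) (throttle_delay *
								  1.00055271703 * -log(i / 10000.0));

		printf("draw %5d -> wait %lld us\n", i, (long long) wait);
	}
	return 0;
}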
if (st->sleeping)
{ /* are we sleeping? */
instr_time now;
- int64 now_us;
+ int64 now_us;
INSTR_TIME_SET_CURRENT(now);
now_us = INSTR_TIME_GET_MICROSEC(now);
if (st->throttling)
{
/* Measure lag of throttled transaction relative to target */
- int64 lag = now_us - st->until;
+ int64 lag = now_us - st->until;
+
thread->throttle_lag += lag;
if (lag > thread->throttle_lag_max)
thread->throttle_lag_max = lag;
INSTR_TIME_SUBTRACT(diff, st->txn_begin);
latency = INSTR_TIME_GET_MICROSEC(diff);
st->txn_latencies += latency;
+
/*
* XXX In a long benchmark run of high-latency transactions, this
* int64 addition eventually overflows. For example, 100 threads
st->use_file = (int) getrand(thread, 0, num_files - 1);
commands = sql_files[st->use_file];
st->is_throttled = false;
+
/*
- * No transaction is underway anymore, which means there is nothing
- * to listen to right now. When throttling rate limits are active,
- * a sleep will happen next, as the next transaction starts. And
- * then in any case the next SQL command will set listen back to 1.
+ * No transaction is underway anymore, which means there is
+ * nothing to listen to right now. When throttling rate limits
+ * are active, a sleep will happen next, as the next transaction
+ * starts. And then in any case the next SQL command will set
+ * listen back to 1.
*/
st->listen = 0;
- trans_needs_throttle = (throttle_delay>0);
+ trans_needs_throttle = (throttle_delay > 0);
}
}
}
/*
- * This ensures that a throttling delay is inserted before proceeding
- * with sql commands, after the first transaction. The first transaction
+ * This ensures that a throttling delay is inserted before proceeding with
+ * sql commands, after the first transaction. The first transaction
* throttling is performed when first entering doCustom.
*/
- if (trans_needs_throttle) {
+ if (trans_needs_throttle)
+ {
trans_needs_throttle = false;
goto top;
}
* Note: TPC-B requires at least 100 bytes per row, and the "filler"
* fields in these table declarations were intended to comply with that.
* The pgbench_accounts table complies with that because the "filler"
- * column is set to blank-padded empty string. But for all other tables the
- * column defaults to NULL and so don't actually take any space. We could
- * fix that by giving them non-null default values. However, that would
- * completely break comparability of pgbench results with prior versions.
- * Since pgbench has never pretended to be fully TPC-B compliant anyway, we
- * stick with the historical behavior.
+ * column is set to blank-padded empty string. But for all other tables
+ * the columns default to NULL and so don't actually take any space. We
+ * could fix that by giving them non-null default values. However, that
+ * would completely break comparability of pgbench results with prior
+ * versions. Since pgbench has never pretended to be fully TPC-B compliant
+ * anyway, we stick with the historical behavior.
*/
struct ddlinfo
{
if (throttle_delay || progress)
{
/* compute and show latency average and standard deviation */
- double latency = 0.001 * total_latencies / normal_xacts;
- double sqlat = (double) total_sqlats / normal_xacts;
+ double latency = 0.001 * total_latencies / normal_xacts;
+ double sqlat = (double) total_sqlats / normal_xacts;
+
printf("latency average: %.3f ms\n"
"latency stddev: %.3f ms\n",
latency, 0.001 * sqrt(sqlat - 1000000.0 * latency * latency));
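The stddev printed above relies on the identity Var(X) = E[X^2] - E[X]^2, with per-transaction latencies accumulated in microseconds and the report given in milliseconds; hence the 0.001 and 1000000.0 scale factors. A minimal standalone sketch of the same bookkeeping (made-up sample values, not pgbench code):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* pretend per-transaction latencies, in microseconds */
	int64_t samples[] = {1200, 1500, 900, 2100, 1800};
	int n = 5;
	int64_t total = 0;
	int64_t total_sq = 0;

	for (int i = 0; i < n; i++)
	{
		total += samples[i];
		total_sq += samples[i] * samples[i];
	}

	/* same arithmetic as the report: mean in ms, then sqrt(E[X^2] - E[X]^2) */
	double latency = 0.001 * total / n;		/* ms */
	double sqlat = (double) total_sq / n;	/* us^2 */
	double stddev = 0.001 * sqrt(sqlat - 1000000.0 * latency * latency);

	printf("latency average: %.3f ms\n", latency);
	printf("latency stddev: %.3f ms\n", stddev);
	return 0;
}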
main(int argc, char **argv)
{
static struct option long_options[] = {
- /* systematic long/short named options*/
+ /* systematic long/short named options */
{"client", required_argument, NULL, 'c'},
{"connect", no_argument, NULL, 'C'},
{"debug", no_argument, NULL, 'd'},
int total_xacts = 0;
int64 total_latencies = 0;
int64 total_sqlats = 0;
- int64 throttle_lag = 0;
- int64 throttle_lag_max = 0;
+ int64 throttle_lag = 0;
+ int64 throttle_lag_max = 0;
int i;
if (progress <= 0)
{
fprintf(stderr,
- "thread progress delay (-P) must be positive (%s)\n",
+ "thread progress delay (-P) must be positive (%s)\n",
optarg);
exit(1);
}
break;
case 'R':
- {
- /* get a double from the beginning of option value */
- double throttle_value = atof(optarg);
- if (throttle_value <= 0.0)
{
- fprintf(stderr, "invalid rate limit: %s\n", optarg);
- exit(1);
+ /* get a double from the beginning of option value */
+ double throttle_value = atof(optarg);
+
+ if (throttle_value <= 0.0)
+ {
+ fprintf(stderr, "invalid rate limit: %s\n", optarg);
+ exit(1);
+ }
+ /* Invert rate limit into a time offset */
+ throttle_delay = (int64) (1000000.0 / throttle_value);
}
- /* Invert rate limit into a time offset */
- throttle_delay = (int64) (1000000.0 / throttle_value);
- }
break;
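For example (illustrative numbers, not from the source): --rate=200 gives throttle_delay = (int64) (1000000.0 / 200.0) = 5000, i.e. transaction starts are scheduled 5000 us apart on average, which is the value the Poisson-throttling code shown earlier multiplies by the -log() factor to get each individual wait.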
case 0:
/* This covers long options which take no argument. */
int nstate = thread->nstate;
int remains = nstate; /* number of remaining clients */
int i;
+
/* for reporting progress: */
- int64 thread_start = INSTR_TIME_GET_MICROSEC(thread->start_time);
+ int64 thread_start = INSTR_TIME_GET_MICROSEC(thread->start_time);
int64 last_report = thread_start;
int64 next_report = last_report + (int64) progress * 1000000;
- int64 last_count = 0, last_lats = 0, last_sqlats = 0, last_lags = 0;
+ int64 last_count = 0,
+ last_lats = 0,
+ last_sqlats = 0,
+ last_lags = 0;
AggVals aggs;
st->con = NULL;
continue;
}
- else /* just a nap from the script */
+ else /* just a nap from the script */
{
int this_usec;
/* each process reports its own progression */
if (progress)
{
- instr_time now_time;
- int64 now;
+ instr_time now_time;
+ int64 now;
+
INSTR_TIME_SET_CURRENT(now_time);
now = INSTR_TIME_GET_MICROSEC(now_time);
if (now >= next_report)
{
/* generate and show report */
- int64 count = 0, lats = 0, sqlats = 0;
- int64 lags = thread->throttle_lag;
- int64 run = now - last_report;
- double tps, total_run, latency, sqlat, stdev, lag;
-
- for (i = 0 ; i < nstate ; i++)
+ int64 count = 0,
+ lats = 0,
+ sqlats = 0;
+ int64 lags = thread->throttle_lag;
+ int64 run = now - last_report;
+ double tps,
+ total_run,
+ latency,
+ sqlat,
+ stdev,
+ lag;
+
+ for (i = 0; i < nstate; i++)
{
count += state[i].cnt;
lats += state[i].txn_latencies;
last_sqlats = sqlats;
last_lags = lags;
last_report = now;
- next_report += (int64) progress * 1000000;
+ next_report += (int64) progress *1000000;
}
}
#else
/* progress report by thread 0 for all threads */
if (progress && thread->tid == 0)
{
- instr_time now_time;
- int64 now;
+ instr_time now_time;
+ int64 now;
+
INSTR_TIME_SET_CURRENT(now_time);
now = INSTR_TIME_GET_MICROSEC(now_time);
if (now >= next_report)
{
/* generate and show report */
- int64 count = 0, lats = 0, sqlats = 0, lags = 0;
- int64 run = now - last_report;
- double tps, total_run, latency, sqlat, lag, stdev;
-
- for (i = 0 ; i < progress_nclients ; i++)
+ int64 count = 0,
+ lats = 0,
+ sqlats = 0,
+ lags = 0;
+ int64 run = now - last_report;
+ double tps,
+ total_run,
+ latency,
+ sqlat,
+ lag,
+ stdev;
+
+ for (i = 0; i < progress_nclients; i++)
{
count += state[i].cnt;
- lats += state[i].txn_latencies;
+ lats += state[i].txn_latencies;
sqlats += state[i].txn_sqlats;
}
- for (i = 0 ; i < progress_nthreads ; i++)
+ for (i = 0; i < progress_nthreads; i++)
lags += thread[i].throttle_lag;
total_run = (now - thread_start) / 1000000.0;
last_sqlats = sqlats;
last_lags = lags;
last_report = now;
- next_report += (int64) progress * 1000000;
+ next_report += (int64) progress *1000000;
}
}
-#endif /* PTHREAD_FORK_EMULATION */
+#endif /* PTHREAD_FORK_EMULATION */
}
done:
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* entirely in crypt_blowfish.c.
*
* Put bcrypt generator also here as crypt-blowfish.c
- * may not be compiled always. -- marko
+ * may not be compiled always. -- marko
*/
#include "postgres.h"
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
/*
* There is some confusion about whether and how to carry forward
- * the state of the pools. Seems like original Fortuna does not
+ * the state of the pools. Seems like original Fortuna does not
* do it, resetting hash after each request. I guess expecting
* feeding to happen more often than requesting. This is absolutely
* unsuitable for pgcrypto, as nothing asynchronous happens here.
* How many pools.
*
* Original Fortuna uses 32 pools, which means the 32nd pool is
- * used not earlier than in 13th year. This is a waste in
+ * used no earlier than in the 13th year. This is a waste in
* pgcrypto, as we have very low-frequency seeding. Here it
* is preferable to have all entropy usable in reasonable time.
*
}
/*
- * Pick a random pool. This uses key bytes as random source.
+ * Pick a random pool. This uses key bytes as random source.
*/
static unsigned
get_rand_pool(FState *st)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
-/* Unsigned magnitude subtraction. Assumes dc is big enough. */
+/* Unsigned magnitude subtraction. Assumes dc is big enough. */
static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
/* }}} */
/*------------------------------------------------------------------------*/
-/* Private functions for internal use. These make assumptions. */
+/* Private functions for internal use. These make assumptions. */
/* {{{ s_alloc(num) */
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
/*
* Test if key len is supported. BF_set_key silently cuts large keys and it
- * could be a problem when user transfer crypted data from one server
- * to another.
+ * could be a problem when a user transfers encrypted data from one server to
+ * another.
*/
if (bf_is_strong == -1)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
}
/*
- * Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC)
+ * Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC)
*/
static int
mix_encrypt_normal(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
- * above that it uses 'arbitrary high number'. Following
+ * above that it uses 'arbitrary high number'. Following
* algorithm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorithm.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
- * above that it uses 'arbitrary high number'. Following
+ * above that it uses 'arbitrary high number'. Following
* algorithm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorithm.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
}
/*
- * Mix user data into RNG. It is for user own interests to have
+ * Mix user data into RNG. It is in the user's own interest to have
* RNG state shuffled.
*/
static void
}
/*
- * Find next word. Handle ',' and '=' as words. Skip whitespace.
+ * Find next word. Handle ',' and '=' as words. Skip whitespace.
* Put word info into res_p, res_len.
* Returns ptr to next word.
*/
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
-/* programme of the US National Institute of Standards and Technology. */
+/* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
-/* that the originators of the algorithm place on its exploitation. */
+/* that the originators of the algorithm place on its exploitation. */
/* */
/* rijndael specification is in big endian format with */
/* bit 0 as the most significant bit. In the remainder */
/* of the specification the bits are numbered from the */
- /* least significant end of a byte. */
+ /* least significant end of a byte. */
for (i = 0; i < 256; ++i)
{
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
-/* programme of the US National Institute of Standards and Technology. */
+/* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
-/* that the originators of the algorithm place on its exploitation. */
+/* that the originators of the algorithm place on its exploitation. */
/* */
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
values[j++] = psprintf("%d", indexStat.version);
values[j++] = psprintf("%d", indexStat.level);
values[j++] = psprintf(INT64_FORMAT,
- (indexStat.root_pages +
- indexStat.leaf_pages +
- indexStat.internal_pages +
- indexStat.deleted_pages +
- indexStat.empty_pages) * BLCKSZ);
+ (indexStat.root_pages +
+ indexStat.leaf_pages +
+ indexStat.internal_pages +
+ indexStat.deleted_pages +
+ indexStat.empty_pages) * BLCKSZ);
values[j++] = psprintf("%u", indexStat.root_blkno);
values[j++] = psprintf(INT64_FORMAT, indexStat.internal_pages);
values[j++] = psprintf(INT64_FORMAT, indexStat.leaf_pages);
values[j++] = psprintf(INT64_FORMAT, indexStat.deleted_pages);
if (indexStat.max_avail > 0)
values[j++] = psprintf("%.2f",
- 100.0 - (double) indexStat.free_space / (double) indexStat.max_avail * 100.0);
+ 100.0 - (double) indexStat.free_space / (double) indexStat.max_avail * 100.0);
else
values[j++] = pstrdup("NaN");
if (indexStat.leaf_pages > 0)
values[j++] = psprintf("%.2f",
- (double) indexStat.fragments / (double) indexStat.leaf_pages * 100.0);
+ (double) indexStat.fragments / (double) indexStat.leaf_pages * 100.0);
else
values[j++] = pstrdup("NaN");
/*
* To avoid physically reading the table twice, try to do the
- * free-space scan in parallel with the heap scan. However,
+ * free-space scan in parallel with the heap scan. However,
* heap_getnext may find no tuples on a given page, so we cannot
* simply examine the pages returned by the heap scan.
*/
* the right subtransaction nesting depth if we didn't do that already.
*
* will_prep_stmt must be true if caller intends to create any prepared
- * statements. Since those don't go away automatically at transaction end
+ * statements. Since those don't go away automatically at transaction end
* (not even on error), we need this flag to cue manual cleanup.
*
* XXX Note that caching connections theoretically requires a mechanism to
/*
* If cache entry doesn't have a connection, we have to establish a new
- * connection. (If connect_pg_server throws an error, the cache entry
+ * connection. (If connect_pg_server throws an error, the cache entry
* will be left in a valid empty state.)
*/
if (entry->conn == NULL)
}
/*
- * For non-superusers, insist that the connstr specify a password. This
+ * For non-superusers, insist that the connstr specify a password. This
* prevents a password from being picked up from .pgpass, a service file,
* the environment, etc. We don't want the postgres user's passwords
- * to be accessible to non-superusers. (See also dblink_connstr_check in
+ * to be accessible to non-superusers. (See also dblink_connstr_check in
* contrib/dblink.)
*/
static void
/*
* Set remote timezone; this is basically just cosmetic, since all
* transmitted and returned timestamptzs should specify a zone explicitly
- * anyway. However it makes the regression test outputs more predictable.
+ * anyway. However it makes the regression test outputs more predictable.
*
* We don't risk setting remote zone equal to ours, since the remote
- * server might use a different timezone database. Instead, use UTC
+ * server might use a different timezone database. Instead, use UTC
* (quoted, because very old servers are picky about case).
*/
do_sql_command(conn, "SET timezone = 'UTC'");
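For readers unfamiliar with how such per-session settings are applied over libpq, here is a minimal standalone sketch (assumed connection string, for illustration only; this is not the postgres_fdw code, which wraps the same idea in do_sql_command with its own error reporting):

#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

int
main(void)
{
	/* assumed connection string */
	PGconn	   *conn = PQconnectdb("dbname=postgres");
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}

	/* the same cosmetic session setting described in the comment above */
	res = PQexec(conn, "SET timezone = 'UTC'");
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
		fprintf(stderr, "SET failed: %s", PQerrorMessage(conn));
	PQclear(res);

	PQfinish(conn);
	return 0;
}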
* We must check that the expression contains only node types we can deparse,
* that all types/functions/operators are safe to send (which we approximate
* as being built-in), and that all collations used in the expression derive
- * from Vars of the foreign table. Because of the latter, the logic is
+ * from Vars of the foreign table. Because of the latter, the logic is
* pretty close to assign_collations_walker() in parse_collate.c, though we
* can assume here that the given expression is valid.
*/
/*
* If the Var is from the foreign table, we consider its
- * collation (if any) safe to use. If it is from another
+ * collation (if any) safe to use. If it is from another
* table, we treat its collation the same way as we would a
* Param's collation, ie it's not safe for it to have a
* non-default collation.
/*
* Detect whether node is introducing a collation not derived
- * from a foreign Var. (If so, we just mark it unsafe for now
+ * from a foreign Var. (If so, we just mark it unsafe for now
* rather than immediately returning false, since the parent
* node might not care.)
*/
/*
* Construct a simple SELECT statement that retrieves desired columns
- * of the specified foreign table, and append it to "buf". The output
+ * of the specified foreign table, and append it to "buf". The output
* contains just "SELECT ... FROM tablename".
*
* We also create an integer List of the columns being retrieved, which is
}
/*
- * Add ctid if needed. We currently don't support retrieving any other
+ * Add ctid if needed. We currently don't support retrieving any other
* system columns.
*/
if (bms_is_member(SelfItemPointerAttributeNumber - FirstLowInvalidHeapAttributeNumber,
/*
* Deparse referenced array expression first. If that expression includes
* a cast, we have to parenthesize to prevent the array subscript from
- * being taken as typename decoration. We can avoid that in the typical
+ * being taken as typename decoration. We can avoid that in the typical
* case of subscripting a Var, but otherwise do it.
*/
if (IsA(node->refexpr, Var))
}
/*
- * Deparse given operator expression. To avoid problems around
+ * Deparse given operator expression. To avoid problems around
* priority of operations, we always parenthesize the arguments.
*/
static void
}
/*
- * Deparse given ScalarArrayOpExpr expression. To avoid problems
+ * Deparse given ScalarArrayOpExpr expression. To avoid problems
* around priority of operations, we always parenthesize the arguments.
*/
static void
* This is used when we're just trying to EXPLAIN the remote query.
* We don't have the actual value of the runtime parameter yet, and we don't
* want the remote planner to generate a plan that depends on such a value
- * anyway. Thus, we can't do something simple like "$1::paramtype".
+ * anyway. Thus, we can't do something simple like "$1::paramtype".
* Instead, we emit "((SELECT null::paramtype)::paramtype)".
* In all extant versions of Postgres, the planner will see that as an unknown
* constant value, which is what we want. This might need adjustment if we
/*
* Generate key-value arrays which include only libpq options from the
- * given list (which can contain any kind of options). Caller must have
+ * given list (which can contain any kind of options). Caller must have
* allocated large-enough arrays. Returns number of options found.
*/
int
* 2) Integer list of attribute numbers retrieved by the SELECT
*
* These items are indexed with the enum FdwScanPrivateIndex, so an item
- * can be fetched with list_nth(). For example, to get the SELECT statement:
+ * can be fetched with list_nth(). For example, to get the SELECT statement:
* sql = strVal(list_nth(fdw_private, FdwScanPrivateSelectSql));
*/
enum FdwScanPrivateIndex
/*
* If the table or the server is configured to use remote estimates,
- * identify which user to do remote access as during planning. This
- * should match what ExecCheckRTEPerms() does. If we fail due to lack of
+ * identify which user to do remote access as during planning. This
+ * should match what ExecCheckRTEPerms() does. If we fail due to lack of
* permissions, the query would have failed at runtime anyway.
*/
if (fpinfo->use_remote_estimate)
/*
* Identify which attributes will need to be retrieved from the remote
- * server. These include all attrs needed for joins or final output, plus
+ * server. These include all attrs needed for joins or final output, plus
* all attrs used in the local_conds. (Note: if we end up using a
* parameterized scan, it's possible that some of the join clauses will be
* sent to the remote and thus we wouldn't really need to retrieve the
if (fpinfo->use_remote_estimate)
{
/*
- * Get cost/size estimates with help of remote server. Save the
+ * Get cost/size estimates with help of remote server. Save the
* values in fpinfo so we don't need to do it again to generate the
* basic foreign path.
*/
* remote-safety.
*
* Note: the join clauses we see here should be the exact same ones
- * previously examined by postgresGetForeignPaths. Possibly it'd be worth
+ * previously examined by postgresGetForeignPaths. Possibly it'd be worth
* passing forward the classification work done then, rather than
* repeating it here.
*
node->fdw_state = (void *) fsstate;
/*
- * Identify which user to do the remote access as. This should match what
+ * Identify which user to do the remote access as. This should match what
* ExecCheckRTEPerms() does.
*/
rte = rt_fetch(fsplan->scan.scanrelid, estate->es_range_table);
* Prepare remote-parameter expressions for evaluation. (Note: in
* practice, we expect that all these expressions will be just Params, so
* we could possibly do something more efficient than using the full
- * expression-eval machinery for this. But probably there would be little
+ * expression-eval machinery for this. But probably there would be little
* benefit, and it'd require postgres_fdw to know more than is desirable
* about Param evaluation.)
*/
/*
* If any internal parameters affecting this node have changed, we'd
- * better destroy and recreate the cursor. Otherwise, rewinding it should
- * be good enough. If we've only fetched zero or one batch, we needn't
+ * better destroy and recreate the cursor. Otherwise, rewinding it should
+ * be good enough. If we've only fetched zero or one batch, we needn't
* even rewind the cursor, just rescan what we have.
*/
if (node->ss.ps.chgParam != NULL)
* Note: currently, the plan tree generated for UPDATE/DELETE will always
* include a ForeignScan that retrieves ctids (using SELECT FOR UPDATE)
* and then the ModifyTable node will have to execute individual remote
- * UPDATE/DELETE commands. If there are no local conditions or joins
+ * UPDATE/DELETE commands. If there are no local conditions or joins
* needed, it'd be better to let the scan node do UPDATE/DELETE RETURNING
- * and then do nothing at ModifyTable. Room for future optimization ...
+ * and then do nothing at ModifyTable. Room for future optimization ...
*/
static List *
postgresPlanForeignModify(PlannerInfo *root,
fmstate->rel = rel;
/*
- * Identify which user to do the remote access as. This should match what
+ * Identify which user to do the remote access as. This should match what
* ExecCheckRTEPerms() does.
*/
rte = rt_fetch(resultRelInfo->ri_RangeTableIndex, estate->es_range_table);
pgfdw_report_error(ERROR, res, conn, false, sql);
/*
- * Extract cost numbers for topmost plan node. Note we search for a
+ * Extract cost numbers for topmost plan node. Note we search for a
* left paren from the end of the line to avoid being confused by
* other uses of parentheses.
*/
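/*
 * A standalone illustration of the parsing approach described above: find
 * the last '(' in an EXPLAIN output line and read the cost numbers from
 * there, so parentheses earlier in the plan text cannot confuse us.  The
 * sample line follows EXPLAIN's usual "(cost=.. rows=.. width=..)" text;
 * this is a sketch of the idea, not postgres_fdw's actual code.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *line =
    "Foreign Scan on public.t1 (remote)  (cost=100.00..152.25 rows=1250 width=36)";
    const char *p = strrchr(line, '(');
    double      startup = 0,
                total = 0,
                rows = 0;
    int         width = 0;

    if (p == NULL ||
        sscanf(p, "(cost=%lf..%lf rows=%lf width=%d)",
               &startup, &total, &rows, &width) != 4)
    {
        fprintf(stderr, "could not parse cost from line\n");
        return 1;
    }
    printf("startup=%.2f total=%.2f rows=%.0f width=%d\n",
           startup, total, rows, width);
    return 0;
}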
* Notice that we pass NULL for paramTypes, thus forcing the remote server
* to infer types for all parameters. Since we explicitly cast every
* parameter (see deparse.c), the "inference" is trivial and will produce
- * the desired result. This allows us to avoid assuming that the remote
+ * the desired result. This allows us to avoid assuming that the remote
* server has the same OIDs we do for the parameters' types.
*
* We don't use a PG_TRY block here, so be careful not to throw error
* user-visible computations.
*
* We use the equivalent of a function SET option to allow the settings to
- * persist only until the caller calls reset_transmission_modes(). If an
+ * persist only until the caller calls reset_transmission_modes(). If an
* error is thrown in between, guc.c will take care of undoing the settings.
*
* The return value is the nestlevel that must be passed to
int nestlevel = NewGUCNestLevel();
/*
- * The values set here should match what pg_dump does. See also
+ * The values set here should match what pg_dump does. See also
* configure_remote_session in connection.c.
*/
if (DateStyle != USE_ISO_DATES)
*func = postgresAcquireSampleRowsFunc;
/*
- * Now we have to get the number of pages. It's annoying that the ANALYZE
+ * Now we have to get the number of pages. It's annoying that the ANALYZE
* API requires us to return that now, because it forces some duplication
* of effort between this routine and postgresAcquireSampleRowsFunc. But
* it's probably not worth redefining that API at this point.
* which must have at least targrows entries.
* The actual number of rows selected is returned as the function result.
* We also count the total number of rows in the table and return it into
- * *totalrows. Note that *totaldeadrows is always set to 0.
+ * *totalrows. Note that *totaldeadrows is always set to 0.
*
* Note that the returned list of rows is not always in order by physical
* position in the table. Therefore, correlation estimates derived later
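/*
 * A standalone sketch of one way to meet the contract above: keep at most
 * "targrows" sampled items while counting every row seen (reservoir
 * sampling).  It samples plain integers with rand(), whereas the real
 * function samples remote heap tuples using the server's sampling support;
 * treat this purely as an illustration of the rows/totalrows bookkeeping.
 */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    enum {TARGROWS = 5, NROWS = 1000};
    int         sample[TARGROWS];
    int         numrows = 0;    /* rows currently held in sample[] */
    double      totalrows = 0;  /* all rows seen so far */
    int         row;

    srand(42);
    for (row = 0; row < NROWS; row++)
    {
        totalrows += 1;
        if (numrows < TARGROWS)
            sample[numrows++] = row;    /* reservoir not yet full */
        else
        {
            /* replace a random slot with probability TARGROWS/totalrows */
            long        k = rand() % (long) totalrows;

            if (k < TARGROWS)
                sample[k] = row;
        }
    }

    printf("totalrows=%.0f, sampled %d rows:", totalrows, numrows);
    for (row = 0; row < numrows; row++)
        printf(" %d", sample[row]);
    printf("\n");
    return 0;
}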
/*
* Callback function which is called when error occurs during column value
- * conversion. Print names of column and relation.
+ * conversion. Print names of column and relation.
*/
static void
conversion_error_callback(void *arg)
* sepgsql_set_client_label
*
* This routine tries to switch the current security label of the client, and
- * checks related permissions. The supplied new label shall be added to the
+ * checks related permissions. The supplied new label shall be added to the
* client_label_pending list, then saved at transaction-commit time to ensure
* transaction-awareness.
*/
/*
* sepgsql_xact_callback
*
- * A callback routine of transaction commit/abort/prepare. Commmit or abort
+ * A callback routine of transaction commit/abort/prepare. Commit or abort
* changes in the client_label_pending list.
*/
static void
* Access control decisions must be atomic, but multiple system calls may
* be required to make a decision; thus, when referencing the access vector
* cache, we must loop until we complete without an intervening cache flush
- * event. In practice, looping even once should be very rare. Callers should
+ * event. In practice, looping even once should be very rare. Callers should
* do something like this:
*
* sepgsql_avc_check_valid();
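/*
 * A standalone sketch of the "loop until no cache flush intervened" pattern
 * described above.  The cache_generation counter and make_decision() are
 * stand-ins for the module's access vector cache, not sepgsql's real API;
 * the point is only that the decision is retried if the cache changed
 * underneath it.
 */
#include <stdio.h>

static int  cache_generation = 1;
static int  flushes_to_simulate = 1;

static int
make_decision(void)
{
    /* pretend the first attempt races with a cache flush */
    if (flushes_to_simulate > 0)
    {
        flushes_to_simulate--;
        cache_generation++;     /* an intervening flush event */
    }
    return 1;                   /* "allowed" */
}

int
main(void)
{
    int         allowed;
    int         gen;
    int         attempts = 0;

    do
    {
        gen = cache_generation; /* remember cache state before deciding */
        allowed = make_decision();
        attempts++;
    } while (gen != cache_generation);  /* retry if a flush intervened */

    printf("decision: %s after %d attempt(s)\n",
           allowed ? "allowed" : "denied", attempts);
    return 0;
}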
/*
* timetravel () --
- * 1. IF an update affects tuple with stop_date eq INFINITY
+ * 1. IF an update affects tuple with stop_date eq INFINITY
* then form (and return) new tuple with start_date eq current date
* and stop_date eq INFINITY [ and update_user eq current user ]
* and all other column values as in new tuple, and insert tuple
* with old data and stop_date eq current date
* ELSE - skip updation of tuple.
- * 2. IF an delete affects tuple with stop_date eq INFINITY
+ * 2. IF a delete affects tuple with stop_date eq INFINITY
* then insert the same tuple with stop_date eq current date
* [ and delete_user eq current user ]
* ELSE - skip deletion of tuple.
- * 3. On INSERT, if start_date is NULL then current date will be
+ * 3. On INSERT, if start_date is NULL then current date will be
* inserted, if stop_date is NULL then INFINITY will be inserted.
* [ and insert_user eq current user, update_user and delete_user
* eq NULL ]
* current database encoding if possible. Any invalid characters are
* replaced by question marks.
*
- * Parameter: str - OpenSSL ASN1_STRING structure. Memory management
+ * Parameter: str - OpenSSL ASN1_STRING structure. Memory management
* of this structure is responsibility of caller.
*
* Returns Datum, which can be directly returned from a C language SQL
* triggered_change_notification
*
* This trigger function will send a notification of data modification with
- * primary key values. The channel will be "tcn" unless the trigger is
+ * primary key values. The channel will be "tcn" unless the trigger is
* created with a parameter, in which case that parameter will be used.
*/
PG_FUNCTION_INFO_V1(triggered_change_notification);
PG_MODULE_MAGIC;
/* These must be available to pg_dlsym() */
-extern void _PG_init(void);
-extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
+extern void _PG_init(void);
+extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
typedef struct
{
} TestDecodingData;
static void pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
- bool is_init);
+ bool is_init);
static void pg_decode_shutdown(LogicalDecodingContext *ctx);
static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn);
else if (!parse_bool(strVal(elem->arg), &data->include_xids))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("could not parse value \"%s\" for parameter \"%s\"",
- strVal(elem->arg), elem->defname)));
+ errmsg("could not parse value \"%s\" for parameter \"%s\"",
+ strVal(elem->arg), elem->defname)));
}
else if (strcmp(elem->defname, "include-timestamp") == 0)
{
else if (!parse_bool(strVal(elem->arg), &data->include_timestamp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("could not parse value \"%s\" for parameter \"%s\"",
- strVal(elem->arg), elem->defname)));
+ errmsg("could not parse value \"%s\" for parameter \"%s\"",
+ strVal(elem->arg), elem->defname)));
}
else if (strcmp(elem->defname, "force-binary") == 0)
{
- bool force_binary;
+ bool force_binary;
if (elem->arg == NULL)
continue;
else if (!parse_bool(strVal(elem->arg), &force_binary))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("could not parse value \"%s\" for parameter \"%s\"",
- strVal(elem->arg), elem->defname)));
+ errmsg("could not parse value \"%s\" for parameter \"%s\"",
+ strVal(elem->arg), elem->defname)));
if (force_binary)
opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT;
OidOutputFunctionCall(typoutput, origval));
else
{
- Datum val; /* definitely detoasted Datum */
+ Datum val; /* definitely detoasted Datum */
+
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
print_literal(s, typid, OidOutputFunctionCall(typoutput, val));
}
appendStringInfoString(ctx->out, "table ");
appendStringInfoString(ctx->out,
quote_qualified_identifier(
- get_namespace_name(
- get_rel_namespace(RelationGetRelid(relation))),
- NameStr(class_form->relname)));
+ get_namespace_name(
+ get_rel_namespace(RelationGetRelid(relation))),
+ NameStr(class_form->relname)));
appendStringInfoString(ctx->out, ":");
switch (change->action)
typedef struct
{
- int nworkers;
+ int nworkers;
BackgroundWorkerHandle *handle[FLEXIBLE_ARRAY_MEMBER];
} worker_state;
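/*
 * A standalone sketch of allocating a struct that ends in a flexible array
 * member, as worker_state above does.  The demo_handle type and malloc()
 * are stand-ins; the real code stores BackgroundWorkerHandle pointers and
 * allocates from a memory context.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

typedef struct
{
    int         id;             /* stand-in for a BackgroundWorkerHandle */
} demo_handle;

typedef struct
{
    int         nworkers;
    demo_handle handle[];       /* flexible array member */
} demo_worker_state;

int
main(void)
{
    int         nworkers = 3;
    int         i;
    demo_worker_state *wstate;

    /* allocate the fixed part plus nworkers trailing array elements */
    wstate = malloc(offsetof(demo_worker_state, handle) +
                    nworkers * sizeof(demo_handle));
    if (wstate == NULL)
        return 1;
    wstate->nworkers = nworkers;
    for (i = 0; i < nworkers; i++)
        wstate->handle[i].id = i;

    for (i = 0; i < wstate->nworkers; i++)
        printf("slot for worker %d allocated\n", wstate->handle[i].id);
    free(wstate);
    return 0;
}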
test_shm_mq_header **hdrp,
shm_mq **outp, shm_mq **inp);
static worker_state *setup_background_workers(int nworkers,
- dsm_segment *seg);
+ dsm_segment *seg);
static void cleanup_background_workers(dsm_segment *seg, Datum arg);
static void wait_for_workers_to_become_ready(worker_state *wstate,
volatile test_shm_mq_header *hdr);
{
dsm_segment *seg;
test_shm_mq_header *hdr;
- shm_mq *outq = NULL; /* placate compiler */
- shm_mq *inq = NULL; /* placate compiler */
- worker_state *wstate;
+ shm_mq *outq = NULL; /* placate compiler */
+ shm_mq *inq = NULL; /* placate compiler */
+ worker_state *wstate;
/* Set up a dynamic shared memory segment. */
setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
wait_for_workers_to_become_ready(wstate, hdr);
/*
- * Once we reach this point, all workers are ready. We no longer need
- * to kill them if we die; they'll die on their own as the message queues
+ * Once we reach this point, all workers are ready. We no longer need to
+ * kill them if we die; they'll die on their own as the message queues
* shut down.
*/
cancel_on_dsm_detach(seg, cleanup_background_workers,
dsm_segment **segp, test_shm_mq_header **hdrp,
shm_mq **outp, shm_mq **inp)
{
- shm_toc_estimator e;
- int i;
- Size segsize;
- dsm_segment *seg;
- shm_toc *toc;
+ shm_toc_estimator e;
+ int i;
+ Size segsize;
+ dsm_segment *seg;
+ shm_toc *toc;
test_shm_mq_header *hdr;
/* Ensure a valid queue size. */
/* Set up one message queue per worker, plus one. */
for (i = 0; i <= nworkers; ++i)
{
- shm_mq *mq;
+ shm_mq *mq;
mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size),
(Size) queue_size);
static worker_state *
setup_background_workers(int nworkers, dsm_segment *seg)
{
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
BackgroundWorker worker;
- worker_state *wstate;
- int i;
+ worker_state *wstate;
+ int i;
/*
* We need the worker_state object and the background worker handles to
* Arrange to kill all the workers if we abort before all workers are
* finished hooking themselves up to the dynamic shared memory segment.
*
- * If we die after all the workers have finished hooking themselves up
- * to the dynamic shared memory segment, we'll mark the two queues to
- * which we're directly connected as detached, and the worker(s)
- * connected to those queues will exit, marking any other queues to
- * which they are connected as detached. This will cause any
- * as-yet-unaware workers connected to those queues to exit in their
- * turn, and so on, until everybody exits.
+ * If we die after all the workers have finished hooking themselves up to
+ * the dynamic shared memory segment, we'll mark the two queues to which
+ * we're directly connected as detached, and the worker(s) connected to
+ * those queues will exit, marking any other queues to which they are
+ * connected as detached. This will cause any as-yet-unaware workers
+ * connected to those queues to exit in their turn, and so on, until
+ * everybody exits.
*
- * But suppose the workers which are supposed to connect to the queues
- * to which we're directly attached exit due to some error before they
+ * But suppose the workers which are supposed to connect to the queues to
+ * which we're directly attached exit due to some error before they
* actually attach the queues. The remaining workers will have no way of
* knowing this. From their perspective, they're still waiting for those
* workers to start, when in fact they've already died.
wait_for_workers_to_become_ready(worker_state *wstate,
volatile test_shm_mq_header *hdr)
{
- bool save_set_latch_on_sigusr1;
- bool result = false;
+ bool save_set_latch_on_sigusr1;
+ bool result = false;
save_set_latch_on_sigusr1 = set_latch_on_sigusr1;
set_latch_on_sigusr1 = true;
{
for (;;)
{
- int workers_ready;
+ int workers_ready;
/* If all the workers are ready, we have succeeded. */
SpinLockAcquire(&hdr->mutex);
static bool
check_worker_status(worker_state *wstate)
{
- int n;
+ int n;
/* If any workers (or the postmaster) have died, we have failed. */
for (n = 0; n < wstate->nworkers; ++n)
{
BgwHandleStatus status;
- pid_t pid;
+ pid_t pid;
status = GetBackgroundWorkerPid(wstate->handle[n], &pid);
if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED)
#include "test_shm_mq.h"
-PG_MODULE_MAGIC;
-PG_FUNCTION_INFO_V1(test_shm_mq);
+PG_MODULE_MAGIC;
+PG_FUNCTION_INFO_V1(test_shm_mq);
PG_FUNCTION_INFO_V1(test_shm_mq_pipelined);
void _PG_init(void);
dsm_segment *seg;
shm_mq_handle *outqh;
shm_mq_handle *inqh;
- shm_mq_result res;
+ shm_mq_result res;
Size len;
void *data;
/*
* Since this test sends data using the blocking interfaces, it cannot
- * send data to itself. Therefore, a minimum of 1 worker is required.
- * Of course, a negative worker count is nonsensical.
+ * send data to itself. Therefore, a minimum of 1 worker is required. Of
+ * course, a negative worker count is nonsensical.
*/
if (nworkers < 1)
ereport(ERROR,
dsm_segment *seg;
shm_mq_handle *outqh;
shm_mq_handle *inqh;
- shm_mq_result res;
+ shm_mq_result res;
Size len;
void *data;
}
else if (res == SHM_MQ_DETACHED)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not receive message")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not receive message")));
}
else
{
if (send_count != receive_count)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("message sent %d times, but received %d times",
- send_count, receive_count)));
+ errmsg("message sent %d times, but received %d times",
+ send_count, receive_count)));
break;
}
if (wait)
{
/*
- * If we made no progress, wait for one of the other processes
- * to which we are connected to set our latch, indicating that
- * they have read or written data and therefore there may now be
- * work for us to do.
+ * If we made no progress, wait for one of the other processes to
+ * which we are connected to set our latch, indicating that they
+ * have read or written data and therefore there may now be work
+ * for us to do.
*/
WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
CHECK_FOR_INTERRUPTS();
static void
verify_message(Size origlen, char *origdata, Size newlen, char *newdata)
{
- Size i;
+ Size i;
if (origlen != newlen)
ereport(ERROR,
(errmsg("message corrupted"),
errdetail("The original message was %zu bytes but the final message is %zu bytes.",
- origlen, newlen)));
+ origlen, newlen)));
for (i = 0; i < origlen; ++i)
if (origdata[i] != newdata[i])
*/
typedef struct
{
- slock_t mutex;
- int workers_total;
- int workers_attached;
- int workers_ready;
+ slock_t mutex;
+ int workers_total;
+ int workers_attached;
+ int workers_ready;
} test_shm_mq_header;
/* Set up dynamic shared memory and background workers for test run. */
extern void test_shm_mq_setup(int64 queue_size, int32 nworkers,
- dsm_segment **seg, shm_mq_handle **output,
- shm_mq_handle **input);
+ dsm_segment **seg, shm_mq_handle **output,
+ shm_mq_handle **input);
/* Main entrypoint for a worker. */
-extern void test_shm_mq_main(Datum);
+extern void test_shm_mq_main(Datum);
#endif
static void handle_sigterm(SIGNAL_ARGS);
static void attach_to_queues(dsm_segment *seg, shm_toc *toc,
- int myworkernumber, shm_mq_handle **inqhp,
- shm_mq_handle **outqhp);
+ int myworkernumber, shm_mq_handle **inqhp,
+ shm_mq_handle **outqhp);
static void copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh);
/*
test_shm_mq_main(Datum main_arg)
{
dsm_segment *seg;
- shm_toc *toc;
+ shm_toc *toc;
shm_mq_handle *inqh;
shm_mq_handle *outqh;
volatile test_shm_mq_header *hdr;
/*
* Establish signal handlers.
*
- * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just
- * as it would a normal user backend. To make that happen, we establish
- * a signal handler that is a stripped-down version of die(). We don't
- * have any equivalent of the backend's command-read loop, where interrupts
- * can be processed immediately, so make sure ImmediateInterruptOK is
- * turned off.
+ * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just as
+ * it would a normal user backend. To make that happen, we establish a
+ * signal handler that is a stripped-down version of die(). We don't have
+ * any equivalent of the backend's command-read loop, where interrupts can
+ * be processed immediately, so make sure ImmediateInterruptOK is turned
+ * off.
*/
pqsignal(SIGTERM, handle_sigterm);
ImmediateInterruptOK = false;
* memory segment to which we must attach for further instructions. In
* order to attach to dynamic shared memory, we need a resource owner.
* Once we've mapped the segment in our address space, attach to the table
- * of contents so we can locate the various data structures we'll need
- * to find within the segment.
+ * of contents so we can locate the various data structures we'll need to
+ * find within the segment.
*/
CurrentResourceOwner = ResourceOwnerCreate(NULL, "test_shm_mq worker");
seg = dsm_attach(DatumGetInt32(main_arg));
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("bad magic number in dynamic shared memory segment")));
+ errmsg("bad magic number in dynamic shared memory segment")));
/*
* Acquire a worker number.
attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh);
/*
- * Indicate that we're fully initialized and ready to begin the main
- * part of the parallel operation.
+ * Indicate that we're fully initialized and ready to begin the main part
+ * of the parallel operation.
*
* Once we signal that we're ready, the user backend is entitled to assume
* that our on_dsm_detach callbacks will fire before we disconnect from
initialize_worker_spi(table);
/*
- * Quote identifiers passed to us. Note that this must be done after
+ * Quote identifiers passed to us. Note that this must be done after
* initialize_worker_spi, because that routine assumes the names are not
* quoted.
*
* StartTransactionCommand() call should be preceded by a
* SetCurrentStatementStartTimestamp() call, which sets both the time
* for the statement we're about the run, and also the transaction
- * start time. Also, each other query sent to SPI should probably be
+ * start time. Also, each other query sent to SPI should probably be
* preceded by SetCurrentStatementStartTimestamp(), so that statement
* start time is always up to date.
*
int32 i = PG_GETARG_INT32(0);
BackgroundWorker worker;
BackgroundWorkerHandle *handle;
- BgwHandleStatus status;
+ BgwHandleStatus status;
pid_t pid;
worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("could not start background process"),
- errhint("More details may be available in the server log.")));
+ errhint("More details may be available in the server log.")));
if (status == BGWH_POSTMASTER_DIED)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
- errmsg("cannot start background processes without postmaster"),
+ errmsg("cannot start background processes without postmaster"),
errhint("Kill all remaining database processes and restart the database.")));
Assert(status == BGWH_STARTED);
/*
* Clear the values array, so that not-well-formed documents
- * return NULL in all columns. Note that this also means that
+ * return NULL in all columns. Note that this also means that
* spare columns will be NULL.
*/
for (j = 0; j < ret_tupdesc->natts; j++)
* tuptoaster.c.
*
* This change will break any code that assumes it needn't detoast values
- * that have been put into a tuple but never sent to disk. Hopefully there
+ * that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
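/*
 * A standalone sketch of the "align, then advance" walk described above:
 * before each attribute we round the running offset up to that attribute's
 * alignment, then step over its length.  The widths and alignments below
 * are made-up fixed-length attributes, not a real tuple descriptor.
 */
#include <stdio.h>

#define ALIGN_UP(off, align)    (((off) + (align) - 1) & ~((align) - 1))

int
main(void)
{
    /* e.g. a bool (1,1), an int4 (4,4), an int8 (8,8), a "char" (1,1) */
    int         attlen[] = {1, 4, 8, 1};
    int         attalign[] = {1, 4, 8, 1};
    int         natts = 4;
    int         off = 0;
    int         i;

    for (i = 0; i < natts; i++)
    {
        off = ALIGN_UP(off, attalign[i]);   /* alignment padding */
        printf("attr %d starts at offset %d\n", i + 1, off);
        off += attlen[i];                   /* advance over the attr */
    }
    printf("data area ends at offset %d\n", off);
    return 0;
}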
/*
* cmin and cmax are now both aliases for the same field, which
- * can in fact also be a combo command id. XXX perhaps we should
+ * can in fact also be a combo command id. XXX perhaps we should
* return the "real" cmin or cmax if possible, that is if we are
* inside the originating transaction?
*/
len += data_len;
/*
- * Allocate and zero the space needed. Note that the tuple body and
+ * Allocate and zero the space needed. Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
/*
* If value is stored EXTERNAL, must fetch it so we are not depending
- * on outside storage. This should be improved someday.
+ * on outside storage. This should be improved someday.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
{
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/
* Add a new string reloption
*
* "validator" is an optional function pointer that can be used to test the
- * validity of the values. It must elog(ERROR) when the argument string is
+ * validity of the values. It must elog(ERROR) when the argument string is
* not acceptable for the variable. Note that the default value must pass
* the validation.
*/
* is returned.
*
* Note: values of type int, bool and real are allocated as part of the
- * returned array. Values of type string are allocated separately and must
+ * returned array. Values of type string are allocated separately and must
* be freed by the caller.
*/
relopt_value *
{"check_option", RELOPT_TYPE_STRING,
offsetof(StdRdOptions, check_option_offset)},
{"user_catalog_table", RELOPT_TYPE_BOOL,
- offsetof(StdRdOptions, user_catalog_table)}
+ offsetof(StdRdOptions, user_catalog_table)}
};
options = parseRelOptions(reloptions, validate, kind, &numoptions);
*
* These functions provide conversion between rowtypes that are logically
* equivalent but might have columns in a different order or different sets
- * of dropped columns. There is some overlap of functionality with the
+ * of dropped columns. There is some overlap of functionality with the
* executor's "junkfilter" routines, but these functions work on bare
* HeapTuples rather than TupleTableSlots.
*
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
*
* Note: the default assumption is no OIDs; caller may modify the returned
- * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
+ * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
* later on.
*/
TupleDesc
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare and
+ * against nulls here. This is because array_contain_compare and
* array_eq handle nulls differently ...
*/
res = true;
res = GIN_MAYBE;
break;
case GinEqualStrategy:
+
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare and
+ * against nulls here. This is because array_contain_compare and
* array_eq handle nulls differently ...
*/
res = GIN_MAYBE;
Assert(blkno != btree->rootBlkno);
ptr->blkno = blkno;
ptr->buffer = buffer;
+
/*
* parent may be wrong, but if so, the ginFinishSplit call will
* recurse to call ginFindParents again to fix it.
GinPlaceToPageRC rc;
uint16 xlflags = 0;
Page childpage = NULL;
- Page newlpage = NULL, newrpage = NULL;
+ Page newlpage = NULL,
+ newrpage = NULL;
if (GinPageIsData(page))
xlflags |= GIN_INSERT_ISDATA;
}
/*
- * Try to put the incoming tuple on the page. placeToPage will decide
- * if the page needs to be split.
+ * Try to put the incoming tuple on the page. placeToPage will decide if
+ * the page needs to be split.
*/
rc = btree->placeToPage(btree, stack->buffer, stack,
insertdata, updateblkno,
XLogRecPtr recptr;
XLogRecData rdata[3];
ginxlogInsert xlrec;
- BlockIdData childblknos[2];
+ BlockIdData childblknos[2];
xlrec.node = btree->index->rd_node;
xlrec.blkno = BufferGetBlockNumber(stack->buffer);
data.flags = xlflags;
if (childbuf != InvalidBuffer)
{
- Page childpage = BufferGetPage(childbuf);
+ Page childpage = BufferGetPage(childbuf);
+
GinPageGetOpaque(childpage)->flags &= ~GIN_INCOMPLETE_SPLIT;
data.leftChildBlkno = BufferGetBlockNumber(childbuf);
/*
* Construct a new root page containing downlinks to the new left
- * and right pages. (do this in a temporary copy first rather
- * than overwriting the original page directly, so that we can still
+ * and right pages. (do this in a temporary copy first rather than
+ * overwriting the original page directly, so that we can still
* abort gracefully if this fails.)
*/
newrootpg = PageGetTempPage(newrpage);
else
{
elog(ERROR, "unknown return code from GIN placeToPage method: %d", rc);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
}
bool first = true;
/*
- * freestack == false when we encounter an incompletely split page during a
- * scan, while freestack == true is used in the normal scenario that a
+ * freestack == false when we encounter an incompletely split page during
+ * a scan, while freestack == true is used in the normal scenario that a
* split is finished right after the initial insert.
*/
if (!freestack)
* then continue with the current one.
*
* Note: we have to finish *all* incomplete splits we encounter, even
- * if we have to move right. Otherwise we might choose as the target
- * a page that has no downlink in the parent, and splitting it further
+ * if we have to move right. Otherwise we might choose as the target a
+ * page that has no downlink in the parent, and splitting it further
* would fail.
*/
if (GinPageIsIncompleteSplit(BufferGetPage(parent->buffer)))
* Since the entries are being inserted into a balanced binary tree, you
* might think that the order of insertion wouldn't be critical, but it turns
* out that inserting the entries in sorted order results in a lot of
- * rebalancing operations and is slow. To prevent this, we attempt to insert
+ * rebalancing operations and is slow. To prevent this, we attempt to insert
* the nodes in an order that will produce a nearly-balanced tree if the input
* is in fact sorted.
*
dlist_head segments; /* a list of leafSegmentInfos */
/*
- * The following fields represent how the segments are split across
- * pages, if a page split is required. Filled in by leafRepackItems.
+ * The following fields represent how the segments are split across pages,
+ * if a page split is required. Filled in by leafRepackItems.
*/
dlist_node *lastleft; /* last segment on left page */
int lsize; /* total size on left page */
typedef struct
{
- dlist_node node; /* linked list pointers */
+ dlist_node node; /* linked list pointers */
/*-------------
* 'action' indicates the status of this in-memory segment, compared to
int nmodifieditems;
/*
- * The following fields represent the items in this segment. If 'items'
- * is not NULL, it contains a palloc'd array of the itemsin this segment.
- * If 'seg' is not NULL, it contains the items in an already-compressed
+ * The following fields represent the items in this segment. If 'items' is
+ * not NULL, it contains a palloc'd array of the items in this segment. If
+ * 'seg' is not NULL, it contains the items in an already-compressed
* format. It can point to an on-disk page (!modified), or a palloc'd
* segment in memory. If both are set, they must represent the same items.
*/
if (offset != maxoff + 1)
memmove(ptr + sizeof(PostingItem),
ptr,
- (maxoff - offset + 1) * sizeof(PostingItem));
+ (maxoff - offset + 1) *sizeof(PostingItem));
}
memcpy(ptr, data, sizeof(PostingItem));
int maxitems = items->nitem - items->curitem;
Page page = BufferGetPage(buf);
int i;
- ItemPointerData rbound;
- ItemPointerData lbound;
+ ItemPointerData rbound;
+ ItemPointerData lbound;
bool needsplit;
bool append;
int segsize;
Assert(GinPageIsData(page));
- rbound = *GinDataPageGetRightBound(page);
+ rbound = *GinDataPageGetRightBound(page);
/*
* Count how many of the new items belong to this page.
{
/*
* This needs to go to some other location in the tree. (The
- * caller should've chosen the insert location so that at least
- * the first item goes here.)
+ * caller should've chosen the insert location so that at
+ * least the first item goes here.)
*/
Assert(i > 0);
break;
/* Add the new items to the segments */
if (!addItemsToLeaf(leaf, newItems, maxitems))
{
- /* all items were duplicates, we have nothing to do */
+ /* all items were duplicates, we have nothing to do */
items->curitem += maxitems;
MemoryContextSwitchTo(oldCxt);
Assert(GinPageRightMost(page) ||
ginCompareItemPointers(GinDataPageGetRightBound(*newlpage),
- GinDataPageGetRightBound(*newrpage)) < 0);
+ GinDataPageGetRightBound(*newrpage)) < 0);
if (append)
elog(DEBUG2, "appended %d items to block %u; split %d/%d (%d to go)",
* We don't try to re-encode the segments here, even though some of them
* might be really small now that we've removed some items from them. It
* seems like a waste of effort, as there isn't really any benefit from
- * larger segments per se; larger segments only help to pack more items
- * in the same space. We might as well delay doing that until the next
+ * larger segments per se; larger segments only help to pack more items in
+ * the same space. We might as well delay doing that until the next
* insertion, which will need to re-encode at least part of the page
* anyway.
*
- * Also note if the page was in uncompressed, pre-9.4 format before, it
- * is now represented as one huge segment that contains all the items.
- * It might make sense to split that, to speed up random access, but we
- * don't bother. You'll have to REINDEX anyway if you want the full gain
- * of the new tighter index format.
+ * Also note if the page was in uncompressed, pre-9.4 format before, it is
+ * now represented as one huge segment that contains all the items. It
+ * might make sense to split that, to speed up random access, but we don't
+ * bother. You'll have to REINDEX anyway if you want the full gain of the
+ * new tighter index format.
*/
if (removedsomething)
{
{
leafSegmentInfo *seginfo = dlist_container(leafSegmentInfo, node,
iter.cur);
+
if (seginfo->action != GIN_SEGMENT_UNMODIFIED)
modified = true;
if (modified && seginfo->action != GIN_SEGMENT_DELETE)
}
walbufbegin = palloc(
- sizeof(ginxlogRecompressDataLeaf) +
- BLCKSZ + /* max size needed to hold the segment data */
- nmodified * 2 + /* (segno + action) per action */
- sizeof(XLogRecData));
+ sizeof(ginxlogRecompressDataLeaf) +
+ BLCKSZ + /* max size needed to hold the segment
+ * data */
+ nmodified * 2 + /* (segno + action) per action */
+ sizeof(XLogRecData));
walbufend = walbufbegin;
recompress_xlog = (ginxlogRecompressDataLeaf *) walbufend;
int segsize;
/*
- * If the page was in pre-9.4 format before, convert the header, and
- * force all segments to be copied to the page whether they were modified
- * or not.
+ * If the page was in pre-9.4 format before, convert the header, and force
+ * all segments to be copied to the page whether they were modified or
+ * not.
*/
if (!GinPageIsCompressed(page))
{
dlist_node *node;
dlist_node *firstright;
leafSegmentInfo *seginfo;
+
/* these must be static so they can be returned to caller */
static ginxlogSplitDataLeaf split_xlog;
static XLogRecData rdata[3];
Page page = BufferGetPage(buf);
OffsetNumber off = stack->off;
PostingItem *pitem;
+
/* these must be static so they can be returned to caller */
static XLogRecData rdata;
static ginxlogInsertDataInternal data;
int nrightitems;
Size pageSize = PageGetPageSize(oldpage);
ItemPointerData oldbound = *GinDataPageGetRightBound(oldpage);
- ItemPointer bound;
+ ItemPointer bound;
Page lpage;
Page rpage;
OffsetNumber separator;
*prdata = rdata;
/*
- * First construct a new list of PostingItems, which includes all the
- * old items, and the new item.
+ * First construct a new list of PostingItems, which includes all the old
+ * items, and the new item.
*/
memcpy(allitems, GinDataPageGetPostingItem(oldpage, FirstOffsetNumber),
(off - 1) * sizeof(PostingItem));
leafSegmentInfo *newseg;
/*
- * If the page is completely empty, just construct one new segment to
- * hold all the new items.
+ * If the page is completely empty, just construct one new segment to hold
+ * all the new items.
*/
if (dlist_is_empty(&leaf->segments))
{
dlist_foreach(iter, &leaf->segments)
{
- leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur);
+ leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur);
int nthis;
- ItemPointer tmpitems;
+ ItemPointer tmpitems;
int ntmpitems;
/*
ItemPointerData next_first;
next = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node,
- dlist_next_node(&leaf->segments, iter.cur));
+ dlist_next_node(&leaf->segments, iter.cur));
if (next->items)
next_first = next->items[0];
else
if (seginfo->seg == NULL)
{
if (seginfo->nitems > GinPostingListSegmentMaxSize)
- npacked = 0; /* no chance that it would fit. */
+ npacked = 0; /* no chance that it would fit. */
else
{
seginfo->seg = ginCompressPostingList(seginfo->items,
seginfo->nitems,
- GinPostingListSegmentMaxSize,
+ GinPostingListSegmentMaxSize,
&npacked);
}
if (npacked != seginfo->nitems)
{
/*
- * Too large. Compress again to the target size, and create
- * a new segment to represent the remaining items. The new
- * segment is inserted after this one, so it will be
- * processed in the next iteration of this loop.
+ * Too large. Compress again to the target size, and
+ * create a new segment to represent the remaining items.
+ * The new segment is inserted after this one, so it will
+ * be processed in the next iteration of this loop.
*/
if (seginfo->seg)
pfree(seginfo->seg);
seginfo->seg = ginCompressPostingList(seginfo->items,
seginfo->nitems,
- GinPostingListSegmentTargetSize,
+ GinPostingListSegmentTargetSize,
&npacked);
if (seginfo->action != GIN_SEGMENT_INSERT)
seginfo->action = GIN_SEGMENT_REPLACE;
*/
if (SizeOfGinPostingList(seginfo->seg) < GinPostingListSegmentMinSize && next_node)
{
- int nmerged;
+ int nmerged;
nextseg = dlist_container(leafSegmentInfo, node, next_node);
GinPageGetOpaque(tmppage)->rightlink = InvalidBlockNumber;
/*
- * Write as many of the items to the root page as fit. In segments
- * of max GinPostingListSegmentMaxSize bytes each.
+ * Write as many of the items to the root page as fit. In segments of max
+ * GinPostingListSegmentMaxSize bytes each.
*/
nrootitems = 0;
rootsize = 0;
*/
if (data)
{
- char *ptr = GinGetPosting(itup);
+ char *ptr = GinGetPosting(itup);
+
memcpy(ptr, data, dataSize);
}
{
Pointer ptr = GinGetPosting(itup);
int nipd = GinGetNPosting(itup);
- ItemPointer ipd;
+ ItemPointer ipd;
int ndecoded;
if (GinItupIsCompressed(itup))
* Form a non-leaf entry tuple by copying the key data from the given tuple,
* which can be either a leaf or non-leaf entry tuple.
*
- * Any posting list in the source tuple is not copied. The specified child
+ * Any posting list in the source tuple is not copied. The specified child
* block number is inserted into t_tid.
*/
static IndexTuple
* Create temporary index tuples for a single indexable item (one index column
* for the heap tuple specified by ht_ctid), and append them to the array
* in *collector. They will subsequently be written out using
- * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
+ * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
* temp tuples for a given heap tuple must be written in one call to
* ginHeapTupleFastInsert.
*/
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
- * either. The reason it's okay is that multiple insertion of the same entry
+ * either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
LockBuffer(metabuffer, GIN_UNLOCK);
/*
- * Initialize. All temporary space will be in opCtx
+ * Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
/*
* While we left the page unlocked, more stuff might have gotten
- * added to it. If so, process those entries immediately. There
+ * added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
* guarantees that inserted row(s) will not continue on next page.
page = BufferGetPage(buffer);
if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0)
{
- int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap);
+ int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap);
+
scanEntry->predictNumberResult += n;
}
/*
* Collects TIDs into scanEntry->matchBitmap for all heap tuples that
- * match the search entry. This supports three different match modes:
+ * match the search entry. This supports three different match modes:
*
* 1. Partial-match support: scan from current point until the
* comparePartialFn says we're done.
/*
* In ALL mode, we are not interested in null items, so we can
* stop if we get to a null-item placeholder (which will be the
- * last entry for a given attnum). We do want to include NULL_KEY
+ * last entry for a given attnum). We do want to include NULL_KEY
* and EMPTY_ITEM entries, though.
*/
if (icategory == GIN_CAT_NULL_ITEM)
else if (GinGetNPosting(itup) > 0)
{
entry->list = ginReadTuple(ginstate, entry->attnum, itup,
- &entry->nlist);
+ &entry->nlist);
entry->predictNumberResult = entry->nlist;
entry->isFinished = FALSE;
* considerably, if the frequent term can be put in the additional set.
*
* There can be many legal ways to divide them entries into these two
- * sets. A conservative division is to just put everything in the
- * required set, but the more you can put in the additional set, the more
- * you can skip during the scan. To maximize skipping, we try to put as
- * many frequent items as possible into additional, and less frequent
- * ones into required. To do that, sort the entries by frequency
+ * sets. A conservative division is to just put everything in the required
+ * set, but the more you can put in the additional set, the more you can
+ * skip during the scan. To maximize skipping, we try to put as many
+ * frequent items as possible into additional, and less frequent ones into
+ * required. To do that, sort the entries by frequency
* (predictNumberResult), and put entries into the required set in that
* order, until the consistent function says that none of the remaining
* entries can form a match, without any items from the required set. The
if (stepright)
{
/*
- * We've processed all the entries on this page. If it was the last
- * page in the tree, we're done.
+ * We've processed all the entries on this page. If it was the
+ * last page in the tree, we're done.
*/
if (GinPageRightMost(page))
{
}
/*
- * Step to next page, following the right link. then find the first
- * ItemPointer greater than advancePast.
+ * Step to next page, following the right link; then find the
+ * first ItemPointer greater than advancePast.
*/
entry->buffer = ginStepRight(entry->buffer,
ginstate->index,
stepright = true;
if (GinPageGetOpaque(page)->flags & GIN_DELETED)
- continue; /* page was deleted by concurrent vacuum */
+ continue; /* page was deleted by concurrent vacuum */
/*
* The first item > advancePast might not be on this page, but
gotitem = true;
break;
}
+
/*
* Not a lossy page. Skip over any offsets <= advancePast, and
* return that.
if (entry->matchResult->blockno == advancePastBlk)
{
/*
- * First, do a quick check against the last offset on the page.
- * If that's > advancePast, so are all the other offsets.
+ * First, do a quick check against the last offset on the
+ * page. If that's > advancePast, so are all the other
+ * offsets.
*/
if (entry->matchResult->offsets[entry->matchResult->ntuples - 1] <= advancePastOff)
{
/*
* We might have already tested this item; if so, no need to repeat work.
- * (Note: the ">" case can happen, if advancePast is exact but we previously
- * had to set curItem to a lossy-page pointer.)
+ * (Note: the ">" case can happen, if advancePast is exact but we
+ * previously had to set curItem to a lossy-page pointer.)
*/
if (ginCompareItemPointers(&key->curItem, &advancePast) > 0)
return;
/*
* Ok, we now know that there are no matches < minItem.
*
- * If minItem is lossy, it means that there were no exact items on
- * the page among requiredEntries, because lossy pointers sort after exact
+ * If minItem is lossy, it means that there were no exact items on the
+ * page among requiredEntries, because lossy pointers sort after exact
* items. However, there might be exact items for the same page among
* additionalEntries, so we mustn't advance past them.
*/
if (entry->isFinished)
key->entryRes[i] = GIN_FALSE;
#if 0
+
/*
* This case can't currently happen, because we loaded all the entries
* for this item earlier.
break;
default:
+
/*
* the 'default' case shouldn't happen, but if the consistent
* function returns something bogus, this is the safe result
}
/*
- * We have a tuple, and we know if it matches or not. If it's a
- * non-match, we could continue to find the next matching tuple, but
- * let's break out and give scanGetItem a chance to advance the other
- * keys. They might be able to skip past to a much higher TID, allowing
- * us to save work.
+ * We have a tuple, and we know if it matches or not. If it's a non-match,
+ * we could continue to find the next matching tuple, but let's break out
+ * and give scanGetItem a chance to advance the other keys. They might be
+ * able to skip past to a much higher TID, allowing us to save work.
*/
/* clean up after consistentFn calls */
* matching item.
*
* This logic works only if a keyGetItem stream can never contain both
- * exact and lossy pointers for the same page. Else we could have a
+ * exact and lossy pointers for the same page. Else we could have a
* case like
*
* stream 1 stream 2
- * ... ...
+ * ... ...
* 42/6 42/7
* 50/1 42/0xffff
- * ... ...
+ * ... ...
*
* We would conclude that 42/6 is not a match and advance stream 1,
* thus never detecting the match to the lossy pointer in stream 2.
}
/*
- * It's a match. We can conclude that nothing < matches, so
- * the other key streams can skip to this item.
+ * It's a match. We can conclude that nothing < matches, so the
+ * other key streams can skip to this item.
*
- * Beware of lossy pointers, though; from a lossy pointer, we
- * can only conclude that nothing smaller than this *block*
- * matches.
+ * Beware of lossy pointers, though; from a lossy pointer, we can
+ * only conclude that nothing smaller than this *block* matches.
*/
if (ItemPointerIsLossyPage(&key->curItem))
{
}
/*
- * If this is the first key, remember this location as a
- * potential match, and proceed to check the rest of the keys.
+ * If this is the first key, remember this location as a potential
+ * match, and proceed to check the rest of the keys.
*
* Otherwise, check if this is the same item that we checked the
* previous keys for (or a lossy pointer for the same page). If
if (ItemPointerIsLossyPage(&key->curItem) ||
ItemPointerIsLossyPage(item))
{
- Assert (GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item));
+ Assert(GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item));
match = (GinItemPointerGetBlockNumber(&key->curItem) ==
GinItemPointerGetBlockNumber(item));
}
/*
* Now *item contains the first ItemPointer after previous result that
- * satisfied all the keys for that exact TID, or a lossy reference
- * to the same page.
+ * satisfied all the keys for that exact TID, or a lossy reference to the
+ * same page.
*
* We must return recheck = true if any of the keys are marked recheck.
*/
/*
* First, scan the pending list and collect any matching entries into the
- * bitmap. After we scan a pending item, some other backend could post it
+ * bitmap. After we scan a pending item, some other backend could post it
* into the main index, and so we might visit it a second time during the
* main scan. This is okay because we'll just re-set the same bit in the
- * bitmap. (The possibility of duplicate visits is a major reason why GIN
+ * bitmap. (The possibility of duplicate visits is a major reason why GIN
* can't support the amgettuple API, however.) Note that it would not do
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.
* Adds array of item pointers to tuple's posting list, or
* creates posting tree and tuple pointing to tree in case
* of not enough space. Max size of tuple is defined in
- * GinFormTuple(). Returns a new, modified index tuple.
+ * GinFormTuple(). Returns a new, modified index tuple.
* items[] must be in sorted order with no duplicates.
*/
static IndexTuple
* Maximum number of MAYBE inputs that shimTriConsistentFn will try to
* resolve by calling all combinations.
*/
-#define MAX_MAYBE_ENTRIES 4
+#define MAX_MAYBE_ENTRIES 4
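/*
 * A standalone sketch of the shim idea above: when some inputs are MAYBE,
 * try every TRUE/FALSE assignment for them using a boolean-only check; if
 * all assignments agree we can return that answer, otherwise the result is
 * MAYBE.  The toy check "at least two entries are true" stands in for an
 * opclass consistent function, and the real shim only expands up to
 * MAX_MAYBE_ENTRIES such inputs.
 */
#include <stdio.h>

enum ternary {T_FALSE = 0, T_TRUE = 1, T_MAYBE = 2};

static int
bool_check(const int *entries, int n)
{
    int         i,
                ntrue = 0;

    for (i = 0; i < n; i++)
        ntrue += entries[i];
    return ntrue >= 2;          /* toy consistent function */
}

static enum ternary
tri_check(const enum ternary *in, int n)
{
    int         entries[32];    /* assumes n <= 32 for this sketch */
    int         maybe_idx[32];
    int         nmaybe = 0;
    int         i;
    unsigned    mask;
    int         first = -1;

    for (i = 0; i < n; i++)
    {
        entries[i] = (in[i] == T_TRUE);
        if (in[i] == T_MAYBE)
            maybe_idx[nmaybe++] = i;
    }
    /* try all 2^nmaybe assignments of the MAYBE inputs */
    for (mask = 0; mask < (1u << nmaybe); mask++)
    {
        int         res;

        for (i = 0; i < nmaybe; i++)
            entries[maybe_idx[i]] = (mask >> i) & 1;
        res = bool_check(entries, n);
        if (first < 0)
            first = res;
        else if (res != first)
            return T_MAYBE;     /* the assignments disagree */
    }
    return first ? T_TRUE : T_FALSE;
}

int
main(void)
{
    enum ternary in[] = {T_TRUE, T_MAYBE, T_FALSE};

    printf("result = %d (0=false, 1=true, 2=maybe)\n", tri_check(in, 3));
    return 0;
}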
/*
* Dummy consistent functions for an EVERYTHING key. Just claim it matches.
directTriConsistentFn(GinScanKey key)
{
return DatumGetGinTernaryValue(FunctionCall7Coll(
- key->triConsistentFmgrInfo,
- key->collation,
- PointerGetDatum(key->entryRes),
- UInt16GetDatum(key->strategy),
- key->query,
- UInt32GetDatum(key->nuserentries),
- PointerGetDatum(key->extra_data),
- PointerGetDatum(key->queryValues),
+ key->triConsistentFmgrInfo,
+ key->collation,
+ PointerGetDatum(key->entryRes),
+ UInt16GetDatum(key->strategy),
+ key->query,
+ UInt32GetDatum(key->nuserentries),
+ PointerGetDatum(key->extra_data),
+ PointerGetDatum(key->queryValues),
PointerGetDatum(key->queryCategories)));
}
shimBoolConsistentFn(GinScanKey key)
{
GinTernaryValue result;
+
result = DatumGetGinTernaryValue(FunctionCall7Coll(
- key->triConsistentFmgrInfo,
- key->collation,
- PointerGetDatum(key->entryRes),
- UInt16GetDatum(key->strategy),
- key->query,
- UInt32GetDatum(key->nuserentries),
- PointerGetDatum(key->extra_data),
- PointerGetDatum(key->queryValues),
+ key->triConsistentFmgrInfo,
+ key->collation,
+ PointerGetDatum(key->entryRes),
+ UInt16GetDatum(key->strategy),
+ key->query,
+ UInt32GetDatum(key->nuserentries),
+ PointerGetDatum(key->extra_data),
+ PointerGetDatum(key->queryValues),
PointerGetDatum(key->queryCategories)));
if (result == GIN_MAYBE)
{
key->boolConsistentFn = shimBoolConsistentFn;
if (OidIsValid(ginstate->triConsistentFn[key->attnum - 1].fn_oid))
- key->triConsistentFn = directTriConsistentFn;
+ key->triConsistentFn = directTriConsistentFn;
else
- key->triConsistentFn = shimTriConsistentFn;
+ key->triConsistentFn = shimTriConsistentFn;
}
}
static uint64
decode_varbyte(unsigned char **ptr)
{
- uint64 val;
+ uint64 val;
unsigned char *p = *ptr;
- uint64 c;
+ uint64 c;
c = *(p++);
val = c & 0x7F;
uint64 val = itemptr_to_uint64(&ipd[totalpacked]);
uint64 delta = val - prev;
- Assert (val > prev);
+ Assert(val > prev);
if (endptr - ptr >= 6)
encode_varbyte(delta, &ptr);
encode_varbyte(delta, &p);
if (p - buf > (endptr - ptr))
- break; /* output is full */
+ break; /* output is full */
memcpy(ptr, buf, p - buf);
ptr += (p - buf);
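/*
 * A standalone sketch of the variable-byte coding used for the item-pointer
 * deltas above: seven payload bits per byte, with the high bit set on every
 * byte except the last of a value.  The real posting-list code packs deltas
 * of 48-bit item pointers into on-disk segments; here we just code plain
 * 64-bit deltas in a local buffer.
 */
#include <stdio.h>
#include <stdint.h>

static void
encode_varbyte(uint64_t val, unsigned char **ptr)
{
    unsigned char *p = *ptr;

    while (val > 0x7F)
    {
        *(p++) = 0x80 | (val & 0x7F);   /* continuation byte */
        val >>= 7;
    }
    *(p++) = (unsigned char) val;       /* final byte, high bit clear */
    *ptr = p;
}

static uint64_t
decode_varbyte(unsigned char **ptr)
{
    unsigned char *p = *ptr;
    uint64_t    val = 0;
    uint64_t    c;
    int         shift = 0;

    do
    {
        c = *(p++);
        val |= (c & 0x7F) << shift;
        shift += 7;
    } while (c & 0x80);
    *ptr = p;
    return val;
}

int
main(void)
{
    unsigned char buf[64];
    unsigned char *wptr = buf;
    unsigned char *rptr = buf;
    uint64_t    deltas[] = {1, 127, 128, 300000};
    int         i;

    for (i = 0; i < 4; i++)
        encode_varbyte(deltas[i], &wptr);
    printf("encoded 4 deltas into %d bytes\n", (int) (wptr - buf));
    for (i = 0; i < 4; i++)
        printf("decoded: %llu\n", (unsigned long long) decode_varbyte(&rptr));
    return 0;
}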
ItemPointer
ginPostingListDecodeAllSegments(GinPostingList *segment, int len, int *ndecoded_out)
{
- ItemPointer result;
+ ItemPointer result;
int nallocated;
uint64 val;
char *endseg = ((char *) segment) + len;
TIDBitmap *tbm)
{
int ndecoded;
- ItemPointer items;
+ ItemPointer items;
items = ginPostingListDecodeAllSegments(ptr, len, &ndecoded);
tbm_add_tuples(tbm, items, ndecoded, false);
dst = (ItemPointer) palloc((na + nb) * sizeof(ItemPointerData));
/*
- * If the argument arrays don't overlap, we can just append them to
- * each other.
+ * If the argument arrays don't overlap, we can just append them to each
+ * other.
*/
if (na == 0 || nb == 0 || ginCompareItemPointers(&a[na - 1], &b[0]) < 0)
{
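/*
 * A standalone sketch of the merge described above: if the two sorted
 * inputs don't overlap, simply concatenate them; otherwise do an ordinary
 * merge (dropping exact duplicates here).  Plain ints stand in for
 * ItemPointers in this illustration.
 */
#include <stdio.h>
#include <string.h>

static int
merge_sorted(const int *a, int na, const int *b, int nb, int *dst)
{
    int         i = 0,
                j = 0,
                n = 0;

    /* fast path: inputs don't overlap, append one after the other */
    if (na == 0 || nb == 0 || a[na - 1] < b[0])
    {
        memcpy(dst, a, na * sizeof(int));
        memcpy(dst + na, b, nb * sizeof(int));
        return na + nb;
    }

    /* general case: merge, keeping only one copy of equal values */
    while (i < na && j < nb)
    {
        if (a[i] < b[j])
            dst[n++] = a[i++];
        else if (a[i] > b[j])
            dst[n++] = b[j++];
        else
        {
            dst[n++] = a[i++];
            j++;
        }
    }
    while (i < na)
        dst[n++] = a[i++];
    while (j < nb)
        dst[n++] = b[j++];
    return n;
}

int
main(void)
{
    int         a[] = {1, 3, 5};
    int         b[] = {2, 3, 8};
    int         dst[6];
    int         n = merge_sorted(a, 3, b, 3, dst);
    int         k;

    for (k = 0; k < n; k++)
        printf("%d ", dst[k]);
    printf("\n");               /* 1 2 3 5 8 */
    return 0;
}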
/*
* If the index is version 0, it may be missing null and placeholder
* entries, which would render searches for nulls and full-index scans
- * unreliable. Throw an error if so.
+ * unreliable. Throw an error if so.
*/
if (hasNullQuery && !so->isVoidRes)
{
fmgr_info_copy(&(state->extractQueryFn[i]),
index_getprocinfo(index, i + 1, GIN_EXTRACTQUERY_PROC),
CurrentMemoryContext);
+
/*
* Check opclass capability to do tri-state or binary logic consistent
* check.
if (index_getprocid(index, i + 1, GIN_TRICONSISTENT_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->triConsistentFn[i]),
- index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC),
+ index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC),
CurrentMemoryContext);
}
if (index_getprocid(index, i + 1, GIN_CONSISTENT_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->consistentFn[i]),
- index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC),
+ index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC),
CurrentMemoryContext);
}
* If there's more than one key, sort and unique-ify.
*
* XXX Using qsort here is notationally painful, and the overhead is
- * pretty bad too. For small numbers of keys it'd likely be better to use
+ * pretty bad too. For small numbers of keys it'd likely be better to use
* a simple insertion sort.
*/
if (*nentries > 1)
{
int i,
remaining = 0;
- ItemPointer tmpitems = NULL;
+ ItemPointer tmpitems = NULL;
/*
* Iterate over TIDs array
}
/*
- * if we have root and there are empty pages in tree, then we don't release
- * lock to go further processing and guarantee that tree is unused
+ * if we have the root and there are empty pages in the tree, we don't
+ * release the lock before processing further, so the tree stays unused
*/
if (!(isRoot && hasVoidPage))
{
Buffer pBuffer;
Page page,
parentPage;
- BlockNumber rightlink;
+ BlockNumber rightlink;
/*
* Lock the pages in the same order as an insertion would, to avoid
data.rightLink = GinPageGetOpaque(page)->rightlink;
/*
- * We can't pass buffer_std = TRUE, because we didn't set pd_lower
- * on pre-9.4 versions. The page might've been binary-upgraded from
- * an older version, and hence not have pd_lower set correctly.
- * Ditto for the left page, but removing the item from the parent
- * updated its pd_lower, so we know that's OK at this point.
+ * We can't pass buffer_std = TRUE, because we didn't set pd_lower on
+ * pre-9.4 versions. The page might've been binary-upgraded from an
+ * older version, and hence not have pd_lower set correctly. Ditto for
+ * the left page, but removing the item from the parent updated its
+ * pd_lower, so we know that's OK at this point.
*/
rdata[0].buffer = dBuffer;
rdata[0].buffer_std = FALSE;
}
/*
- * if we already created a temporary page, make changes in place
+ * if we already created a temporary page, make changes in
+ * place
*/
if (tmppage == origpage)
{
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), offset, false, false) == InvalidOffsetNumber)
{
RelFileNode node;
- ForkNumber forknum;
+ ForkNumber forknum;
BlockNumber blknum;
BufferGetTag(buffer, &node, &forknum, &blknum);
payload = XLogRecGetData(record) + sizeof(ginxlogInsert);
/*
- * First clear incomplete-split flag on child page if this finishes
- * a split.
+ * First clear incomplete-split flag on child page if this finishes a
+ * split.
*/
if (!isLeaf)
{
payload = XLogRecGetData(record) + sizeof(ginxlogSplit);
/*
- * First clear incomplete-split flag on child page if this finishes
- * a split
+ * First clear incomplete-split flag on child page if this finishes a
+ * split
*/
if (!isLeaf)
{
if (isRoot)
{
- BlockNumber rootBlkno = data->rrlink;
+ BlockNumber rootBlkno = data->rrlink;
Buffer rootBuf = XLogReadBuffer(data->node, rootBlkno, true);
Page rootPage = BufferGetPage(rootBuf);
Buffer buffer;
/*
- * Restore the metapage. This is essentially the same as a full-page image,
- * so restore the metapage unconditionally without looking at the LSN, to
- * avoid torn page hazards.
+ * Restore the metapage. This is essentially the same as a full-page
+ * image, so restore the metapage unconditionally without looking at the
+ * LSN, to avoid torn page hazards.
*/
metabuffer = XLogReadBuffer(data->node, GIN_METAPAGE_BLKNO, false);
if (!BufferIsValid(metabuffer))
/*
* In normal operation, shiftList() takes exclusive lock on all the
- * pages-to-be-deleted simultaneously. During replay, however, it should
+ * pages-to-be-deleted simultaneously. During replay, however, it should
* be all right to lock them one at a time. This is dependent on the fact
* that we are deleting pages from the head of the list, and that readers
* share-lock the next page before releasing the one they are on. So we
/*
* If the index column has a specified collation, we should honor that
* while doing comparisons. However, we may have a collatable storage
- * type for a noncollatable indexed data type. If there's no index
+ * type for a noncollatable indexed data type. If there's no index
* collation then specify default collation in case the support
* functions need collation. This is harmless if the support
* functions don't care about collation, so we just do it
*
* On success return for a heap tuple, *recheck_p is set to indicate
* whether recheck is needed. We recheck if any of the consistent() functions
- * request it. recheck is not interesting when examining a non-leaf entry,
+ * request it. recheck is not interesting when examining a non-leaf entry,
* since we must visit the lower index page if there's any doubt.
*
* If we are doing an ordered scan, so->distances[] is filled with distance
/*
* If it's a leftover invalid tuple from pre-9.1, treat it as a match with
- * minimum possible distances. This means we'll always follow it to the
+ * minimum possible distances. This means we'll always follow it to the
* referenced page.
*/
if (GistTupleIsInvalid(tuple))
* ntids: if not NULL, gistgetbitmap's output tuple counter
*
* If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
- * tuples should be reported directly into the bitmap. If they are NULL,
+ * tuples should be reported directly into the bitmap. If they are NULL,
* we're doing a plain or ordered indexscan. For a plain indexscan, heap
* tuple TIDs are returned into so->pageData[]. For an ordered indexscan,
* heap tuple TIDs are pushed into individual search queue items.
/*
* If new item is heap tuple, it goes to front of chain; otherwise insert
* it before the first index-page item, so that index pages are visited in
- * LIFO order, ensuring depth-first search of index pages. See comments
+ * LIFO order, ensuring depth-first search of index pages. See comments
* in gist_private.h.
*/
if (GISTSearchItemIsHeap(*newitem))
* Recompute unions of left- and right-side subkeys after a page split,
* ignoring any tuples that are marked in spl->spl_dontcare[].
*
- * Note: we always recompute union keys for all index columns. In some cases
+ * Note: we always recompute union keys for all index columns. In some cases
* this might represent duplicate work for the leftmost column(s), but it's
* not safe to assume that "zero penalty to move a tuple" means "the union
* key doesn't change at all". Penalty functions aren't 100% accurate.
/*
* Remove tuples that are marked don't-cares from the tuple index array a[]
- * of length *len. This is applied separately to the spl_left and spl_right
+ * of length *len. This is applied separately to the spl_left and spl_right
* arrays.
*/
static void
/*
* Place a single don't-care tuple into either the left or right side of the
* split, according to which has least penalty for merging the tuple into
- * the previously-computed union keys. We need consider only columns starting
+ * the previously-computed union keys. We need consider only columns starting
* at attno.
*/
static void
/*
* There is only one previously defined union, so we just choose swap
- * or not by lowest penalty for that side. We can only get here if a
+ * or not by lowest penalty for that side. We can only get here if a
* secondary split happened to have all NULLs in its column in the
* tuples that the outer recursion level had assigned to one side.
* (Note that the null checks in gistSplitByKey don't prevent the
sv->spl_rdatum = v->spl_rattr[attno];
/*
- * Let the opclass-specific PickSplit method do its thing. Note that at
+ * Let the opclass-specific PickSplit method do its thing. Note that at
* this point we know there are no null keys in the entryvec.
*/
FunctionCall2Coll(&giststate->picksplitFn[attno],
* some inserts to go to other equally-good subtrees.
*
* keep_current_best is -1 if we haven't yet had to make a random choice
- * whether to keep the current best tuple. If we have done so, and
+ * whether to keep the current best tuple. If we have done so, and
* decided to keep it, keep_current_best is 1; if we've decided to
* replace, keep_current_best is 0. (This state will be reset to -1 as
* soon as we've made the replacement, but sometimes we make the choice in
{
/*
* New best penalty for column. Tentatively select this tuple
- * as the target, and record the best penalty. Then reset the
+ * as the target, and record the best penalty. Then reset the
* next column's penalty to "unknown" (and indirectly, the
* same for all the ones to its right). This will force us to
* adopt this tuple's penalty values as the best for all the
{
/*
* The current tuple is exactly as good for this column as the
- * best tuple seen so far. The next iteration of this loop
+ * best tuple seen so far. The next iteration of this loop
* will compare the next column.
*/
}
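/*
 * Sketch of the coin flip described above (modeled on gistchoose(); treat
 * the exact expression as a paraphrase rather than a quotation):
 */
if (keep_current_best == -1)
{
	/* haven't made the random choice for this incumbent yet: 50/50 */
	keep_current_best = (random() <= (MAX_RANDOM_VALUE / 2)) ? 1 : 0;
}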
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
stats->estimated_count = info->estimated_count;
/*
- * XXX the above is wrong if index is partial. Would it be OK to just
+ * XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}
* follow-right flag, because that change is not included in the full-page
* image. To be sure that the intermediate state with the wrong flag value is
* not visible to concurrent Hot Standby queries, this function handles
- * restoring the full-page image as well as updating the flag. (Note that
+ * restoring the full-page image as well as updating the flag. (Note that
* we never need to do anything else to the child page in the current WAL
* action.)
*/
/*
* We need to acquire and hold lock on target page while updating the left
- * child page. If we have a full-page image of target page, getting the
+ * child page. If we have a full-page image of target page, getting the
* lock is a side-effect of restoring that image. Note that even if the
* target page no longer exists, we'll still attempt to replay the change
* on the child page.
for (ptr = dist; ptr; ptr = ptr->next)
npage++;
+
/*
* the caller should've checked this already, but doesn't hurt to check
* again.
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
- * tuples by (expected) bucket number. However, such a sort is useless
+ * tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*
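/*
 * Sketch of the decision described above.  The _h_spoolinit() call and its
 * argument list are quoted from memory of this era of the hash AM and
 * should be treated as an assumption.
 */
if (num_buckets >= (uint32) NBuffers)
	buildstate.spool = _h_spoolinit(index, num_buckets);	/* big index: sort first */
else
	buildstate.spool = NULL;	/* fits in shared buffers: insert directly */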
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
- * for the TID we previously returned. (Because we hold share lock on
+ * for the TID we previously returned. (Because we hold share lock on
* the bucket, no deletions or splits could have occurred; therefore
* we can expect that the TID still exists in the current index page,
* at an offset >= where we were.)
/*
* Read the metapage to fetch original bucket and tuple counts. Also, we
* keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a bit
+ * hashm_spares[] values to compute bucket page addresses. This is a bit
* hokey but perfectly safe, since the interesting entries in the spares
* array cannot change under us; and it beats rereading the metapage for
* each bucket.
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by dead-reckoning.
+ * double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/
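/*
 * Sketch of such dead-reckoning (variable names are assumptions): start
 * from the tuple count recorded before the scan and subtract what this
 * pass removed, clamping at zero.
 */
num_index_tuples = orig_ntuples - tuples_removed;
if (num_index_tuples < 0)
	num_index_tuples = 0;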
* src/backend/access/hash/hashfunc.c
*
* NOTES
- * These functions are stored in pg_amproc. For each operator class
+ * These functions are stored in pg_amproc. For each operator class
* defined for hash indexes, they compute the hash value of the argument.
*
* Additional hash functions appear in /utils/adt/ files for various
/*
* Note: this is currently identical in behavior to hashvarlena, but keep
* it as a separate function in case we someday want to do something
- * different in non-C locales. (See also hashbpchar, if so.)
+ * different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA_ANY(key),
VARSIZE_ANY_EXHDR(key));
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
- * direction from the goal of parallelism. I did what I could. Rotates
+ * direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------
* substantial performance increase since final() does not need to
* do well in reverse, but it does need to affect all output bits.
* mix(), on the other hand, does not need to affect all output
- * bits (affecting 32 bits is enough). The original hash function had
+ * bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
- * Returns a uint32 value. Every bit of the key affects every bit of
+ * Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).
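/*
 * Sketch of why power-of-2 table sizes matter here: reducing the hash to a
 * bucket number becomes a mask instead of a modulo.  keyptr, keylen and
 * nbuckets are illustrative names only.
 */
uint32		hashval = DatumGetUInt32(hash_any((const unsigned char *) keyptr, keylen));
uint32		bucket = hashval & (nbuckets - 1);	/* valid when nbuckets is a power of 2 */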
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
- * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
+ * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
- * no one else tries to compact the bucket meanwhile. This guarantees that
+ * no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
- * immediate successor of the originally passed 'buf'. Additional overflow
+ * immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
/*
* _hash_getovflpage()
*
- * Find an available overflow page and return it. The returned buffer
+ * Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
* immediately: the bitmap page itself, and the following page which
- * is the one we return to the caller. Both of these are correctly
+ * is the one we return to the caller. Both of these are correctly
* marked "in use". Subsequent pages do not exist yet, but it is
* convenient to pre-mark them as "in use" too.
*/
metap->hashm_spares[splitnum]++;
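/*
 * Sketch of the "all in use" initialization described above (as done when a
 * new bitmap page is set up; 'pg' here stands for that new bitmap page):
 */
uint32	   *freep = HashPageGetBitmap(pg);

MemSet(freep, 0xFF, BMPGSZ_BYTE(metap));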
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
/*
* _hash_initbitmap()
*
- * Initialize a new bitmap page. The metapage has a write-lock upon
+ * Initialize a new bitmap page. The metapage has a write-lock upon
* entering the function, and must be written by caller after return.
*
* 'blkno' is the block number of the new bitmap page.
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
- * _hash_pageinit() is applied automatically. Otherwise it has
+ * _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
- * as the user-settable fillfactor parameter says. We can compute it
+ * as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
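/*
 * Sketch of the rest of that computation; the macro and constant names are
 * believed to match the hash AM of this era but should be read as an
 * assumption, not a quotation.
 */
item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
	sizeof(ItemIdData);		/* include the line pointer */
ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
/* keep the result within a sane range */
if (ffactor < 10)
	ffactor = 10;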
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
- * calls to occur. This ensures that the smgr level has the right idea of
+ * calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
/*
* Determine which bucket is to be split, and attempt to lock the old
- * bucket. If we can't get the lock, give up.
+ * bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
}
/*
- * Okay to proceed with split. Update the metapage bucket mapping info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
- * date before _hash_splitbucket finishes. That's okay, since all it
+ * date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
/*
* We're at the end of the old bucket chain, so we're done partitioning
- * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
- * false. Else, return true and set the hashso_curpos for the
+ * false. Else, return true and set the hashso_curpos for the
* scan to the right thing.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
- * bucket splits may occur during the index build. In that case we'd
+ * bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.
hspool->index = index;
/*
- * Determine the bitmask for hash code values. Since there are currently
+ * Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*
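/*
 * Sketch of the computation the comment introduces, assuming the helper
 * _hash_log2(n), which returns the smallest i such that (1 << i) >= n:
 */
hspool->hash_mask = (((uint32) 1) << _hash_log2(num_buckets)) - 1;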
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
- * existing hash keys in the page. This is the appropriate place to start
+ * existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber
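/*
 * Illustrative, self-contained sketch of such a binary search (the real
 * routine's header appears above; the function name below is invented, and
 * _hash_get_indextuple_hashkey() is the hash AM's accessor for a tuple's
 * hash key):
 */
static OffsetNumber
binsearch_sketch(Page page, uint32 hash_value)
{
	OffsetNumber lower = FirstOffsetNumber;
	OffsetNumber upper = PageGetMaxOffsetNumber(page) + 1;

	while (upper > lower)
	{
		OffsetNumber off = lower + (upper - lower) / 2;
		IndexTuple	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));

		if (_hash_get_indextuple_hashkey(itup) >= hash_value)
			upper = off;		/* 'off' is a candidate; look further left */
		else
			lower = off + 1;	/* everything up to 'off' hashes lower */
	}
	return lower;				/* maxoffset + 1 if no key is >= hash_value */
}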
HeapTuple newtup, HeapTuple old_key_tup,
bool all_visible_cleared, bool new_all_visible_cleared);
static void HeapSatisfiesHOTandKeyUpdate(Relation relation,
- Bitmapset *hot_attrs,
- Bitmapset *key_attrs, Bitmapset *id_attrs,
- bool *satisfies_hot, bool *satisfies_key,
- bool *satisfies_id,
- HeapTuple oldtup, HeapTuple newtup);
+ Bitmapset *hot_attrs,
+ Bitmapset *key_attrs, Bitmapset *id_attrs,
+ bool *satisfies_hot, bool *satisfies_key,
+ bool *satisfies_id,
+ HeapTuple oldtup, HeapTuple newtup);
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
uint16 old_infomask2, TransactionId add_to_xmax,
LockTupleMode mode, bool is_update,
XLTW_Oper oper, int *remaining);
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
- bool *copy);
+ bool *copy);
/*
* while the scan is in progress will be invisible to my snapshot anyway.
* (That is not true when using a non-MVCC snapshot. However, we couldn't
* guarantee to return tuples added after scan start anyway, since they
- * might go into pages we already scanned. To guarantee consistent
+ * might go into pages we already scanned. To guarantee consistent
* results for a non-MVCC snapshot, the caller must hold some higher-level
* lock that ensures the interesting tuple(s) won't change.)
*/
/*
* If the table is large relative to NBuffers, use a bulk-read access
- * strategy and enable synchronized scanning (see syncscan.c). Although
+ * strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
* (However, some callers need to be able to disable one or both of these
}
/*
- * Be sure to check for interrupts at least once per page. Checks at
+ * Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters many
* pages' worth of consecutive dead tuples.
*/
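/*
 * The per-page check the comment above refers to is the standard interrupt
 * test:
 */
CHECK_FOR_INTERRUPTS();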
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
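/*
 * ...and once visibility has been determined, the content lock can be
 * dropped while the pin is kept, per the comment above (sketch):
 */
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);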
*
* Same as relation_openrv, but with an additional missing_ok argument
* allowing a NULL return rather than an error if the relation is not
- * found. (Note that some other causes, such as permissions problems,
+ * found. (Note that some other causes, such as permissions problems,
* will still result in an ereport.)
* ----------------
*/
/*
* When first_call is true (and thus, skip is initially false) we'll
- * return the first tuple we find. But on later passes, heapTuple
+ * return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time.
* Returning it again would be incorrect (and would loop forever), so
* we skip it and return the next match we find.
* possibly uncommitted version.
*
* *tid is both an input and an output parameter: it is updated to
- * show the latest version of the row. Note that it will not be changed
+ * show the latest version of the row. Note that it will not be changed
* if no version of the row passes the snapshot test.
*/
void
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
- * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
+ * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously.
*
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
- * TID where the tuple was stored. But note that any toasting of fields
+ * TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
- * lock "gaps" as index page locks do. So we don't need to identify a
+ * lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
bool need_tuple_data;
/*
- * For logical decoding, we need the tuple even if we're doing a
- * full page write, so make sure to log it separately. (XXX We could
+ * For logical decoding, we need the tuple even if we're doing a full
+ * page write, so make sure to log it separately. (XXX We could
* alternatively store a pointer into the FPW).
*
* Also, if this is a catalog, we need to transmit combocids to
rdata[2].next = NULL;
/*
- * Make a separate rdata entry for the tuple's buffer if we're
- * doing logical decoding, so that an eventual FPW doesn't
- * remove the tuple's data.
+ * Make a separate rdata entry for the tuple's buffer if we're doing
+ * logical decoding, so that an eventual FPW doesn't remove the
+ * tuple's data.
*/
if (need_tuple_data)
{
/*
* If the object id of this tuple has already been assigned, trust the
- * caller. There are a couple of ways this can happen. At initial db
+ * caller. There are a couple of ways this can happen. At initial db
* creation, the backend program sets oids for tuples. When we define
* an index, we set the oid. Finally, in the future, we may allow
* users to set their own object ids in order to support a persistent
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
- * lock "gaps" as index page locks do. So we don't need to identify a
+ * lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
int nthispage;
/*
-