-b
- --blobs
+ --large-objects
+ --blobs (deprecated)
Include large objects in the dump. This is the default behavior
except when --schema, --table, or --schema-only is specified. The -b
switch is therefore only useful to add large objects to dumps
where a specific schema or table has been requested. Note that
- blobs are considered data and therefore will be included when
+ large objects are considered data and therefore will be included when
--data-only is used, but not
when --schema-only is.
-B
- --no-blobs
+ --no-large-objects
+ --no-blobs (deprecated)
Exclude large objects in the dump.
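As a quick usage sketch (the database, table, and file names are hypothetical), the new and deprecated spellings are interchangeable, and -B is how large objects are excluded from an otherwise complete dump:

    pg_dump --large-objects -t mytable -f dump.sql mydb
    pg_dump --blobs -t mytable -f dump.sql mydb        (deprecated spelling)
    pg_dump --no-large-objects -f dump.sql mydb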
Output a directory-format archive suitable for input into
pg_restore . This will create a directory
- with one file for each table and blob being dumped, plus a
+ with one file for each table and large object being dumped, plus a
so-called Table of Contents file describing the dumped objects in a
machine-readable format that
pg_restore
can read. A directory format archive can be manipulated with
- Non-schema objects such as blobs are not dumped when -n is
- specified. You can add blobs back to the dump with the
- --blobs switch.
+ Non-schema objects such as large objects are not dumped when -n is
+ specified. You can add large objects back to the dump with the
+ --large-objects switch.
typedef enum _teSection
{
- SECTION_NONE = 1, /* COMMENTs, ACLs, etc; can be anywhere */
+ SECTION_NONE = 1, /* comments, ACLs, etc; can be anywhere */
SECTION_PRE_DATA, /* stuff to be processed before data */
- SECTION_DATA, /* TABLE DATA, BLOBS, BLOB COMMENTS */
+ SECTION_DATA, /* table data, large objects, LO comments */
SECTION_POST_DATA /* stuff to be processed after data */
} teSection;
int outputClean;
int outputCreateDB;
- bool outputBlobs;
- bool dontOutputBlobs;
+ bool outputLOs;
+ bool dontOutputLOs;
int outputNoOwner;
char *outputSuperuser;
/* Called to write *data* to the archive */
extern void WriteData(Archive *AHX, const void *data, size_t dLen);
-extern int StartBlob(Archive *AHX, Oid oid);
-extern int EndBlob(Archive *AHX, Oid oid);
+extern int StartLO(Archive *AHX, Oid oid);
+extern int EndLO(Archive *AHX, Oid oid);
extern void CloseArchive(Archive *AHX);
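A minimal sketch of a dumper routine driving these entry points (the function name is hypothetical; the loop mirrors pg_dump's own dumpLOs() further below, with connection setup omitted): each large object is bracketed by StartLO()/EndLO(), and its bytes are streamed through WriteData().

static void
dump_one_lo(Archive *fout, PGconn *conn, Oid loOid)
{
	char	buf[LOBBUFSIZE];
	int		loFd;
	int		cnt;

	loFd = lo_open(conn, loOid, INV_READ);	/* open the server-side large object */
	if (loFd == -1)
		pg_fatal("could not open large object %u: %s",
				 loOid, PQerrorMessage(conn));

	StartLO(fout, loOid);					/* begin this object's archive data */
	do
	{
		cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
		if (cnt < 0)
			pg_fatal("error reading large object %u: %s",
					 loOid, PQerrorMessage(conn));
		WriteData(fout, buf, cnt);			/* stream the chunk into the archive */
	} while (cnt > 0);
	lo_close(conn, loFd);
	EndLO(fout, loOid);						/* close this object's archive data */
}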
*/
if (strncmp(te->desc, "BLOB", 4) == 0)
{
- DropBlobIfExists(AH, te->catalogId.oid);
+ DropLOIfExists(AH, te->catalogId.oid);
}
else
{
}
/***********
- * BLOB Archival
+ * Large Object Archival
***********/
-/* Called by a dumper to signal start of a BLOB */
+/* Called by a dumper to signal start of a LO */
int
-StartBlob(Archive *AHX, Oid oid)
+StartLO(Archive *AHX, Oid oid)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
- if (!AH->StartBlobPtr)
+ if (!AH->StartLOPtr)
pg_fatal("large-object output not supported in chosen format");
- AH->StartBlobPtr(AH, AH->currToc, oid);
+ AH->StartLOPtr(AH, AH->currToc, oid);
return 1;
}
-/* Called by a dumper to signal end of a BLOB */
+/* Called by a dumper to signal end of a LO */
int
-EndBlob(Archive *AHX, Oid oid)
+EndLO(Archive *AHX, Oid oid)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
- if (AH->EndBlobPtr)
- AH->EndBlobPtr(AH, AH->currToc, oid);
+ if (AH->EndLOPtr)
+ AH->EndLOPtr(AH, AH->currToc, oid);
return 1;
}
/**********
- * BLOB Restoration
+ * Large Object Restoration
**********/
/*
- * Called by a format handler before any blobs are restored
+ * Called by a format handler before any LOs are restored
*/
void
-StartRestoreBlobs(ArchiveHandle *AH)
+StartRestoreLOs(ArchiveHandle *AH)
{
RestoreOptions *ropt = AH->public.ropt;
ahprintf(AH, "BEGIN;\n\n");
}
- AH->blobCount = 0;
+ AH->loCount = 0;
}
/*
- * Called by a format handler after all blobs are restored
+ * Called by a format handler after all LOs are restored
*/
void
-EndRestoreBlobs(ArchiveHandle *AH)
+EndRestoreLOs(ArchiveHandle *AH)
{
RestoreOptions *ropt = AH->public.ropt;
pg_log_info(ngettext("restored %d large object",
"restored %d large objects",
- AH->blobCount),
- AH->blobCount);
+ AH->loCount),
+ AH->loCount);
}
/*
- * Called by a format handler to initiate restoration of a blob
+ * Called by a format handler to initiate restoration of a LO
*/
void
-StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
+StartRestoreLO(ArchiveHandle *AH, Oid oid, bool drop)
{
- bool old_blob_style = (AH->version < K_VERS_1_12);
+ bool old_lo_style = (AH->version < K_VERS_1_12);
Oid loOid;
- AH->blobCount++;
+ AH->loCount++;
/* Initialize the LO Buffer */
AH->lo_buf_used = 0;
pg_log_info("restoring large object with OID %u", oid);
/* With an old archive we must do drop and create logic here */
- if (old_blob_style && drop)
- DropBlobIfExists(AH, oid);
+ if (old_lo_style && drop)
+ DropLOIfExists(AH, oid);
if (AH->connection)
{
- if (old_blob_style)
+ if (old_lo_style)
{
loOid = lo_create(AH->connection, oid);
if (loOid == 0 || loOid != oid)
}
else
{
- if (old_blob_style)
+ if (old_lo_style)
ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
oid, INV_WRITE);
else
oid, INV_WRITE);
}
- AH->writingBlob = 1;
+ AH->writingLO = true;
}
void
-EndRestoreBlob(ArchiveHandle *AH, Oid oid)
+EndRestoreLO(ArchiveHandle *AH, Oid oid)
{
if (AH->lo_buf_used > 0)
{
dump_lo_buf(AH);
}
- AH->writingBlob = 0;
+ AH->writingLO = false;
if (AH->connection)
{
}
/*
- * Dump the current contents of the LO data buffer while writing a BLOB
+ * Dump the current contents of the LO data buffer while writing a LO
*/
static void
dump_lo_buf(ArchiveHandle *AH)
AH->lo_buf_used,
AH);
- /* Hack: turn off writingBlob so ahwrite doesn't recurse to here */
- AH->writingBlob = 0;
+ /* Hack: turn off writingLO so ahwrite doesn't recurse to here */
+ AH->writingLO = false;
ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data);
- AH->writingBlob = 1;
+ AH->writingLO = true;
destroyPQExpBuffer(buf);
}
{
int bytes_written = 0;
- if (AH->writingBlob)
+ if (AH->writingLO)
{
size_t remaining = size * nmemb;
}
/*
- * Write out all data (tables & blobs)
+ * Write out all data (tables & LOs)
*/
void
WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate)
if (strcmp(te->desc, "BLOBS") == 0)
{
- startPtr = AH->StartBlobsPtr;
- endPtr = AH->EndBlobsPtr;
+ startPtr = AH->StartLOsPtr;
+ endPtr = AH->EndLOsPtr;
}
else
{
if (!te->hadDumper)
{
/*
- * Special Case: If 'SEQUENCE SET' or anything to do with BLOBs, then
+ * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then
* it is considered a data entry. We don't need to check for the
* BLOBS entry or old-style BLOB COMMENTS, because they will have
* hadDumper = true ... but we do need to check new-style BLOB ACLs,
appendPQExpBuffer(buf, "%s.", fmtId(te->namespace));
appendPQExpBufferStr(buf, fmtId(te->tag));
}
- /* BLOBs just have a name, but it's numeric so must not use fmtId */
+ /* LOs just have a name, but it's numeric so must not use fmtId */
else if (strcmp(type, "BLOB") == 0)
{
appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
/* Historical version numbers (checked in code) */
#define K_VERS_1_0 MAKE_ARCHIVE_VERSION(1, 0, 0)
#define K_VERS_1_2 MAKE_ARCHIVE_VERSION(1, 2, 0) /* Allow No ZLIB */
-#define K_VERS_1_3 MAKE_ARCHIVE_VERSION(1, 3, 0) /* BLOBs */
+#define K_VERS_1_3 MAKE_ARCHIVE_VERSION(1, 3, 0) /* BLOBS */
#define K_VERS_1_4 MAKE_ARCHIVE_VERSION(1, 4, 0) /* Date & name in header */
#define K_VERS_1_5 MAKE_ARCHIVE_VERSION(1, 5, 0) /* Handle dependencies */
#define K_VERS_1_6 MAKE_ARCHIVE_VERSION(1, 6, 0) /* Schema field in TOCs */
typedef void (*WriteDataPtrType) (ArchiveHandle *AH, const void *data, size_t dLen);
typedef void (*EndDataPtrType) (ArchiveHandle *AH, TocEntry *te);
-typedef void (*StartBlobsPtrType) (ArchiveHandle *AH, TocEntry *te);
-typedef void (*StartBlobPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid);
-typedef void (*EndBlobPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid);
-typedef void (*EndBlobsPtrType) (ArchiveHandle *AH, TocEntry *te);
+typedef void (*StartLOsPtrType) (ArchiveHandle *AH, TocEntry *te);
+typedef void (*StartLOPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid);
+typedef void (*EndLOPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid);
+typedef void (*EndLOsPtrType) (ArchiveHandle *AH, TocEntry *te);
typedef int (*WriteBytePtrType) (ArchiveHandle *AH, const int i);
typedef int (*ReadBytePtrType) (ArchiveHandle *AH);
PrintExtraTocPtrType PrintExtraTocPtr; /* Extra TOC info for format */
PrintTocDataPtrType PrintTocDataPtr;
- StartBlobsPtrType StartBlobsPtr;
- EndBlobsPtrType EndBlobsPtr;
- StartBlobPtrType StartBlobPtr;
- EndBlobPtrType EndBlobPtr;
+ StartLOsPtrType StartLOsPtr;
+ EndLOsPtrType EndLOsPtr;
+ StartLOPtrType StartLOPtr;
+ EndLOPtrType EndLOPtr;
SetupWorkerPtrType SetupWorkerPtr;
WorkerJobDumpPtrType WorkerJobDumpPtr;
ArchiverOutput outputKind; /* Flag for what we're currently writing */
bool pgCopyIn; /* Currently in libpq 'COPY IN' mode. */
- int loFd; /* BLOB fd */
- int writingBlob; /* Flag */
- int blobCount; /* # of blobs restored */
+ int loFd;
+ bool writingLO;
+ int loCount; /* # of LOs restored */
char *fSpec; /* Archive File Spec */
FILE *FH; /* General purpose file handle */
int ReadOffset(ArchiveHandle *, pgoff_t *);
size_t WriteOffset(ArchiveHandle *, pgoff_t, int);
-extern void StartRestoreBlobs(ArchiveHandle *AH);
-extern void StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop);
-extern void EndRestoreBlob(ArchiveHandle *AH, Oid oid);
-extern void EndRestoreBlobs(ArchiveHandle *AH);
+extern void StartRestoreLOs(ArchiveHandle *AH);
+extern void StartRestoreLO(ArchiveHandle *AH, Oid oid, bool drop);
+extern void EndRestoreLO(ArchiveHandle *AH, Oid oid);
+extern void EndRestoreLOs(ArchiveHandle *AH);
extern void InitArchiveFmt_Custom(ArchiveHandle *AH);
extern void InitArchiveFmt_Null(ArchiveHandle *AH);
extern bool isValidTarHeader(char *header);
extern void ReconnectToServer(ArchiveHandle *AH, const char *dbname);
-extern void DropBlobIfExists(ArchiveHandle *AH, Oid oid);
+extern void DropLOIfExists(ArchiveHandle *AH, Oid oid);
void ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH);
int ahprintf(ArchiveHandle *AH, const char *fmt,...) pg_attribute_printf(2, 3);
static void _PrintData(ArchiveHandle *AH);
static void _skipData(ArchiveHandle *AH);
-static void _skipBlobs(ArchiveHandle *AH);
+static void _skipLOs(ArchiveHandle *AH);
-static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _LoadBlobs(ArchiveHandle *AH, bool drop);
+static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
+static void _LoadLOs(ArchiveHandle *AH, bool drop);
static void _PrepParallelRestore(ArchiveHandle *AH);
static void _Clone(ArchiveHandle *AH);
AH->WriteExtraTocPtr = _WriteExtraToc;
AH->PrintExtraTocPtr = _PrintExtraToc;
- AH->StartBlobsPtr = _StartBlobs;
- AH->StartBlobPtr = _StartBlob;
- AH->EndBlobPtr = _EndBlob;
- AH->EndBlobsPtr = _EndBlobs;
+ AH->StartLOsPtr = _StartLOs;
+ AH->StartLOPtr = _StartLO;
+ AH->EndLOPtr = _EndLO;
+ AH->EndLOsPtr = _EndLOs;
AH->PrepParallelRestorePtr = _PrepParallelRestore;
AH->ClonePtr = _Clone;
/*
* Called by archiver when dumper calls WriteData. This routine is
- * called for both BLOB and TABLE data; it is the responsibility of
- * the format to manage each kind of data using StartBlob/StartData.
+ * called for both LO and table data; it is the responsibility of
+ * the format to manage each kind of data using StartLO/StartData.
*
* It should only be called from within a DataDumper routine.
*
/*
* Called by the archiver when starting to save all BLOB DATA (not schema).
* This routine should save whatever format-specific information is needed
- * to read the BLOBs back into memory.
+ * to read the LOs back into memory.
*
* It is called just prior to the dumper's DataDumper routine.
*
* Optional, but strongly recommended.
*/
static void
-_StartBlobs(ArchiveHandle *AH, TocEntry *te)
+_StartLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
}
/*
- * Called by the archiver when the dumper calls StartBlob.
+ * Called by the archiver when the dumper calls StartLO.
*
* Mandatory.
*
* Must save the passed OID for retrieval at restore-time.
*/
static void
-_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
}
/*
- * Called by the archiver when the dumper calls EndBlob.
+ * Called by the archiver when the dumper calls EndLO.
*
* Optional.
*/
static void
-_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
* Optional.
*/
static void
-_EndBlobs(ArchiveHandle *AH, TocEntry *te)
+_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
- /* Write out a fake zero OID to mark end-of-blobs. */
+ /* Write out a fake zero OID to mark end-of-LOs. */
WriteInt(AH, 0);
}
break;
case BLK_BLOBS:
- _skipBlobs(AH);
+ _skipLOs(AH);
break;
default: /* Always have a default */
break;
case BLK_BLOBS:
- _LoadBlobs(AH, AH->public.ropt->dropSchema);
+ _LoadLOs(AH, AH->public.ropt->dropSchema);
break;
default: /* Always have a default */
}
static void
-_LoadBlobs(ArchiveHandle *AH, bool drop)
+_LoadLOs(ArchiveHandle *AH, bool drop)
{
Oid oid;
- StartRestoreBlobs(AH);
+ StartRestoreLOs(AH);
oid = ReadInt(AH);
while (oid != 0)
{
- StartRestoreBlob(AH, oid, drop);
+ StartRestoreLO(AH, oid, drop);
_PrintData(AH);
- EndRestoreBlob(AH, oid);
+ EndRestoreLO(AH, oid);
oid = ReadInt(AH);
}
- EndRestoreBlobs(AH);
+ EndRestoreLOs(AH);
}
/*
- * Skip the BLOBs from the current file position.
- * BLOBS are written sequentially as data blocks (see below).
- * Each BLOB is preceded by its original OID.
- * A zero OID indicates the end of the BLOBS.
+ * Skip the LOs from the current file position.
+ * LOs are written sequentially as data blocks (see below).
+ * Each LO is preceded by its original OID.
+ * A zero OID indicates the end of the LOs.
*/
static void
-_skipBlobs(ArchiveHandle *AH)
+_skipLOs(ArchiveHandle *AH)
{
Oid oid;
* If an archive is to be written, this routine must call:
* WriteHead to save the archive header
* WriteToc to save the TOC entries
- * WriteDataChunks to save all DATA & BLOBs.
+ * WriteDataChunks to save all data & LOs.
*
*/
static void
}
void
-DropBlobIfExists(ArchiveHandle *AH, Oid oid)
+DropLOIfExists(ArchiveHandle *AH, Oid oid)
{
/*
* If we are not restoring to a direct database connection, we have to
- * guess about how to detect whether the blob exists. Assume new-style.
+ * guess about how to detect whether the LO exists. Assume new-style.
*/
if (AH->connection == NULL ||
PQserverVersion(AH->connection) >= 90000)
*
* A directory format dump is a directory, which contains a "toc.dat" file
 * for the TOC, and a separate file for each data entry, named "<oid>.dat".
- * Large objects (BLOBs) are stored in separate files named "blob_<oid>.dat",
+ * Large objects are stored in separate files named "blob_<oid>.dat",
* and there's a plain-text TOC file for them called "blobs.toc". If
* compression is used, each data file is individually compressed and the
 * ".gz" suffix is added to the filenames. The TOC files are never
 * compressed by pg_dump.
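For orientation, a hypothetical directory archive holding one table data entry and two large objects would contain (entry IDs and OIDs invented for illustration):

    dumpdir/
        toc.dat             machine-readable Table of Contents
        3104.dat            table data entry
        blob_16390.dat      large object data
        blob_16391.dat      large object data
        blobs.toc           plain-text OID-to-filename map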
cfp *dataFH; /* currently open data file */
- cfp *blobsTocFH; /* file handle for blobs.toc */
+ cfp *LOsTocFH; /* file handle for blobs.toc */
ParallelState *pstate; /* for parallel backup / restore */
} lclContext;
static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _LoadBlobs(ArchiveHandle *AH);
+static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
+static void _LoadLOs(ArchiveHandle *AH);
static void _PrepParallelRestore(ArchiveHandle *AH);
static void _Clone(ArchiveHandle *AH);
AH->WriteExtraTocPtr = _WriteExtraToc;
AH->PrintExtraTocPtr = _PrintExtraToc;
- AH->StartBlobsPtr = _StartBlobs;
- AH->StartBlobPtr = _StartBlob;
- AH->EndBlobPtr = _EndBlob;
- AH->EndBlobsPtr = _EndBlobs;
+ AH->StartLOsPtr = _StartLOs;
+ AH->StartLOPtr = _StartLO;
+ AH->EndLOPtr = _EndLO;
+ AH->EndLOsPtr = _EndLOs;
AH->PrepParallelRestorePtr = _PrepParallelRestore;
AH->ClonePtr = _Clone;
AH->formatData = (void *) ctx;
ctx->dataFH = NULL;
- ctx->blobsTocFH = NULL;
+ ctx->LOsTocFH = NULL;
/* Initialize LO buffering */
AH->lo_buf_size = LOBBUFSIZE;
/*
* Called by archiver when dumper calls WriteData. This routine is
- * called for both BLOB and TABLE data; it is the responsibility of
- * the format to manage each kind of data using StartBlob/StartData.
+ * called for both LO and table data; it is the responsibility of
+ * the format to manage each kind of data using StartLO/StartData.
*
* It should only be called from within a DataDumper routine.
*
}
/*
- * Print data for a given file (can be a BLOB as well)
+ * Print data for a given file (can be a LO as well)
*/
static void
_PrintFileData(ArchiveHandle *AH, char *filename)
return;
if (strcmp(te->desc, "BLOBS") == 0)
- _LoadBlobs(AH);
+ _LoadLOs(AH);
else
{
char fname[MAXPGPATH];
}
static void
-_LoadBlobs(ArchiveHandle *AH)
+_LoadLOs(ArchiveHandle *AH)
{
Oid oid;
lclContext *ctx = (lclContext *) AH->formatData;
char tocfname[MAXPGPATH];
char line[MAXPGPATH];
- StartRestoreBlobs(AH);
+ StartRestoreLOs(AH);
setFilePath(AH, tocfname, "blobs.toc");
- ctx->blobsTocFH = cfopen_read(tocfname, PG_BINARY_R);
+ ctx->LOsTocFH = cfopen_read(tocfname, PG_BINARY_R);
- if (ctx->blobsTocFH == NULL)
+ if (ctx->LOsTocFH == NULL)
pg_fatal("could not open large object TOC file \"%s\" for input: %m",
tocfname);
- /* Read the blobs TOC file line-by-line, and process each blob */
- while ((cfgets(ctx->blobsTocFH, line, MAXPGPATH)) != NULL)
+ /* Read the LOs TOC file line-by-line, and process each LO */
+ while ((cfgets(ctx->LOsTocFH, line, MAXPGPATH)) != NULL)
{
- char blobfname[MAXPGPATH + 1];
+ char lofname[MAXPGPATH + 1];
char path[MAXPGPATH];
- /* Can't overflow because line and blobfname are the same length */
- if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, blobfname) != 2)
+ /* Can't overflow because line and lofname are the same length */
+ if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, lofname) != 2)
pg_fatal("invalid line in large object TOC file \"%s\": \"%s\"",
tocfname, line);
- StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema);
- snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, blobfname);
+ StartRestoreLO(AH, oid, AH->public.ropt->dropSchema);
+ snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, lofname);
_PrintFileData(AH, path);
- EndRestoreBlob(AH, oid);
+ EndRestoreLO(AH, oid);
}
- if (!cfeof(ctx->blobsTocFH))
+ if (!cfeof(ctx->LOsTocFH))
pg_fatal("error reading large object TOC file \"%s\"",
tocfname);
- if (cfclose(ctx->blobsTocFH) != 0)
+ if (cfclose(ctx->LOsTocFH) != 0)
pg_fatal("could not close large object TOC file \"%s\": %m",
tocfname);
- ctx->blobsTocFH = NULL;
+ ctx->LOsTocFH = NULL;
- EndRestoreBlobs(AH);
+ EndRestoreLOs(AH);
}
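For reference, each line of blobs.toc pairs a large object's OID with its data file, in the "%u blob_%u.dat" form written by _EndLO() below; a file covering two objects would read (OIDs hypothetical):

    16390 blob_16390.dat
    16391 blob_16391.dat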
* If an archive is to be written, this routine must call:
* WriteHead to save the archive header
* WriteToc to save the TOC entries
- * WriteDataChunks to save all DATA & BLOBs.
+ * WriteDataChunks to save all data & LOs.
*/
static void
_CloseArchive(ArchiveHandle *AH)
}
/*
- * BLOB support
+ * LO support
*/
/*
* It is called just prior to the dumper's DataDumper routine.
*
* We open the large object TOC file here, so that we can append a line to
- * it for each blob.
+ * it for each LO.
*/
static void
-_StartBlobs(ArchiveHandle *AH, TocEntry *te)
+_StartLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
pg_compress_specification compression_spec = {0};
setFilePath(AH, fname, "blobs.toc");
- /* The blob TOC file is never compressed */
+ /* The LO TOC file is never compressed */
compression_spec.algorithm = PG_COMPRESSION_NONE;
- ctx->blobsTocFH = cfopen_write(fname, "ab", compression_spec);
- if (ctx->blobsTocFH == NULL)
+ ctx->LOsTocFH = cfopen_write(fname, "ab", compression_spec);
+ if (ctx->LOsTocFH == NULL)
pg_fatal("could not open output file \"%s\": %m", fname);
}
/*
- * Called by the archiver when we're about to start dumping a blob.
+ * Called by the archiver when we're about to start dumping a LO.
*
- * We create a file to write the blob to.
+ * We create a file to write the LO to.
*/
static void
-_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
char fname[MAXPGPATH];
}
/*
- * Called by the archiver when the dumper is finished writing a blob.
+ * Called by the archiver when the dumper is finished writing a LO.
*
- * We close the blob file and write an entry to the blob TOC file for it.
+ * We close the LO file and write an entry to the LO TOC file for it.
*/
static void
-_EndBlob (ArchiveHandle *AH, TocEntry *te, Oid oid)
+_EndLO (ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
char buf[50];
int len;
- /* Close the BLOB data file itself */
+ /* Close the LO data file itself */
if (cfclose(ctx->dataFH) != 0)
- pg_fatal("could not close blob data file: %m");
+ pg_fatal("could not close LO data file: %m");
ctx->dataFH = NULL;
- /* register the blob in blobs.toc */
+ /* register the LO in blobs.toc */
len = snprintf(buf, sizeof(buf), "%u blob_%u.dat\n", oid, oid);
- if (cfwrite(buf, len, ctx->blobsTocFH) != len)
- pg_fatal("could not write to blobs TOC file");
+ if (cfwrite(buf, len, ctx->LOsTocFH) != len)
+ pg_fatal("could not write to LOs TOC file");
}
/*
* Called by the archiver when finishing saving all BLOB DATA.
*
- * We close the blobs TOC file.
+ * We close the LOs TOC file.
*/
static void
-_EndBlobs(ArchiveHandle *AH, TocEntry *te)
+_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
- if (cfclose(ctx->blobsTocFH) != 0)
- pg_fatal("could not close blobs TOC file: %m");
- ctx->blobsTocFH = NULL;
+ if (cfclose(ctx->LOsTocFH) != 0)
+ pg_fatal("could not close LOs TOC file: %m");
+ ctx->LOsTocFH = NULL;
}
/*
* If this is the BLOBS entry, what we stat'd was blobs.toc, which
* most likely is a lot smaller than the actual blob data. We don't
* have a cheap way to estimate how much smaller, but fortunately it
- * doesn't matter too much as long as we get the blobs processed
+ * doesn't matter too much as long as we get the LOs processed
* reasonably early. Arbitrarily scale up by a factor of 1K.
*/
if (strcmp(te->desc, "BLOBS") == 0)
#include "pg_backup_utils.h"
static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
-static void _WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen);
+static void _WriteLOData(ArchiveHandle *AH, const void *data, size_t dLen);
static void _EndData(ArchiveHandle *AH, TocEntry *te);
static int _WriteByte(ArchiveHandle *AH, const int i);
static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
static void _CloseArchive(ArchiveHandle *AH);
static void _PrintTocData(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
/*
AH->ReopenPtr = NULL;
AH->PrintTocDataPtr = _PrintTocData;
- AH->StartBlobsPtr = _StartBlobs;
- AH->StartBlobPtr = _StartBlob;
- AH->EndBlobPtr = _EndBlob;
- AH->EndBlobsPtr = _EndBlobs;
+ AH->StartLOsPtr = _StartLOs;
+ AH->StartLOPtr = _StartLO;
+ AH->EndLOPtr = _EndLO;
+ AH->EndLOsPtr = _EndLOs;
AH->ClonePtr = NULL;
AH->DeClonePtr = NULL;
/*
* Called by dumper via archiver from within a data dump routine
- * We substitute this for _WriteData while emitting a BLOB
+ * We substitute this for _WriteData while emitting a LO
*/
static void
-_WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen)
+_WriteLOData(ArchiveHandle *AH, const void *data, size_t dLen)
{
if (dLen > 0)
{
/*
* Called by the archiver when starting to save all BLOB DATA (not schema).
* This routine should save whatever format-specific information is needed
- * to read the BLOBs back into memory.
+ * to read the LOs back into memory.
*
* It is called just prior to the dumper's DataDumper routine.
*
* Optional, but strongly recommended.
*/
static void
-_StartBlobs(ArchiveHandle *AH, TocEntry *te)
+_StartLOs(ArchiveHandle *AH, TocEntry *te)
{
ahprintf(AH, "BEGIN;\n\n");
}
/*
- * Called by the archiver when the dumper calls StartBlob.
+ * Called by the archiver when the dumper calls StartLO.
*
* Mandatory.
*
* Must save the passed OID for retrieval at restore-time.
*/
static void
-_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
- bool old_blob_style = (AH->version < K_VERS_1_12);
+ bool old_lo_style = (AH->version < K_VERS_1_12);
if (oid == 0)
pg_fatal("invalid OID for large object");
/* With an old archive we must do drop and create logic here */
- if (old_blob_style && AH->public.ropt->dropSchema)
- DropBlobIfExists(AH, oid);
+ if (old_lo_style && AH->public.ropt->dropSchema)
+ DropLOIfExists(AH, oid);
- if (old_blob_style)
+ if (old_lo_style)
ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
oid, INV_WRITE);
else
ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n",
oid, INV_WRITE);
- AH->WriteDataPtr = _WriteBlobData;
+ AH->WriteDataPtr = _WriteLOData;
}
/*
- * Called by the archiver when the dumper calls EndBlob.
+ * Called by the archiver when the dumper calls EndLO.
*
* Optional.
*/
static void
-_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
AH->WriteDataPtr = _WriteData;
* Optional.
*/
static void
-_EndBlobs(ArchiveHandle *AH, TocEntry *te)
+_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
ahprintf(AH, "COMMIT;\n\n");
}
AH->currToc = te;
if (strcmp(te->desc, "BLOBS") == 0)
- _StartBlobs(AH, te);
+ _StartLOs(AH, te);
te->dataDumper((Archive *) AH, te->dataDumperArg);
if (strcmp(te->desc, "BLOBS") == 0)
- _EndBlobs(AH, te);
+ _EndLOs(AH, te);
AH->currToc = NULL;
}
static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
#define K_STD_BUF_SIZE 1024
{
int hasSeek;
pgoff_t filePos;
- TAR_MEMBER *blobToc;
+ TAR_MEMBER *loToc;
FILE *tarFH;
pgoff_t tarFHpos;
pgoff_t tarNextMember;
char *filename;
} lclTocEntry;
-static void _LoadBlobs(ArchiveHandle *AH);
+static void _LoadLOs(ArchiveHandle *AH);
static TAR_MEMBER *tarOpen(ArchiveHandle *AH, const char *filename, char mode);
static void tarClose(ArchiveHandle *AH, TAR_MEMBER *th);
AH->WriteExtraTocPtr = _WriteExtraToc;
AH->PrintExtraTocPtr = _PrintExtraToc;
- AH->StartBlobsPtr = _StartBlobs;
- AH->StartBlobPtr = _StartBlob;
- AH->EndBlobPtr = _EndBlob;
- AH->EndBlobsPtr = _EndBlobs;
+ AH->StartLOsPtr = _StartLOs;
+ AH->StartLOPtr = _StartLO;
+ AH->EndLOPtr = _EndLO;
+ AH->EndLOsPtr = _EndLOs;
AH->ClonePtr = NULL;
AH->DeClonePtr = NULL;
}
if (strcmp(te->desc, "BLOBS") == 0)
- _LoadBlobs(AH);
+ _LoadLOs(AH);
else
_PrintFileData(AH, tctx->filename);
}
static void
-_LoadBlobs(ArchiveHandle *AH)
+_LoadLOs(ArchiveHandle *AH)
{
Oid oid;
lclContext *ctx = (lclContext *) AH->formatData;
TAR_MEMBER *th;
size_t cnt;
- bool foundBlob = false;
+ bool foundLO = false;
char buf[4096];
- StartRestoreBlobs(AH);
+ StartRestoreLOs(AH);
th = tarOpen(AH, NULL, 'r'); /* Open next file */
while (th != NULL)
{
pg_log_info("restoring large object with OID %u", oid);
- StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema);
+ StartRestoreLO(AH, oid, AH->public.ropt->dropSchema);
while ((cnt = tarRead(buf, 4095, th)) > 0)
{
buf[cnt] = '\0';
ahwrite(buf, 1, cnt, AH);
}
- EndRestoreBlob(AH, oid);
- foundBlob = true;
+ EndRestoreLO(AH, oid);
+ foundLO = true;
}
tarClose(AH, th);
}
tarClose(AH, th);
/*
- * Once we have found the first blob, stop at the first non-blob
+ * Once we have found the first LO, stop at the first non-LO
* entry (which will be 'blobs.toc'). This coding would eat all
- * the rest of the archive if there are no blobs ... but this
+ * the rest of the archive if there are no LOs ... but this
* function shouldn't be called at all in that case.
*/
- if (foundBlob)
+ if (foundLO)
break;
}
th = tarOpen(AH, NULL, 'r');
}
- EndRestoreBlobs(AH);
+ EndRestoreLOs(AH);
}
tarClose(AH, th); /* Not needed any more */
/*
- * Now send the data (tables & blobs)
+ * Now send the data (tables & LOs)
*/
WriteDataChunks(AH, NULL);
}
/*
- * BLOB support
+ * Large Object support
*/
/*
* Called by the archiver when starting to save all BLOB DATA (not schema).
* This routine should save whatever format-specific information is needed
- * to read the BLOBs back into memory.
+ * to read the LOs back into memory.
*
* It is called just prior to the dumper's DataDumper routine.
*
*
*/
static void
-_StartBlobs(ArchiveHandle *AH, TocEntry *te)
+_StartLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
char fname[K_STD_BUF_SIZE];
sprintf(fname, "blobs.toc");
- ctx->blobToc = tarOpen(AH, fname, 'w');
+ ctx->loToc = tarOpen(AH, fname, 'w');
}
/*
- * Called by the archiver when the dumper calls StartBlob.
+ * Called by the archiver when the dumper calls StartLO.
*
* Mandatory.
*
* Must save the passed OID for retrieval at restore-time.
*/
static void
-_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
sprintf(fname, "blob_%u.dat", oid);
- tarPrintf(ctx->blobToc, "%u %s\n", oid, fname);
+ tarPrintf(ctx->loToc, "%u %s\n", oid, fname);
tctx->TH = tarOpen(AH, fname, 'w');
}
/*
- * Called by the archiver when the dumper calls EndBlob.
+ * Called by the archiver when the dumper calls EndLO.
*
* Optional.
*
*/
static void
-_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
*
*/
static void
-_EndBlobs(ArchiveHandle *AH, TocEntry *te)
+_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
- /* Write out a fake zero OID to mark end-of-blobs. */
+ /* Write out a fake zero OID to mark end-of-LOs. */
/* WriteInt(AH, 0); */
- tarClose(AH, ctx->blobToc);
+ tarClose(AH, ctx->loToc);
}
static char *getFormattedOperatorName(const char *oproid);
static char *convertTSFunction(Archive *fout, Oid funcOid);
static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
-static void getBlobs(Archive *fout);
-static void dumpBlob(Archive *fout, const BlobInfo *binfo);
-static int dumpBlobs(Archive *fout, const void *arg);
+static void getLOs(Archive *fout);
+static void dumpLO(Archive *fout, const LoInfo *binfo);
+static int dumpLOs(Archive *fout, const void *arg);
static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo);
static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo);
static struct option long_options[] = {
{"data-only", no_argument, NULL, 'a'},
{"blobs", no_argument, NULL, 'b'},
+ {"large-objects", no_argument, NULL, 'b'},
{"no-blobs", no_argument, NULL, 'B'},
+ {"no-large-objects", no_argument, NULL, 'B'},
{"clean", no_argument, NULL, 'c'},
{"create", no_argument, NULL, 'C'},
{"dbname", required_argument, NULL, 'd'},
dopt.dataOnly = true;
break;
- case 'b': /* Dump blobs */
- dopt.outputBlobs = true;
+ case 'b': /* Dump LOs */
+ dopt.outputLOs = true;
break;
- case 'B': /* Don't dump blobs */
- dopt.dontOutputBlobs = true;
+ case 'B': /* Don't dump LOs */
+ dopt.dontOutputLOs = true;
break;
case 'c': /* clean (i.e., drop) schema prior to create */
}
/*
- * Dumping blobs is the default for dumps where an inclusion switch is not
- * used (an "include everything" dump). -B can be used to exclude blobs
- * from those dumps. -b can be used to include blobs even when an
+ * Dumping LOs is the default for dumps where an inclusion switch is not
+ * used (an "include everything" dump). -B can be used to exclude LOs
+ * from those dumps. -b can be used to include LOs even when an
* inclusion switch is used.
*
- * -s means "schema only" and blobs are data, not schema, so we never
- * include blobs when -s is used.
+ * -s means "schema only" and LOs are data, not schema, so we never
+ * include LOs when -s is used.
*/
- if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
- dopt.outputBlobs = true;
+ if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputLOs)
+ dopt.outputLOs = true;
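Spelled out as invocations (database and schema names hypothetical), the rules above work out to:

    pg_dump mydb                        large objects included (the default)
    pg_dump -B mydb                     excluded
    pg_dump -n some_schema mydb         excluded (not an "include everything" dump)
    pg_dump -n some_schema -b mydb      included again
    pg_dump -s mydb                     always excluded (schema only)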
/*
* Collect role names so we can map object owner OIDs to names.
getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
/*
- * In binary-upgrade mode, we do not have to worry about the actual blob
+ * In binary-upgrade mode, we do not have to worry about the actual LO
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
*
- * However, we do need to collect blob information as there may be
- * comments or other information on blobs that we do need to dump out.
+ * However, we do need to collect LO information as there may be
+ * comments or other information on LOs that we do need to dump out.
*/
- if (dopt.outputBlobs || dopt.binary_upgrade)
- getBlobs(fout);
+ if (dopt.outputLOs || dopt.binary_upgrade)
+ getLOs(fout);
/*
* Collect dependency data to assist in ordering the objects.
printf(_("\nOptions controlling the output content:\n"));
printf(_(" -a, --data-only dump only the data, not the schema\n"));
- printf(_(" -b, --blobs include large objects in dump\n"));
- printf(_(" -B, --no-blobs exclude large objects in dump\n"));
+ printf(_(" -b, --large-objects, --blobs\n"
+ " include large objects in dump\n"));
+ printf(_(" -B, --no-large-objects, --no-blobs\n"
+ " exclude large objects in dump\n"));
printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
printf(_(" -C, --create include commands to create database in dump\n"));
printf(_(" -e, --extension=PATTERN dump the specified extension(s) only\n"));
/*
- * getBlobs:
+ * getLOs:
* Collect schema-level data about large objects
*/
static void
-getBlobs(Archive *fout)
+getLOs(Archive *fout)
{
DumpOptions *dopt = fout->dopt;
- PQExpBuffer blobQry = createPQExpBuffer();
- BlobInfo *binfo;
- DumpableObject *bdata;
+ PQExpBuffer loQry = createPQExpBuffer();
+ LoInfo *loinfo;
+ DumpableObject *lodata;
PGresult *res;
int ntups;
int i;
pg_log_info("reading large objects");
- /* Fetch BLOB OIDs, and owner/ACL data */
- appendPQExpBufferStr(blobQry,
+ /* Fetch LO OIDs, and owner/ACL data */
+ appendPQExpBufferStr(loQry,
"SELECT oid, lomowner, lomacl, "
"acldefault('L', lomowner) AS acldefault "
"FROM pg_largeobject_metadata");
- res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
+ res = ExecuteSqlQuery(fout, loQry->data, PGRES_TUPLES_OK);
i_oid = PQfnumber(res, "oid");
i_lomowner = PQfnumber(res, "lomowner");
ntups = PQntuples(res);
/*
- * Each large object has its own BLOB archive entry.
+ * Each large object has its own "BLOB" archive entry.
*/
- binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
+ loinfo = (LoInfo *) pg_malloc(ntups * sizeof(LoInfo));
for (i = 0; i < ntups; i++)
{
- binfo[i].dobj.objType = DO_BLOB;
- binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
- binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
- AssignDumpId(&binfo[i].dobj);
+ loinfo[i].dobj.objType = DO_LARGE_OBJECT;
+ loinfo[i].dobj.catId.tableoid = LargeObjectRelationId;
+ loinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+ AssignDumpId(&loinfo[i].dobj);
- binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
- binfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lomacl));
- binfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
- binfo[i].dacl.privtype = 0;
- binfo[i].dacl.initprivs = NULL;
- binfo[i].rolname = getRoleName(PQgetvalue(res, i, i_lomowner));
+ loinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
+ loinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lomacl));
+ loinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
+ loinfo[i].dacl.privtype = 0;
+ loinfo[i].dacl.initprivs = NULL;
+ loinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_lomowner));
- /* Blobs have data */
- binfo[i].dobj.components |= DUMP_COMPONENT_DATA;
+ /* LOs have data */
+ loinfo[i].dobj.components |= DUMP_COMPONENT_DATA;
- /* Mark whether blob has an ACL */
+ /* Mark whether LO has an ACL */
if (!PQgetisnull(res, i, i_lomacl))
- binfo[i].dobj.components |= DUMP_COMPONENT_ACL;
+ loinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
/*
- * In binary-upgrade mode for blobs, we do *not* dump out the blob
+ * In binary-upgrade mode for LOs, we do *not* dump out the LO
* data, as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
* pg_largeobject_metadata, after the dump is restored.
*/
if (dopt->binary_upgrade)
- binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
+ loinfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
}
/*
*/
if (ntups > 0)
{
- bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
- bdata->objType = DO_BLOB_DATA;
- bdata->catId = nilCatalogId;
- AssignDumpId(bdata);
- bdata->name = pg_strdup("BLOBS");
- bdata->components |= DUMP_COMPONENT_DATA;
+ lodata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
+ lodata->objType = DO_LARGE_OBJECT_DATA;
+ lodata->catId = nilCatalogId;
+ AssignDumpId(lodata);
+ lodata->name = pg_strdup("BLOBS");
+ lodata->components |= DUMP_COMPONENT_DATA;
}
PQclear(res);
- destroyPQExpBuffer(blobQry);
+ destroyPQExpBuffer(loQry);
}
/*
- * dumpBlob
+ * dumpLO
*
* dump the definition (metadata) of the given large object
*/
static void
-dumpBlob(Archive *fout, const BlobInfo *binfo)
+dumpLO(Archive *fout, const LoInfo *loinfo)
{
PQExpBuffer cquery = createPQExpBuffer();
PQExpBuffer dquery = createPQExpBuffer();
appendPQExpBuffer(cquery,
"SELECT pg_catalog.lo_create('%s');\n",
- binfo->dobj.name);
+ loinfo->dobj.name);
appendPQExpBuffer(dquery,
"SELECT pg_catalog.lo_unlink('%s');\n",
- binfo->dobj.name);
+ loinfo->dobj.name);
- if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
- ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
- ARCHIVE_OPTS(.tag = binfo->dobj.name,
- .owner = binfo->rolname,
+ if (loinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
+ ArchiveEntry(fout, loinfo->dobj.catId, loinfo->dobj.dumpId,
+ ARCHIVE_OPTS(.tag = loinfo->dobj.name,
+ .owner = loinfo->rolname,
.description = "BLOB",
.section = SECTION_PRE_DATA,
.createStmt = cquery->data,
.dropStmt = dquery->data));
/* Dump comment if any */
- if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
- dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
- NULL, binfo->rolname,
- binfo->dobj.catId, 0, binfo->dobj.dumpId);
+ if (loinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+ dumpComment(fout, "LARGE OBJECT", loinfo->dobj.name,
+ NULL, loinfo->rolname,
+ loinfo->dobj.catId, 0, loinfo->dobj.dumpId);
/* Dump security label if any */
- if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
- dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
- NULL, binfo->rolname,
- binfo->dobj.catId, 0, binfo->dobj.dumpId);
+ if (loinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
+ dumpSecLabel(fout, "LARGE OBJECT", loinfo->dobj.name,
+ NULL, loinfo->rolname,
+ loinfo->dobj.catId, 0, loinfo->dobj.dumpId);
/* Dump ACL if any */
- if (binfo->dobj.dump & DUMP_COMPONENT_ACL)
- dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
- binfo->dobj.name, NULL,
- NULL, binfo->rolname, &binfo->dacl);
+ if (loinfo->dobj.dump & DUMP_COMPONENT_ACL)
+ dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
+ loinfo->dobj.name, NULL,
+ NULL, loinfo->rolname, &loinfo->dacl);
destroyPQExpBuffer(cquery);
destroyPQExpBuffer(dquery);
}
/*
- * dumpBlobs:
+ * dumpLOs:
* dump the data contents of all large objects
*/
static int
-dumpBlobs(Archive *fout, const void *arg)
+dumpLOs(Archive *fout, const void *arg)
{
- const char *blobQry;
- const char *blobFetchQry;
+ const char *loQry;
+ const char *loFetchQry;
PGconn *conn = GetConnection(fout);
PGresult *res;
char buf[LOBBUFSIZE];
pg_log_info("saving large objects");
/*
- * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
+ * Currently, we re-fetch all LO OIDs using a cursor. Consider scanning
* the already-in-memory dumpable objects instead...
*/
- blobQry =
- "DECLARE bloboid CURSOR FOR "
+ loQry =
+ "DECLARE looid CURSOR FOR "
"SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
- ExecuteSqlStatement(fout, blobQry);
+ ExecuteSqlStatement(fout, loQry);
/* Command to fetch from cursor */
- blobFetchQry = "FETCH 1000 IN bloboid";
+ loFetchQry = "FETCH 1000 IN looid";
do
{
/* Do a fetch */
- res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
+ res = ExecuteSqlQuery(fout, loFetchQry, PGRES_TUPLES_OK);
/* Process the tuples, if any */
ntups = PQntuples(res);
for (i = 0; i < ntups; i++)
{
- Oid blobOid;
+ Oid loOid;
int loFd;
- blobOid = atooid(PQgetvalue(res, i, 0));
- /* Open the BLOB */
- loFd = lo_open(conn, blobOid, INV_READ);
+ loOid = atooid(PQgetvalue(res, i, 0));
+ /* Open the LO */
+ loFd = lo_open(conn, loOid, INV_READ);
if (loFd == -1)
pg_fatal("could not open large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ loOid, PQerrorMessage(conn));
- StartBlob(fout, blobOid);
+ StartLO(fout, loOid);
/* Now read it in chunks, sending data to archive */
do
cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
if (cnt < 0)
pg_fatal("error reading large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ loOid, PQerrorMessage(conn));
WriteData(fout, buf, cnt);
} while (cnt > 0);
lo_close(conn, loFd);
- EndBlob(fout, blobOid);
+ EndLO(fout, loOid);
}
PQclear(res);
if (dopt->no_comments)
return;
- /* Comments are schema not data ... except blob comments are data */
+ /* Comments are schema not data ... except LO comments are data */
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
}
else
{
- /* We do dump blob comments in binary-upgrade mode */
+ /* We do dump LO comments in binary-upgrade mode */
if (dopt->schemaOnly && !dopt->binary_upgrade)
return;
}
case DO_DEFAULT_ACL:
dumpDefaultACL(fout, (const DefaultACLInfo *) dobj);
break;
- case DO_BLOB:
- dumpBlob(fout, (const BlobInfo *) dobj);
+ case DO_LARGE_OBJECT:
+ dumpLO(fout, (const LoInfo *) dobj);
break;
- case DO_BLOB_DATA:
+ case DO_LARGE_OBJECT_DATA:
if (dobj->dump & DUMP_COMPONENT_DATA)
{
TocEntry *te;
ARCHIVE_OPTS(.tag = dobj->name,
.description = "BLOBS",
.section = SECTION_DATA,
- .dumpFn = dumpBlobs));
+ .dumpFn = dumpLOs));
/*
* Set the TocEntry's dataLength in case we are doing a
* parallel dump and want to order dump jobs by table size.
* (We need some size estimate for every TocEntry with a
* DataDumper function.) We don't currently have any cheap
- * way to estimate the size of blobs, but it doesn't matter;
+ * way to estimate the size of LOs, but it doesn't matter;
* let's just set the size to a large value so parallel dumps
- * will launch this job first. If there's lots of blobs, we
+ * will launch this job first. If there's lots of LOs, we
* win, and if there aren't, we don't lose much. (If you want
* to improve on this, really what you should be thinking
- * about is allowing blob dumping to be parallelized, not just
+ * about is allowing LO dumping to be parallelized, not just
* getting a smarter estimate for the single TOC entry.)
*/
te->dataLength = INT_MAX;
if (dopt->aclsSkip)
return InvalidDumpId;
- /* --data-only skips ACLs *except* BLOB ACLs */
+ /* --data-only skips ACLs *except* large object ACLs */
if (dopt->dataOnly && strcmp(type, "LARGE OBJECT") != 0)
return InvalidDumpId;
if (dopt->no_security_labels)
return;
- /* Security labels are schema not data ... except blob labels are data */
+ /* Security labels are schema not data ... except large object labels are data */
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
}
else
{
- /* We do dump blob security labels in binary-upgrade mode */
+ /* We do dump large object security labels in binary-upgrade mode */
if (dopt->schemaOnly && !dopt->binary_upgrade)
return;
}
case DO_FDW:
case DO_FOREIGN_SERVER:
case DO_TRANSFORM:
- case DO_BLOB:
+ case DO_LARGE_OBJECT:
/* Pre-data objects: must come before the pre-data boundary */
addObjectDependency(preDataBound, dobj->dumpId);
break;
case DO_TABLE_DATA:
case DO_SEQUENCE_SET:
- case DO_BLOB_DATA:
+ case DO_LARGE_OBJECT_DATA:
/* Data objects: must come between the boundaries */
addObjectDependency(dobj, preDataBound->dumpId);
addObjectDependency(postDataBound, dobj->dumpId);
DO_FOREIGN_SERVER,
DO_DEFAULT_ACL,
DO_TRANSFORM,
- DO_BLOB,
- DO_BLOB_DATA,
+ DO_LARGE_OBJECT,
+ DO_LARGE_OBJECT_DATA,
DO_PRE_DATA_BOUNDARY,
DO_POST_DATA_BOUNDARY,
DO_EVENT_TRIGGER,
char defaclobjtype;
} DefaultACLInfo;
-typedef struct _blobInfo
+typedef struct _loInfo
{
DumpableObject dobj;
DumpableAcl dacl;
const char *rolname;
-} BlobInfo;
+} LoInfo;
/*
* The PolicyInfo struct is used to represent policies on a table and
PRIO_TABLE_ATTACH,
PRIO_DUMMY_TYPE,
PRIO_ATTRDEF,
- PRIO_BLOB,
+ PRIO_LARGE_OBJECT,
PRIO_PRE_DATA_BOUNDARY, /* boundary! */
PRIO_TABLE_DATA,
PRIO_SEQUENCE_SET,
- PRIO_BLOB_DATA,
+ PRIO_LARGE_OBJECT_DATA,
PRIO_POST_DATA_BOUNDARY, /* boundary! */
PRIO_CONSTRAINT,
PRIO_INDEX,
PRIO_FOREIGN_SERVER, /* DO_FOREIGN_SERVER */
PRIO_DEFAULT_ACL, /* DO_DEFAULT_ACL */
PRIO_TRANSFORM, /* DO_TRANSFORM */
- PRIO_BLOB, /* DO_BLOB */
- PRIO_BLOB_DATA, /* DO_BLOB_DATA */
+ PRIO_LARGE_OBJECT, /* DO_LARGE_OBJECT */
+ PRIO_LARGE_OBJECT_DATA, /* DO_LARGE_OBJECT_DATA */
PRIO_PRE_DATA_BOUNDARY, /* DO_PRE_DATA_BOUNDARY */
PRIO_POST_DATA_BOUNDARY, /* DO_POST_DATA_BOUNDARY */
PRIO_EVENT_TRIGGER, /* DO_EVENT_TRIGGER */
"DEFAULT ACL %s (ID %d OID %u)",
obj->name, obj->dumpId, obj->catId.oid);
return;
- case DO_BLOB:
+ case DO_LARGE_OBJECT:
snprintf(buf, bufsize,
- "BLOB (ID %d OID %u)",
+ "LARGE OBJECT (ID %d OID %u)",
obj->dumpId, obj->catId.oid);
return;
- case DO_BLOB_DATA:
+ case DO_LARGE_OBJECT_DATA:
snprintf(buf, bufsize,
- "BLOB DATA (ID %d)",
+ "LARGE OBJECT DATA (ID %d)",
obj->dumpId);
return;
case DO_POLICY:
'--no-toast-compression', 'postgres',
],
},
- no_blobs => {
+ no_large_objects => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_blobs.sql", '-B',
+ "--file=$tempdir/no_large_objects.sql", '-B',
'postgres',
],
},
'--section=post-data', '--no-sync', 'postgres',
],
},
- test_schema_plus_blobs => {
+ test_schema_plus_large_objects => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/test_schema_plus_blobs.sql",
+ 'pg_dump', "--file=$tempdir/test_schema_plus_large_objects.sql",
'--schema=dump_test', '-b', '-B', '--no-sync', 'postgres',
],
# Tests which target the 'dump_test' schema, specifically.
my %dump_test_schema_runs = (
only_dump_test_schema => 1,
- test_schema_plus_blobs => 1,);
+ test_schema_plus_large_objects => 1,);
# Tests which are considered 'full' dumps by pg_dump, but there
-# are flags used to exclude specific items (ACLs, blobs, etc).
+# are flags used to exclude specific items (ACLs, LOs, etc).
my %full_runs = (
binary_upgrade => 1,
clean => 1,
exclude_test_table => 1,
exclude_test_table_data => 1,
no_toast_compression => 1,
- no_blobs => 1,
+ no_large_objects => 1,
no_owner => 1,
no_privs => 1,
no_table_access_method => 1,
data_only => 1,
inserts => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
- no_blobs => 1,
+ no_large_objects => 1,
no_owner => 1,
schema_only => 1,
},
},
},
- 'BLOB create (using lo_from_bytea)' => {
+ 'LO create (using lo_from_bytea)' => {
create_order => 50,
create_sql =>
'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');',
data_only => 1,
inserts => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
schema_only => 1,
- no_blobs => 1,
+ no_large_objects => 1,
},
},
- 'BLOB load (using lo_from_bytea)' => {
+ 'LO load (using lo_from_bytea)' => {
regexp => qr/^
\QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n
\QSELECT pg_catalog.lowrite(0, \E
data_only => 1,
inserts => 1,
section_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
binary_upgrade => 1,
- no_blobs => 1,
+ no_large_objects => 1,
schema_only => 1,
},
},
data_only => 1,
inserts => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
- no_blobs => 1,
+ no_large_objects => 1,
schema_only => 1,
},
},
exclude_test_table => 1,
exclude_test_table_data => 1,
no_toast_compression => 1,
- no_blobs => 1,
+ no_large_objects => 1,
no_privs => 1,
no_owner => 1,
no_table_access_method => 1,
pg_dumpall_exclude => 1,
schema_only => 1,
section_post_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
exclude_dump_test_schema => 1,
exclude_test_table => 1,
exclude_test_table_data => 1,
no_toast_compression => 1,
- no_blobs => 1,
+ no_large_objects => 1,
no_privs => 1,
no_owner => 1,
no_table_access_method => 1,
pg_dumpall_globals => 1,
pg_dumpall_globals_clean => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
},
data_only => 1,
inserts => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
binary_upgrade => 1,
},
unlike => {
- no_blobs => 1,
+ no_large_objects => 1,
no_privs => 1,
schema_only => 1,
},
# as the regexps are used for each run the test applies to.
# Tests which are considered 'full' dumps by pg_dump, but there
-# are flags used to exclude specific items (ACLs, blobs, etc).
+# are flags used to exclude specific items (ACLs, LOs, etc).
my %full_runs = (
binary_upgrade => 1,
clean => 1,