Line data Source code
1 : /*
2 : * check.c
3 : *
4 : * server checks and output routines
5 : *
6 : * Copyright (c) 2010-2026, PostgreSQL Global Development Group
7 : * src/bin/pg_upgrade/check.c
8 : */
9 :
10 : #include "postgres_fe.h"
11 :
12 : #include "catalog/pg_am_d.h"
13 : #include "catalog/pg_authid_d.h"
14 : #include "catalog/pg_class_d.h"
15 : #include "fe_utils/string_utils.h"
16 : #include "pg_upgrade.h"
17 : #include "common/unicode_version.h"
18 :
19 : static void check_new_cluster_is_empty(void);
20 : static void check_is_install_user(ClusterInfo *cluster);
21 : static void check_for_connection_status(ClusterInfo *cluster);
22 : static void check_for_prepared_transactions(ClusterInfo *cluster);
23 : static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster);
24 : static void check_for_user_defined_postfix_ops(ClusterInfo *cluster);
25 : static void check_for_incompatible_polymorphics(ClusterInfo *cluster);
26 : static void check_for_tables_with_oids(ClusterInfo *cluster);
27 : static void check_for_not_null_inheritance(ClusterInfo *cluster);
28 : static void check_for_gist_inet_ops(ClusterInfo *cluster);
29 : static void check_for_pg_role_prefix(ClusterInfo *cluster);
30 : static void check_for_new_tablespace_dir(void);
31 : static void check_for_user_defined_encoding_conversions(ClusterInfo *cluster);
32 : static void check_for_unicode_update(ClusterInfo *cluster);
33 : static void check_new_cluster_replication_slots(void);
34 : static void check_new_cluster_subscription_configuration(void);
35 : static void check_old_cluster_for_valid_slots(void);
36 : static void check_old_cluster_subscription_state(void);
37 :
38 : /*
39 : * DataTypesUsageChecks - definitions of data type checks for the old cluster
40 : * in order to determine if an upgrade can be performed. See the comment on
41 : * data_types_usage_checks below for a more detailed description.
42 : */
43 : typedef struct
44 : {
45 : /* Status line to print to the user */
46 : const char *status;
47 : /* Filename to store report to */
48 : const char *report_filename;
49 : /* Query to extract the oid of the datatype */
50 : const char *base_query;
51 : /* Text to store to report in case of error */
52 : const char *report_text;
53 : /* The latest version where the check applies */
54 : int threshold_version;
55 : /* A function pointer for determining if the check applies */
56 : DataTypesUsageVersionCheck version_hook;
57 : } DataTypesUsageChecks;
58 :
59 : /*
60 : * Special values for threshold_version for indicating that a check applies to
61 : * all versions, or that a custom function needs to be invoked to determine
62 : * if the check applies.
63 : */
64 : #define MANUAL_CHECK 1
65 : #define ALL_VERSIONS -1
66 :
67 : /*--
68 : * Data type usage checks. Each check for problematic data type usage is
69 : * defined in this array with metadata, SQL query for finding the data type
70 : * and functionality for deciding if the check is applicable to the version
71 : * of the old cluster. The struct members are described in detail below:
72 : *
73 : * status A one-line string which can be printed to the user to
74 : * inform about progress. Should not end with newline.
75 : * report_filename The filename in which the list of problems detected by
76 : * the check will be printed.
77 : * base_query A query which extracts the Oid of the datatype checked
78 : * for.
79 : * report_text The text which will be printed to the user to explain
80 : * what the check did, and why it failed. The text should
81 : * end with a newline, and does not need to refer to the
82 : * report_filename as that is automatically appended to
83 : * the report with the path to the log folder.
84 : * threshold_version The major version of PostgreSQL for which to run the
85 : * check. Iff the old cluster is less than, or equal to,
86 : * the threshold version then the check will be executed.
87 : * If the old version is greater than the threshold then
88 : * the check is skipped. If the threshold_version is set
89 : * to ALL_VERSIONS then it will be run unconditionally,
90 : * if set to MANUAL_CHECK then the version_hook function
91 : * will be executed in order to determine whether or not
92 : * to run.
93 : * version_hook A function pointer to a version check function of type
94 : * DataTypesUsageVersionCheck which is used to determine
95 : * if the check is applicable to the old cluster. If the
96 : * version_hook returns true then the check will be run,
97 : * else it will be skipped. The function will only be
98 : * executed iff threshold_version is set to MANUAL_CHECK.
99 : */
100 : static DataTypesUsageChecks data_types_usage_checks[] =
101 : {
102 : /*
103 : * Look for composite types that were made during initdb *or* belong to
104 : * information_schema; that's important in case information_schema was
105 : * dropped and reloaded.
106 : *
107 : * The cutoff OID here should match the source cluster's value of
108 : * FirstNormalObjectId. We hardcode it rather than using that C #define
109 : * because, if that #define is ever changed, our own version's value is
110 : * NOT what to use. Eventually we may need a test on the source cluster's
111 : * version to select the correct value.
112 : */
113 : {
114 : .status = gettext_noop("Checking for system-defined composite types in user tables"),
115 : .report_filename = "tables_using_composite.txt",
116 : .base_query =
117 : "SELECT t.oid FROM pg_catalog.pg_type t "
118 : "LEFT JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid "
119 : " WHERE typtype = 'c' AND (t.oid < 16384 OR nspname = 'information_schema')",
120 : .report_text =
121 : gettext_noop("Your installation contains system-defined composite types in user tables.\n"
122 : "These type OIDs are not stable across PostgreSQL versions,\n"
123 : "so this cluster cannot currently be upgraded. You can drop the\n"
124 : "problem columns and restart the upgrade.\n"),
125 : .threshold_version = ALL_VERSIONS
126 : },
127 :
128 : /*
129 : * 9.3 -> 9.4 Fully implement the 'line' data type in 9.4, which
130 : * previously returned "not enabled" by default and was only functionally
131 : * enabled with a compile-time switch; as of 9.4 "line" has a different
132 : * on-disk representation format.
133 : */
134 : {
135 : .status = gettext_noop("Checking for incompatible \"line\" data type"),
136 : .report_filename = "tables_using_line.txt",
137 : .base_query =
138 : "SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid",
139 : .report_text =
140 : gettext_noop("Your installation contains the \"line\" data type in user tables.\n"
141 : "This data type changed its internal and input/output format\n"
142 : "between your old and new versions so this\n"
143 : "cluster cannot currently be upgraded. You can\n"
144 : "drop the problem columns and restart the upgrade.\n"),
145 : .threshold_version = 903
146 : },
147 :
148 : /*
149 : * pg_upgrade only preserves these system values: pg_class.oid, pg_type.oid,
150 : * and pg_enum.oid.
151 : *
152 : * Many of the reg* data types reference system catalog info that is not
153 : * preserved, and hence these data types cannot be used in user tables
154 : * upgraded by pg_upgrade.
155 : */
156 : {
157 : .status = gettext_noop("Checking for reg* data types in user tables"),
158 : .report_filename = "tables_using_reg.txt",
159 :
160 : /*
161 : * Note: older servers will not have all of these reg* types, so we
162 : * have to write the query like this rather than depending on casts to
163 : * regtype.
164 : */
165 : .base_query =
166 : "SELECT oid FROM pg_catalog.pg_type t "
167 : "WHERE t.typnamespace = "
168 : " (SELECT oid FROM pg_catalog.pg_namespace "
169 : " WHERE nspname = 'pg_catalog') "
170 : " AND t.typname IN ( "
171 : /* pg_class.oid is preserved, so 'regclass' is OK */
172 : " 'regcollation', "
173 : " 'regconfig', "
174 : /* pg_database.oid is preserved, so 'regdatabase' is OK */
175 : " 'regdictionary', "
176 : " 'regnamespace', "
177 : " 'regoper', "
178 : " 'regoperator', "
179 : " 'regproc', "
180 : " 'regprocedure' "
181 : /* pg_authid.oid is preserved, so 'regrole' is OK */
182 : /* pg_type.oid is (mostly) preserved, so 'regtype' is OK */
183 : " )",
184 : .report_text =
185 : gettext_noop("Your installation contains one of the reg* data types in user tables.\n"
186 : "These data types reference system OIDs that are not preserved by\n"
187 : "pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
188 : "drop the problem columns and restart the upgrade.\n"),
189 : .threshold_version = ALL_VERSIONS
190 : },
191 :
192 : /*
193 : * PG 16 increased the size of the 'aclitem' type, which breaks the
194 : * on-disk format for existing data.
195 : */
196 : {
197 : .status = gettext_noop("Checking for incompatible \"aclitem\" data type"),
198 : .report_filename = "tables_using_aclitem.txt",
199 : .base_query =
200 : "SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid",
201 : .report_text =
202 : gettext_noop("Your installation contains the \"aclitem\" data type in user tables.\n"
203 : "The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
204 : "so this cluster cannot currently be upgraded. You can drop the\n"
205 : "problem columns and restart the upgrade.\n"),
206 : .threshold_version = 1500
207 : },
208 :
209 : /*
210 : * It's no longer allowed to create tables or views with "unknown"-type
211 : * columns. We do not complain about views with such columns, because
212 : * they should get silently converted to "text" columns during the DDL
213 : * dump and reload; it seems unlikely to be worth making users do that by
214 : * hand. However, if there's a table with such a column, the DDL reload
215 : * will fail, so we should pre-detect that rather than failing
216 : * mid-upgrade. Worse, if there's a matview with such a column, the DDL
217 : * reload will silently change it to "text" which won't match the on-disk
218 : * storage (which is like "cstring"). So we *must* reject that.
219 : */
220 : {
221 : .status = gettext_noop("Checking for invalid \"unknown\" user columns"),
222 : .report_filename = "tables_using_unknown.txt",
223 : .base_query =
224 : "SELECT 'pg_catalog.unknown'::pg_catalog.regtype AS oid",
225 : .report_text =
226 : gettext_noop("Your installation contains the \"unknown\" data type in user tables.\n"
227 : "This data type is no longer allowed in tables, so this cluster\n"
228 : "cannot currently be upgraded. You can drop the problem columns\n"
229 : "and restart the upgrade.\n"),
230 : .threshold_version = 906
231 : },
232 :
233 : /*
234 : * PG 12 changed the 'sql_identifier' type storage to be based on name,
235 : * not varchar, which breaks on-disk format for existing data. So we need
236 : * to prevent upgrade when used in user objects (tables, indexes, ...). In
237 : * 12, the sql_identifier data type was switched from varchar to name,
238 : * which does affect the storage (name is by-ref, but not varlena). This
239 : * means user tables using sql_identifier for columns are broken because
240 : * the on-disk format is different.
241 : */
242 : {
243 : .status = gettext_noop("Checking for invalid \"sql_identifier\" user columns"),
244 : .report_filename = "tables_using_sql_identifier.txt",
245 : .base_query =
246 : "SELECT 'information_schema.sql_identifier'::pg_catalog.regtype AS oid",
247 : .report_text =
248 : gettext_noop("Your installation contains the \"sql_identifier\" data type in user tables.\n"
249 : "The on-disk format for this data type has changed, so this\n"
250 : "cluster cannot currently be upgraded. You can drop the problem\n"
251 : "columns and restart the upgrade.\n"),
252 : .threshold_version = 1100
253 : },
254 :
255 : /*
256 : * JSONB changed its storage format during 9.4 beta, so check for it.
257 : */
258 : {
259 : .status = gettext_noop("Checking for incompatible \"jsonb\" data type in user tables"),
260 : .report_filename = "tables_using_jsonb.txt",
261 : .base_query =
262 : "SELECT 'pg_catalog.jsonb'::pg_catalog.regtype AS oid",
263 : .report_text =
264 : gettext_noop("Your installation contains the \"jsonb\" data type in user tables.\n"
265 : "The internal format of \"jsonb\" changed during 9.4 beta so this\n"
266 : "cluster cannot currently be upgraded. You can drop the problem \n"
267 : "columns and restart the upgrade.\n"),
268 : .threshold_version = MANUAL_CHECK,
269 : .version_hook = jsonb_9_4_check_applicable
270 : },
271 :
272 : /*
273 : * PG 12 removed types abstime, reltime, tinterval.
274 : */
275 : {
276 : .status = gettext_noop("Checking for removed \"abstime\" data type in user tables"),
277 : .report_filename = "tables_using_abstime.txt",
278 : .base_query =
279 : "SELECT 'pg_catalog.abstime'::pg_catalog.regtype AS oid",
280 : .report_text =
281 : gettext_noop("Your installation contains the \"abstime\" data type in user tables.\n"
282 : "The \"abstime\" type has been removed in PostgreSQL version 12,\n"
283 : "so this cluster cannot currently be upgraded. You can drop the\n"
284 : "problem columns, or change them to another data type, and restart\n"
285 : "the upgrade.\n"),
286 : .threshold_version = 1100
287 : },
288 : {
289 : .status = gettext_noop("Checking for removed \"reltime\" data type in user tables"),
290 : .report_filename = "tables_using_reltime.txt",
291 : .base_query =
292 : "SELECT 'pg_catalog.reltime'::pg_catalog.regtype AS oid",
293 : .report_text =
294 : gettext_noop("Your installation contains the \"reltime\" data type in user tables.\n"
295 : "The \"reltime\" type has been removed in PostgreSQL version 12,\n"
296 : "so this cluster cannot currently be upgraded. You can drop the\n"
297 : "problem columns, or change them to another data type, and restart\n"
298 : "the upgrade.\n"),
299 : .threshold_version = 1100
300 : },
301 : {
302 : .status = gettext_noop("Checking for removed \"tinterval\" data type in user tables"),
303 : .report_filename = "tables_using_tinterval.txt",
304 : .base_query =
305 : "SELECT 'pg_catalog.tinterval'::pg_catalog.regtype AS oid",
306 : .report_text =
307 : gettext_noop("Your installation contains the \"tinterval\" data type in user tables.\n"
308 : "The \"tinterval\" type has been removed in PostgreSQL version 12,\n"
309 : "so this cluster cannot currently be upgraded. You can drop the\n"
310 : "problem columns, or change them to another data type, and restart\n"
311 : "the upgrade.\n"),
312 : .threshold_version = 1100
313 : },
314 :
315 : /* End of checks marker, must remain last */
316 : {
317 : NULL, NULL, NULL, NULL, 0, NULL
318 : }
319 : };
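/*
 * For illustration only: a new check would be added as one more initializer
 * placed just before the end-of-checks marker above.  The entry below is a
 * hypothetical sketch (the type name, filename, message and threshold are
 * invented) showing how the struct members documented earlier fit together.
 *
 *	{
 *		.status = gettext_noop("Checking for \"sometype\" data type in user tables"),
 *		.report_filename = "tables_using_sometype.txt",
 *		.base_query =
 *			"SELECT 'pg_catalog.sometype'::pg_catalog.regtype AS oid",
 *		.report_text =
 *			gettext_noop("Your installation contains the \"sometype\" data type in user tables.\n"
 *						 "You can drop the problem columns and restart the upgrade.\n"),
 *		.threshold_version = 1600
 *	},
 *
 * With that threshold the check runs only when the old cluster is PG 16 or
 * older; ALL_VERSIONS and MANUAL_CHECK behave as documented above.
 */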
320 :
321 : /*
322 : * Private state for check_for_data_types_usage()'s UpgradeTask.
323 : */
324 : struct data_type_check_state
325 : {
326 : DataTypesUsageChecks *check; /* the check for this step */
327 : bool result; /* true if check failed for any database */
328 : PQExpBuffer *report; /* buffer for report on failed checks */
329 : };
330 :
331 : /*
332 : * Returns a palloc'd query string for the data type check, for use by
333 : * check_for_data_types_usage()'s UpgradeTask.
334 : */
335 : static char *
336 0 : data_type_check_query(int checknum)
337 : {
338 0 : DataTypesUsageChecks *check = &data_types_usage_checks[checknum];
339 :
340 0 : return psprintf("WITH RECURSIVE oids AS ( "
341 : /* start with the type(s) returned by base_query */
342 : " %s "
343 : " UNION ALL "
344 : " SELECT * FROM ( "
345 : /* inner WITH because we can only reference the CTE once */
346 : " WITH x AS (SELECT oid FROM oids) "
347 : /* domains on any type selected so far */
348 : " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typbasetype = x.oid AND typtype = 'd' "
349 : " UNION ALL "
350 : /* arrays over any type selected so far */
351 : " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typelem = x.oid AND typtype = 'b' "
352 : " UNION ALL "
353 : /* composite types containing any type selected so far */
354 : " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_class c, pg_catalog.pg_attribute a, x "
355 : " WHERE t.typtype = 'c' AND "
356 : " t.oid = c.reltype AND "
357 : " c.oid = a.attrelid AND "
358 : " NOT a.attisdropped AND "
359 : " a.atttypid = x.oid "
360 : " UNION ALL "
361 : /* ranges containing any type selected so far */
362 : " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
363 : " WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid"
364 : " ) foo "
365 : ") "
366 : /* now look for stored columns of any such type */
367 : "SELECT n.nspname, c.relname, a.attname "
368 : "FROM pg_catalog.pg_class c, "
369 : " pg_catalog.pg_namespace n, "
370 : " pg_catalog.pg_attribute a "
371 : "WHERE c.oid = a.attrelid AND "
372 : " NOT a.attisdropped AND "
373 : " a.atttypid IN (SELECT oid FROM oids) AND "
374 : " c.relkind IN ("
375 : CppAsString2(RELKIND_RELATION) ", "
376 : CppAsString2(RELKIND_MATVIEW) ", "
377 : CppAsString2(RELKIND_INDEX) ") AND "
378 : " c.relnamespace = n.oid AND "
379 : /* exclude possible orphaned temp tables */
380 : " n.nspname !~ '^pg_temp_' AND "
381 : " n.nspname !~ '^pg_toast_temp_' AND "
382 : /* exclude system catalogs, too */
383 : " n.nspname NOT IN ('pg_catalog', 'information_schema')",
384 0 : check->base_query);
385 0 : }
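/*
 * For illustration only, a rough sketch of the query this builds for the
 * "aclitem" entry (abridged, not the literal generated text): the CTE is
 * seeded by that entry's base_query and then expanded over derived types.
 *
 *	WITH RECURSIVE oids AS (
 *		SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid
 *		UNION ALL
 *		(domains, arrays, composite types and ranges over anything found so far)
 *	)
 *	SELECT n.nspname, c.relname, a.attname
 *	FROM pg_catalog.pg_class c, pg_catalog.pg_namespace n, pg_catalog.pg_attribute a
 *	WHERE a.atttypid IN (SELECT oid FROM oids) AND (relkind and schema filters)
 */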
386 :
387 : /*
388 : * Callback function for processing results of queries for
389 : * check_for_data_types_usage()'s UpgradeTask. If the query returned any rows
390 : * (i.e., the check failed), write the details to the report file.
391 : */
392 : static void
393 0 : process_data_type_check(DbInfo *dbinfo, PGresult *res, void *arg)
394 : {
395 0 : struct data_type_check_state *state = (struct data_type_check_state *) arg;
396 0 : int ntups = PQntuples(res);
397 0 : char output_path[MAXPGPATH];
398 0 : int i_nspname = PQfnumber(res, "nspname");
399 0 : int i_relname = PQfnumber(res, "relname");
400 0 : int i_attname = PQfnumber(res, "attname");
401 0 : FILE *script = NULL;
402 :
403 0 : if (ntups == 0)
404 0 : return;
405 :
406 0 : snprintf(output_path, sizeof(output_path), "%s/%s",
407 0 : log_opts.basedir,
408 0 : state->check->report_filename);
409 :
410 : /*
411 : * Make sure we have a buffer to save reports to now that we found a first
412 : * failing check.
413 : */
414 0 : if (*state->report == NULL)
415 0 : *state->report = createPQExpBuffer();
416 :
417 : /*
418 : * If this is the first time we see an error for the check in question
419 : * then print a status message of the failure.
420 : */
421 0 : if (!state->result)
422 : {
423 0 : pg_log(PG_REPORT, "failed check: %s", _(state->check->status));
424 0 : appendPQExpBuffer(*state->report, "\n%s\n%s\n %s\n",
425 0 : _(state->check->report_text),
426 0 : _("A list of the problem columns is in the file:"),
427 0 : output_path);
428 0 : }
429 0 : state->result = true;
430 :
431 0 : if ((script = fopen_priv(output_path, "a")) == NULL)
432 0 : pg_fatal("could not open file \"%s\": %m", output_path);
433 :
434 0 : fprintf(script, "In database: %s\n", dbinfo->db_name);
435 :
436 0 : for (int rowno = 0; rowno < ntups; rowno++)
437 0 : fprintf(script, " %s.%s.%s\n",
438 0 : PQgetvalue(res, rowno, i_nspname),
439 0 : PQgetvalue(res, rowno, i_relname),
440 0 : PQgetvalue(res, rowno, i_attname));
441 :
442 0 : fclose(script);
443 0 : }
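/*
 * The report file appended to above is a plain per-database listing of the
 * affected columns; a hypothetical excerpt (all names invented) looks like:
 *
 *	In database: accounting
 *	  public.invoices.acl_col
 *	  public.audit_log.acl_col_old
 */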
444 :
445 : /*
446 : * check_for_data_types_usage()
447 : * Detect whether there are any stored columns depending on given type(s)
448 : *
449 : * If so, write a report to the given file name and signal a failure to the
450 : * user.
451 : *
452 : * The checks to run are defined in a DataTypesUsageChecks structure where
453 : * each check has metadata for explaining errors to the user, a base_query,
454 : * a report filename and a function pointer hook for validating if the check
455 : * should be executed given the cluster at hand.
456 : *
457 : * base_query should be a SELECT yielding a single column named "oid",
458 : * containing the pg_type OIDs of one or more types that are known to have
459 : * inconsistent on-disk representations across server versions.
460 : *
461 : * We check for the type(s) in tables, matviews, and indexes, but not views;
462 : * there's no storage involved in a view.
463 : */
464 : static void
465 0 : check_for_data_types_usage(ClusterInfo *cluster)
466 : {
467 0 : PQExpBuffer report = NULL;
468 0 : DataTypesUsageChecks *tmp = data_types_usage_checks;
469 0 : int n_data_types_usage_checks = 0;
470 0 : UpgradeTask *task = upgrade_task_create();
471 0 : char **queries = NULL;
472 0 : struct data_type_check_state *states;
473 :
474 0 : prep_status("Checking data type usage");
475 :
476 : /* Gather number of checks to perform */
477 0 : while (tmp->status != NULL)
478 : {
479 0 : n_data_types_usage_checks++;
480 0 : tmp++;
481 : }
482 :
483 : /* Allocate memory for queries and for task states */
484 0 : queries = pg_malloc0(sizeof(char *) * n_data_types_usage_checks);
485 0 : states = pg_malloc0(sizeof(struct data_type_check_state) * n_data_types_usage_checks);
486 :
487 0 : for (int i = 0; i < n_data_types_usage_checks; i++)
488 : {
489 0 : DataTypesUsageChecks *check = &data_types_usage_checks[i];
490 :
491 0 : if (check->threshold_version == MANUAL_CHECK)
492 : {
493 0 : Assert(check->version_hook);
494 :
495 : /*
496 : * Make sure that the check applies to the current cluster version
497 : * and skip it if not.
498 : */
499 0 : if (!check->version_hook(cluster))
500 0 : continue;
501 0 : }
502 0 : else if (check->threshold_version != ALL_VERSIONS)
503 : {
504 0 : if (GET_MAJOR_VERSION(cluster->major_version) > check->threshold_version)
505 0 : continue;
506 0 : }
507 : else
508 0 : Assert(check->threshold_version == ALL_VERSIONS);
509 :
510 0 : queries[i] = data_type_check_query(i);
511 :
512 0 : states[i].check = check;
513 0 : states[i].report = &report;
514 :
515 0 : upgrade_task_add_step(task, queries[i], process_data_type_check,
516 0 : true, &states[i]);
517 0 : }
518 :
519 : /*
520 : * Connect to each database in the cluster and run all defined checks
521 : * against that database before trying the next one.
522 : */
523 0 : upgrade_task_run(task, cluster);
524 0 : upgrade_task_free(task);
525 :
526 0 : if (report)
527 : {
528 0 : pg_fatal("Data type checks failed: %s", report->data);
529 : destroyPQExpBuffer(report);
530 : }
531 :
532 0 : for (int i = 0; i < n_data_types_usage_checks; i++)
533 : {
534 0 : if (queries[i])
535 0 : pg_free(queries[i]);
536 0 : }
537 0 : pg_free(queries);
538 0 : pg_free(states);
539 :
540 0 : check_ok();
541 0 : }
542 :
543 : /*
544 : * fix_path_separator
545 : * For non-Windows, just return the argument.
546 : * For Windows convert any forward slash to a backslash
547 : * such as is suitable for arguments to builtin commands
548 : * like RMDIR and DEL.
549 : */
550 : static char *
551 0 : fix_path_separator(char *path)
552 : {
553 : #ifdef WIN32
554 :
555 : char *result;
556 : char *c;
557 :
558 : result = pg_strdup(path);
559 :
560 : for (c = result; *c != '\0'; c++)
561 : if (*c == '/')
562 : *c = '\\';
563 :
564 : return result;
565 : #else
566 :
567 0 : return path;
568 : #endif
569 : }
570 :
571 : void
572 0 : output_check_banner(void)
573 : {
574 0 : if (user_opts.live_check)
575 : {
576 0 : pg_log(PG_REPORT,
577 : "Performing Consistency Checks on Old Live Server\n"
578 : "------------------------------------------------");
579 0 : }
580 : else
581 : {
582 0 : pg_log(PG_REPORT,
583 : "Performing Consistency Checks\n"
584 : "-----------------------------");
585 : }
586 0 : }
587 :
588 :
589 : void
590 0 : check_and_dump_old_cluster(void)
591 : {
592 : /* -- OLD -- */
593 :
594 0 : if (!user_opts.live_check)
595 0 : start_postmaster(&old_cluster, true);
596 :
597 : /*
598 : * First check that all databases allow connections since we'll otherwise
599 : * fail in later stages.
600 : */
601 0 : check_for_connection_status(&old_cluster);
602 :
603 : /*
604 : * Extract a list of databases, tables, and logical replication slots from
605 : * the old cluster.
606 : */
607 0 : get_db_rel_and_slot_infos(&old_cluster);
608 :
609 0 : init_tablespaces();
610 :
611 0 : get_loadable_libraries();
612 :
613 :
614 : /*
615 : * Check for various failure cases
616 : */
617 0 : check_is_install_user(&old_cluster);
618 0 : check_for_prepared_transactions(&old_cluster);
619 0 : check_for_isn_and_int8_passing_mismatch(&old_cluster);
620 :
621 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1700)
622 : {
623 : /*
624 : * Logical replication slots can be migrated since PG17. See comments
625 : * atop get_old_cluster_logical_slot_infos().
626 : */
627 0 : check_old_cluster_for_valid_slots();
628 :
629 : /*
630 : * Subscriptions and their dependencies can be migrated since PG17.
631 : * Before that the logical slots are not upgraded, so we will not be
632 : * able to upgrade the logical replication clusters completely.
633 : */
634 0 : get_subscription_info(&old_cluster);
635 0 : check_old_cluster_subscription_state();
636 0 : }
637 :
638 0 : check_for_data_types_usage(&old_cluster);
639 :
640 : /*
641 : * Unicode updates can affect some objects that use expressions with
642 : * functions dependent on Unicode.
643 : */
644 0 : check_for_unicode_update(&old_cluster);
645 :
646 : /*
647 : * PG 14 changed the function signature of encoding conversion functions.
648 : * Conversions from older versions cannot be upgraded automatically
649 : * because the user-defined functions used by the encoding conversions
650 : * need to be changed to match the new signature.
651 : */
652 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
653 0 : check_for_user_defined_encoding_conversions(&old_cluster);
654 :
655 : /*
656 : * Pre-PG 14 allowed user defined postfix operators, which are not
657 : * supported anymore. Verify there are none, iff applicable.
658 : */
659 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
660 0 : check_for_user_defined_postfix_ops(&old_cluster);
661 :
662 : /*
663 : * PG 14 changed polymorphic functions from anyarray to
664 : * anycompatiblearray.
665 : */
666 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
667 0 : check_for_incompatible_polymorphics(&old_cluster);
668 :
669 : /*
670 : * Pre-PG 12 allowed tables to be declared WITH OIDS, which is not
671 : * supported anymore. Verify there are none, iff applicable.
672 : */
673 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1100)
674 0 : check_for_tables_with_oids(&old_cluster);
675 :
676 : /*
677 : * Pre-PG 18 allowed child tables to omit not-null constraints that their
678 : * parents' columns have, but schema restore fails for them. Verify there
679 : * are none, iff applicable.
680 : */
681 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1800)
682 0 : check_for_not_null_inheritance(&old_cluster);
683 :
684 : /*
685 : * The btree_gist extension contains gist_inet_ops and gist_cidr_ops
686 : * opclasses that do not reliably give correct answers. We want to
687 : * deprecate and eventually remove those, and as a first step v19 marks
688 : * them not-opcdefault and instead marks the replacement in-core opclass
689 : * "inet_ops" as opcdefault. That creates a problem for pg_upgrade: in
690 : * versions where those opclasses were marked opcdefault, pg_dump will
691 : * dump indexes using them with no explicit opclass specification, so that
692 : * restore would create them using the inet_ops opclass. That would be
693 : * incompatible with what's actually in the on-disk files. So refuse to
694 : * upgrade if there are any such indexes.
695 : */
696 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1800)
697 0 : check_for_gist_inet_ops(&old_cluster);
698 :
699 : /*
700 : * Pre-PG 10 allowed tables with 'unknown' type columns and non WAL logged
701 : * hash indexes
702 : */
703 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 906)
704 : {
705 0 : if (user_opts.check)
706 0 : old_9_6_invalidate_hash_indexes(&old_cluster, true);
707 0 : }
708 :
709 : /* 9.5 and below should not have roles starting with pg_ */
710 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 905)
711 0 : check_for_pg_role_prefix(&old_cluster);
712 :
713 : /*
714 : * While not a check option, we do this now because this is the only time
715 : * the old server is running.
716 : */
717 0 : if (!user_opts.check)
718 0 : generate_old_dump();
719 :
720 0 : if (!user_opts.live_check)
721 0 : stop_postmaster(false);
722 0 : }
723 :
724 :
725 : void
726 0 : check_new_cluster(void)
727 : {
728 0 : get_db_rel_and_slot_infos(&new_cluster);
729 :
730 0 : check_new_cluster_is_empty();
731 :
732 0 : check_loadable_libraries();
733 :
734 0 : switch (user_opts.transfer_mode)
735 : {
736 : case TRANSFER_MODE_CLONE:
737 0 : check_file_clone();
738 0 : break;
739 : case TRANSFER_MODE_COPY:
740 : break;
741 : case TRANSFER_MODE_COPY_FILE_RANGE:
742 0 : check_copy_file_range();
743 0 : break;
744 : case TRANSFER_MODE_LINK:
745 0 : check_hard_link(TRANSFER_MODE_LINK);
746 0 : break;
747 : case TRANSFER_MODE_SWAP:
748 :
749 : /*
750 : * We do the hard link check for --swap, too, since it's an easy
751 : * way to verify the clusters are in the same file system. This
752 : * allows us to take some shortcuts in the file synchronization
753 : * step. With some more effort, we could probably support the
754 : * separate-file-system use case, but this mode is unlikely to
755 : * offer much benefit if we have to copy the files across file
756 : * system boundaries.
757 : */
758 0 : check_hard_link(TRANSFER_MODE_SWAP);
759 :
760 : /*
761 : * There are a few known issues with using --swap to upgrade from
762 : * versions older than 10. For example, the sequence tuple format
763 : * changed in v10, and the visibility map format changed in 9.6.
764 : * While such problems are not insurmountable (and we may have to
765 : * deal with similar problems in the future, anyway), it doesn't
766 : * seem worth the effort to support swap mode for upgrades from
767 : * long-unsupported versions.
768 : */
769 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) < 1000)
770 0 : pg_fatal("Swap mode can only upgrade clusters from PostgreSQL version %s and later.",
771 : "10");
772 :
773 0 : break;
774 : }
775 :
776 0 : check_is_install_user(&new_cluster);
777 :
778 0 : check_for_prepared_transactions(&new_cluster);
779 :
780 0 : check_for_new_tablespace_dir();
781 :
782 0 : check_new_cluster_replication_slots();
783 :
784 0 : check_new_cluster_subscription_configuration();
785 0 : }
786 :
787 :
788 : void
789 0 : report_clusters_compatible(void)
790 : {
791 0 : if (user_opts.check)
792 : {
793 0 : pg_log(PG_REPORT, "\n*Clusters are compatible*");
794 : /* stops new cluster */
795 0 : stop_postmaster(false);
796 :
797 0 : cleanup_output_dirs();
798 0 : exit(0);
799 : }
800 :
801 0 : pg_log(PG_REPORT, "\n"
802 : "If pg_upgrade fails after this point, you must re-initdb the\n"
803 : "new cluster before continuing.");
804 0 : }
805 :
806 :
807 : void
808 0 : issue_warnings_and_set_wal_level(void)
809 : {
810 : /*
811 : * We unconditionally start/stop the new server because pg_resetwal -o set
812 : * wal_level to 'minimum'. If the user is upgrading standby servers using
813 : * the rsync instructions, they will need pg_upgrade to write its final
814 : * WAL record showing wal_level as 'replica'.
815 : */
816 0 : start_postmaster(&new_cluster, true);
817 :
818 : /* Reindex hash indexes for old < 10.0 */
819 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 906)
820 0 : old_9_6_invalidate_hash_indexes(&new_cluster, false);
821 :
822 0 : report_extension_updates(&new_cluster);
823 :
824 0 : stop_postmaster(false);
825 0 : }
826 :
827 :
828 : void
829 0 : output_completion_banner(char *deletion_script_file_name)
830 : {
831 0 : PQExpBufferData user_specification;
832 :
833 0 : initPQExpBuffer(&user_specification);
834 0 : if (os_info.user_specified)
835 : {
836 0 : appendPQExpBufferStr(&user_specification, "-U ");
837 0 : appendShellString(&user_specification, os_info.user);
838 0 : appendPQExpBufferChar(&user_specification, ' ');
839 0 : }
840 :
841 0 : pg_log(PG_REPORT,
842 : "Some statistics are not transferred by pg_upgrade.\n"
843 : "Once you start the new server, consider running these two commands:\n"
844 : " %s/vacuumdb %s--all --analyze-in-stages --missing-stats-only\n"
845 : " %s/vacuumdb %s--all --analyze-only",
846 0 : new_cluster.bindir, user_specification.data,
847 0 : new_cluster.bindir, user_specification.data);
848 :
849 0 : if (deletion_script_file_name)
850 0 : pg_log(PG_REPORT,
851 : "Running this script will delete the old cluster's data files:\n"
852 : " %s",
853 0 : deletion_script_file_name);
854 : else
855 0 : pg_log(PG_REPORT,
856 : "Could not create a script to delete the old cluster's data files\n"
857 : "because user-defined tablespaces or the new cluster's data directory\n"
858 : "exist in the old cluster directory. The old cluster's contents must\n"
859 : "be deleted manually.");
860 :
861 0 : termPQExpBuffer(&user_specification);
862 0 : }
863 :
864 :
865 : void
866 0 : check_cluster_versions(void)
867 : {
868 0 : prep_status("Checking cluster versions");
869 :
870 : /* cluster versions should already have been obtained */
871 0 : Assert(old_cluster.major_version != 0);
872 0 : Assert(new_cluster.major_version != 0);
873 :
874 : /*
875 : * We allow upgrades from/to the same major version for alpha/beta
876 : * upgrades
877 : */
878 :
879 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) < 902)
880 0 : pg_fatal("This utility can only upgrade from PostgreSQL version %s and later.",
881 : "9.2");
882 :
883 : /* Only current PG version is supported as a target */
884 0 : if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
885 0 : pg_fatal("This utility can only upgrade to PostgreSQL version %s.",
886 : PG_MAJORVERSION);
887 :
888 : /*
889 : * We can't allow downgrading because we use the target pg_dump, and
890 : * pg_dump cannot operate on newer database versions, only current and
891 : * older versions.
892 : */
893 0 : if (old_cluster.major_version > new_cluster.major_version)
894 0 : pg_fatal("This utility cannot be used to downgrade to older major PostgreSQL versions.");
895 :
896 : /* Ensure binaries match the designated data directories */
897 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) !=
898 0 : GET_MAJOR_VERSION(old_cluster.bin_version))
899 0 : pg_fatal("Old cluster data and binary directories are from different major versions.");
900 0 : if (GET_MAJOR_VERSION(new_cluster.major_version) !=
901 0 : GET_MAJOR_VERSION(new_cluster.bin_version))
902 0 : pg_fatal("New cluster data and binary directories are from different major versions.");
903 :
904 : /*
905 : * Since version 18, newly created database clusters always have 'signed'
906 : * default char signedness, so it makes little sense to use the
907 : * --set-char-signedness option when upgrading from version 18 or later.
908 : * Users who want to change the default char signedness of the new
909 : * cluster can run pg_resetwal manually before the upgrade.
910 : */
911 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1800 &&
912 0 : user_opts.char_signedness != -1)
913 0 : pg_fatal("The option %s cannot be used for upgrades from PostgreSQL %s and later.",
914 : "--set-char-signedness", "18");
915 :
916 0 : check_ok();
917 0 : }
918 :
919 :
920 : void
921 0 : check_cluster_compatibility(void)
922 : {
923 : /* get/check pg_control data of servers */
924 0 : get_control_data(&old_cluster);
925 0 : get_control_data(&new_cluster);
926 0 : check_control_data(&old_cluster.controldata, &new_cluster.controldata);
927 :
928 0 : if (user_opts.live_check && old_cluster.port == new_cluster.port)
929 0 : pg_fatal("When checking a live server, "
930 : "the old and new port numbers must be different.");
931 0 : }
932 :
933 :
934 : static void
935 0 : check_new_cluster_is_empty(void)
936 : {
937 0 : int dbnum;
938 :
939 0 : for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
940 : {
941 0 : int relnum;
942 0 : RelInfoArr *rel_arr = &new_cluster.dbarr.dbs[dbnum].rel_arr;
943 :
944 0 : for (relnum = 0; relnum < rel_arr->nrels;
945 0 : relnum++)
946 : {
947 : /* pg_largeobject and its index should be skipped */
948 0 : if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
949 0 : pg_fatal("New cluster database \"%s\" is not empty: found relation \"%s.%s\"",
950 0 : new_cluster.dbarr.dbs[dbnum].db_name,
951 0 : rel_arr->rels[relnum].nspname,
952 0 : rel_arr->rels[relnum].relname);
953 0 : }
954 0 : }
955 0 : }
956 :
957 : /*
958 : * A previous run of pg_upgrade might have failed and the new cluster
959 : * directory recreated, but the user might have forgotten to remove
960 : * the new cluster's tablespace directories. Therefore, check that
961 : * new cluster tablespace directories do not already exist. If
962 : * they do, it would cause an error while restoring global objects.
963 : * This allows the failure to be detected at check time, rather than
964 : * during schema restore.
965 : */
966 : static void
967 0 : check_for_new_tablespace_dir(void)
968 : {
969 0 : int tblnum;
970 0 : char new_tablespace_dir[MAXPGPATH];
971 :
972 0 : prep_status("Checking for new cluster tablespace directories");
973 :
974 0 : for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++)
975 : {
976 0 : struct stat statbuf;
977 :
978 0 : snprintf(new_tablespace_dir, MAXPGPATH, "%s%s",
979 0 : new_cluster.tablespaces[tblnum],
980 0 : new_cluster.tablespace_suffix);
981 :
982 0 : if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT)
983 0 : pg_fatal("new cluster tablespace directory already exists: \"%s\"",
984 0 : new_tablespace_dir);
985 0 : }
986 :
987 0 : check_ok();
988 0 : }
989 :
990 : /*
991 : * create_script_for_old_cluster_deletion()
992 : *
993 : * This is particularly useful for tablespace deletion.
994 : */
995 : void
996 0 : create_script_for_old_cluster_deletion(char **deletion_script_file_name)
997 : {
998 0 : FILE *script = NULL;
999 0 : int tblnum;
1000 0 : char old_cluster_pgdata[MAXPGPATH],
1001 : new_cluster_pgdata[MAXPGPATH];
1002 0 : char *old_tblspc_suffix;
1003 :
1004 0 : *deletion_script_file_name = psprintf("%sdelete_old_cluster.%s",
1005 : SCRIPT_PREFIX, SCRIPT_EXT);
1006 :
1007 0 : strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH);
1008 0 : canonicalize_path(old_cluster_pgdata);
1009 :
1010 0 : strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH);
1011 0 : canonicalize_path(new_cluster_pgdata);
1012 :
1013 : /* Some people put the new data directory inside the old one. */
1014 0 : if (path_is_prefix_of_path(old_cluster_pgdata, new_cluster_pgdata))
1015 : {
1016 0 : pg_log(PG_WARNING,
1017 0 : "\nWARNING: new data directory should not be inside the old data directory, i.e. %s", old_cluster_pgdata);
1018 :
1019 : /* Unlink file in case it is left over from a previous run. */
1020 0 : unlink(*deletion_script_file_name);
1021 0 : pg_free(*deletion_script_file_name);
1022 0 : *deletion_script_file_name = NULL;
1023 0 : return;
1024 : }
1025 :
1026 : /*
1027 : * Some users (oddly) create tablespaces inside the cluster data
1028 : * directory. We can't create a proper old cluster delete script in that
1029 : * case.
1030 : */
1031 0 : for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++)
1032 : {
1033 0 : char new_tablespace_dir[MAXPGPATH];
1034 :
1035 0 : strlcpy(new_tablespace_dir, new_cluster.tablespaces[tblnum], MAXPGPATH);
1036 0 : canonicalize_path(new_tablespace_dir);
1037 0 : if (path_is_prefix_of_path(old_cluster_pgdata, new_tablespace_dir))
1038 : {
1039 : /* reproduce warning from CREATE TABLESPACE that is in the log */
1040 0 : pg_log(PG_WARNING,
1041 0 : "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", new_tablespace_dir);
1042 :
1043 : /* Unlink file in case it is left over from a previous run. */
1044 0 : unlink(*deletion_script_file_name);
1045 0 : pg_free(*deletion_script_file_name);
1046 0 : *deletion_script_file_name = NULL;
1047 0 : return;
1048 : }
1049 0 : }
1050 :
1051 0 : prep_status("Creating script to delete old cluster");
1052 :
1053 0 : if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL)
1054 0 : pg_fatal("could not open file \"%s\": %m",
1055 0 : *deletion_script_file_name);
1056 :
1057 : #ifndef WIN32
1058 : /* add shebang header */
1059 0 : fprintf(script, "#!/bin/sh\n\n");
1060 : #endif
1061 :
1062 : /* delete old cluster's default tablespace */
1063 0 : fprintf(script, RMDIR_CMD " %c%s%c\n", PATH_QUOTE,
1064 0 : fix_path_separator(old_cluster.pgdata), PATH_QUOTE);
1065 :
1066 : /* delete old cluster's alternate tablespaces */
1067 0 : old_tblspc_suffix = pg_strdup(old_cluster.tablespace_suffix);
1068 0 : fix_path_separator(old_tblspc_suffix);
1069 0 : for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++)
1070 0 : fprintf(script, RMDIR_CMD " %c%s%s%c\n", PATH_QUOTE,
1071 0 : fix_path_separator(old_cluster.tablespaces[tblnum]),
1072 0 : old_tblspc_suffix, PATH_QUOTE);
1073 0 : pfree(old_tblspc_suffix);
1074 :
1075 0 : fclose(script);
1076 :
1077 : #ifndef WIN32
1078 0 : if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
1079 0 : pg_fatal("could not add execute permission to file \"%s\": %m",
1080 0 : *deletion_script_file_name);
1081 : #endif
1082 :
1083 0 : check_ok();
1084 0 : }
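/*
 * For reference, a sketch of the script this function writes on a non-Windows
 * host, assuming hypothetical paths and a single user tablespace (RMDIR_CMD
 * and PATH_QUOTE expand to "rm -rf" and a single quote there, and the
 * tablespace suffix encodes the old cluster's major and catalog version):
 *
 *	#!/bin/sh
 *
 *	rm -rf '/var/lib/pgsql/15/data'
 *	rm -rf '/mnt/space1/PG_15_202209061'
 */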
1085 :
1086 :
1087 : /*
1088 : * check_is_install_user()
1089 : *
1090 : * Check we are the install user, and that the new cluster
1091 : * has no other users.
1092 : */
1093 : static void
1094 0 : check_is_install_user(ClusterInfo *cluster)
1095 : {
1096 0 : PGresult *res;
1097 0 : PGconn *conn = connectToServer(cluster, "template1");
1098 :
1099 0 : prep_status("Checking database user is the install user");
1100 :
1101 : /* Can't use pg_authid because only superusers can view it. */
1102 0 : res = executeQueryOrDie(conn,
1103 : "SELECT rolsuper, oid "
1104 : "FROM pg_catalog.pg_roles "
1105 : "WHERE rolname = current_user "
1106 : "AND rolname !~ '^pg_'");
1107 :
1108 : /*
1109 : * We only allow the install user in the new cluster (see comment below)
1110 : * and we preserve pg_authid.oid, so this must be the install user in the
1111 : * old cluster too.
1112 : */
1113 0 : if (PQntuples(res) != 1 ||
1114 0 : atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID)
1115 0 : pg_fatal("database user \"%s\" is not the install user",
1116 0 : os_info.user);
1117 :
1118 0 : PQclear(res);
1119 :
1120 0 : res = executeQueryOrDie(conn,
1121 : "SELECT COUNT(*) "
1122 : "FROM pg_catalog.pg_roles "
1123 : "WHERE rolname !~ '^pg_'");
1124 :
1125 0 : if (PQntuples(res) != 1)
1126 0 : pg_fatal("could not determine the number of users");
1127 :
1128 : /*
1129 : * We only allow the install user in the new cluster because other defined
1130 : * users might match users defined in the old cluster and generate an
1131 : * error during pg_dump restore.
1132 : */
1133 0 : if (cluster == &new_cluster && strcmp(PQgetvalue(res, 0, 0), "1") != 0)
1134 0 : pg_fatal("Only the install user can be defined in the new cluster.");
1135 :
1136 0 : PQclear(res);
1137 :
1138 0 : PQfinish(conn);
1139 :
1140 0 : check_ok();
1141 0 : }
1142 :
1143 :
1144 : /*
1145 : * check_for_connection_status
1146 : *
1147 : * Ensure that all non-template0 databases allow connections since they
1148 : * otherwise won't be restored; and that template0 explicitly doesn't allow
1149 : * connections since it would make pg_dumpall --globals restore fail.
1150 : */
1151 : static void
1152 0 : check_for_connection_status(ClusterInfo *cluster)
1153 : {
1154 0 : int dbnum;
1155 0 : PGconn *conn_template1;
1156 0 : PGresult *dbres;
1157 0 : int ntups;
1158 0 : int i_datname;
1159 0 : int i_datallowconn;
1160 0 : int i_datconnlimit;
1161 0 : FILE *script = NULL;
1162 0 : char output_path[MAXPGPATH];
1163 :
1164 0 : prep_status("Checking database connection settings");
1165 :
1166 0 : snprintf(output_path, sizeof(output_path), "%s/%s",
1167 0 : log_opts.basedir,
1168 : "databases_cannot_connect_to.txt");
1169 :
1170 0 : conn_template1 = connectToServer(cluster, "template1");
1171 :
1172 : /* get database names */
1173 0 : dbres = executeQueryOrDie(conn_template1,
1174 : "SELECT datname, datallowconn, datconnlimit "
1175 : "FROM pg_catalog.pg_database");
1176 :
1177 0 : i_datname = PQfnumber(dbres, "datname");
1178 0 : i_datallowconn = PQfnumber(dbres, "datallowconn");
1179 0 : i_datconnlimit = PQfnumber(dbres, "datconnlimit");
1180 :
1181 0 : ntups = PQntuples(dbres);
1182 0 : for (dbnum = 0; dbnum < ntups; dbnum++)
1183 : {
1184 0 : char *datname = PQgetvalue(dbres, dbnum, i_datname);
1185 0 : char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);
1186 0 : char *datconnlimit = PQgetvalue(dbres, dbnum, i_datconnlimit);
1187 :
1188 0 : if (strcmp(datname, "template0") == 0)
1189 : {
1190 : /* avoid restore failure when pg_dumpall tries to create template0 */
1191 0 : if (strcmp(datallowconn, "t") == 0)
1192 0 : pg_fatal("template0 must not allow connections, "
1193 : "i.e. its pg_database.datallowconn must be false");
1194 0 : }
1195 : else
1196 : {
1197 : /*
1198 : * Prevent datallowconn == false databases from being skipped on
1199 : * restore, and ensure that no databases are marked invalid with
1200 : * datconnlimit == -2.
1201 : */
1202 0 : if ((strcmp(datallowconn, "f") == 0) || strcmp(datconnlimit, "-2") == 0)
1203 : {
1204 0 : if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1205 0 : pg_fatal("could not open file \"%s\": %m", output_path);
1206 :
1207 0 : fprintf(script, "%s\n", datname);
1208 0 : }
1209 : }
1210 0 : }
1211 :
1212 0 : PQclear(dbres);
1213 :
1214 0 : PQfinish(conn_template1);
1215 :
1216 0 : if (script)
1217 : {
1218 0 : fclose(script);
1219 0 : pg_log(PG_REPORT, "fatal");
1220 0 : pg_fatal("All non-template0 databases must allow connections, i.e. their\n"
1221 : "pg_database.datallowconn must be true and pg_database.datconnlimit\n"
1222 : "must not be -2. Your installation contains non-template0 databases\n"
1223 : "which cannot be connected to. Consider allowing connection for all\n"
1224 : "non-template0 databases or drop the databases which do not allow\n"
1225 : "connections. A list of databases with the problem is in the file:\n"
1226 0 : " %s", output_path);
1227 : }
1228 : else
1229 0 : check_ok();
1230 0 : }
1231 :
1232 :
1233 : /*
1234 : * check_for_prepared_transactions()
1235 : *
1236 : * Make sure there are no prepared transactions because the storage format
1237 : * might have changed.
1238 : */
1239 : static void
1240 0 : check_for_prepared_transactions(ClusterInfo *cluster)
1241 : {
1242 0 : PGresult *res;
1243 0 : PGconn *conn = connectToServer(cluster, "template1");
1244 :
1245 0 : prep_status("Checking for prepared transactions");
1246 :
1247 0 : res = executeQueryOrDie(conn,
1248 : "SELECT * "
1249 : "FROM pg_catalog.pg_prepared_xacts");
1250 :
1251 0 : if (PQntuples(res) != 0)
1252 : {
1253 0 : if (cluster == &old_cluster)
1254 0 : pg_fatal("The source cluster contains prepared transactions");
1255 : else
1256 0 : pg_fatal("The target cluster contains prepared transactions");
1257 : }
1258 :
1259 0 : PQclear(res);
1260 :
1261 0 : PQfinish(conn);
1262 :
1263 0 : check_ok();
1264 0 : }
1265 :
1266 : /*
1267 : * Callback function for processing result of query for
1268 : * check_for_isn_and_int8_passing_mismatch()'s UpgradeTask. If the query
1269 : * returned any rows (i.e., the check failed), write the details to the report
1270 : * file.
1271 : */
1272 : static void
1273 0 : process_isn_and_int8_passing_mismatch(DbInfo *dbinfo, PGresult *res, void *arg)
1274 : {
1275 0 : int ntups = PQntuples(res);
1276 0 : int i_nspname = PQfnumber(res, "nspname");
1277 0 : int i_proname = PQfnumber(res, "proname");
1278 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1279 :
1280 0 : if (ntups == 0)
1281 0 : return;
1282 :
1283 0 : if (report->file == NULL &&
1284 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1285 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1286 :
1287 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1288 :
1289 0 : for (int rowno = 0; rowno < ntups; rowno++)
1290 0 : fprintf(report->file, " %s.%s\n",
1291 0 : PQgetvalue(res, rowno, i_nspname),
1292 0 : PQgetvalue(res, rowno, i_proname));
1293 0 : }
1294 :
1295 : /*
1296 : * check_for_isn_and_int8_passing_mismatch()
1297 : *
1298 : * contrib/isn relies on data type int8, and in 8.4 int8 can now be passed
1299 : * by value. The schema dumps the CREATE TYPE PASSEDBYVALUE setting so
1300 : * it must match for the old and new servers.
1301 : */
1302 : static void
1303 0 : check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
1304 : {
1305 0 : UpgradeTask *task;
1306 0 : UpgradeTaskReport report;
1307 0 : const char *query = "SELECT n.nspname, p.proname "
1308 : "FROM pg_catalog.pg_proc p, "
1309 : " pg_catalog.pg_namespace n "
1310 : "WHERE p.pronamespace = n.oid AND "
1311 : " p.probin = '$libdir/isn'";
1312 :
1313 0 : prep_status("Checking for contrib/isn with bigint-passing mismatch");
1314 :
1315 0 : if (old_cluster.controldata.float8_pass_by_value ==
1316 0 : new_cluster.controldata.float8_pass_by_value)
1317 : {
1318 : /* no mismatch */
1319 0 : check_ok();
1320 0 : return;
1321 : }
1322 :
1323 0 : report.file = NULL;
1324 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1325 0 : log_opts.basedir,
1326 : "contrib_isn_and_int8_pass_by_value.txt");
1327 :
1328 0 : task = upgrade_task_create();
1329 0 : upgrade_task_add_step(task, query, process_isn_and_int8_passing_mismatch,
1330 : true, &report);
1331 0 : upgrade_task_run(task, cluster);
1332 0 : upgrade_task_free(task);
1333 :
1334 0 : if (report.file)
1335 : {
1336 0 : fclose(report.file);
1337 0 : pg_log(PG_REPORT, "fatal");
1338 0 : pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
1339 : "bigint data type. Your old and new clusters pass bigint values\n"
1340 : "differently so this cluster cannot currently be upgraded. You can\n"
1341 : "manually dump databases in the old cluster that use \"contrib/isn\"\n"
1342 : "facilities, drop them, perform the upgrade, and then restore them. A\n"
1343 : "list of the problem functions is in the file:\n"
1344 0 : " %s", report.path);
1345 : }
1346 : else
1347 0 : check_ok();
1348 0 : }
1349 :
1350 : /*
1351 : * Callback function for processing result of query for
1352 : * check_for_user_defined_postfix_ops()'s UpgradeTask. If the query returned
1353 : * any rows (i.e., the check failed), write the details to the report file.
1354 : */
1355 : static void
1356 0 : process_user_defined_postfix_ops(DbInfo *dbinfo, PGresult *res, void *arg)
1357 : {
1358 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1359 0 : int ntups = PQntuples(res);
1360 0 : int i_oproid = PQfnumber(res, "oproid");
1361 0 : int i_oprnsp = PQfnumber(res, "oprnsp");
1362 0 : int i_oprname = PQfnumber(res, "oprname");
1363 0 : int i_typnsp = PQfnumber(res, "typnsp");
1364 0 : int i_typname = PQfnumber(res, "typname");
1365 :
1366 0 : if (ntups == 0)
1367 0 : return;
1368 :
1369 0 : if (report->file == NULL &&
1370 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1371 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1372 :
1373 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1374 :
1375 0 : for (int rowno = 0; rowno < ntups; rowno++)
1376 0 : fprintf(report->file, " (oid=%s) %s.%s (%s.%s, NONE)\n",
1377 0 : PQgetvalue(res, rowno, i_oproid),
1378 0 : PQgetvalue(res, rowno, i_oprnsp),
1379 0 : PQgetvalue(res, rowno, i_oprname),
1380 0 : PQgetvalue(res, rowno, i_typnsp),
1381 0 : PQgetvalue(res, rowno, i_typname));
1382 0 : }
1383 :
1384 : /*
1385 : * Verify that no user defined postfix operators exist.
1386 : */
1387 : static void
1388 0 : check_for_user_defined_postfix_ops(ClusterInfo *cluster)
1389 : {
1390 0 : UpgradeTaskReport report;
1391 0 : UpgradeTask *task = upgrade_task_create();
1392 0 : const char *query;
1393 :
1394 : /*
1395 : * The query below hardcodes FirstNormalObjectId as 16384 rather than
1396 : * interpolating that C #define into the query because, if that #define is
1397 : * ever changed, the cutoff we want to use is the value used by
1398 : * pre-version 14 servers, not that of some future version.
1399 : */
1400 0 : query = "SELECT o.oid AS oproid, "
1401 : " n.nspname AS oprnsp, "
1402 : " o.oprname, "
1403 : " tn.nspname AS typnsp, "
1404 : " t.typname "
1405 : "FROM pg_catalog.pg_operator o, "
1406 : " pg_catalog.pg_namespace n, "
1407 : " pg_catalog.pg_type t, "
1408 : " pg_catalog.pg_namespace tn "
1409 : "WHERE o.oprnamespace = n.oid AND "
1410 : " o.oprleft = t.oid AND "
1411 : " t.typnamespace = tn.oid AND "
1412 : " o.oprright = 0 AND "
1413 : " o.oid >= 16384";
1414 :
1415 0 : prep_status("Checking for user-defined postfix operators");
1416 :
1417 0 : report.file = NULL;
1418 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1419 0 : log_opts.basedir,
1420 : "postfix_ops.txt");
1421 :
1422 0 : upgrade_task_add_step(task, query, process_user_defined_postfix_ops,
1423 : true, &report);
1424 0 : upgrade_task_run(task, cluster);
1425 0 : upgrade_task_free(task);
1426 :
1427 0 : if (report.file)
1428 : {
1429 0 : fclose(report.file);
1430 0 : pg_log(PG_REPORT, "fatal");
1431 0 : pg_fatal("Your installation contains user-defined postfix operators, which are not\n"
1432 : "supported anymore. Consider dropping the postfix operators and replacing\n"
1433 : "them with prefix operators or function calls.\n"
1434 : "A list of user-defined postfix operators is in the file:\n"
1435 0 : " %s", report.path);
1436 : }
1437 : else
1438 0 : check_ok();
1439 0 : }
1440 :
1441 : /*
1442 : * Callback function for processing results of query for
1443 : * check_for_incompatible_polymorphics()'s UpgradeTask. If the query returned
1444 : * any rows (i.e., the check failed), write the details to the report file.
1445 : */
1446 : static void
1447 0 : process_incompat_polymorphics(DbInfo *dbinfo, PGresult *res, void *arg)
1448 : {
1449 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1450 0 : int ntups = PQntuples(res);
1451 0 : int i_objkind = PQfnumber(res, "objkind");
1452 0 : int i_objname = PQfnumber(res, "objname");
1453 :
1454 0 : if (ntups == 0)
1455 0 : return;
1456 :
1457 0 : if (report->file == NULL &&
1458 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1459 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1460 :
1461 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1462 :
1463 0 : for (int rowno = 0; rowno < ntups; rowno++)
1464 0 : fprintf(report->file, " %s: %s\n",
1465 0 : PQgetvalue(res, rowno, i_objkind),
1466 0 : PQgetvalue(res, rowno, i_objname));
1467 0 : }
1468 :
1469 : /*
1470 : * check_for_incompatible_polymorphics()
1471 : *
1472 : * Make sure nothing is using old polymorphic functions with
1473 : * anyarray/anyelement rather than the new anycompatible variants.
1474 : */
1475 : static void
1476 0 : check_for_incompatible_polymorphics(ClusterInfo *cluster)
1477 : {
1478 0 : PQExpBufferData old_polymorphics;
1479 0 : UpgradeTask *task = upgrade_task_create();
1480 0 : UpgradeTaskReport report;
1481 0 : char *query;
1482 :
1483 0 : prep_status("Checking for incompatible polymorphic functions");
1484 :
1485 0 : report.file = NULL;
1486 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1487 0 : log_opts.basedir,
1488 : "incompatible_polymorphics.txt");
1489 :
1490 : /* The set of problematic functions varies a bit in different versions */
1491 0 : initPQExpBuffer(&old_polymorphics);
1492 :
1493 0 : appendPQExpBufferStr(&old_polymorphics,
1494 : "'array_append(anyarray,anyelement)'"
1495 : ", 'array_cat(anyarray,anyarray)'"
1496 : ", 'array_prepend(anyelement,anyarray)'");
1497 :
1498 0 : if (GET_MAJOR_VERSION(cluster->major_version) >= 903)
1499 0 : appendPQExpBufferStr(&old_polymorphics,
1500 : ", 'array_remove(anyarray,anyelement)'"
1501 : ", 'array_replace(anyarray,anyelement,anyelement)'");
1502 :
1503 0 : if (GET_MAJOR_VERSION(cluster->major_version) >= 905)
1504 0 : appendPQExpBufferStr(&old_polymorphics,
1505 : ", 'array_position(anyarray,anyelement)'"
1506 : ", 'array_position(anyarray,anyelement,integer)'"
1507 : ", 'array_positions(anyarray,anyelement)'"
1508 : ", 'width_bucket(anyelement,anyarray)'");
1509 :
1510 : /*
1511 : * The query below hardcodes FirstNormalObjectId as 16384 rather than
1512 : * interpolating that C #define into the query because, if that #define is
1513 : * ever changed, the cutoff we want to use is the value used by
1514 : * pre-version 14 servers, not that of some future version.
1515 : */
1516 :
1517 : /* Aggregate transition functions */
1518 0 : query = psprintf("SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
1519 : "FROM pg_proc AS p "
1520 : "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
1521 : "JOIN pg_proc AS transfn ON transfn.oid=a.aggtransfn "
1522 : "WHERE p.oid >= 16384 "
1523 : "AND a.aggtransfn = ANY(ARRAY[%s]::regprocedure[]) "
1524 : "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
1525 :
1526 : /* Aggregate final functions */
1527 : "UNION ALL "
1528 : "SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
1529 : "FROM pg_proc AS p "
1530 : "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
1531 : "JOIN pg_proc AS finalfn ON finalfn.oid=a.aggfinalfn "
1532 : "WHERE p.oid >= 16384 "
1533 : "AND a.aggfinalfn = ANY(ARRAY[%s]::regprocedure[]) "
1534 : "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
1535 :
1536 : /* Operators */
1537 : "UNION ALL "
1538 : "SELECT 'operator' AS objkind, op.oid::regoperator::text AS objname "
1539 : "FROM pg_operator AS op "
1540 : "WHERE op.oid >= 16384 "
1541 : "AND oprcode = ANY(ARRAY[%s]::regprocedure[]) "
1542 : "AND oprleft = ANY(ARRAY['anyarray', 'anyelement']::regtype[])",
1543 0 : old_polymorphics.data,
1544 0 : old_polymorphics.data,
1545 0 : old_polymorphics.data);
1546 :
1547 0 : upgrade_task_add_step(task, query, process_incompat_polymorphics,
1548 : true, &report);
1549 0 : upgrade_task_run(task, cluster);
1550 0 : upgrade_task_free(task);
1551 :
1552 0 : if (report.file)
1553 : {
1554 0 : fclose(report.file);
1555 0 : pg_log(PG_REPORT, "fatal");
1556 0 : pg_fatal("Your installation contains user-defined objects that refer to internal\n"
1557 : "polymorphic functions with arguments of type \"anyarray\" or \"anyelement\".\n"
1558 : "These user-defined objects must be dropped before upgrading and restored\n"
1559 : "afterwards, changing them to refer to the new corresponding functions with\n"
1560 : "arguments of type \"anycompatiblearray\" and \"anycompatible\".\n"
1561 : "A list of the problematic objects is in the file:\n"
1562 0 : " %s", report.path);
1563 : }
1564 : else
1565 0 : check_ok();
1566 :
1567 0 : termPQExpBuffer(&old_polymorphics);
1568 0 : pg_free(query);
1569 0 : }
1570 :
1571 : /*
1572 : * Callback function for processing results of query for
1573 : * check_for_tables_with_oids()'s UpgradeTask. If the query returned any rows
1574 : * (i.e., the check failed), write the details to the report file.
1575 : */
1576 : static void
1577 0 : process_with_oids_check(DbInfo *dbinfo, PGresult *res, void *arg)
1578 : {
1579 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1580 0 : int ntups = PQntuples(res);
1581 0 : int i_nspname = PQfnumber(res, "nspname");
1582 0 : int i_relname = PQfnumber(res, "relname");
1583 :
1584 0 : if (ntups == 0)
1585 0 : return;
1586 :
1587 0 : if (report->file == NULL &&
1588 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1589 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1590 :
1591 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1592 :
1593 0 : for (int rowno = 0; rowno < ntups; rowno++)
1594 0 : fprintf(report->file, " %s.%s\n",
1595 0 : PQgetvalue(res, rowno, i_nspname),
1596 0 : PQgetvalue(res, rowno, i_relname));
1597 0 : }
1598 :
1599 : /*
1600 : * Verify that no tables are declared WITH OIDS.
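     : *
     : * Illustrative sketch (hypothetical table name): on servers older than
     : * version 12 a table could have been created as
     : *
     : *     CREATE TABLE t (id int) WITH OIDS;
     : *
     : * and the hidden oid column must be removed before upgrading, e.g.
     : *
     : *     ALTER TABLE t SET WITHOUT OIDS;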
1601 : */
1602 : static void
1603 0 : check_for_tables_with_oids(ClusterInfo *cluster)
1604 : {
1605 0 : UpgradeTaskReport report;
1606 0 : UpgradeTask *task = upgrade_task_create();
1607 0 : const char *query = "SELECT n.nspname, c.relname "
1608 : "FROM pg_catalog.pg_class c, "
1609 : " pg_catalog.pg_namespace n "
1610 : "WHERE c.relnamespace = n.oid AND "
1611 : " c.relhasoids AND"
1612 : " n.nspname NOT IN ('pg_catalog')";
1613 :
1614 0 : prep_status("Checking for tables WITH OIDS");
1615 :
1616 0 : report.file = NULL;
1617 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1618 0 : log_opts.basedir,
1619 : "tables_with_oids.txt");
1620 :
1621 0 : upgrade_task_add_step(task, query, process_with_oids_check,
1622 : true, &report);
1623 0 : upgrade_task_run(task, cluster);
1624 0 : upgrade_task_free(task);
1625 :
1626 0 : if (report.file)
1627 : {
1628 0 : fclose(report.file);
1629 0 : pg_log(PG_REPORT, "fatal");
1630 0 : pg_fatal("Your installation contains tables declared WITH OIDS, which is not\n"
1631 : "supported anymore. Consider removing the oid column using\n"
1632 : " ALTER TABLE ... SET WITHOUT OIDS;\n"
1633 : "A list of tables with the problem is in the file:\n"
1634 0 : " %s", report.path);
1635 : }
1636 : else
1637 0 : check_ok();
1638 0 : }
1639 :
1640 : /*
1641 : * Callback function for processing results of query for
1642 : * check_for_not_null_inheritance.
1643 : */
1644 : static void
1645 0 : process_inconsistent_notnull(DbInfo *dbinfo, PGresult *res, void *arg)
1646 : {
1647 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1648 0 : int ntups = PQntuples(res);
1649 0 : int i_nspname = PQfnumber(res, "nspname");
1650 0 : int i_relname = PQfnumber(res, "relname");
1651 0 : int i_attname = PQfnumber(res, "attname");
1652 :
1653 0 : if (ntups == 0)
1654 0 : return;
1655 :
1656 0 : if (report->file == NULL &&
1657 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1658 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1659 :
1660 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1661 :
1662 0 : for (int rowno = 0; rowno < ntups; rowno++)
1663 : {
1664 0 : fprintf(report->file, " %s.%s.%s\n",
1665 0 : PQgetvalue(res, rowno, i_nspname),
1666 0 : PQgetvalue(res, rowno, i_relname),
1667 0 : PQgetvalue(res, rowno, i_attname));
1668 0 : }
1669 0 : }
1670 :
1671 : /*
1672 : * check_for_not_null_inheritance()
1673 : *
1675 1674 : * Since version 18, an attempt to create a child table lacking a not-null
1676 1675 : * constraint that is present in its parent errors out, so this inconsistency
1677 1676 : * can no longer arise; earlier versions had various ways for it to happen.
1678 1677 : * Check that the cluster to be upgraded doesn't have any of those problems.
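     : *
     : * Illustrative sketch (hypothetical names; exact behavior varies by old
     : * version): before catalogued not-null constraints, the child's flag could
     : * simply be dropped without touching the parent,
     : *
     : *     CREATE TABLE parent (a int NOT NULL);
     : *     CREATE TABLE child () INHERITS (parent);
     : *     ALTER TABLE child ALTER COLUMN a DROP NOT NULL;
     : *
     : * and the fix is to re-add it on the child:
     : *
     : *     ALTER TABLE child ALTER COLUMN a SET NOT NULL;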
1678 : */
1679 : static void
1680 0 : check_for_not_null_inheritance(ClusterInfo *cluster)
1681 : {
1682 0 : UpgradeTaskReport report;
1683 0 : UpgradeTask *task;
1684 0 : const char *query;
1685 :
1686 0 : prep_status("Checking for not-null constraint inconsistencies");
1687 :
1688 0 : report.file = NULL;
1689 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1690 0 : log_opts.basedir,
1691 : "not_null_inconsistent_columns.txt");
1692 :
1693 0 : query = "SELECT nspname, cc.relname, ac.attname "
1694 : "FROM pg_catalog.pg_inherits i, pg_catalog.pg_attribute ac, "
1695 : " pg_catalog.pg_attribute ap, pg_catalog.pg_class cc, "
1696 : " pg_catalog.pg_namespace nc "
1697 : "WHERE cc.oid = ac.attrelid AND i.inhrelid = ac.attrelid "
1698 : " AND i.inhparent = ap.attrelid AND ac.attname = ap.attname "
1699 : " AND cc.relnamespace = nc.oid "
1700 : " AND ap.attnum > 0 and ap.attnotnull AND NOT ac.attnotnull";
1701 :
1702 0 : task = upgrade_task_create();
1703 0 : upgrade_task_add_step(task, query,
1704 : process_inconsistent_notnull,
1705 : true, &report);
1706 0 : upgrade_task_run(task, cluster);
1707 0 : upgrade_task_free(task);
1708 :
1709 0 : if (report.file)
1710 : {
1711 0 : fclose(report.file);
1712 0 : pg_log(PG_REPORT, "fatal");
1713 0 : pg_fatal("Your installation contains inconsistent NOT NULL constraints.\n"
1714 : "If the parent column(s) are NOT NULL, then the child column must\n"
1715 : "also be marked NOT NULL, or the upgrade will fail.\n"
1716 : "You can fix this by running\n"
1717 : " ALTER TABLE tablename ALTER column SET NOT NULL;\n"
1718 : "on each column listed in the file:\n"
1719 0 : " %s", report.path);
1720 : }
1721 : else
1722 0 : check_ok();
1723 0 : }
1724 :
1725 : /*
1726 : * Callback function for processing results of query for
1727 : * check_for_gist_inet_ops()'s UpgradeTask. If the query returned any rows
1728 : * (i.e., the check failed), write the details to the report file.
1729 : */
1730 : static void
1731 0 : process_gist_inet_ops_check(DbInfo *dbinfo, PGresult *res, void *arg)
1732 : {
1733 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1734 0 : int ntups = PQntuples(res);
1735 0 : int i_nspname = PQfnumber(res, "nspname");
1736 0 : int i_relname = PQfnumber(res, "relname");
1737 :
1738 0 : if (ntups == 0)
1739 0 : return;
1740 :
1741 0 : if (report->file == NULL &&
1742 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1743 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1744 :
1745 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1746 :
1747 0 : for (int rowno = 0; rowno < ntups; rowno++)
1748 0 : fprintf(report->file, " %s.%s\n",
1749 0 : PQgetvalue(res, rowno, i_nspname),
1750 0 : PQgetvalue(res, rowno, i_relname));
1751 0 : }
1752 :
1753 : /*
1754 : * Verify that no indexes use gist_inet_ops/gist_cidr_ops, unless the
1755 : * opclasses have been changed to not-opcdefault (which would allow
1756 : * the old server to dump the index definitions with explicit opclasses).
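     : *
     : * Illustrative remediation (hypothetical table/column/index names): drop
     : * the affected index and recreate it with the built-in GiST opclass before
     : * retrying the upgrade,
     : *
     : *     DROP INDEX t_addr_idx;
     : *     CREATE INDEX t_addr_idx ON t USING gist (addr inet_ops);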
1757 : */
1758 : static void
1759 0 : check_for_gist_inet_ops(ClusterInfo *cluster)
1760 : {
1761 0 : UpgradeTaskReport report;
1762 0 : UpgradeTask *task = upgrade_task_create();
1763 0 : const char *query = "SELECT nc.nspname, cc.relname "
1764 : "FROM pg_catalog.pg_opclass oc, pg_catalog.pg_index i, "
1765 : " pg_catalog.pg_class cc, pg_catalog.pg_namespace nc "
1766 : "WHERE oc.opcmethod = " CppAsString2(GIST_AM_OID)
1767 : " AND oc.opcname IN ('gist_inet_ops', 'gist_cidr_ops')"
1768 : " AND oc.opcdefault"
1769 : " AND oc.oid = any(i.indclass)"
1770 : " AND i.indexrelid = cc.oid AND cc.relnamespace = nc.oid";
1771 :
1772 0 : prep_status("Checking for uses of gist_inet_ops/gist_cidr_ops");
1773 :
1774 0 : report.file = NULL;
1775 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1776 0 : log_opts.basedir,
1777 : "gist_inet_ops.txt");
1778 :
1779 0 : upgrade_task_add_step(task, query, process_gist_inet_ops_check,
1780 : true, &report);
1781 0 : upgrade_task_run(task, cluster);
1782 0 : upgrade_task_free(task);
1783 :
1784 0 : if (report.file)
1785 : {
1786 0 : fclose(report.file);
1787 0 : pg_log(PG_REPORT, "fatal");
1788 0 : pg_fatal("Your installation contains indexes that use btree_gist's\n"
1789 : "gist_inet_ops or gist_cidr_ops opclasses,\n"
1790 : "which cannot be binary-upgraded. Replace them with indexes\n"
1791 : "that use the built-in GiST inet_ops opclass.\n"
1792 : "A list of indexes with the problem is in the file:\n"
1793 0 : " %s", report.path);
1794 : }
1795 : else
1796 0 : check_ok();
1797 0 : }
1798 :
1799 : /*
1800 : * check_for_pg_role_prefix()
1801 : *
1802 : * Versions older than 9.6 should not have any pg_* roles
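     : *
     : * Illustrative remediation (hypothetical role name): rename any such role
     : * before upgrading, e.g.
     : *
     : *     ALTER ROLE pg_accounting RENAME TO accounting;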
1803 : */
1804 : static void
1805 0 : check_for_pg_role_prefix(ClusterInfo *cluster)
1806 : {
1807 0 : PGresult *res;
1808 0 : PGconn *conn = connectToServer(cluster, "template1");
1809 0 : int ntups;
1810 0 : int i_roloid;
1811 0 : int i_rolname;
1812 0 : FILE *script = NULL;
1813 0 : char output_path[MAXPGPATH];
1814 :
1815 0 : prep_status("Checking for roles starting with \"pg_\"");
1816 :
1817 0 : snprintf(output_path, sizeof(output_path), "%s/%s",
1818 0 : log_opts.basedir,
1819 : "pg_role_prefix.txt");
1820 :
1821 0 : res = executeQueryOrDie(conn,
1822 : "SELECT oid AS roloid, rolname "
1823 : "FROM pg_catalog.pg_roles "
1824 : "WHERE rolname ~ '^pg_'");
1825 :
1826 0 : ntups = PQntuples(res);
1827 0 : i_roloid = PQfnumber(res, "roloid");
1828 0 : i_rolname = PQfnumber(res, "rolname");
1829 0 : for (int rowno = 0; rowno < ntups; rowno++)
1830 : {
1831 0 : if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1832 0 : pg_fatal("could not open file \"%s\": %m", output_path);
1833 0 : fprintf(script, "%s (oid=%s)\n",
1834 0 : PQgetvalue(res, rowno, i_rolname),
1835 0 : PQgetvalue(res, rowno, i_roloid));
1836 0 : }
1837 :
1838 0 : PQclear(res);
1839 :
1840 0 : PQfinish(conn);
1841 :
1842 0 : if (script)
1843 : {
1844 0 : fclose(script);
1845 0 : pg_log(PG_REPORT, "fatal");
1846 0 : pg_fatal("Your installation contains roles starting with \"pg_\".\n"
1847 : "\"pg_\" is a reserved prefix for system roles. The cluster\n"
1848 : "cannot be upgraded until these roles are renamed.\n"
1849 : "A list of roles starting with \"pg_\" is in the file:\n"
1850 0 : " %s", output_path);
1851 : }
1852 : else
1853 0 : check_ok();
1854 0 : }
1855 :
1856 : /*
1857 : * Callback function for processing results of query for
1858 : * check_for_user_defined_encoding_conversions()'s UpgradeTask. If the query
1859 : * returned any rows (i.e., the check failed), write the details to the report
1860 : * file.
1861 : */
1862 : static void
1863 0 : process_user_defined_encoding_conversions(DbInfo *dbinfo, PGresult *res, void *arg)
1864 : {
1865 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1866 0 : int ntups = PQntuples(res);
1867 0 : int i_conoid = PQfnumber(res, "conoid");
1868 0 : int i_conname = PQfnumber(res, "conname");
1869 0 : int i_nspname = PQfnumber(res, "nspname");
1870 :
1871 0 : if (ntups == 0)
1872 0 : return;
1873 :
1874 0 : if (report->file == NULL &&
1875 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1876 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1877 :
1878 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1879 :
1880 0 : for (int rowno = 0; rowno < ntups; rowno++)
1881 0 : fprintf(report->file, " (oid=%s) %s.%s\n",
1882 0 : PQgetvalue(res, rowno, i_conoid),
1883 0 : PQgetvalue(res, rowno, i_nspname),
1884 0 : PQgetvalue(res, rowno, i_conname));
1885 0 : }
1886 :
1887 : /*
1888 : * Verify that no user-defined encoding conversions exist.
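     : *
     : * Illustrative remediation (hypothetical conversion name): drop the
     : * conversion in the old cluster before retrying the upgrade,
     : *
     : *     DROP CONVERSION myschema.latin1_to_utf8;
     : *
     : * and recreate it afterwards against the version 14+ conversion function
     : * signature.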
1889 : */
1890 : static void
1891 0 : check_for_user_defined_encoding_conversions(ClusterInfo *cluster)
1892 : {
1893 0 : UpgradeTaskReport report;
1894 0 : UpgradeTask *task = upgrade_task_create();
1895 0 : const char *query;
1896 :
1897 0 : prep_status("Checking for user-defined encoding conversions");
1898 :
1899 0 : report.file = NULL;
1900 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1901 0 : log_opts.basedir,
1902 : "encoding_conversions.txt");
1903 :
1904 : /*
1905 : * The query below hardcodes FirstNormalObjectId as 16384 rather than
1906 : * interpolating that C #define into the query because, if that #define is
1907 : * ever changed, the cutoff we want to use is the value used by
1908 : * pre-version 14 servers, not that of some future version.
1909 : */
1910 0 : query = "SELECT c.oid as conoid, c.conname, n.nspname "
1911 : "FROM pg_catalog.pg_conversion c, "
1912 : " pg_catalog.pg_namespace n "
1913 : "WHERE c.connamespace = n.oid AND "
1914 : " c.oid >= 16384";
1915 :
1916 0 : upgrade_task_add_step(task, query,
1917 : process_user_defined_encoding_conversions,
1918 : true, &report);
1919 0 : upgrade_task_run(task, cluster);
1920 0 : upgrade_task_free(task);
1921 :
1922 0 : if (report.file)
1923 : {
1924 0 : fclose(report.file);
1925 0 : pg_log(PG_REPORT, "fatal");
1926 0 : pg_fatal("Your installation contains user-defined encoding conversions.\n"
1927 : "The conversion function parameters changed in PostgreSQL version 14\n"
1928 : "so this cluster cannot currently be upgraded. You can remove the\n"
1929 : "encoding conversions in the old cluster and restart the upgrade.\n"
1930 : "A list of user-defined encoding conversions is in the file:\n"
1931 0 : " %s", report.path);
1932 : }
1933 : else
1934 0 : check_ok();
1935 0 : }
1936 :
1937 : /*
1938 : * Callback function for processing results of query for
1939 : * check_for_unicode_update()'s UpgradeTask. If the query returned any rows
1940 : * (i.e., the check failed), write the details to the report file.
1941 : */
1942 : static void
1943 0 : process_unicode_update(DbInfo *dbinfo, PGresult *res, void *arg)
1944 : {
1945 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1946 0 : int ntups = PQntuples(res);
1947 0 : int i_reloid = PQfnumber(res, "reloid");
1948 0 : int i_nspname = PQfnumber(res, "nspname");
1949 0 : int i_relname = PQfnumber(res, "relname");
1950 :
1951 0 : if (ntups == 0)
1952 0 : return;
1953 :
1954 0 : if (report->file == NULL &&
1955 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1956 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1957 :
1958 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1959 :
1960 0 : for (int rowno = 0; rowno < ntups; rowno++)
1961 0 : fprintf(report->file, " (oid=%s) %s.%s\n",
1962 0 : PQgetvalue(res, rowno, i_reloid),
1963 0 : PQgetvalue(res, rowno, i_nspname),
1964 0 : PQgetvalue(res, rowno, i_relname));
1965 0 : }
1966 :
1967 : /*
1968 : * Check if the Unicode version built into Postgres changed between the old
1969 : * cluster and the new cluster.
1970 : */
1971 : static bool
1972 0 : unicode_version_changed(ClusterInfo *cluster)
1973 : {
1974 0 : PGconn *conn_template1 = connectToServer(cluster, "template1");
1975 0 : PGresult *res;
1976 0 : char *old_unicode_version;
1977 0 : bool unicode_updated;
1978 :
1979 0 : res = executeQueryOrDie(conn_template1, "SELECT unicode_version()");
1980 0 : old_unicode_version = PQgetvalue(res, 0, 0);
1981 0 : unicode_updated = (strcmp(old_unicode_version, PG_UNICODE_VERSION) != 0);
1982 :
1983 0 : PQclear(res);
1984 0 : PQfinish(conn_template1);
1985 :
1986 0 : return unicode_updated;
1987 0 : }
1988 :
1989 : /*
1990 : * check_for_unicode_update()
1991 : *
1993 1992 : * Check if the versions of Unicode in the old server and the new server
1994 1993 : * differ. If so, check for indexes, partitioned tables, or constraints that
1994 : * use expressions with functions dependent on Unicode behavior.
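     : *
     : * Illustrative example of an object this check can report (hypothetical
     : * names; assumes a UTF-8 database using a built-in Unicode-aware locale):
     : *
     : *     CREATE INDEX t_lower_idx ON t (lower(txt));
     : *
     : * lower() on text depends on Unicode character semantics, so a Unicode
     : * update between the two server versions may change its results.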
1995 : */
1996 : static void
1997 0 : check_for_unicode_update(ClusterInfo *cluster)
1998 : {
1999 0 : UpgradeTaskReport report;
2000 0 : UpgradeTask *task;
2001 0 : const char *query;
2002 :
2003 : /*
2004 : * The builtin provider did not exist prior to version 17. While there are
2005 : * still problems that could potentially be caught from earlier versions,
2007 2006 : * such as an index on NORMALIZE(), we don't check for those here.
2007 : */
2008 0 : if (GET_MAJOR_VERSION(cluster->major_version) < 1700)
2009 0 : return;
2010 :
2011 0 : prep_status("Checking for objects affected by Unicode update");
2012 :
2013 0 : if (!unicode_version_changed(cluster))
2014 : {
2015 0 : check_ok();
2016 0 : return;
2017 : }
2018 :
2019 0 : report.file = NULL;
2020 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
2021 0 : log_opts.basedir,
2022 : "unicode_dependent_rels.txt");
2023 :
2024 0 : query =
2025 : /* collations that use built-in Unicode for character semantics */
2026 : "WITH collations(collid) AS ( "
2027 : " SELECT oid FROM pg_collation "
2028 : " WHERE collprovider='b' AND colllocale IN ('C.UTF-8','PG_UNICODE_FAST') "
2029 : /* include default collation, if appropriate */
2030 : " UNION "
2031 : " SELECT 'pg_catalog.default'::regcollation FROM pg_database "
2032 : " WHERE datname = current_database() AND "
2033 : " datlocprovider='b' AND datlocale IN ('C.UTF-8','PG_UNICODE_FAST') "
2034 : "), "
2035 : /* functions that use built-in Unicode */
2036 : "functions(procid) AS ( "
2037 : " SELECT proc.oid FROM pg_proc proc "
2038 : " WHERE proname IN ('normalize','unicode_assigned','unicode_version','is_normalized') AND "
2039 : " pronamespace='pg_catalog'::regnamespace "
2040 : "), "
2041 : /* operators that use the input collation for character semantics */
2042 : "coll_operators(operid, procid, collid) AS ( "
2043 : " SELECT oper.oid, oper.oprcode, collid FROM pg_operator oper, collations "
2044 : " WHERE oprname IN ('~', '~*', '!~', '!~*', '~~*', '!~~*') AND "
2045 : " oprnamespace='pg_catalog'::regnamespace AND "
2046 : " oprright='pg_catalog.text'::pg_catalog.regtype "
2047 : "), "
2048 : /* functions that use the input collation for character semantics */
2049 : "coll_functions(procid, collid) AS ( "
2050 : " SELECT proc.oid, collid FROM pg_proc proc, collations "
2051 : " WHERE pronamespace='pg_catalog'::regnamespace AND "
2052 : " ((proname IN ('lower','initcap','upper','casefold') AND "
2053 : " pronargs = 1 AND "
2054 : " proargtypes[0] = 'pg_catalog.text'::pg_catalog.regtype) OR "
2055 : " (proname = 'substring' AND pronargs = 2 AND "
2056 : " proargtypes[0] = 'pg_catalog.text'::pg_catalog.regtype AND "
2057 : " proargtypes[1] = 'pg_catalog.text'::pg_catalog.regtype) OR "
2058 : " proname LIKE 'regexp_%') "
2059 : /* include functions behind the operators listed above */
2060 : " UNION "
2061 : " SELECT procid, collid FROM coll_operators "
2062 : "), "
2063 :
2064 : /*
2065 : * Generate patterns to search a pg_node_tree for the above functions and
2066 : * operators.
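     : *
     : * Each pattern is a regular expression applied to the text form of a
     : * pg_node_tree; for example (placeholder OIDs) it matches fragments such
     : * as "{FUNCEXPR :funcid 1234 ... :inputcollid 5678 }".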
2067 : */
2068 : "patterns(p) AS ( "
2069 : " SELECT '{FUNCEXPR :funcid ' || procid::text || '[ }]' FROM functions "
2070 : " UNION "
2071 : " SELECT '{OPEXPR :opno ' || operid::text || ' (:\\w+ \\w+ )*' || "
2072 : " ':inputcollid ' || collid::text || '[ }]' FROM coll_operators "
2073 : " UNION "
2074 : " SELECT '{FUNCEXPR :funcid ' || procid::text || ' (:\\w+ \\w+ )*' || "
2075 : " ':inputcollid ' || collid::text || '[ }]' FROM coll_functions "
2076 : ") "
2077 :
2078 : /*
2079 : * Match the patterns against expressions used for relation contents.
2080 : */
2081 : "SELECT reloid, relkind, nspname, relname "
2082 : " FROM ( "
2083 : " SELECT conrelid "
2084 : " FROM pg_constraint, patterns WHERE conbin::text ~ p "
2085 : " UNION "
2086 : " SELECT indexrelid "
2087 : " FROM pg_index, patterns WHERE indexprs::text ~ p OR indpred::text ~ p "
2088 : " UNION "
2089 : " SELECT partrelid "
2090 : " FROM pg_partitioned_table, patterns WHERE partexprs::text ~ p "
2091 : " UNION "
2092 : " SELECT ev_class "
2093 : " FROM pg_rewrite, pg_class, patterns "
2094 : " WHERE ev_class = pg_class.oid AND relkind = 'm' AND ev_action::text ~ p"
2095 : " ) s(reloid), pg_class c, pg_namespace n, pg_database d "
2096 : " WHERE s.reloid = c.oid AND c.relnamespace = n.oid AND "
2097 : " d.datname = current_database() AND "
2098 : " d.encoding = pg_char_to_encoding('UTF8');";
2099 :
2100 0 : task = upgrade_task_create();
2101 0 : upgrade_task_add_step(task, query,
2102 : process_unicode_update,
2103 : true, &report);
2104 0 : upgrade_task_run(task, cluster);
2105 0 : upgrade_task_free(task);
2106 :
2107 0 : if (report.file)
2108 : {
2109 0 : fclose(report.file);
2110 0 : report_status(PG_WARNING, "warning");
2111 0 : pg_log(PG_WARNING, "Your installation contains relations that might be affected by a new version of Unicode.\n"
2112 : "A list of potentially-affected relations is in the file:\n"
2113 0 : " %s", report.path);
2114 0 : }
2115 : else
2116 0 : check_ok();
2117 0 : }
2118 :
2119 : /*
2120 : * check_new_cluster_replication_slots()
2121 : *
2122 : * Validate the new cluster's readiness for migrating replication slots:
2124 2123 : * - Ensure there are no existing logical replication slots on the new
2125 2124 : * cluster when migrating logical slots.
2126 2125 : * - Ensure the conflict detection slot does not exist on the new cluster
2127 2126 : * when migrating subscriptions with retain_dead_tuples enabled.
2128 2127 : * - Ensure that the parameter settings on the new cluster necessary for
2129 2128 : * creating slots are sufficient.
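     : *
     : * Illustrative new-cluster settings (postgresql.conf), assuming the old
     : * cluster has N logical slots to migrate:
     : *
     : *     wal_level = replica              # or logical; anything but minimal
     : *     max_replication_slots = N        # N + 1 with retain_dead_tuples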
2129 : */
2130 : static void
2131 0 : check_new_cluster_replication_slots(void)
2132 : {
2133 0 : PGresult *res;
2134 0 : PGconn *conn;
2135 0 : int nslots_on_old;
2136 0 : int nslots_on_new;
2137 0 : int rdt_slot_on_new;
2138 0 : int max_replication_slots;
2139 0 : char *wal_level;
2140 0 : int i_nslots_on_new;
2141 0 : int i_rdt_slot_on_new;
2142 :
2143 : /*
2145 2144 : * Logical slots can be migrated since PG17, and the physical conflict
2146 2145 : * detection slot (CONFLICT_DETECTION_SLOT) can be migrated since PG19.
2146 : */
2147 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1600)
2148 0 : return;
2149 :
2150 0 : nslots_on_old = count_old_cluster_logical_slots();
2151 :
2152 : /*
2153 : * Quick return if there are no slots to be migrated and no subscriptions
2154 : * have the retain_dead_tuples option enabled.
2155 : */
2156 0 : if (nslots_on_old == 0 && !old_cluster.sub_retain_dead_tuples)
2157 0 : return;
2158 :
2159 0 : conn = connectToServer(&new_cluster, "template1");
2160 :
2161 0 : prep_status("Checking for new cluster replication slots");
2162 :
2163 0 : res = executeQueryOrDie(conn, "SELECT %s AS nslots_on_new, %s AS rdt_slot_on_new "
2164 : "FROM pg_catalog.pg_replication_slots",
2165 0 : nslots_on_old > 0
2166 : ? "COUNT(*) FILTER (WHERE slot_type = 'logical' AND temporary IS FALSE)"
2167 : : "0",
2168 0 : old_cluster.sub_retain_dead_tuples
2169 : ? "COUNT(*) FILTER (WHERE slot_name = 'pg_conflict_detection')"
2170 : : "0");
2171 :
2172 0 : if (PQntuples(res) != 1)
2173 0 : pg_fatal("could not count the number of replication slots");
2174 :
2175 0 : i_nslots_on_new = PQfnumber(res, "nslots_on_new");
2176 0 : i_rdt_slot_on_new = PQfnumber(res, "rdt_slot_on_new");
2177 :
2178 0 : nslots_on_new = atoi(PQgetvalue(res, 0, i_nslots_on_new));
2179 :
2180 0 : if (nslots_on_new)
2181 : {
2182 0 : Assert(nslots_on_old);
2183 0 : pg_fatal("expected 0 logical replication slots but found %d",
2184 0 : nslots_on_new);
2185 : }
2186 :
2187 0 : rdt_slot_on_new = atoi(PQgetvalue(res, 0, i_rdt_slot_on_new));
2188 :
2189 0 : if (rdt_slot_on_new)
2190 : {
2191 0 : Assert(old_cluster.sub_retain_dead_tuples);
2192 0 : pg_fatal("The replication slot \"pg_conflict_detection\" already exists on the new cluster");
2193 : }
2194 :
2195 0 : PQclear(res);
2196 :
2197 0 : res = executeQueryOrDie(conn, "SELECT setting FROM pg_settings "
2198 : "WHERE name IN ('wal_level', 'max_replication_slots') "
2199 : "ORDER BY name DESC;");
2200 :
2201 0 : if (PQntuples(res) != 2)
2202 0 : pg_fatal("could not determine parameter settings on new cluster");
2203 :
2204 0 : wal_level = PQgetvalue(res, 0, 0);
2205 :
2206 0 : if ((nslots_on_old > 0 || old_cluster.sub_retain_dead_tuples) &&
2207 0 : strcmp(wal_level, "minimal") == 0)
2208 0 : pg_fatal("\"wal_level\" must be \"replica\" or \"logical\" but is set to \"%s\"",
2209 0 : wal_level);
2210 :
2211 0 : max_replication_slots = atoi(PQgetvalue(res, 1, 0));
2212 :
2213 0 : if (old_cluster.sub_retain_dead_tuples &&
2214 0 : nslots_on_old + 1 > max_replication_slots)
2215 0 : pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
2216 : "logical replication slots on the old cluster plus one additional slot required "
2217 : "for retaining conflict detection information (%d)",
2218 0 : max_replication_slots, nslots_on_old + 1);
2219 :
2220 0 : if (nslots_on_old > max_replication_slots)
2221 0 : pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
2222 : "logical replication slots (%d) on the old cluster",
2223 0 : max_replication_slots, nslots_on_old);
2224 :
2225 0 : PQclear(res);
2226 0 : PQfinish(conn);
2227 :
2228 0 : check_ok();
2229 0 : }
2230 :
2231 : /*
2232 : * check_new_cluster_subscription_configuration()
2233 : *
2234 : * Verify that the max_active_replication_origins configuration specified is
2235 : * enough for creating the subscriptions. This is required to create the
2236 : * replication origin for each subscription.
2237 : */
2238 : static void
2239 0 : check_new_cluster_subscription_configuration(void)
2240 : {
2241 0 : PGresult *res;
2242 0 : PGconn *conn;
2243 0 : int max_active_replication_origins;
2244 :
2245 : /* Subscriptions and their dependencies can be migrated since PG17. */
2246 0 : if (GET_MAJOR_VERSION(old_cluster.major_version) < 1700)
2247 0 : return;
2248 :
2249 : /* Quick return if there are no subscriptions to be migrated. */
2250 0 : if (old_cluster.nsubs == 0)
2251 0 : return;
2252 :
2253 0 : prep_status("Checking for new cluster configuration for subscriptions");
2254 :
2255 0 : conn = connectToServer(&new_cluster, "template1");
2256 :
2257 0 : res = executeQueryOrDie(conn, "SELECT setting FROM pg_settings "
2258 : "WHERE name = 'max_active_replication_origins';");
2259 :
2260 0 : if (PQntuples(res) != 1)
2261 0 : pg_fatal("could not determine parameter settings on new cluster");
2262 :
2263 0 : max_active_replication_origins = atoi(PQgetvalue(res, 0, 0));
2264 0 : if (old_cluster.nsubs > max_active_replication_origins)
2265 0 : pg_fatal("\"max_active_replication_origins\" (%d) must be greater than or equal to the number of "
2266 : "subscriptions (%d) on the old cluster",
2267 0 : max_active_replication_origins, old_cluster.nsubs);
2268 :
2269 0 : PQclear(res);
2270 0 : PQfinish(conn);
2271 :
2272 0 : check_ok();
2273 0 : }
2274 :
2275 : /*
2276 : * check_old_cluster_for_valid_slots()
2277 : *
2278 : * Verify that all the logical slots are valid and have consumed all the WAL
2279 : * before shutdown.
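     : *
     : * Illustrative remediation (hypothetical slot name): invalid slots can be
     : * dropped on the old cluster,
     : *
     : *     SELECT pg_drop_replication_slot('stale_slot');
     : *
     : * while slots that have not consumed all WAL need their consumers to catch
     : * up before the upgrade is retried.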
2280 : */
2281 : static void
2282 0 : check_old_cluster_for_valid_slots(void)
2283 : {
2284 0 : char output_path[MAXPGPATH];
2285 0 : FILE *script = NULL;
2286 :
2287 0 : prep_status("Checking for valid logical replication slots");
2288 :
2289 0 : snprintf(output_path, sizeof(output_path), "%s/%s",
2290 0 : log_opts.basedir,
2291 : "invalid_logical_slots.txt");
2292 :
2293 0 : for (int dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
2294 : {
2295 0 : LogicalSlotInfoArr *slot_arr = &old_cluster.dbarr.dbs[dbnum].slot_arr;
2296 :
2297 0 : for (int slotnum = 0; slotnum < slot_arr->nslots; slotnum++)
2298 : {
2299 0 : LogicalSlotInfo *slot = &slot_arr->slots[slotnum];
2300 :
2301 : /* Is the slot usable? */
2302 0 : if (slot->invalid)
2303 : {
2304 0 : if (script == NULL &&
2305 0 : (script = fopen_priv(output_path, "w")) == NULL)
2306 0 : pg_fatal("could not open file \"%s\": %m", output_path);
2307 :
2308 0 : fprintf(script, "The slot \"%s\" is invalid\n",
2309 0 : slot->slotname);
2310 :
2311 0 : continue;
2312 : }
2313 :
2314 : /*
2315 : * Do additional check to ensure that all logical replication
2316 : * slots have consumed all the WAL before shutdown.
2317 : *
2318 : * Note: This can be satisfied only when the old cluster has been
2319 : * shut down, so we skip this for live checks.
2320 : */
2321 0 : if (!user_opts.live_check && !slot->caught_up)
2322 : {
2323 0 : if (script == NULL &&
2324 0 : (script = fopen_priv(output_path, "w")) == NULL)
2325 0 : pg_fatal("could not open file \"%s\": %m", output_path);
2326 :
2327 0 : fprintf(script,
2328 : "The slot \"%s\" has not consumed the WAL yet\n",
2329 0 : slot->slotname);
2330 0 : }
2331 :
2332 : /*
2333 : * The name "pg_conflict_detection" (defined as
2334 : * CONFLICT_DETECTION_SLOT) has been reserved for logical
2335 : * replication conflict detection slot since PG19.
2336 : */
2337 0 : if (strcmp(slot->slotname, "pg_conflict_detection") == 0)
2338 : {
2339 0 : if (script == NULL &&
2340 0 : (script = fopen_priv(output_path, "w")) == NULL)
2341 0 : pg_fatal("could not open file \"%s\": %m", output_path);
2342 :
2343 0 : fprintf(script,
2344 : "The slot name \"%s\" is reserved\n",
2345 0 : slot->slotname);
2346 0 : }
2347 0 : }
2348 0 : }
2349 :
2350 0 : if (script)
2351 : {
2352 0 : fclose(script);
2353 :
2354 0 : pg_log(PG_REPORT, "fatal");
2355 0 : pg_fatal("Your installation contains logical replication slots that cannot be upgraded.\n"
2356 : "You can remove invalid slots and/or consume the pending WAL for other slots,\n"
2357 : "and then restart the upgrade.\n"
2358 : "A list of the problematic slots is in the file:\n"
2359 0 : " %s", output_path);
2360 : }
2361 :
2362 0 : check_ok();
2363 0 : }
2364 :
2365 : /*
2366 : * Callback function for processing results of query for
2367 : * check_old_cluster_subscription_state()'s UpgradeTask. If the query returned
2368 : * any rows (i.e., the check failed), write the details to the report file.
2369 : */
2370 : static void
2371 0 : process_old_sub_state_check(DbInfo *dbinfo, PGresult *res, void *arg)
2372 : {
2373 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
2374 0 : int ntups = PQntuples(res);
2375 0 : int i_srsubstate = PQfnumber(res, "srsubstate");
2376 0 : int i_subname = PQfnumber(res, "subname");
2377 0 : int i_nspname = PQfnumber(res, "nspname");
2378 0 : int i_relname = PQfnumber(res, "relname");
2379 :
2380 0 : if (ntups == 0)
2381 0 : return;
2382 :
2383 0 : if (report->file == NULL &&
2384 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
2385 0 : pg_fatal("could not open file \"%s\": %m", report->path);
2386 :
2387 0 : for (int i = 0; i < ntups; i++)
2388 0 : fprintf(report->file, "The table sync state \"%s\" is not allowed for database:\"%s\" subscription:\"%s\" schema:\"%s\" relation:\"%s\"\n",
2389 0 : PQgetvalue(res, i, i_srsubstate),
2390 0 : dbinfo->db_name,
2391 0 : PQgetvalue(res, i, i_subname),
2392 0 : PQgetvalue(res, i, i_nspname),
2393 0 : PQgetvalue(res, i, i_relname));
2394 0 : }
2395 :
2396 : /*
2397 : * check_old_cluster_subscription_state()
2398 : *
2400 2399 : * Verify that the replication origin corresponding to each of the
2401 2400 : * subscriptions is present and that each of the subscribed tables is in
2402 2401 : * the 'i' (initialize) or 'r' (ready) state.
2402 : */
2403 : static void
2404 0 : check_old_cluster_subscription_state(void)
2405 : {
2406 0 : UpgradeTask *task = upgrade_task_create();
2407 0 : UpgradeTaskReport report;
2408 0 : const char *query;
2409 0 : PGresult *res;
2410 0 : PGconn *conn;
2411 0 : int ntup;
2412 :
2413 0 : prep_status("Checking for subscription state");
2414 :
2415 0 : report.file = NULL;
2416 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
2417 0 : log_opts.basedir,
2418 : "subs_invalid.txt");
2419 :
2420 : /*
2421 : * Check that all the subscriptions have their respective replication
2422 : * origin. This check only needs to run once.
2423 : */
2424 0 : conn = connectToServer(&old_cluster, old_cluster.dbarr.dbs[0].db_name);
2425 0 : res = executeQueryOrDie(conn,
2426 : "SELECT d.datname, s.subname "
2427 : "FROM pg_catalog.pg_subscription s "
2428 : "LEFT OUTER JOIN pg_catalog.pg_replication_origin o "
2429 : " ON o.roname = 'pg_' || s.oid "
2430 : "INNER JOIN pg_catalog.pg_database d "
2431 : " ON d.oid = s.subdbid "
2432 : "WHERE o.roname IS NULL;");
2433 0 : ntup = PQntuples(res);
2434 0 : for (int i = 0; i < ntup; i++)
2435 : {
2436 0 : if (report.file == NULL &&
2437 0 : (report.file = fopen_priv(report.path, "w")) == NULL)
2438 0 : pg_fatal("could not open file \"%s\": %m", report.path);
2439 0 : fprintf(report.file, "The replication origin is missing for database:\"%s\" subscription:\"%s\"\n",
2440 0 : PQgetvalue(res, i, 0),
2441 0 : PQgetvalue(res, i, 1));
2442 0 : }
2443 0 : PQclear(res);
2444 0 : PQfinish(conn);
2445 :
2446 : /*
2448 2447 : * We don't allow the upgrade if there is a risk of a dangling slot or
2449 2448 : * origin left behind from the initial table sync after the upgrade.
2449 : *
2451 2450 : * In the 'i' (initialize) state, the slot/origin has not been created yet;
2452 2451 : * in the 'r' (ready) state, it was created previously but has already been
2453 2452 : * dropped. These two states are supported by pg_upgrade. The other states
2454 2453 : * listed below are not supported:
2454 : *
2455 : * a) SUBREL_STATE_DATASYNC: A relation upgraded while in this state would
2456 : * retain a replication slot and origin. The sync worker spawned after the
2457 : * upgrade cannot drop them because the subscription ID used for the slot
2458 : * and origin name no longer matches.
2459 : *
2460 : * b) SUBREL_STATE_SYNCDONE: A relation upgraded while in this state would
2461 : * retain the replication origin when there is a failure in tablesync
2462 : * worker immediately after dropping the replication slot in the
2463 : * publisher.
2464 : *
2465 : * c) SUBREL_STATE_FINISHEDCOPY: A tablesync worker spawned to work on a
2466 : * relation upgraded while in this state would expect an origin ID with
2467 : * the OID of the subscription used before the upgrade, causing it to
2468 : * fail.
2469 : *
2470 : * d) SUBREL_STATE_SYNCWAIT, SUBREL_STATE_CATCHUP and
2471 : * SUBREL_STATE_UNKNOWN: These states are not stored in the catalog, so we
2472 : * need not allow these states.
2473 : */
2474 0 : query = "SELECT r.srsubstate, s.subname, n.nspname, c.relname "
2475 : "FROM pg_catalog.pg_subscription_rel r "
2476 : "LEFT JOIN pg_catalog.pg_subscription s"
2477 : " ON r.srsubid = s.oid "
2478 : "LEFT JOIN pg_catalog.pg_class c"
2479 : " ON r.srrelid = c.oid "
2480 : "LEFT JOIN pg_catalog.pg_namespace n"
2481 : " ON c.relnamespace = n.oid "
2482 : "WHERE r.srsubstate NOT IN ('i', 'r') "
2483 : "ORDER BY s.subname";
2484 :
2485 0 : upgrade_task_add_step(task, query, process_old_sub_state_check,
2486 : true, &report);
2487 :
2488 0 : upgrade_task_run(task, &old_cluster);
2489 0 : upgrade_task_free(task);
2490 :
2491 0 : if (report.file)
2492 : {
2493 0 : fclose(report.file);
2494 0 : pg_log(PG_REPORT, "fatal");
2495 0 : pg_fatal("Your installation contains subscriptions without origin or having relations not in i (initialize) or r (ready) state.\n"
2496 : "You can allow the initial sync to finish for all relations and then restart the upgrade.\n"
2497 : "A list of the problematic subscriptions is in the file:\n"
2498 0 : " %s", report.path);
2499 : }
2500 : else
2501 0 : check_ok();
2502 0 : }
|