$description = $via_count ? 'via COUNT' : 'approximate';
$updraftplus->log("Table $table: Total expected rows ($description): ".$expected_rows);
$this->stow("# Approximate rows expected in table: $expected_rows\n");
if ($expected_rows > UPDRAFTPLUS_WARN_DB_ROWS) {
$this->many_rows_warning = true;
$updraftplus->log(sprintf(__("Table %s has very many rows (%s) - we hope your web hosting company gives you enough resources to dump out that table in the backup.", 'updraftplus'), $table, $expected_rows).' '.__('If not, you will need to either remove data from this table, or contact your hosting company to request more resources.', 'updraftplus'), 'warning', 'manyrows_'.$this->whichdb_suffix.$table);
/**
 * This function will return a SQL WHERE clause to exclude updraft jobdata
 *
 * @param array $where - an array of where clauses to add to
 * @param string $table - the table we want to add a where clause for
 *
 * @return array - returns an array of where clauses for the table
 */
public function backup_exclude_jobdata($where, $table) {
// Don't include the job data for any backups - so that when the database is restored, it doesn't continue an apparently incomplete backup
global $updraftplus;
$table_prefix = $updraftplus->get_table_prefix(false); // or we can just use $this->table_prefix_raw ??
if ('wp' == $this->whichdb && (!empty($table_prefix) && strtolower($table_prefix.'sitemeta') == strtolower($table))) {
$where[] = 'meta_key NOT LIKE "updraft_jobdata_%"';
} elseif ('wp' == $this->whichdb && (!empty($table_prefix) && strtolower($table_prefix.'options') == strtolower($table))) {
// These might look similar, but the quotes are different
if ('win' == strtolower(substr(PHP_OS, 0, 3))) {
$updraft_jobdata = "'updraft_jobdata_%'";
$site_transient_update = "'_site_transient_update_%'";
$updraft_jobdata = '"updraft_jobdata_%"';
$site_transient_update = '"_site_transient_update_%"';
$where[] = 'option_name NOT LIKE '.$updraft_jobdata.' AND option_name NOT LIKE '.$site_transient_update;
}
return $where;
}
/**
 * Produce a dump of the table using a mysqldump binary
 *
 * @param String $potsql - the path to the mysqldump binary
 * @param String $table_name - the name of the table being dumped
 *
 * @return Boolean - success status
 */
private function backup_table_bindump($potsql, $table_name) {
$microtime = microtime(true);
global $updraftplus, $wpdb;
// Deal with Windows/old MySQL setups with erroneous table prefixes differing in case
// Can't get binary mysqldump to make this transformation
// $dump_as_table = ($this->duplicate_tables_exist == false && stripos($table, $this->table_prefix) === 0 && strpos($table, $this->table_prefix) !== 0) ? $this->table_prefix.substr($table, strlen($this->table_prefix)) : $table;
$pfile = md5(time().rand()).'.tmp';
file_put_contents($this->updraft_dir.'/'.$pfile, "[mysqldump]\npassword=\"".addslashes($this->dbinfo['pass'])."\"\n");
$where_array = apply_filters('updraftplus_backup_table_sql_where', array(), $table_name, $this);
$where = '';
if ('win' === strtolower(substr(PHP_OS, 0, 3))) {
// On Windows, the PHP escapeshellarg() replaces the % char with white space, so we change the % char to [percent_sign] here and change it back after escapeshellarg() has finished processing it
$where_array = str_replace('%', '[percent_sign]', $where_array);
}
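// Rough illustration of why: on Windows, escapeshellarg('a%b') yields "a b" (the % is
// blanked out, per the PHP manual), whereas the [percent_sign] placeholder passes
// through escaping untouched and can be restored afterwards.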
if (!empty($where_array) && is_array($where_array)) {
// N.B. Don't add a WHERE prefix here; most versions of mysqldump silently strip it out, but one was encountered that didn't.
$first_loop = true;
foreach ($where_array as $condition) {
if (!$first_loop) $where .= " AND ";
$where .= $condition;
$first_loop = false;
}
}
// Note: escapeshellarg() adds quotes around the string
if ($where) $where = "--where=".escapeshellarg($where);
if ('' !== $where && 'win' === strtolower(substr(PHP_OS, 0, 3))) {
// change the [percent_sign] back to % char
$where = str_replace('[percent_sign]', '%', $where);
}
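// Net effect (illustrative): a single exclusion condition ends up on the command line as
// something like --where='meta_key NOT LIKE "updraft_jobdata_%"' (POSIX quoting shown;
// on Windows the outer quotes are double quotes).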
if (strtolower(substr(PHP_OS, 0, 3)) == 'win') {
$exec = "cd ".escapeshellarg(str_replace('/', '\\', $this->updraft_dir))." & ";
$exec = "cd ".escapeshellarg($this->updraft_dir)."; ";
// Allow --max_allowed_packet to be configured via constant. Experience has shown some customers with complex CMS or pagebuilder setups can have very large postmeta entries.
$msqld_max_allowed_packet = (defined('UPDRAFTPLUS_MYSQLDUMP_MAX_ALLOWED_PACKET') && (is_int(UPDRAFTPLUS_MYSQLDUMP_MAX_ALLOWED_PACKET) || is_string(UPDRAFTPLUS_MYSQLDUMP_MAX_ALLOWED_PACKET))) ? UPDRAFTPLUS_MYSQLDUMP_MAX_ALLOWED_PACKET : '12M';
$exec .= "$potsql --defaults-file=$pfile $where --max-allowed-packet=$msqld_max_allowed_packet --quote-names --add-drop-table";
static $mysql_version = null;
if (null === $mysql_version) {
$mysql_version = $wpdb->get_var('SELECT VERSION()');
if ('' == $mysql_version) $mysql_version = $wpdb->db_version();
}
if ($mysql_version && version_compare($mysql_version, '5.1', '>=')) {
$exec .= " --no-tablespaces";
}
$exec .= " --skip-comments --skip-set-charset --allow-keywords --dump-date --extended-insert --user=".escapeshellarg($this->dbinfo['user'])." ";
$host = $this->dbinfo['host'];
if (preg_match('#^(.*):(\d+)$#', $host, $matches)) {
// The escapeshellarg() on $matches[2] is only to avoid tripping static analysis tools
$exec .= "--host=".escapeshellarg($matches[1])." --port=".escapeshellarg($matches[2])." ";
} elseif (preg_match('#^(.*):(.*)$#', $host, $matches) && file_exists($matches[2])) {
$exec .= "--host=".escapeshellarg($matches[1])." --socket=".escapeshellarg($matches[2])." ";
$exec .= "--host=".escapeshellarg($host)." ";
$exec .= $this->dbinfo['name']." ".escapeshellarg($table_name);
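// Sketch of the assembled command at this point (paths and names hypothetical):
//   cd '/path/to/updraft'; /usr/bin/mysqldump --defaults-file=<md5>.tmp --where=... \
//     --max-allowed-packet=12M --quote-names --add-drop-table --no-tablespaces \
//     --skip-comments --skip-set-charset --allow-keywords --dump-date --extended-insert \
//     --user='dbuser' --host='localhost' dbname 'wp_posts'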
$ret = false;
$writes = 0;
$write_bytes = 0;
$handle = function_exists('popen') ? popen($exec, 'r') : false;
if ($handle) {
while (!feof($handle)) {
$w = fgets($handle, 1048576);
if (is_string($w) && $w) {
$this->stow($w);
$writes++;
$write_bytes += strlen($w);
}
}
// The manual page for pclose() claims that only -1 indicates an error, but this is untrue
$ret = pclose($handle);
if (0 != $ret) {
$updraftplus->log("Binary mysqldump: error (code: $ret)");
// Keep counter of failures? Change value of binsqldump?
$ret = false;
} elseif ($writes) {
$updraftplus->log("Table $table_name: binary mysqldump finished (writes: $writes, bytes $write_bytes, return code $ret) in ".sprintf("%.02f", max(microtime(true)-$microtime, 0.00001))." seconds");
$ret = true;
}
} else {
$updraftplus->log("Binary mysqldump error: bindump popen failed");
}
@unlink($this->updraft_dir.'/'.$pfile);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
return $ret;
}
/**
 * Write out the initial backup information for a table to the currently open file
 *
 * @param String $table - Full name of database table to backup
 * @param String $dump_as_table - Table name to use when writing out
 * @param String $table_type - Table type - 'VIEW' is supported; otherwise it is treated as an ordinary table
 * @param Array $table_structure - Table structure as returned by a DESCRIBE command
 */
private function write_table_backup_beginning($table, $dump_as_table, $table_type, $table_structure) {
$this->stow("\n# Delete any existing table ".UpdraftPlus_Manipulation_Functions::backquote($table)."\n\nDROP TABLE IF EXISTS " . UpdraftPlus_Manipulation_Functions::backquote($dump_as_table).";\n");
if ('VIEW' == $table_type) {
$this->stow("DROP VIEW IF EXISTS " . UpdraftPlus_Manipulation_Functions::backquote($dump_as_table) . ";\n");
$description = ('VIEW' == $table_type) ? 'view' : 'table';
$this->stow("\n# Table structure of $description ".UpdraftPlus_Manipulation_Functions::backquote($table)."\n\n");
$create_table = $this->wpdb_obj->get_results("SHOW CREATE TABLE ".UpdraftPlus_Manipulation_Functions::backquote($table), ARRAY_N);
if (false === $create_table) {
$this->stow("#\n# Error with SHOW CREATE TABLE for $table\n#\n");
$create_line = UpdraftPlus_Manipulation_Functions::str_lreplace('TYPE=', 'ENGINE=', $create_table[0][1]);
// Remove PAGE_CHECKSUM parameter from MyISAM - was internal, undocumented, later removed (so causes errors on import)
if (preg_match('/ENGINE=([^\s;]+)/', $create_line, $eng_match)) {
$engine = $eng_match[1];
if ('myisam' == strtolower($engine)) {
$create_line = preg_replace('/PAGE_CHECKSUM=\d\s?/', '', $create_line, 1);
}
}
if ($dump_as_table !== $table) $create_line = UpdraftPlus_Manipulation_Functions::str_replace_once($table, $dump_as_table, $create_line);
$this->stow($create_line.' ;');
if (false === $table_structure) {
$this->stow("#\n# Error getting $description structure of $table\n#\n");
// Add a comment preceding the beginning of the data
$this->stow("\n\n# ".sprintf("Data contents of $description %s", UpdraftPlus_Manipulation_Functions::backquote($table))."\n\n");
/**
 * Suggest a beginning value for how many rows to fetch in each SELECT statement (before taking into account resumptions)
 *
 * @param String $table - the full table name
 *
 * @return Integer - the suggested number of rows
 */
private function get_rows_on_first_fetch($table) {
// In future, we could run over the table definition; if it is all non-massive defined lengths, we could base a calculation on that.
if ($this->table_prefix_raw.'term_relationships' == $table) {
// This table is known to have very small data lengths
$rows = 100000;
} elseif (preg_match('/meta$/i', $table)) {
// Meta-data rows tend to be short *on average*. 10MB / 4000 rows = 2.6KB/row, so this is still quite conservative.
$rows = 4000;
} else {
// The very conservative default
$rows = 1000;
}
return $rows;
}
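// For example, the /meta$/i heuristic in the method above matches wp_postmeta, wp_usermeta,
// wp_termmeta and wp_commentmeta under the standard 'wp_' prefix, while
// wp_term_relationships is matched by name and given the largest starting batch.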
/**
 * Suggest how many rows to fetch in each SELECT statement
 *
 * @param String $table - the table being fetched
 * @param Boolean $allow_further_reductions - whether to enable a second level of reductions (i.e. even fewer rows)
 * @param Boolean $is_first_fetch_for_table - whether this is the first fetch on this table
 * @param Integer|Boolean $expected_rows - if an integer, an estimate of the number of rows
 * @param Boolean $expected_via_count - if $expected_rows is an integer, then this indicates whether the estimate was made via a SELECT COUNT() statement
 *
 * @return Integer - the suggested number of rows to fetch
 */
private function number_of_rows_to_fetch($table, $allow_further_reductions, $is_first_fetch_for_table, $expected_rows = false, $expected_via_count = false) {
global $updraftplus;
// This used to be fixed at 500; but we (after a long time) saw a case that looked like an out-of-memory even at this level. Now that we have implemented resumptions, the risk of timeouts is much lower (we just need to process enough rows).
// October 2020: added further reductions
// Listed in increasing order due to the handling below. At the end it gets quite drastic. Note, though, that currently we don't store this in the job-data.
// A future improvement could, when things get drastic, grab and log data on the size of what is required, so that we can respond more dynamically. The strategy currently here will run out of road if memory falls short multiple times. See: https://stackoverflow.com/questions/4524019/how-to-get-the-byte-size-of-resultset-in-an-sql-query
$fetch_rows_reductions = array(500, 250, 200, 100);
$default_on_first_fetch = $this->get_rows_on_first_fetch($table);
$known_bigger_than_table = (!is_bool($expected_rows) && $expected_rows && $expected_via_count && $default_on_first_fetch > 2 * $expected_rows);
if ($known_bigger_than_table) $allow_further_reductions = true;
if ($allow_further_reductions) {
// If we're relying on LIMIT with offsets, then we have to be mindful of how that performs
$fetch_rows_reductions = array_merge($fetch_rows_reductions, array(50, 20, 5));
}
// Remove any that are far out of range
if ($known_bigger_than_table) {
foreach ($fetch_rows_reductions as $k => $reduce_to) {
if ($reduce_to > $expected_rows * 2 && count($fetch_rows_reductions) > 2) {
unset($fetch_rows_reductions[$k]);
}
}
}
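// Worked example (hypothetical numbers): if a COUNT() gave $expected_rows = 40 for a small
// table, the extended list (500, 250, 200, 100, 50, 20, 5) is pruned of values above
// 2 * 40 = 80 while more than two entries remain, leaving (50, 20, 5) to step down through.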
// If this is not the first fetch on a table, then get what was stored last time we set it (if we ever did). On the first fetch, reset back to the starting value (we presume problems are table-specific).
// This means that the same value will persist whilst the table is being backed up, both during the current resumption, and subsequent ones
$fetch_rows = $is_first_fetch_for_table ? $default_on_first_fetch : $updraftplus->jobdata_get('fetch_rows', $default_on_first_fetch);
$fetch_rows_at_start = $fetch_rows;
$resumptions_since_last_successful = $updraftplus->current_resumption - $updraftplus->last_successful_resumption;
// Do we need to reduce the number of rows we attempt to fetch?
// If something useful has happened on this run, then we don't try any reductions (we save them for a resumption after one on which nothing useful happened)
if ($known_bigger_than_table || (!$updraftplus->something_useful_happened && !empty($updraftplus->current_resumption) && $resumptions_since_last_successful > 1)) {
$break_after = $is_first_fetch_for_table ? max($resumptions_since_last_successful - 1, 1) : 1;
foreach ($fetch_rows_reductions as $reduce_to) {
if ($fetch_rows > $reduce_to) {
// Go down one level
$fetch_rows = $reduce_to;
$break_after--;
if ($break_after < 1) break;
}
}
$log_start = $updraftplus->current_resumption ? "Last successful resumption was $resumptions_since_last_successful runs ago" : "Table is relatively small";
$updraftplus->log("$log_start; fetch_rows will thus be: $fetch_rows (allow_further_reductions=$allow_further_reductions, is_first_fetch=$is_first_fetch_for_table, known_bigger_than_table=$known_bigger_than_table)");
}
// If it has changed, then preserve it in the job for the next resumption (of this table)
if ($fetch_rows_at_start !== $fetch_rows || $is_first_fetch_for_table) $updraftplus->jobdata_set('fetch_rows', $fetch_rows);
return $fetch_rows;
}
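// Illustrative walk-through of the method above (job state hypothetical): a default table
// starts at its first-fetch value; after two resumptions pass with no useful progress, the
// loop steps the batch size down one reduction level (e.g. 1000 down to 500), and the
// reduced value is persisted in the job data for subsequent resumptions of this table.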
/**
 * Return a list of primary keys (N.B. the method should not be called unless the caller already knows that the table has a single/simple primary key) for rows that have "over-sized" data.
 *
 * Currently this only examines the "posts" table and any other table with a longtext type, which are the primary causes of problems. If others are revealed in future, it can be generalised (e.g. examine the whole definition/all cells).
 *
 * @param String $table - the full table name
 * @param Array $structure - the table structure, as from WPDB::get_results("DESCRIBE ...");
 * @param String $primary_key - the primary key to use; required if $structure is set (slightly redundant, since it can be derived from structure)
 *
 * @return Array - list of IDs
 */
private function get_oversized_rows($table, $structure = array(), $primary_key = '') {
global $updraftplus;
if ($this->table_prefix_raw.'posts' != $table) {
if (empty($structure) || '' === $primary_key) return array();
foreach ($structure as $item) {
if ('' !== $item->Field && 'longtext' === $item->Type) {
$use_field = $item->Field;
}
}
} else {
$use_field = 'post_content';
}
if (!isset($use_field)) return array();
// Look for the jobdata_delete() call elsewhere in this class - the key name needs to match
$jobdata_key = 'oversized_rows_'.$table;
$oversized_list = $updraftplus->jobdata_get($jobdata_key);
if (is_array($oversized_list)) return $oversized_list;
$oversized_list = array();
// Allow over-ride via a constant
$oversized_row_size = defined('UPDRAFTPLUS_OVERSIZED_ROW_SIZE') ? UPDRAFTPLUS_OVERSIZED_ROW_SIZE : 2048576;
$sql = $this->wpdb_obj->prepare("SELECT ".UpdraftPlus_Manipulation_Functions::backquote($primary_key)." FROM ".UpdraftPlus_Manipulation_Functions::backquote($table)." WHERE LENGTH(".UpdraftPlus_Manipulation_Functions::backquote($use_field).") > %d ORDER BY ".UpdraftPlus_Manipulation_Functions::backquote($primary_key)." ASC", $oversized_row_size);
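// For orientation, the prepared statement comes out along these lines (names hypothetical):
//   SELECT `ID` FROM `wp_posts` WHERE LENGTH(`post_content`) > 2048576 ORDER BY `ID` ASC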
$oversized_rows = $this->wpdb_obj->get_col($sql);
// Upon an error, just return an empty list
if (!is_array($oversized_rows)) return array();
$updraftplus->jobdata_set($jobdata_key, $oversized_rows);
return $oversized_rows;
}
/**
 * Original version taken partially from phpMyAdmin and partially from Alain Wolf, Zurich - Switzerland, to use the WordPress $wpdb object
 * Website: http://restkultur.ch/personal/wolf/scripts/db_backup/
 * Modified by Scott Merrill (http://www.skippy.net/)
 * Subsequently heavily improved and modified
 *
 * This method should be called in a loop for a complete table backup (see the information for the returned parameter). The method may implement whatever strategy it likes for deciding when to return (the assumption is that when it does return with some results, the caller should register that something useful happened).
 *
 * @param String $table - Full name of database table to backup
 * @param String $table_type - Table type - 'VIEW' is supported; otherwise it is treated as an ordinary table
 * @param Integer|Boolean $start_record - Specify the starting record, or true to start at the beginning. Our internal page size is fixed at 1000 (though within that we might actually query in smaller batches).
 * @param Boolean $can_use_primary_key - Whether it is allowed to perform quicker SELECTs based on the primary key. The intended use case for false is to support backups running during a version upgrade. N.B. This "can" is not absolute; there may be other constraints dealt with within this method.
 *
 * @return Integer|Array|WP_Error - a WP_Error to indicate an error; an array indicates that it finished (if it includes 'next_record', that means it finished by producing something); an integer indicates the next page, in the case that there are more to do
 */
private function backup_table($table, $table_type = 'BASE TABLE', $start_record = true, $can_use_primary_key = true) {
global $updraftplus;
// Preserve the passed-in value
$original_start_record = $start_record;
$microtime = microtime(true);
// Deal with Windows/old MySQL setups with erroneous table prefixes differing in case
$dump_as_table = (false == $this->duplicate_tables_exist && 0 === stripos($table, $this->table_prefix) && 0 !== strpos($table, $this->table_prefix)) ? $this->table_prefix.substr($table, strlen($this->table_prefix)) : $table;
$table_structure = $this->wpdb_obj->get_results("DESCRIBE ".UpdraftPlus_Manipulation_Functions::backquote($table));
// $updraftplus->log(__('Error getting table details', 'updraftplus') . ": $table", 'error');
if ($this->wpdb_obj->last_error) $error_message .= ' ('.$this->wpdb_obj->last_error.')';
return new WP_Error('table_details_error', $error_message);
// If at the beginning of the dump for a table, then add the DROP and CREATE statements
if (true === $start_record) {
$this->write_table_backup_beginning($table, $dump_as_table, $table_type, $table_structure);
}
// Some tables have optional data, and should be skipped if they do not work
$table_sans_prefix = substr($table, strlen($this->table_prefix_raw));
$data_optional_tables = ('wp' == $this->whichdb) ? apply_filters('updraftplus_data_optional_tables', explode(',', UPDRAFTPLUS_DATA_OPTIONAL_TABLES)) : array();
if (in_array($table_sans_prefix, $data_optional_tables)) {
if (!$updraftplus->something_useful_happened && !empty($updraftplus->current_resumption) && ($updraftplus->current_resumption - $updraftplus->last_successful_resumption > 2)) {
$updraftplus->log("Table $table: Data skipped (previous attempts failed, and table is marked as non-essential)");
if ('VIEW' != $table_type) {
$fields = array();
$defs = array();
$integer_fields = array();
$binary_fields = array();
$bit_fields = array();
$bit_field_exists = false;
// false means "not yet set"; a string means what it was set to; null means that there are multiple (and so not useful to us). If it is not a string, then $primary_key_type is invalid and should not be used.
$primary_key = false;
$primary_key_type = false;
// $table_structure was from "DESCRIBE $table"
foreach ($table_structure as $struct) {
if (isset($struct->Key) && 'PRI' == $struct->Key && '' != $struct->Field) {
$primary_key = (false === $primary_key) ? $struct->Field : null;
$primary_key_type = $struct->Type;
}
if ((0 === strpos($struct->Type, 'tinyint')) || (0 === strpos(strtolower($struct->Type), 'smallint'))
|| (0 === strpos(strtolower($struct->Type), 'mediumint')) || (0 === strpos(strtolower($struct->Type), 'int')) || (0 === strpos(strtolower($struct->Type), 'bigint'))) {
$defs[strtolower($struct->Field)] = (null === $struct->Default) ? 'NULL' : $struct->Default;
$integer_fields[strtolower($struct->Field)] = true;
}
if ((0 === strpos(strtolower($struct->Type), 'binary')) || (0 === strpos(strtolower($struct->Type), 'varbinary')) || (0 === strpos(strtolower($struct->Type), 'tinyblob')) || (0 === strpos(strtolower($struct->Type), 'mediumblob')) || (0 === strpos(strtolower($struct->Type), 'blob')) || (0 === strpos(strtolower($struct->Type), 'longblob'))) {
$binary_fields[strtolower($struct->Field)] = true;
}
if (preg_match('/^bit(?:\(([0-9]+)\))?$/i', trim($struct->Type), $matches)) {
if (!$bit_field_exists) $bit_field_exists = true;
$bit_fields[strtolower($struct->Field)] = !empty($matches[1]) ? max(1, (int) $matches[1]) : 1;
// If bit fields are found, the field needs to be cast to binary: when the mysqli_query() function is used, MySQL converts a bit field value to a decimal number and represents it in string format, whereas the older mysql_query() function keeps it retained as-is.
$struct->Field = "CAST(".UpdraftPlus_Manipulation_Functions::backquote(str_replace('`', '``', $struct->Field))." AS BINARY) AS ".UpdraftPlus_Manipulation_Functions::backquote(str_replace('`', '``', $struct->Field));
$fields[] = $struct->Field;
} else {
$fields[] = UpdraftPlus_Manipulation_Functions::backquote(str_replace('`', '``', $struct->Field));
}
}
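// Example of the bit-field transformation above (column name hypothetical): a column
// `flags` of type bit(1) is selected as CAST(`flags` AS BINARY) AS `flags`, so that
// mysqli-based reads return the raw bits rather than a stringified decimal.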
$expected_via_count = false;
// N.B. At this stage this is for optimisation, mainly targets what is used on the core WP tables (bigint(20)); a value can be relied upon, but false is not definitive. N.B. https://docs.oracle.com/cd/E17952_01/mysql-8.0-en/numeric-type-syntax.html (retrieved Aug 2021): "As of MySQL 8.0.17, the display width attribute is deprecated for integer data types; you should expect support for it to be removed in a future version of MySQL." MySQL 8.0.20 is not returning it.
$use_primary_key = false;
if ($can_use_primary_key && is_string($primary_key) && preg_match('#^(small|medium|big)?int(\(| |$)#i', $primary_key_type)) {
$use_primary_key = true;
// We don't bother re-counting if it's likely to be so large that we're not going to do anything with the result
if (is_bool($this->expected_rows) || $this->expected_rows < 1000) {
$expected_rows = $this->wpdb_obj->get_var('SELECT COUNT('.UpdraftPlus_Manipulation_Functions::backquote($primary_key).') FROM '.UpdraftPlus_Manipulation_Functions::backquote($table));
if (!is_bool($expected_rows)) {
$this->expected_rows = $expected_rows;
$expected_via_count = true;
}
}
$oversized_rows = $this->get_oversized_rows($table, $table_structure, $primary_key);
if (preg_match('# unsigned$#i', $primary_key_type)) {
if (true === $start_record) $start_record = -1;
} else {
if (true === $start_record) {
$min_value = $this->wpdb_obj->get_var('SELECT MIN('.UpdraftPlus_Manipulation_Functions::backquote($primary_key).') FROM '.UpdraftPlus_Manipulation_Functions::backquote($table));
$start_record = (is_numeric($min_value) && $min_value) ? (int) $min_value - 1 : -1;
}
}
}
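// Sketch of the intent (illustrative; the comparison operator used later is assumed): an
// unsigned key can safely start the cursor at -1, while a signed key first fetches its true
// minimum, e.g. MIN(`ID`) = 7 gives a starting cursor of 6, so that a strictly-greater-than
// page fetch still includes row 7.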
if (!is_bool($this->expected_rows)) {
$this->log_expected_rows($table, $this->expected_rows, $expected_via_count);
}
$search = array("\x00", "\x0a", "\x0d", "\x1a");
$replace = array('\0', '\n', '\r', '\Z');
$where_array = apply_filters('updraftplus_backup_table_sql_where', array(), $table, $this);
$where = '';
if (!empty($where_array) && is_array($where_array)) {
$where = 'WHERE '.implode(' AND ', $where_array);
}
// Experimentation here shows that on large tables (we tested with 180,000 rows) on MyISAM, 1000 makes the table dump out 3x faster than the previous value of 100. After that, the benefit diminishes (increasing to 4000 only saved another 12%)
$fetch_rows = $this->number_of_rows_to_fetch($table, $use_primary_key || $start_record < 500000, true === $original_start_record, $this->expected_rows, $expected_via_count);
if (!is_bool($this->expected_rows)) $this->expected_rows = true;