@unlink($cache_file_base.'-zfb.gz.tmp');// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
$info_array = array('makezip_recursive_batchedbytes' => $this->makezip_recursive_batchedbytes);
if (!file_put_contents($cache_file_base.'-info.tmp', serialize($info_array))) {
@unlink($cache_file_base.'-zfs.gz.tmp');// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
@unlink($cache_file_base.'-zfb.gz.tmp');// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
if ($dhandle = gzopen($cache_file_base.'-zfd.gz.tmp', 'w')) {
if (!gzwrite($dhandle, serialize($this->zipfiles_dirbatched))) {
$aborted_on_dirbatched = true;
}
gzclose($dhandle);
} else {
$aborted_on_dirbatched = true;
}
if (!empty($aborted_on_dirbatched)) {
@unlink($cache_file_base.'-zfs.gz.tmp');// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
@unlink($cache_file_base.'-zfd.gz.tmp');// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
@unlink($cache_file_base.'-zfb.gz.tmp');// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
@unlink($cache_file_base.'-info.tmp');// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
// @codingStandardsIgnoreLine
 * Class variables that get altered:
 * makezip_recursive_batchedbytes
 * zipfiles_skipped_notaltered
 *
 * Class variables that the result depends upon (other than the state of the filesystem):
// Any not yet dispatched? Under our present scheme, at this point nothing has yet been dispatched. And since the enumerating of all files can take a while, we can at this point do a further modification check to reduce the chance of overlaps.
// This relies on us *not* touch()ing the zip file to indicate to any resumption 'behind us' that we're already here. Rather, we're relying on the combined facts that a) if it takes us a while to search the directory tree, then it should do for the one behind us too (though they'll have the benefit of cache, so could catch up very fast) and b) we touch *immediately* after finishing the enumeration of the files to add.
// $retry_on_error is here being used as a proxy for 'not the second time around, when there might be the remains of the file on the first time around'
if ($retry_on_error) $updraftplus->check_recent_modification($destination);
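// Illustrative note (hypothetical shape only; the real check_recent_modification()
// lives in the main UpdraftPlus class and is not shown here): a guard of this
// kind typically reduces to comparing the zip's mtime against a threshold, e.g.
//   if (file_exists($destination) && time() - filemtime($destination) < $threshold) { /* another run owns this zip; stop */ }
// where $threshold is an assumed value, not one taken from this file.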
// Here we're relying on the fact that both PclZip and ZipArchive will happily operate on an empty file. Note that BinZip *won't* (for that, may need a new strategy - e.g. add the very first file on its own, in order to 'lay down a marker')
if (empty($do_bump_index)) touch($destination);
if (count($this->zipfiles_dirbatched) > 0 || count($this->zipfiles_batched) > 0) {
$updraftplus->log(sprintf("Total entities for the zip file: %d directories, %d files (%d skipped as non-modified), %s MB", count($this->zipfiles_dirbatched), count($this->zipfiles_batched), count($this->zipfiles_skipped_notaltered), round($this->makezip_recursive_batchedbytes/1048576, 1)));
// No need to warn if we're going to retry anyway. (And if we get killed, the zip will be rescanned for its contents upon resumption).
$warn_on_failures = !$retry_on_error;
$add_them = $this->makezip_addfiles($warn_on_failures);
if (is_wp_error($add_them)) {
foreach ($add_them->get_error_messages() as $msg) {
$updraftplus->log("Error returned from makezip_addfiles: ".$msg);
} elseif (false === $add_them) {
$updraftplus->log("Error: makezip_addfiles returned false");
// Reset these variables because the index may have changed since we began
$itext = empty($this->index) ? '' : $this->index+1;
$destination_base = $backup_file_basename.'-'.$whichone.$itext.'.zip.tmp';
$destination = $this->updraft_dir.'/'.$destination_base;
// ZipArchive::addFile sometimes fails - there's nothing when we expected something.
// We did not always have || $error_occurred here. But it is better to retry than simply to warn the user to check his logs.
if (((file_exists($destination) || $this->index == $original_index) && @filesize($destination) < 90 && 'UpdraftPlus_ZipArchive' == $this->use_zip_object) || ($error_occurred && $retry_on_error)) {// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
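// (For context on the 90-byte heuristic above: an empty zip archive consists of
// just the 22-byte end-of-central-directory record, and even a single real entry
// pushes the size well past 90 bytes - so a smaller file cannot contain the data
// we attempted to add.)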
// This can be made more sophisticated if feedback justifies it. Currently we just switch to PclZip. But, it may have been a BinZip failure, so we could then try ZipArchive if that is available. If doing that, make sure that an infinite recursion isn't made possible.
$updraftplus->log("makezip_addfiles(".$this->use_zip_object.") apparently failed (file=".basename($destination).", type=$whichone, size=".filesize($destination).") - retrying with PclZip");
$saved_zip_object = $this->use_zip_object;
$this->use_zip_object = 'UpdraftPlus_PclZip';
$ret = $this->make_zipfile($source, $backup_file_basename, $whichone, false);
$this->use_zip_object = $saved_zip_object;
// zipfiles_added > 0 means that $zip->close() has been called. i.e. An attempt was made to add something: something _should_ be there.
// Why return true even if $error_occurred may be set? 1) Because in that case, a warning has already been logged. 2) Because returning false causes an error to be logged, which means it'll all be retried again. Also 3) this has been the pattern of the code for a long time, and the algorithm has been proven in the real-world: don't change what's not broken.
// (file_exists($destination) || $this->index == $original_index) might be an alternative to $this->zipfiles_added > 0 - ? But, don't change what's not broken.
if (false == $error_occurred || $this->zipfiles_added > 0) return true;
$updraftplus->log("makezip failure: zipfiles_added=".$this->zipfiles_added.", error_occurred=".$error_occurred." (method=".$this->use_zip_object.")");
return false;
/**
 * This function is an ugly, conservative workaround for https://bugs.php.net/bug.php?id=62119. It does not aim to always work around the bug, but to ensure that nothing is made worse.
 */
private function basename($element) {
$dirname = dirname($element);
$basename_manual = preg_replace('#^[\\/]+#', '', substr($element, strlen($dirname)));
$basename = basename($element);
if ($basename_manual != $basename) {
$locale = setlocale(LC_CTYPE, "0");
setlocale(LC_CTYPE, 'en_US.UTF8');
$basename_new = basename($element);
if ($basename_new == $basename_manual) $basename = $basename_new;
setlocale(LC_CTYPE, $locale);
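// Illustrative example of the PHP bug worked around above (file name and output
// are hypothetical): under a non-UTF-8 LC_CTYPE locale such as 'C', basename()
// can strip leading multi-byte characters, e.g.
//
//   setlocale(LC_CTYPE, 'C');
//   basename('/tmp/čeština.txt'); // may return 'tina.txt' on affected builds, not 'čeština.txt'
//
// The manual preg_replace() computation above is locale-independent, so a
// mismatch reveals the bug; re-running basename() under en_US.UTF8 then gives a usable result.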
/**
 * Determine if a file should be stored without compression
 *
 * @param String $file - the filename
 *
 * @return Boolean - whether the file matches the do-not-compress extension list
 */
private function file_should_be_stored_without_compression($file) {
if (!is_array($this->extensions_to_not_compress)) return false;
foreach ($this->extensions_to_not_compress as $ext) {
$ext_len = strlen($ext);
if (strtolower(substr($file, -$ext_len, $ext_len)) == $ext) return true;
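// Example usage (assumes $this->extensions_to_not_compress holds lowercased,
// dot-prefixed entries such as '.jpg' or '.zip', which is what the lowercased
// suffix comparison above implies):
//
//   $this->file_should_be_stored_without_compression('holiday/IMG_01.JPG'); // true, matches '.jpg'
//   $this->file_should_be_stored_without_compression('notes.txt');          // false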
/**
 * This method will add a manifest file to the backup zip
 *
 * @param String $whichone - the type of backup (e.g. 'plugins', 'themes')
 *
 * @return Boolean - success/failure status
 */
private function updraftplus_include_manifest($whichone) {
$manifest_name = "updraftplus-manifest.json";
$manifest = trailingslashit($this->updraft_dir).$manifest_name;
$updraftplus->log(sprintf("Creating file manifest ($manifest_name) for incremental backup (included: %d, skipped: %d)", count($this->zipfiles_batched), count($this->zipfiles_skipped_notaltered)));
if (false === ($handle = fopen($manifest, 'w+'))) return $updraftplus->log("Failed to open manifest file ($manifest_name)");
$this->manifest_path = $manifest;
$go_to_levels = apply_filters('updraftplus_manifest_go_to_level', $go_to_levels, $whichone);
$go_to_level = isset($go_to_levels[$whichone]) ? $go_to_levels[$whichone] : 'all';
if ('more' == $whichone) {
foreach ($this->zipfiles_batched as $index => $dir) {
$directory = '"directory":"' . dirname($index) . '",';
if (false === fwrite($handle, '{"version":'.$version.',"type":"'.$whichone.'",'.$directory.'"listed_levels":"'.$go_to_level.'","contents":{"directories":[')) $updraftplus->log("First write to manifest file failed ($manifest_name)");
// First loop: find out which is the last entry, so that we don't write the comma after it
foreach ($this->zipfiles_dirbatched as $index => $dir) {
if ('all' !== $go_to_level && substr_count($dir, '/') > $go_to_level - 1) continue;
$last_dir_index = $index;
// Second loop: write out the entry
foreach ($this->zipfiles_dirbatched as $index => $dir) {
if ('all' !== $go_to_level && substr_count($dir, '/') > $go_to_level - 1) continue;
fwrite($handle, json_encode($dir).(($index != $last_dir_index) ? ',' : ''));
// Now do the same for files
fwrite($handle, '],"files":[');
$last_file_index = false;
foreach ($this->zipfiles_batched as $store_as) {
if ('all' !== $go_to_level && substr_count($store_as, '/') > $go_to_level - 1) continue;
$last_file_index = $store_as;
foreach ($this->zipfiles_skipped_notaltered as $store_as) {
if ('all' !== $go_to_level && substr_count($store_as, '/') > $go_to_level - 1) continue;
$last_file_index = $store_as;
foreach ($this->zipfiles_batched as $store_as) {
if ('all' !== $go_to_level && substr_count($store_as, '/') > $go_to_level - 1) continue;
fwrite($handle, json_encode($store_as).(($store_as != $last_file_index) ? ',' : ''));
foreach ($this->zipfiles_skipped_notaltered as $store_as) {
if ('all' !== $go_to_level && substr_count($store_as, '/') > $go_to_level - 1) continue;
fwrite($handle, json_encode($store_as).(($store_as != $last_file_index) ? ',' : ''));
$this->zipfiles_batched[$manifest] = $manifest_name;
$updraftplus->log("Successfully created file manifest (size: ".filesize($manifest).")");
// Q. Why don't we only open and close the zip file just once?
// A. Because apparently PHP doesn't write out until the final close, and it will return an error if any file has vanished in the meantime. So going directory-by-directory reduces our chances of hitting an error if the filesystem is changing underneath us (which is very possible if dealing with e.g. 1GB of files)
/**
 * We batch up the files, rather than do them one at a time. So we are more efficient than open, one-write, close.
 * To call into here, the array $this->zipfiles_batched must be populated (keys=paths, values=add-to-zip-as values). It gets reset upon exit from here.
 *
 * @param Boolean $warn_on_failures - whether to log failures as warnings
 *
 * @return Boolean|WP_Error
 */
private function makezip_addfiles($warn_on_failures) {
// Used to detect requests to bump the size
$zipfile = $this->zip_basename.((0 == $this->index) ? '' : ($this->index+1)).'.zip.tmp';
$maxzipbatch = $updraftplus->jobdata_get('maxzipbatch', 26214400);
if ((int) $maxzipbatch < 1024) $maxzipbatch = 26214400;
// Short-circuit the null case, because we want to detect later if something useful happened
if (count($this->zipfiles_dirbatched) == 0 && count($this->zipfiles_batched) == 0) return true;
// If on PclZip, then if possible short-circuit to a quicker method (makes a huge time difference - on a folder of 1500 small files, 2.6s instead of 76.6)
// This assumes that makezip_addfiles() is only called once so that we know about all needed files (the new style)
// This is rather conservative - because it assumes zero compression. But we can't know that in advance.
if (0 == $this->index && $this->makezip_recursive_batchedbytes < $this->zip_split_every) {
// So far, we only have a processor for this for PclZip; but that check can be removed - need to address the below items
// TODO: Is this really what we want? Always go all-in-one for < 500MB???? Should be more conservative? Or, is it always faster to go all-in-one? What about situations where we might want to auto-split because of slowness - check that that is still working.
// TODO: Test this new method for PclZip - are we still getting the performance gains? Test for ZipArchive too.
if ('UpdraftPlus_PclZip' == $this->use_zip_object && ($this->makezip_recursive_batchedbytes < 512*1048576 || (defined('UPDRAFTPLUS_PCLZIP_FORCEALLINONE') && UPDRAFTPLUS_PCLZIP_FORCEALLINONE))) {
$updraftplus->log("Only one archive required (".$this->use_zip_object.") - will attempt to do in single operation (data: ".round($this->makezip_recursive_batchedbytes/1024, 1)." KB, split: ".round($this->zip_split_every/1024, 1)." KB)");
// $updraftplus->log("PclZip, and only one archive required - will attempt to do in single operation (data: ".round($this->makezip_recursive_batchedbytes/1024, 1)." KB, split: ".round($this->zip_split_every/1024, 1)." KB)");
// if(!class_exists('PclZip')) require_once(ABSPATH.'/wp-admin/includes/class-pclzip.php');
// $zip = new PclZip($zipfile);
// $remove_path = ($this->whichone == 'wpcore') ? untrailingslashit(ABSPATH) : WP_CONTENT_DIR;
// $backupable_entities = $updraftplus->get_backupable_file_entities(true);
// if (isset($backupable_entities[$this->whichone])) {
// if ('plugins' == $this->whichone || 'themes' == $this->whichone || 'uploads' == $this->whichone) {
// $remove_path = dirname($backupable_entities[$this->whichone]);
// To normalise instead of removing (which binzip doesn't support, so we don't do it), you'd remove the dirname() in the above line, and uncomment the below one.
// #$add_path = $this->whichone;
// $remove_path = $backupable_entities[$this->whichone];
// $zipcode = $zip->create($this->source, PCLZIP_OPT_REMOVE_PATH, $remove_path, PCLZIP_OPT_ADD_PATH, $add_path);
// $zipcode = $zip->create($this->source, PCLZIP_OPT_REMOVE_PATH, $remove_path);
// $updraftplus->log("PclZip Error: ".$zip->errorInfo(true), 'warning');
// return $zip->errorCode();
// UpdraftPlus_Job_Scheduler::something_useful_happened();
// 05-Mar-2013 - added a new check on the total data added; it appears that things fall over if too much data is contained in the cumulative total of files that were addFile'd without a close-open cycle; presumably data is being stored in memory. In the case in question, it was a batch of MP3 files of around 100MB each - 25 of those equals 2.5GB!
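// Minimal sketch of the resulting batch-then-reopen pattern (illustrative only,
// using plain ZipArchive and a made-up file limit; the real loop below also
// tracks bytes added, elapsed time and the split threshold):
//
//   $za = new ZipArchive();
//   $za->open($zipfile, ZipArchive::CREATE);
//   $added = 0;
//   foreach ($files as $path => $add_as) {
//       $za->addFile($path, $add_as);
//       if (++$added >= 500) {
//           $za->close(); // data is only flushed to disk here
//           $za->open($zipfile);
//           $added = 0;
//       }
//   }
//   $za->close();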
$data_added_since_reopen = 0;
// static $data_added_this_resumption = 0;
// $max_data_added_any_resumption = $updraftplus->jobdata_get('max_data_added_any_resumption', 0);
// The following array is used only for error reporting if ZipArchive::close fails (since that method itself reports no error messages - we have to track manually what we were attempting to add)
$files_zipadded_since_open = array();
$zip = new $this->use_zip_object;
if (file_exists($zipfile)) {
$opencode = $zip->open($zipfile);
$original_size = filesize($zipfile);
} else {
$create_code = (version_compare(PHP_VERSION, '5.2.12', '>') && defined('ZIPARCHIVE::CREATE')) ? ZIPARCHIVE::CREATE : 1;
$opencode = $zip->open($zipfile, $create_code);
$original_size = 0;
}
if (true !== $opencode) return new WP_Error('no_open', sprintf(__('Failed to open the zip file (%s) - %s', 'updraftplus'), $zipfile, $zip->last_error));
if (apply_filters('updraftplus_include_manifest', false, $this->whichone, $this)) {
$this->updraftplus_include_manifest($this->whichone);
// Make sure all directories are created before we start creating files
while ($dir = array_pop($this->zipfiles_dirbatched)) {
$zip->addEmptyDir($dir);
}
$zipfiles_added_thisbatch = 0;
// Go through all those batched files
foreach ($this->zipfiles_batched as $file => $add_as) {
if (!file_exists($file)) {
$updraftplus->log("File has vanished from underneath us; dropping: $add_as");
$fsize = filesize($file);
$large_file_warning_key = 'vlargefile_'.md5($this->whichone.'#'.$add_as);
if (defined('UPDRAFTPLUS_SKIP_FILE_OVER_SIZE') && UPDRAFTPLUS_SKIP_FILE_OVER_SIZE && $fsize > UPDRAFTPLUS_SKIP_FILE_OVER_SIZE) {// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
$updraftplus->log("File is larger than the user-configured (UPDRAFTPLUS_SKIP_FILE_OVER_SIZE) maximum (is: ".round($fsize/1024, 1)." KB); will skip: ".$add_as);
} elseif ($fsize > UPDRAFTPLUS_WARN_FILE_SIZE) {
$log_msg = __('A very large file was encountered: %s (size: %s MB)', 'updraftplus');
// Was this warned about on a previous run?
if ($updraftplus->warning_exists($large_file_warning_key)) {
$updraftplus->log_remove_warning($large_file_warning_key);
$large_file_warning_key .= '-2';
$log_msg .= ' - '.__('a second attempt is being made (upon further failure it will be skipped)', 'updraftplus');
} elseif ($updraftplus->warning_exists($large_file_warning_key.'-2') || $updraftplus->warning_exists($large_file_warning_key.'-final')) {
$updraftplus->log_remove_warning($large_file_warning_key.'-2');
$large_file_warning_key .= '-final';
$log_msg .= ' - '.__('two unsuccessful attempts were made to include it, and it will now be omitted from the backup', 'updraftplus');
}
$updraftplus->log(sprintf($log_msg, $add_as, round($fsize/1048576, 1)), 'warning', $large_file_warning_key);
if ('-final' == substr($large_file_warning_key, -6, 6)) {
continue;
}
// Skips files that are already added
if (!isset($this->existing_files[$add_as]) || $this->existing_files[$add_as] != $fsize) {
@touch($zipfile);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
$zip->addFile($file, $add_as);
$zipfiles_added_thisbatch++;
if (method_exists($zip, 'setCompressionName') && $this->file_should_be_stored_without_compression($add_as) && false == $zip->setCompressionName($add_as, ZipArchive::CM_STORE)) {
$updraftplus->log("Zip: setCompressionName failed on: $add_as");
// N.B., Since makezip_addfiles() can get called more than once if there were errors detected, potentially $zipfiles_added_thisrun can exceed the total number of batched files (if they get processed twice).
$this->zipfiles_added_thisrun++;
$files_zipadded_since_open[] = array('file' => $file, 'addas' => $add_as);
$data_added_since_reopen += $fsize;
// $data_added_this_resumption += $fsize;
/* Conditions for forcing a write-out and re-open:
- more than $maxzipbatch bytes have been batched
- more than 2.0 seconds have passed since the last time we wrote
- adding this batch of data is likely already enough to take us over the split limit (and if that happens, then do actually split - to prevent a scenario of progressively tinier writes as we approach but don't actually reach the limit)
- more than 500 files batched (should perhaps intelligently lower this as the zip file gets bigger - not yet needed)
*/
// Add 10% margin. It only really matters when the OS has a file size limit, exceeding which causes failure (e.g. 2GB on 32-bit)
// Since we don't test before the file has been created (so that zip_last_ratio has meaningful data), we rely on max_zip_batch being less than zip_split_every - which should always be the case
$reaching_split_limit = $this->zip_last_ratio > 0 && $original_size > 0 && ($original_size + 1.1*$data_added_since_reopen*$this->zip_last_ratio) > $this->zip_split_every;
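// Worked example (numbers illustrative): with $original_size = 400MB,
// $data_added_since_reopen = 150MB, $this->zip_last_ratio = 0.8 and
// $this->zip_split_every = 500MB: 400 + 1.1 * 150 * 0.8 = 532MB > 500MB,
// so $reaching_split_limit is set and the batch will be written out below.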
if (!$force_allinone && ($zipfiles_added_thisbatch > UPDRAFTPLUS_MAXBATCHFILES || $reaching_split_limit || $data_added_since_reopen > $maxzipbatch || (time() - $this->zipfiles_lastwritetime) > 2)) {
// We are coming towards a limit and about to close the zip, check if this is a more file backup and the manifest file has made it into this zip if not add it
if (apply_filters('updraftplus_include_manifest', false, $this->whichone, $this)) {
foreach ($files_zipadded_since_open as $info) {
if ('updraftplus-manifest.json' == basename($info['file'])) $manifest = true;
@touch($zipfile);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
$path = array_search('updraftplus-manifest.json', $this->zipfiles_batched);
$zip->addFile($path, 'updraftplus-manifest.json');
$zipfiles_added_thisbatch++;
if (method_exists($zip, 'setCompressionName') && $this->file_should_be_stored_without_compression($this->zipfiles_batched[$path])) {
if (false == $zip->setCompressionName($this->zipfiles_batched[$path], ZipArchive::CM_STORE)) {
$updraftplus->log("Zip: setCompressionName failed on: $this->zipfiles_batched[$path]");
// N.B., Since makezip_addfiles() can get called more than once if there were errors detected, potentially $zipfiles_added_thisrun can exceed the total number of batched files (if they get processed twice).
$this->zipfiles_added_thisrun++;
$files_zipadded_since_open[] = array('file' => $path, 'addas' => 'updraftplus-manifest.json');
$data_added_since_reopen += filesize($path);
// $data_added_this_resumption += filesize($path);
if (function_exists('set_time_limit')) @set_time_limit(UPDRAFTPLUS_SET_TIME_LIMIT);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged
$something_useful_sizetest = false;
if ($data_added_since_reopen > $maxzipbatch) {
$something_useful_sizetest = true;
$updraftplus->log("Adding batch to zip file (".$this->use_zip_object."): over ".round($maxzipbatch/1048576, 1)." MB added on this batch (".round($data_added_since_reopen/1048576, 1)." MB, ".count($this->zipfiles_batched)." files batched, $zipfiles_added_thisbatch (".$this->zipfiles_added_thisrun.") added so far); re-opening (prior size: ".round($original_size/1024, 1).' KB)');
} elseif ($zipfiles_added_thisbatch > UPDRAFTPLUS_MAXBATCHFILES) {
$updraftplus->log("Adding batch to zip file (".$this->use_zip_object."): over ".UPDRAFTPLUS_MAXBATCHFILES." files added on this batch (".round($data_added_since_reopen/1048576, 1)." MB, ".count($this->zipfiles_batched)." files batched, $zipfiles_added_thisbatch (".$this->zipfiles_added_thisrun.") added so far); re-opening (prior size: ".round($original_size/1024, 1).' KB)');
} elseif (!$reaching_split_limit) {
$updraftplus->log("Adding batch to zip file (".$this->use_zip_object."): over 2.0 seconds have passed since the last write (".round($data_added_since_reopen/1048576, 1)." MB, $zipfiles_added_thisbatch (".$this->zipfiles_added_thisrun.") files added so far); re-opening (prior size: ".round($original_size/1024, 1).' KB)');
$updraftplus->log("Adding batch to zip file (".$this->use_zip_object."): possibly approaching split limit (".round($data_added_since_reopen/1048576, 1)." MB, $zipfiles_added_thisbatch (".$this->zipfiles_added_thisrun.") files added so far); last ratio: ".round($this->zip_last_ratio, 4)."; re-opening (prior size: ".round($original_size/1024, 1).' KB)');
// Though we will continue processing the files we've got, the final error code will be false, to allow a second attempt on the failed ones. This also keeps us consistent with a negative result for $zip->close() further down. We don't just retry here, because we have seen cases (with BinZip) where upon failure, the existing zip had actually been deleted. So, to be safe we need to re-scan the existing zips.
$this->record_zip_error($files_zipadded_since_open, $zip->last_error, $warn_on_failures);
// if ($data_added_this_resumption > $max_data_added_any_resumption) {
// $max_data_added_any_resumption = $data_added_this_resumption;
// $updraftplus->jobdata_set('max_data_added_any_resumption', $max_data_added_any_resumption);
$zipfiles_added_thisbatch = 0;
// This triggers a re-open, later
$files_zipadded_since_open = array();
// Call here, in case we've got so many big files that we don't complete the whole routine
if (filesize($zipfile) > $original_size) {
// It is essential that this does not go above 1. In reality the 'compressed' zip file may be *bigger* than the files stored in it (this can happen at the start, e.g. if just 1 file was added due to the >2.0s detection). When that happens, if the ratio is big enough, it can then fire the "approaching split limit" detection (very) prematurely
$this->zip_last_ratio = ($data_added_since_reopen > 0) ? min((filesize($zipfile) - $original_size)/$data_added_since_reopen, 1) : 1;
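// E.g. (illustrative): if the zip grew by 30MB while 40MB of file data was
// batched, the ratio is 0.75. If it instead grew by 45MB (per-entry overhead
// can exceed 1:1 on tiny batches), the raw 1.125 is clamped to 1, keeping the
// split-limit projection above from firing prematurely.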
// We need a rolling update of this
$original_size = filesize($zipfile);
if ($reaching_split_limit || filesize($zipfile) > $this->zip_split_every) {
// Take the filesize now, because later we would need to have called clearstatcache() to get an accurate value
$bumped_at = round(filesize($zipfile)/1048576, 1);
// Need to make sure that something_useful_happened() is always called
// How long since the current run began? If it's taken long (and we're in danger of not making it at all), or if that is foreseeable in future because of general slowness, then we should reduce the parameters.
if (!$something_useful_sizetest) {
UpdraftPlus_Job_Scheduler::something_useful_happened();
} else {
// Do this as early as possible
UpdraftPlus_Job_Scheduler::something_useful_happened();
$time_since_began = max(microtime(true) - $this->zipfiles_lastwritetime, 0.000001);
$normalised_time_since_began = $time_since_began*($maxzipbatch/$data_added_since_reopen);
// Don't measure speed until after ZipArchive::close()
$rate = round($data_added_since_reopen/$time_since_began, 1);
$updraftplus->log(sprintf("A useful amount of data was added after this amount of zip processing: %s s (normalised: %s s, rate: %s KB/s)", round($time_since_began, 1), round($normalised_time_since_began, 1), round($rate/1024, 1)));
// We want to detect not only that we need to reduce the size of batches, but also the capability to increase them. This is particularly important because of ZipArchive's (understandable, given the tendency of PHP processes to be terminated without notice) practice of first creating a temporary copy of the zip file before acting on it (so that the update is atomic). Unfortunately, once the size of the zip file gets over 100MB, the copy operation begins to be significant. By the time you've hit 500MB on many web hosts the copy is the majority of the time taken. So we want to do more in between these copies if possible.
/* "Could have done more" - detect as:
- A batch operation would still leave a "good chunk" of time in a run
- "Good chunk" means that the time we took to add the batch is less than 50% of a run time
- We can do that on any run after the first (when at least one ceiling on the maximum time is known)
- But in the case where a max_execution_time is long (so that resumptions are never needed), and we're always on run 0, we will automatically increase chunk size if the batch took less than 6 seconds.
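// Worked example of the rule above (values illustrative, assuming the longest
// recorded run time is the ceiling referred to): on resumption 2, if the
// longest run so far was 40s and this batch took 12s, then 12s < 50% of 40s,
// so the batch size (maxzipbatch) is a candidate for being increased.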
// At one stage we had a strategy of not allowing check-ins to have more than 20s between them. However, once the zip file got to a certain size, PHP's habit of copying the entire zip file first meant that it *always* went over 18s, and thence a drop in the max size was inevitable - which was bad, because with the copy time being something that only grew, the outcome was less data being copied every time
// Gather the data. We try not to do this unless necessary (may be time-sensitive)
if ($updraftplus->current_resumption >= 1) {
$time_passed = $updraftplus->jobdata_get('run_times');