Merge branch 'unpack-progress' into calamares
commit 063482339f
@@ -22,8 +22,16 @@ This release contains contributions from (alphabetically by first name):
 - The *machineid* module, which generates UUIDs for systemd and dbus
   and can generate entropy files (filled from `/dev/urandom` in the host
   system) now supports more than one entropy file; generate them as needed
   (or copy a fixed value to all, depending on *entropy-copy*). Deprecate
   *entropy* (which generates a specific output file) as too inflexible.
+- Progress reporting from the *unpackfs* module has been revamped:
+  it reports more often now, so that it is more obvious that files
+  are being transferred even when the percentage progress does not
+  change.
+- The *unpackfs* module now supports a *weight* setting for each
+  of the unpack entries. For a single entry this does not matter,
+  but if there are multiple entries it allows tweaking the relative
+  progress between each entry.


 # 3.2.30 (2020-09-03) #

@@ -48,8 +48,8 @@ class UnpackEntry:
     :param sourcefs:
     :param destination:
     """
-    __slots__ = ['source', 'sourcefs', 'destination', 'copied', 'total', 'exclude', 'excludeFile',
-                 'mountPoint']
+    __slots__ = ('source', 'sourcefs', 'destination', 'copied', 'total', 'exclude', 'excludeFile',
+                 'mountPoint', 'weight')

     def __init__(self, source, sourcefs, destination):
         """
@@ -71,6 +71,7 @@ class UnpackEntry:
         self.copied = 0
         self.total = 0
         self.mountPoint = None
+        self.weight = 1

     def is_file(self):
         return self.sourcefs == "file"
@@ -162,6 +163,8 @@ def file_copy(source, entry, progress_cb):
     :param progress_cb: A callback function for progress reporting.
         Takes a number and a total-number.
     """
+    import time
+
     dest = entry.destination

     # Environment used for executing rsync properly
@@ -190,12 +193,14 @@ def file_copy(source, entry, progress_cb):
         args, env=at_env,
         stdout=subprocess.PIPE, close_fds=ON_POSIX
         )
-    # last_num_files_copied trails num_files_copied, and whenever at least 100 more
-    # files have been copied, progress is reported and last_num_files_copied is updated.
+    # last_num_files_copied trails num_files_copied, and whenever at least 107 more
+    # files (file_count_chunk) have been copied, progress is reported and
+    # last_num_files_copied is updated. The chunk size isn't "tidy"
+    # so that all the digits of the progress-reported number change.
+    #
     last_num_files_copied = 0
-    file_count_chunk = entry.total / 100
-    if file_count_chunk < 100:
-        file_count_chunk = 100
+    last_timestamp_reported = time.time()
+    file_count_chunk = 107

     for line in iter(process.stdout.readline, b''):
         # rsync outputs progress in parentheses. Each line will have an
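The "untidy" chunk size is purely cosmetic: reporting every 100 files would make the displayed count step 100, 200, 300, … with the trailing digits stuck at zero, while 107 gives 107, 214, 321, … so every digit of the reported number visibly changes between updates.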
@@ -220,9 +225,10 @@ def file_copy(source, entry, progress_cb):
             # adjusting the offset so that progressbar can be continuesly drawn
             num_files_copied = num_files_total_local - num_files_remaining

-            # Update about once every 1% of this entry
-            if num_files_copied - last_num_files_copied >= file_count_chunk:
+            now = time.time()
+            if (num_files_copied - last_num_files_copied >= file_count_chunk) or (now - last_timestamp_reported > 0.5):
                 last_num_files_copied = num_files_copied
+                last_timestamp_reported = now
                 progress_cb(num_files_copied, num_files_total_local)

     process.wait()
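Taken together, the two hunks above throttle progress on whichever limit is hit first: at least file_count_chunk more files copied, or more than half a second since the last report. The sketch below re-creates that pattern in isolation; it is illustrative only, not the module's code — throttled_progress, chunk and max_interval are made-up names, and it assumes rsync is run with --progress so that its output carries to-chk=remaining/total counters.

```python
import re
import subprocess
import time


def throttled_progress(cmd, progress_cb, chunk=107, max_interval=0.5):
    """Run cmd (an rsync command line given as a list), parse the
    'to-chk=remaining/total' counters from its output, and call
    progress_cb(copied, total) at most once per `chunk` files or per
    `max_interval` seconds, whichever limit is reached first."""
    to_chk = re.compile(r"to-chk=(\d+)/(\d+)")
    last_reported = 0
    last_time = time.time()
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    for line in iter(process.stdout.readline, b""):
        match = to_chk.search(line.decode("utf-8", "replace"))
        if not match:
            continue
        remaining, total = int(match.group(1)), int(match.group(2))
        copied = total - remaining
        now = time.time()
        # Report on a file-count step *or* a time step, so small batches
        # of files still show up promptly instead of waiting for a full chunk.
        if copied - last_reported >= chunk or now - last_time > max_interval:
            last_reported, last_time = copied, now
            progress_cb(copied, total)
    return process.wait()
```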
@@ -260,6 +266,7 @@ class UnpackOperation:
     def __init__(self, entries):
         self.entries = entries
         self.entry_for_source = dict((x.source, x) for x in self.entries)
+        self.total_weight = sum([e.weight for e in entries])

     def report_progress(self):
         """
@@ -267,30 +274,29 @@ class UnpackOperation:
         """
         progress = float(0)

-        done = 0  # Done and total apply to the entry now-unpacking
-        total = 0
-        complete = 0  # This many are already finished
+        current_total = 0
+        current_done = 0  # Files count in the current entry
+        complete_count = 0
+        complete_weight = 0  # This much weight already finished
         for entry in self.entries:
             if entry.total == 0:
                 # Total 0 hasn't counted yet
                 continue
             if entry.total == entry.copied:
-                complete += 1
+                complete_weight += entry.weight
+                complete_count += 1
             else:
                 # There is at most *one* entry in-progress
-                total = entry.total
-                done = entry.copied
+                current_total = entry.total
+                current_done = entry.copied
+                complete_weight += entry.weight * ( 1.0 * current_done ) / current_total
                 break

-        if total > 0:
-            # Pretend that each entry represents an equal amount of work;
-            # the complete ones count as 100% of their own fraction
-            # (and have *not* been counted in total or done), while
-            # total/done represents the fraction of the current fraction.
-            progress = ( ( 1.0 * complete ) / len(self.entries) ) + ( ( 1.0 / len(self.entries) ) * ( 1.0 * done / total ) )
+        if current_total > 0:
+            progress = ( 1.0 * complete_weight ) / self.total_weight

         global status
-        status = _("Unpacking image {}/{}, file {}/{}").format((complete+1),len(self.entries),done, total)
+        status = _("Unpacking image {}/{}, file {}/{}").format((complete_count+1), len(self.entries), current_done, current_total)
         job.setprogress(progress)

     def run(self):
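To see what the weighted calculation changes, take a hypothetical run with two entries of weight 1 and 5 (matching the example configuration further down). When the first entry is finished and the second is half copied, complete_weight is 1 + 5 * 0.5 = 3.5 against a total_weight of 6, so progress is reported as roughly 0.58; the old equal-share formula would have reported 0.75. A self-contained sketch of the same arithmetic, using a stand-in Entry tuple rather than the module's UnpackEntry:

```python
from collections import namedtuple

# Stand-in for the module's UnpackEntry: just the fields the
# progress arithmetic needs.
Entry = namedtuple("Entry", "copied total weight")


def weighted_progress(entries):
    total_weight = sum(e.weight for e in entries)
    complete_weight = 0.0
    for e in entries:
        if e.total == 0:
            continue                     # not counted yet
        if e.copied == e.total:
            complete_weight += e.weight  # finished entries count fully
        else:
            # At most one entry is in progress; count its fraction.
            complete_weight += e.weight * e.copied / e.total
            break
    return complete_weight / total_weight


print(weighted_progress([Entry(10, 10, 1), Entry(500, 1000, 5)]))  # ~0.583
```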
@@ -395,6 +401,24 @@ def repair_root_permissions(root_mount_point):
         # But ignore it


+def extract_weight(entry):
+    """
+    Given @p entry, a dict representing a single entry in
+    the *unpack* list, returns its weight (1, or whatever is
+    set if it is sensible).
+    """
+    w = entry.get("weight", None)
+    if w:
+        try:
+            wi = int(w)
+            return wi if wi > 0 else 1
+        except ValueError:
+            utils.warning("*weight* setting {!r} is not valid.".format(w))
+        except TypeError:
+            utils.warning("*weight* setting {!r} must be number.".format(w))
+    return 1
+
+
 def run():
     """
     Unsquash filesystem.
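extract_weight is deliberately forgiving: anything missing, zero, negative, or non-numeric falls back to the default weight of 1, and only genuinely malformed values produce a warning. Assuming the function above is in scope, a few illustrative calls (the entry dicts are made up for the example):

```python
extract_weight({"source": "root.sqfs"})                 # 1, no weight given
extract_weight({"source": "root.sqfs", "weight": 3})    # 3
extract_weight({"source": "root.sqfs", "weight": "5"})  # 5, string converted
extract_weight({"source": "root.sqfs", "weight": 0})    # 1, falsy value ignored
extract_weight({"source": "root.sqfs", "weight": -2})   # 1, non-positive
extract_weight({"source": "root.sqfs", "weight": "x"})  # 1, after a warning
```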
@@ -461,6 +485,7 @@ def run():
             unpack[-1].exclude = entry["exclude"]
         if entry.get("excludeFile", None):
             unpack[-1].excludeFile = entry["excludeFile"]
+        unpack[-1].weight = extract_weight(entry)

         is_first = False

@@ -32,6 +32,12 @@
 # - *excludeFile* is a single file that is passed to rsync as an
 #   --exclude-file argument. This should be a full pathname
 #   inside the **host** filesystem.
+# - *weight* is useful when the entries take wildly different
+#   times to unpack (e.g. with a squashfs, and one single file)
+#   and the total weight of this module should be distributed
+#   differently between the entries. (This is only relevant when
+#   there is more than one entry; by default all the entries
+#   have the same weight, 1)
 #
 # EXAMPLES
 #
@@ -85,8 +91,10 @@ unpack:
 - source: ../CHANGES
   sourcefs: file
   destination: "/tmp/changes.txt"
+  weight: 1 # Single file
 - source: src/qml/calamares/slideshow
   sourcefs: file
   destination: "/tmp/slideshow/"
   exclude: [ "*.qmlc", "qmldir" ]
+  weight: 5 # Lots of files
 # excludeFile: /etc/calamares/modules/unpackfs/exclude-list.txt
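Assuming the weighted progress calculation from the module change above, these example weights give the single CHANGES file 1/6 of the module's progress bar and the slideshow tree the remaining 5/6; without *weight*, each of the two entries would get an equal half.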