diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 28ce15d1d..935416f27 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -63,6 +63,8 @@ include:
project: QubesOS/qubes-continuous-integration
- file: /r4.3/gitlab-host.yml
project: QubesOS/qubes-continuous-integration
+- file: /r4.3/gitlab-host-openqa.yml
+ project: QubesOS/qubes-continuous-integration
lint:
extends: .lint
diff --git a/.pylintrc b/.pylintrc
index 00a9b6dda..bb5d0fc64 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -58,19 +58,19 @@ const-rgx=(([A-Za-z_][A-Za-z0-9_]*)|(__.*__))$
class-rgx=([A-Z_][a-zA-Z0-9]+|TC_\d\d_[a-zA-Z0-9_]+)$
# Regular expression which should only match correct function names
-function-rgx=[a-z_][a-z0-9_]{2,30}$
+function-rgx=(test_[0-9]{3}_[a-z0-9_]{2,50}|[a-z_][a-z0-9_]{2,40})$
# Regular expression which should only match correct method names
-method-rgx=[a-z_][a-z0-9_]{2,30}$
+method-rgx=(test_[0-9]{3}_[a-z0-9_]{2,50}|[a-z_][a-z0-9_]{2,40})$
# Regular expression which should only match correct instance attribute names
-attr-rgx=[a-z_][a-z0-9_]{2,30}$
+attr-rgx=[a-z_][a-z0-9_]{2,40}$
# Regular expression which should only match correct argument names
-argument-rgx=[a-z_][a-z0-9_]{2,30}$
+argument-rgx=[a-z_][a-z0-9_]{2,40}$
# Regular expression which should only match correct variable names
-variable-rgx=[a-z_][a-z0-9_]{2,30}$
+variable-rgx=[a-z_][a-z0-9_]{2,40}$
# Regular expression which should only match correct list comprehension /
# generator expression variable names
diff --git a/Makefile b/Makefile
index ceefb386e..ac12f0d05 100644
--- a/Makefile
+++ b/Makefile
@@ -179,6 +179,7 @@ all:
install:
ifeq ($(OS),Linux)
+ $(MAKE) install -C linux/autostart
$(MAKE) install -C linux/systemd
$(MAKE) install -C linux/aux-tools
$(MAKE) install -C linux/system-config
diff --git a/linux/autostart/Makefile b/linux/autostart/Makefile
new file mode 100644
index 000000000..f0191a651
--- /dev/null
+++ b/linux/autostart/Makefile
@@ -0,0 +1,6 @@
+all:
+ true
+
+install:
+ mkdir -p $(DESTDIR)/etc/xdg/autostart
+ cp qubes-preload-dispvm.desktop $(DESTDIR)/etc/xdg/autostart
diff --git a/linux/autostart/qubes-preload-dispvm.desktop b/linux/autostart/qubes-preload-dispvm.desktop
new file mode 100644
index 000000000..b70597791
--- /dev/null
+++ b/linux/autostart/qubes-preload-dispvm.desktop
@@ -0,0 +1,9 @@
+[Desktop Entry]
+Icon=qubes
+Name=Qubes Preload Disposables
+Comment=Workaround for session monitoring with qubes.WaitForSession
+Categories=System;Monitor;
+Exec=systemctl start qubes-preload-dispvm.service
+Terminal=false
+NoDisplay=true
+Type=Application
diff --git a/linux/aux-tools/Makefile b/linux/aux-tools/Makefile
index d919c84f7..89e0a577a 100644
--- a/linux/aux-tools/Makefile
+++ b/linux/aux-tools/Makefile
@@ -3,6 +3,7 @@ all:
install:
mkdir -p $(DESTDIR)/usr/lib/qubes
- cp cleanup-dispvms $(DESTDIR)/usr/lib/qubes
- cp startup-misc.sh $(DESTDIR)/usr/lib/qubes
+ cp preload-dispvm $(DESTDIR)/usr/lib/qubes/
+ cp cleanup-dispvms $(DESTDIR)/usr/lib/qubes/
+ cp startup-misc.sh $(DESTDIR)/usr/lib/qubes/
cp fix-dir-perms.sh $(DESTDIR)/usr/lib/qubes/
diff --git a/linux/aux-tools/preload-dispvm b/linux/aux-tools/preload-dispvm
new file mode 100755
index 000000000..6bf3f57b4
--- /dev/null
+++ b/linux/aux-tools/preload-dispvm
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+
+import asyncio
+import concurrent.futures
+import qubesadmin
+
+
+def get_max(qube):
+ return int(qube.features.get("preload-dispvm-max", 0) or 0)
+
+
+async def main():
+ domains = qubesadmin.Qubes().domains
+ appvms = [
+ qube
+ for qube in domains
+ if get_max(qube) > 0
+ and qube.klass == "AppVM"
+ and getattr(qube, "template_for_dispvms", False)
+ ]
+ method = "admin.vm.CreateDisposable"
+ loop = asyncio.get_running_loop()
+ tasks = []
+ with concurrent.futures.ThreadPoolExecutor() as executor:
+ for qube in appvms:
+ maximum = get_max(qube)
+ print(f"{qube}:{maximum}")
+ exec_args = qube.qubesd_call, qube.name, method, "preload-autostart"
+ future = loop.run_in_executor(executor, *exec_args)
+ tasks.append(future)
+ await asyncio.gather(*tasks)
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/linux/systemd/Makefile b/linux/systemd/Makefile
index e0cbbf609..761e980b2 100644
--- a/linux/systemd/Makefile
+++ b/linux/systemd/Makefile
@@ -9,6 +9,7 @@ install:
cp qubes-vm@.service $(DESTDIR)$(UNITDIR)
cp qubes-qmemman.service $(DESTDIR)$(UNITDIR)
cp qubesd.service $(DESTDIR)$(UNITDIR)
+ cp qubes-preload-dispvm.service $(DESTDIR)$(UNITDIR)
install -d $(DESTDIR)$(UNITDIR)/lvm2-pvscan@.service.d
install -m 0644 lvm2-pvscan@.service.d_30_qubes.conf \
$(DESTDIR)$(UNITDIR)/lvm2-pvscan@.service.d/30_qubes.conf
diff --git a/linux/systemd/qubes-preload-dispvm.service b/linux/systemd/qubes-preload-dispvm.service
new file mode 100644
index 000000000..3b7e1b336
--- /dev/null
+++ b/linux/systemd/qubes-preload-dispvm.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Preload Qubes DispVMs
+ConditionKernelCommandLine=!qubes.skip_autostart
+# After qubes-meminfo-writer-dom0 (part of qmemman) so the daemon has created
+After=qubesd.service qubes-meminfo-writer-dom0.service
+
+[Service]
+Type=oneshot
+ExecStart=/usr/lib/qubes/preload-dispvm
+Group=qubes
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
diff --git a/linux/systemd/qubes-vm@.service b/linux/systemd/qubes-vm@.service
index 7ccfb53b7..d27b769ce 100644
--- a/linux/systemd/qubes-vm@.service
+++ b/linux/systemd/qubes-vm@.service
@@ -1,12 +1,12 @@
[Unit]
Description=Start Qubes VM %i
After=qubesd.service qubes-meminfo-writer-dom0.service
+Before=qubes-preload-dispvm.service
ConditionKernelCommandLine=!qubes.skip_autostart
[Service]
Type=oneshot
-Environment=DISPLAY=:0
-ExecStart=/usr/bin/qvm-start --skip-if-running %i
+ExecStart=/usr/bin/qvm-start --skip-if-running -- %i
Group=qubes
RemainAfterExit=yes
diff --git a/qubes/api/admin.py b/qubes/api/admin.py
index 654f04bc0..4543c6525 100644
--- a/qubes/api/admin.py
+++ b/qubes/api/admin.py
@@ -24,9 +24,10 @@
import asyncio
import functools
import os
+import pathlib
+import re
import string
import subprocess
-import pathlib
from ctypes import CDLL
@@ -1159,9 +1160,18 @@ async def vm_feature_remove(self):
@qubes.api.method("admin.vm.feature.Set", scope="local", write=True)
async def vm_feature_set(self, untrusted_payload):
- # validation of self.arg done by qrexec-policy is enough
- value = untrusted_payload.decode("ascii", errors="strict")
+ untrusted_value = untrusted_payload.decode("ascii", errors="strict")
del untrusted_payload
+ if re.match(r"\A[a-zA-Z0-9_.-]+\Z", self.arg) is None:
+ raise qubes.exc.QubesValueError(
+ "feature name contains illegal characters"
+ )
+ if re.match(r"\A[\x20-\x7E]*\Z", untrusted_value) is None:
+ raise qubes.exc.QubesValueError(
+ f"{self.arg} value contains illegal characters"
+ )
+ value = untrusted_value
+ del untrusted_value
self.fire_event_for_permission(value=value)
self.dest.features[self.arg] = value
@@ -1296,7 +1306,14 @@ async def _vm_create(
@qubes.api.method("admin.vm.CreateDisposable", scope="global", write=True)
async def create_disposable(self, untrusted_payload):
- self.enforce(not self.arg)
+ """
+ Create a disposable. If the RPC argument is ``preload-autostart``,
+ cleanse the preload list and start preloading fresh disposables.
+ """
+ self.enforce(self.arg in [None, "", "preload-autostart"])
+ preload_autostart = False
+ if self.arg == "preload-autostart":
+ preload_autostart = True
if untrusted_payload not in (b"", b"uuid"):
raise qubes.exc.QubesValueError(
"Invalid payload for admin.vm.CreateDisposable: "
@@ -1304,17 +1321,18 @@ async def create_disposable(self, untrusted_payload):
)
if self.dest.name == "dom0":
- dispvm_template = self.src.default_dispvm
+ appvm = self.src.default_dispvm
else:
- dispvm_template = self.dest
+ appvm = self.dest
- self.fire_event_for_permission(dispvm_template=dispvm_template)
-
- dispvm = await qubes.vm.dispvm.DispVM.from_appvm(dispvm_template)
+ self.fire_event_for_permission(dispvm_template=appvm)
+ if preload_autostart:
+ await appvm.fire_event_async("domain-preload-dispvm-autostart")
+ return
+ dispvm = await qubes.vm.dispvm.DispVM.from_appvm(appvm)
# TODO: move this to extension (in race-free fashion, better than here)
dispvm.tags.add("created-by-" + str(self.src))
dispvm.tags.add("disp-created-by-" + str(self.src))
-
return (
("uuid:" + str(dispvm.uuid)) if untrusted_payload else dispvm.name
)
@@ -1659,7 +1677,10 @@ async def vm_device_set_required(self, endpoint, untrusted_payload):
self.app.save()
@qubes.api.method(
- "admin.vm.device.denied.List", no_payload=True, scope="local", read=True
+ "admin.vm.device.denied.List",
+ no_payload=True,
+ scope="local",
+ read=True,
)
async def vm_device_denied_list(self):
"""
@@ -1767,7 +1788,10 @@ async def vm_firewall_set(self, untrusted_payload):
self.dest.firewall.save()
@qubes.api.method(
- "admin.vm.firewall.Reload", no_payload=True, scope="local", execute=True
+ "admin.vm.firewall.Reload",
+ no_payload=True,
+ scope="local",
+ execute=True,
)
async def vm_firewall_reload(self):
self.enforce(not self.arg)
diff --git a/qubes/api/internal.py b/qubes/api/internal.py
index 870b43c6e..11de1d722 100644
--- a/qubes/api/internal.py
+++ b/qubes/api/internal.py
@@ -18,7 +18,7 @@
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see .
-""" Internal interface for dom0 components to communicate with qubesd. """
+"""Internal interface for dom0 components to communicate with qubesd."""
import asyncio
import json
@@ -45,6 +45,8 @@ class SystemInfoCache:
"domain-shutdown",
"domain-tag-add:*",
"domain-tag-delete:*",
+ "domain-feature-set:internal",
+ "domain-feature-delete:internal",
"property-set:template_for_dispvms",
"property-reset:template_for_dispvms",
"property-set:default_dispvm",
@@ -117,6 +119,7 @@ def get_system_info(cls, app):
system_info = {
"domains": {
domain.name: {
+ "internal": domain.features.get("internal", None),
"tags": list(domain.tags),
"type": domain.__class__.__name__,
"template_for_dispvms": getattr(
@@ -262,6 +265,12 @@ async def suspend_pre(self):
:return:
"""
+ preload_templates = qubes.vm.dispvm.get_preload_templates(
+ self.app.domains
+ )
+ for qube in preload_templates:
+ qube.remove_preload_excess(0)
+
# first keep track of VMs which were paused before suspending
previously_paused = [
vm.name
@@ -406,3 +415,11 @@ async def suspend_post(self):
qubes.config.suspend_timeout,
"qubes.SuspendPostAll",
)
+
+ preload_templates = qubes.vm.dispvm.get_preload_templates(
+ self.app.domains
+ )
+ for qube in preload_templates:
+ asyncio.ensure_future(
+ qube.fire_event_async("domain-preload-dispvm-autostart")
+ )
diff --git a/qubes/config.py b/qubes/config.py
index 2a54b1f11..56cf1ce74 100644
--- a/qubes/config.py
+++ b/qubes/config.py
@@ -106,3 +106,7 @@ class Defaults(TypedDict):
qubes_ipv6_prefix = "fd09:24ef:4179:0000"
suspend_timeout = 60
+
+#: amount of available memory on the system. Beware that the use of a file is
+#: subject to change.
+qmemman_avail_mem_file = "/var/run/qubes/qmemman-avail-mem"
diff --git a/qubes/ext/gui.py b/qubes/ext/gui.py
index 37caae9cb..a78aa82b8 100644
--- a/qubes/ext/gui.py
+++ b/qubes/ext/gui.py
@@ -93,6 +93,14 @@ def on_domain_qdb_create(self, vm, event):
except KeyError:
pass
+ vm.untrusted_qdb.write(
+ "/qubes-gui-enabled",
+ str(
+ bool(
+ getattr(vm, "guivm", None) and vm.features.get("gui", True)
+ )
+ ),
+ )
# Add GuiVM Xen ID for gui-daemon
if getattr(vm, "guivm", None):
if vm != vm.guivm:
@@ -129,6 +137,10 @@ async def on_domain_start(self, vm, event, **kwargs):
domain for domain in self.attached_vms(vm) if domain.is_running()
]
for attached_vm in attached_vms:
+ attached_vm.untrusted_qdb.write(
+ "/qubes-gui-enabled",
+ str(bool(attached_vm.features.get("gui", True))),
+ )
attached_vm.untrusted_qdb.write(
"/qubes-gui-domain-xid", str(vm.xid)
)
diff --git a/qubes/log.py b/qubes/log.py
index 7530314c9..25b50c5db 100644
--- a/qubes/log.py
+++ b/qubes/log.py
@@ -33,7 +33,7 @@ def __init__(self, *args, debug=False, **kwargs):
self.debug = debug
def formatMessage(self, record):
- fmt = ""
+ fmt = "%(levelname)s: "
if self.debug:
fmt += "[%(processName)s %(module)s.%(funcName)s:%(lineno)d] "
if self.debug or record.name.startswith("vm."):
diff --git a/qubes/qmemman/algo.py b/qubes/qmemman/algo.py
index c9fa31adb..685a30eb0 100644
--- a/qubes/qmemman/algo.py
+++ b/qubes/qmemman/algo.py
@@ -247,13 +247,12 @@ def balance_when_low_on_memory(
return donors_rq + acceptors_rq
-# redistribute memory across domains
-# called when one of domains update its 'meminfo' xenstore key
-# return the list of (domain, memory_target) pairs to be passed to
-# "xm memset" equivalent
-def balance(xen_free_memory, domain_dictionary):
+# get memory information
+# called before and after domain balances
+# return a dictionary of various memory data points
+def memory_info(xen_free_memory, domain_dictionary):
log.debug(
- "balance(xen_free_memory={!r}, domain_dictionary={!r})".format(
+ "memory_info(xen_free_memory={!r}, domain_dictionary={!r})".format(
xen_free_memory, domain_dictionary
)
)
@@ -296,18 +295,42 @@ def balance(xen_free_memory, domain_dictionary):
total_mem_pref += prefmem(domain_dictionary[i])
total_available_memory = xen_free_memory - total_memory_needed
- if total_available_memory > 0:
+
+ mem_dictionary = {}
+ mem_dictionary["domain_dictionary"] = domain_dictionary
+ mem_dictionary["total_available_memory"] = total_available_memory
+ mem_dictionary["xen_free_memory"] = xen_free_memory
+ mem_dictionary["total_mem_pref"] = total_mem_pref
+ mem_dictionary["total_mem_pref_acceptors"] = total_mem_pref_acceptors
+ mem_dictionary["donors"] = donors
+ mem_dictionary["acceptors"] = acceptors
+ return mem_dictionary
+
+
+# redistribute memory across domains
+# called when one of domains update its 'meminfo' xenstore key
+# return the list of (domain, memory_target) pairs to be passed to
+# "xm memset" equivalent
+def balance(xen_free_memory, domain_dictionary):
+ log.debug(
+ "balance(xen_free_memory={!r}, domain_dictionary={!r})".format(
+ xen_free_memory, domain_dictionary
+ )
+ )
+ memory_dictionary = memory_info(xen_free_memory, domain_dictionary)
+
+ if memory_dictionary["total_available_memory"] > 0:
return balance_when_enough_memory(
- domain_dictionary,
- xen_free_memory,
- total_mem_pref,
- total_available_memory,
+ memory_dictionary["domain_dictionary"],
+ memory_dictionary["xen_free_memory"],
+ memory_dictionary["total_mem_pref"],
+ memory_dictionary["total_available_memory"],
)
else:
return balance_when_low_on_memory(
- domain_dictionary,
- xen_free_memory,
- total_mem_pref_acceptors,
- donors,
- acceptors,
+ memory_dictionary["domain_dictionary"],
+ memory_dictionary["xen_free_memory"],
+ memory_dictionary["total_mem_pref_acceptors"],
+ memory_dictionary["donors"],
+ memory_dictionary["acceptors"],
)
diff --git a/qubes/qmemman/systemstate.py b/qubes/qmemman/systemstate.py
index 8bb3fac07..a12011183 100644
--- a/qubes/qmemman/systemstate.py
+++ b/qubes/qmemman/systemstate.py
@@ -24,6 +24,7 @@
import time
import xen.lowlevel
+from pathlib import Path
import qubes.qmemman
from qubes.qmemman.domainstate import DomainState
@@ -434,6 +435,17 @@ def do_balance(self):
self.mem_set(dom, mem)
+ xenfree = self.get_free_xen_memory()
+ memory_dictionary = qubes.qmemman.algo.memory_info(
+ xenfree - self.XEN_FREE_MEM_LEFT, self.domdict
+ )
+ avail_mem_file = qubes.config.qmemman_avail_mem_file
+ avail_mem_file_tmp = Path(avail_mem_file).with_suffix(".tmp")
+ with open(avail_mem_file_tmp, "w", encoding="ascii") as file:
+ file.write(str(memory_dictionary["total_available_memory"]))
+ os.chmod(avail_mem_file_tmp, 0o644)
+ os.replace(avail_mem_file_tmp, avail_mem_file)
+
# for i in self.domdict.keys():
# print 'domain ', i, ' meminfo=', self.domdict[i].mem_used, 'actual mem', self.domdict[i].memory_actual
diff --git a/qubes/tests/__init__.py b/qubes/tests/__init__.py
index 7620b2e57..456107529 100644
--- a/qubes/tests/__init__.py
+++ b/qubes/tests/__init__.py
@@ -1799,6 +1799,7 @@ def load_tests(loader, tests, pattern): # pylint: disable=unused-argument
"qubes.tests.ext",
"qubes.tests.vm.qubesvm",
"qubes.tests.vm.mix.net",
+ "qubes.tests.vm.mix.dvmtemplate",
"qubes.tests.vm.adminvm",
"qubes.tests.vm.appvm",
"qubes.tests.vm.dispvm",
diff --git a/qubes/tests/api_admin.py b/qubes/tests/api_admin.py
index 8391facf5..b4abb78c3 100644
--- a/qubes/tests/api_admin.py
+++ b/qubes/tests/api_admin.py
@@ -18,7 +18,7 @@
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see .
-""" Tests for management calls endpoints """
+"""Tests for management calls endpoints"""
import asyncio
import operator
@@ -167,6 +167,18 @@ def call_internal_mgmt_func(self, method, dest, arg=b"", payload=b""):
# noinspection PyUnresolvedReferences
class TC_00_VMs(AdminAPITestCase):
+ def _test_event_handler(
+ self, vm, event, *args, **kwargs
+ ): # pylint: disable=unused-argument
+ if not hasattr(self, "event_handler"):
+ self.event_handler = {}
+ self.event_handler.setdefault(vm.name, {})[event] = True
+
+ def _test_event_was_handled(self, vm, event):
+ if not hasattr(self, "event_handler"):
+ self.event_handler = {}
+ return self.event_handler.get(vm, {}).get(event)
+
def test_000_vm_list(self):
value = self.call_mgmt_func(b"admin.vm.List", b"dom0")
self.assertEqual(
@@ -672,7 +684,9 @@ def test_090_vm_volume_listsnapshots_invalid_volume(self):
self.vm.volumes.configure_mock(**volumes_conf)
with self.assertRaises(qubes.exc.PermissionDenied):
self.call_mgmt_func(
- b"admin.vm.volume.ListSnapshots", b"test-vm1", b"no-such-volume"
+ b"admin.vm.volume.ListSnapshots",
+ b"test-vm1",
+ b"no-such-volume",
)
self.assertEqual(
self.vm.volumes.mock_calls, [unittest.mock.call.keys()]
@@ -1293,7 +1307,10 @@ def test_200_label_create_invalid_name(self):
)
with self.assertRaises(qubes.exc.PermissionDenied):
self.call_mgmt_func(
- b"admin.label.Create", b"dom0", b"strange-name!@#$", b"0xff0000"
+ b"admin.label.Create",
+ b"dom0",
+ b"strange-name!@#$",
+ b"0xff0000",
)
self.assertEqual(self.app.get_label.mock_calls, [])
@@ -1697,7 +1714,9 @@ def test_316_feature_checkwithnetvm_netvm(self):
def test_317_feature_checkwithnetvm_none(self):
with self.assertRaises(qubes.exc.QubesFeatureNotFoundError):
self.call_mgmt_func(
- b"admin.vm.feature.CheckWithNetvm", b"test-vm1", b"test-feature"
+ b"admin.vm.feature.CheckWithNetvm",
+ b"test-vm1",
+ b"test-feature",
)
self.assertFalse(self.app.save.called)
@@ -1730,7 +1749,10 @@ def test_319_feature_checkwithtpladminvm(self):
def test_320_feature_set(self):
value = self.call_mgmt_func(
- b"admin.vm.feature.Set", b"test-vm1", b"test-feature", b"some-value"
+ b"admin.vm.feature.Set",
+ b"test-vm1",
+ b"test-feature",
+ b"some-value",
)
self.assertIsNone(value)
self.assertEqual(self.vm.features["test-feature"], "some-value")
@@ -1983,7 +2005,8 @@ def test_340_vm_create_in_pool_app(self, storage_mock):
# setting pool= affect only volumes actually created for this VM,
# not used from a template or so
self.assertEqual(
- vm.volume_config["root"]["pool"], self.template.volumes["root"].pool
+ vm.volume_config["root"]["pool"],
+ self.template.volumes["root"].pool,
)
self.assertEqual(vm.volume_config["private"]["pool"], "test")
self.assertEqual(vm.volume_config["volatile"]["pool"], "test")
@@ -2015,11 +2038,13 @@ def test_341_vm_create_in_pool_private(self, storage_mock):
self.assertEqual(vm.label, self.app.get_label("red"))
self.assertEqual(vm.template, self.app.domains["test-template"])
self.assertEqual(
- vm.volume_config["root"]["pool"], self.template.volumes["root"].pool
+ vm.volume_config["root"]["pool"],
+ self.template.volumes["root"].pool,
)
self.assertEqual(vm.volume_config["private"]["pool"], "test")
self.assertEqual(
- vm.volume_config["volatile"]["pool"], self.app.default_pool_volatile
+ vm.volume_config["volatile"]["pool"],
+ self.app.default_pool_volatile,
)
self.assertEqual(vm.volume_config["kernel"]["pool"], "linux-kernel")
self.assertEqual(
@@ -2376,7 +2401,9 @@ def test_473_vm_device_list_assigned_specific(self):
self.vm.devices["testclass"].assign(assignment)
)
value = self.call_mgmt_func(
- b"admin.vm.device.testclass.Assigned", b"test-vm1", b"test-vm1+1234"
+ b"admin.vm.device.testclass.Assigned",
+ b"test-vm1",
+ b"test-vm1+1234",
)
self.assertEqual(
value,
@@ -2403,7 +2430,9 @@ def test_474_vm_device_list_attached_specific(self):
self.device_list_multiple_attached_testclass,
)
value = self.call_mgmt_func(
- b"admin.vm.device.testclass.Attached", b"test-vm1", b"test-vm1+1234"
+ b"admin.vm.device.testclass.Attached",
+ b"test-vm1",
+ b"test-vm1+1234",
)
self.assertEqual(
value,
@@ -3039,9 +3068,10 @@ def test_516_vm_volume_import_fire_event_with_size(self):
)
def test_510_vm_volume_import_end_success(self):
- import_data_end_mock, self.vm.storage.import_data_end = (
- self.coroutine_mock()
- )
+ (
+ import_data_end_mock,
+ self.vm.storage.import_data_end,
+ ) = self.coroutine_mock()
self.call_internal_mgmt_func(
b"internal.vm.volume.ImportEnd",
b"test-vm1",
@@ -3054,9 +3084,10 @@ def test_510_vm_volume_import_end_success(self):
)
def test_510_vm_volume_import_end_failure(self):
- import_data_end_mock, self.vm.storage.import_data_end = (
- self.coroutine_mock()
- )
+ (
+ import_data_end_mock,
+ self.vm.storage.import_data_end,
+ ) = self.coroutine_mock()
with self.assertRaisesRegex(qubes.exc.QubesException, "error message"):
self.call_internal_mgmt_func(
b"internal.vm.volume.ImportEnd",
@@ -3809,6 +3840,60 @@ def test_642_vm_create_disposable_not_allowed(self, storage_mock):
self.call_mgmt_func(b"admin.vm.CreateDisposable", b"test-vm1")
self.assertFalse(self.app.save.called)
+ @unittest.mock.patch("qubes.vm.dispvm.DispVM._bare_cleanup")
+ @unittest.mock.patch("qubes.vm.dispvm.DispVM.start")
+ @unittest.mock.patch("qubes.storage.Storage.verify")
+ @unittest.mock.patch("qubes.storage.Storage.create")
+ def test_643_vm_create_disposable_preload_autostart(
+ self,
+ mock_storage_create,
+ mock_storage_verify,
+ mock_dispvm_start,
+ mock_bare_cleanup,
+ ):
+ mock_storage_create.side_effect = self.dummy_coro
+ mock_storage_verify.side_effect = self.dummy_coro
+ mock_dispvm_start.side_effect = self.dummy_coro
+ mock_bare_cleanup.side_effect = self.dummy_coro
+ self.vm.template_for_dispvms = True
+ self.app.default_dispvm = self.vm
+ self.vm.add_handler(
+ "domain-preload-dispvm-autostart", self._test_event_handler
+ )
+ self.vm.features["qrexec"] = "1"
+ self.vm.features["supported-rpc.qubes.WaitForSession"] = "1"
+ self.vm.features["preload-dispvm-max"] = "1"
+ for _ in range(10):
+ if len(self.vm.get_feat_preload()) == 1:
+ break
+ self.loop.run_until_complete(asyncio.sleep(1))
+ else:
+ self.fail("didn't preload in time")
+ old_preload = self.vm.get_feat_preload()
+ retval = self.call_mgmt_func(
+ b"admin.vm.CreateDisposable", b"dom0", arg=b"preload-autostart"
+ )
+ self.assertTrue(
+ self._test_event_was_handled(
+ self.vm.name, "domain-preload-dispvm-autostart"
+ )
+ )
+ for _ in range(10):
+ if (
+ old_preload != self.vm.get_feat_preload()
+ and self.vm.get_feat_preload() != []
+ ):
+ break
+ self.loop.run_until_complete(asyncio.sleep(1))
+ else:
+ self.fail("didn't preload again in time")
+ dispvm_preload = self.vm.get_feat_preload()
+ self.assertEqual(len(dispvm_preload), 1)
+ self.assertIsNone(retval)
+ self.assertEqual(2, mock_storage_create.call_count)
+ self.assertEqual(2, mock_dispvm_start.call_count)
+ self.assertTrue(self.app.save.called)
+
def test_650_vm_device_set_mode_required(self):
assignment = DeviceAssignment(
VirtualDevice(Port(self.vm, "1234", "testclass"), device_id="bee"),
@@ -4158,7 +4243,10 @@ def test_702_pool_set_revisions_to_keep_not_a_number(self):
def test_703_pool_set_ephemeral(self):
self.app.pools["test-pool"] = unittest.mock.Mock()
value = self.call_mgmt_func(
- b"admin.pool.Set.ephemeral_volatile", b"dom0", b"test-pool", b"true"
+ b"admin.pool.Set.ephemeral_volatile",
+ b"dom0",
+ b"test-pool",
+ b"true",
)
self.assertIsNone(value)
self.assertEqual(self.app.pools["test-pool"].mock_calls, [])
@@ -4327,7 +4415,10 @@ def test_731_vm_console_not_running(self):
"\n"
)
self.vm._libvirt_domain.configure_mock(
- **{"XMLDesc.return_value": xml_desc, "isActive.return_value": False}
+ **{
+ "XMLDesc.return_value": xml_desc,
+ "isActive.return_value": False,
+ }
)
with self.assertRaises(qubes.exc.QubesVMNotRunningError):
self.call_mgmt_func(b"admin.vm.Console", b"test-vm1")
@@ -4407,7 +4498,8 @@ def test_901_current_state_changed(self):
self.vm.get_power_state = lambda: "Running"
value = self.call_mgmt_func(b"admin.vm.CurrentState", b"test-vm1")
self.assertEqual(
- value, "mem=512 mem_static_max=1024 cputime=100 power_state=Running"
+ value,
+ "mem=512 mem_static_max=1024 cputime=100 power_state=Running",
)
def test_990_vm_unexpected_payload(self):
diff --git a/qubes/tests/api_internal.py b/qubes/tests/api_internal.py
index d2a733ccb..5646c2523 100644
--- a/qubes/tests/api_internal.py
+++ b/qubes/tests/api_internal.py
@@ -44,6 +44,7 @@ def setUp(self):
self.app = mock.NonCallableMock()
self.dom0 = mock.NonCallableMock(spec=qubes.vm.adminvm.AdminVM)
self.dom0.name = "dom0"
+ self.dom0.features = {}
self.domains = {
"dom0": self.dom0,
}
@@ -64,6 +65,7 @@ def create_mockvm(self, features=None):
features = {}
vm = mock.Mock()
vm.features.check_with_template.side_effect = features.get
+ vm.features.get.side_effect = features.get
vm.run_service.return_value.wait = mock_coro(
vm.run_service.return_value.wait
)
@@ -203,6 +205,7 @@ def test_001_suspend_post(self):
def test_010_get_system_info(self):
self.dom0.name = "dom0"
+ self.dom0.features = {}
self.dom0.tags = ["tag1", "tag2"]
self.dom0.default_dispvm = None
self.dom0.template_for_dispvms = False
@@ -213,6 +216,7 @@ def test_010_get_system_info(self):
vm = mock.NonCallableMock(spec=qubes.vm.qubesvm.QubesVM)
vm.name = "vm"
+ vm.features = {"internal": 1}
vm.tags = ["tag3", "tag4"]
vm.default_dispvm = vm
vm.template_for_dispvms = True
@@ -230,6 +234,7 @@ def test_010_get_system_info(self):
"default_dispvm": None,
"template_for_dispvms": False,
"icon": "icon-dom0",
+ "internal": None,
"guivm": None,
"power_state": "Running",
"relayvm": None,
@@ -242,6 +247,7 @@ def test_010_get_system_info(self):
"default_dispvm": "vm",
"template_for_dispvms": True,
"icon": "icon-vm",
+ "internal": 1,
"guivm": "vm",
"power_state": "Halted",
"relayvm": None,
diff --git a/qubes/tests/integ/dispvm.py b/qubes/tests/integ/dispvm.py
index 0d656b40f..017d3590c 100644
--- a/qubes/tests/integ/dispvm.py
+++ b/qubes/tests/integ/dispvm.py
@@ -22,22 +22,20 @@
import os
import pwd
import subprocess
-import tempfile
import time
import unittest
from contextlib import suppress
-
from distutils import spawn
-
+from unittest.mock import patch, mock_open
import asyncio
-
import sys
+import qubes.config
import qubes.tests
+import qubesadmin.exc
class TC_04_DispVM(qubes.tests.SystemTestCase):
-
def setUp(self):
super(TC_04_DispVM, self).setUp()
self.init_default_template()
@@ -184,8 +182,7 @@ def test_011_failed_start_timeout(self):
class TC_20_DispVMMixin(object):
-
- def setUp(self):
+ def setUp(self): # pylint: disable=invalid-name
super(TC_20_DispVMMixin, self).setUp()
if "whonix-g" in self.template:
self.skipTest(
@@ -201,12 +198,134 @@ def setUp(self):
self.loop.run_until_complete(self.disp_base.create_on_disk())
self.app.default_dispvm = self.disp_base
self.app.save()
+ self.preload_cmd = [
+ "qvm-run",
+ "-p",
+ f"--dispvm={self.disp_base.name}",
+ "--",
+ "qubesdb-read /name | tr -d '\n'",
+ ]
- def tearDown(self):
+ def tearDown(self): # pylint: disable=invalid-name
+ if "gui" in self.disp_base.features:
+ del self.disp_base.features["gui"]
+ old_preload = self.disp_base.get_feat_preload()
self.app.default_dispvm = None
+ tasks = [self.app.domains[x].cleanup() for x in old_preload]
+ self.loop.run_until_complete(asyncio.gather(*tasks))
+ self.disp_base.features["preload-dispvm-max"] = False
super(TC_20_DispVMMixin, self).tearDown()
- def test_010_simple_dvm_run(self):
+ def _test_event_handler(
+ self, vm, event, *args, **kwargs
+ ): # pylint: disable=unused-argument
+ if not hasattr(self, "event_handler"):
+ self.event_handler = {}
+ self.event_handler.setdefault(vm.name, {})[event] = True
+
+ def _test_event_was_handled(self, vm, event):
+ if not hasattr(self, "event_handler"):
+ self.event_handler = {}
+ return self.event_handler.get(vm, {}).get(event)
+
+ async def no_preload(self):
+ # Trick to gather this function as an async task.
+ await asyncio.sleep(0)
+ self.disp_base.features["preload-dispvm-max"] = False
+
+ async def run_preload_proc(self):
+ proc = await asyncio.create_subprocess_exec(
+ *self.preload_cmd,
+ stdout=asyncio.subprocess.PIPE,
+ )
+ try:
+ stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=60)
+ return stdout.decode()
+ except asyncio.TimeoutError:
+ proc.terminate()
+ await proc.wait()
+ raise
+
+ async def run_preload(self):
+ appvm = self.disp_base
+ dispvm = appvm.get_feat_preload()[0]
+ dispvm = self.app.domains[dispvm]
+ self.assertTrue(dispvm.is_preload)
+ self.assertTrue(dispvm.features.get("internal", False))
+ appvm.add_handler(
+ "domain-preload-dispvm-autostart", self._test_event_handler
+ )
+ appvm.add_handler(
+ "domain-preload-dispvm-start", self._test_event_handler
+ )
+ appvm.add_handler(
+ "domain-preload-dispvm-used", self._test_event_handler
+ )
+ dispvm.add_handler("domain-paused", self._test_event_handler)
+ dispvm.add_handler("domain-unpaused", self._test_event_handler)
+ dispvm.add_handler(
+ "domain-feature-set:preload-dispvm-completed",
+ self._test_event_handler,
+ )
+ dispvm.add_handler(
+ "domain-feature-set:preload-dispvm-in-progress",
+ self._test_event_handler,
+ )
+ dispvm.add_handler(
+ "domain-feature-delete:preload-dispvm-in-progress",
+ self._test_event_handler,
+ )
+ dispvm.add_handler(
+ "domain-feature-delete:internal", self._test_event_handler
+ )
+ dispvm_name = dispvm.name
+ stdout = await self.run_preload_proc()
+ self.assertEqual(stdout, dispvm_name)
+ self.assertFalse(
+ self._test_event_was_handled(
+ appvm.name, "domain-preload-dispvm-autostart"
+ )
+ )
+ self.assertFalse(
+ self._test_event_was_handled(
+ appvm.name, "domain-preload-dispvm-start"
+ )
+ )
+ self.assertTrue(
+ self._test_event_was_handled(
+ appvm.name, "domain-preload-dispvm-used"
+ )
+ )
+ self.assertTrue(
+ self._test_event_was_handled(
+ dispvm_name, "domain-feature-set:preload-dispvm-completed"
+ )
+ )
+ self.assertTrue(
+ self._test_event_was_handled(
+ dispvm_name, "domain-feature-set:preload-dispvm-in-progress"
+ )
+ )
+ self.assertTrue(
+ self._test_event_was_handled(
+ dispvm_name, "domain-feature-delete:preload-dispvm-in-progress"
+ )
+ )
+ if self._test_event_was_handled(dispvm_name, "domain-paused"):
+ self.assertTrue(
+ self._test_event_was_handled(dispvm_name, "domain-unpaused")
+ )
+ if not appvm.features.get("internal", False):
+ self.assertTrue(
+ self._test_event_was_handled(
+ dispvm_name, "domain-feature-delete:internal"
+ )
+ )
+ next_preload_list = appvm.get_feat_preload()
+ self.assertTrue(next_preload_list)
+ self.assertNotIn(dispvm_name, next_preload_list)
+
+ def test_010_dvm_run_simple(self):
dispvm = self.loop.run_until_complete(
qubes.vm.dispvm.DispVM.from_appvm(self.disp_base)
)
@@ -221,6 +340,158 @@ def test_010_simple_dvm_run(self):
finally:
self.loop.run_until_complete(dispvm.cleanup())
+ def test_011_dvm_run_preload_reject_max(self):
+ """Test preloading when max has been reached"""
+ self.loop.run_until_complete(
+ qubes.vm.dispvm.DispVM.from_appvm(self.disp_base, preload=True)
+ )
+ self.assertEqual(0, len(self.disp_base.get_feat_preload()))
+
+ def test_012_dvm_run_preload_low_mem(self):
+ """Test preloading with low memory"""
+ self.loop.run_until_complete(self._test_012_dvm_run_preload_low_mem())
+
+ async def _test_012_dvm_run_preload_low_mem(self):
+ # pylint: disable=unspecified-encoding
+ unpatched_open = open
+
+ def mock_open_mem(file, *args, **kwargs):
+ if file == qubes.config.qmemman_avail_mem_file:
+ memory = str(getattr(self.disp_base, "memory", 0) * 1024 * 1024)
+ return mock_open(read_data=memory)()
+ return unpatched_open(file, *args, **kwargs)
+
+ with patch("builtins.open", side_effect=mock_open_mem):
+ self.disp_base.features["preload-dispvm-max"] = "2"
+ for _ in range(15):
+ if len(self.disp_base.get_feat_preload()) == 2:
+ break
+ await asyncio.sleep(1)
+ self.assertEqual(1, len(self.disp_base.get_feat_preload()))
+
+ def test_013_dvm_run_preload_gui(self):
+ """Test preloading with GUI feature enabled"""
+ self.loop.run_until_complete(self._test_013_dvm_run_preload_gui())
+
+ async def _test_013_dvm_run_preload_gui(self):
+ self.disp_base.features["gui"] = True
+ self.disp_base.features["preload-dispvm-max"] = "1"
+ for _ in range(10):
+ if len(self.disp_base.get_feat_preload()) == 1:
+ break
+ await asyncio.sleep(1)
+ else:
+ self.fail("didn't preload in time")
+ await self.run_preload()
+
+ def test_014_dvm_run_preload_nogui(self):
+ """Test preloading with GUI feature disabled"""
+ self.loop.run_until_complete(self._test_014_dvm_run_preload_nogui())
+
+ async def _test_014_dvm_run_preload_nogui(self):
+ self.disp_base.features["gui"] = False
+ self.disp_base.features["preload-dispvm-max"] = "1"
+ for _ in range(10):
+ if len(self.disp_base.get_feat_preload()) == 1:
+ break
+ await asyncio.sleep(1)
+ else:
+ self.fail("didn't preload in time")
+ self.preload_cmd.insert(1, "--no-gui")
+ await self.run_preload()
+
+ def test_015_dvm_run_preload_race_more(self):
+ """Test race requesting multiple preloaded qubes"""
+ self.loop.run_until_complete(self._test_015_dvm_run_preload_race_more())
+
+ async def _test_preload_wait_pause(self, preload_max):
+ """Waiting for pause avoids objects leaking."""
+ for _ in range(60):
+ if len(self.disp_base.get_feat_preload()) == preload_max:
+ break
+ await asyncio.sleep(1)
+ else:
+ self.fail("didn't preload in time")
+ preload_dispvm = self.disp_base.get_feat_preload()
+ preload_unfinished = preload_dispvm
+ for _ in range(60):
+ for qube in preload_unfinished.copy():
+ if self.app.domains[qube].is_paused():
+ preload_unfinished.remove(qube)
+ continue
+ if not preload_unfinished:
+ break
+ await asyncio.sleep(1)
+ else:
+ self.fail("last preloaded didn't pause in time")
+
+ async def _test_015_dvm_run_preload_race_more(self):
+ # The limiting factor is how much memory is available on OpenQA and the
+ # unreasonable memory allocated before the qube is paused due to:
+ # https://github.com/QubesOS/qubes-issues/issues/9917
+ # Whonix (Kicksecure) 17 fails more due to memory consumption. Of the
+ # templates deployed by default, only Debian and Fedora survive due to
+ # using less memory than the other OSes.
+ preload_max = 4
+ os_dist = self.disp_base.features.check_with_template("os-distribution")
+ if os_dist in ["whonix", "kicksecure"]:
+ preload_max -= 1
+ self.disp_base.features["preload-dispvm-max"] = str(preload_max)
+ await self._test_preload_wait_pause(preload_max)
+ old_preload = self.disp_base.get_feat_preload()
+ tasks = [self.run_preload_proc() for _ in range(preload_max)]
+ targets = await asyncio.gather(*tasks)
+ await self._test_preload_wait_pause(preload_max)
+ preload_dispvm = self.disp_base.get_feat_preload()
+ self.assertTrue(set(old_preload).isdisjoint(preload_dispvm))
+ self.assertEqual(len(targets), preload_max)
+ self.assertEqual(len(targets), len(set(targets)))
+
+ def test_016_dvm_run_preload_race_less(self):
+ """Test race requesting preloaded qube while the maximum is zeroed."""
+ self.loop.run_until_complete(self._test_016_dvm_run_preload_race_less())
+
+ async def _test_016_dvm_run_preload_race_less(self):
+ self.disp_base.features["preload-dispvm-max"] = "1"
+ for _ in range(60):
+ if len(self.disp_base.get_feat_preload()) == 1:
+ break
+ await asyncio.sleep(1)
+ else:
+ self.fail("didn't preload in time")
+ tasks = [self.run_preload_proc(), self.no_preload()]
+ target = await asyncio.gather(*tasks)
+ target_dispvm = target[0]
+ self.assertTrue(target_dispvm.startswith("disp"))
+
+ def test_017_dvm_run_preload_autostart(self):
+ proc = self.loop.run_until_complete(
+ asyncio.create_subprocess_exec("/usr/lib/qubes/preload-dispvm")
+ )
+ self.loop.run_until_complete(
+ asyncio.wait_for(proc.communicate(), timeout=10)
+ )
+ self.assertEqual(self.disp_base.get_feat_preload(), [])
+ self.disp_base.features["preload-dispvm-max"] = "1"
+ for _ in range(10):
+ if len(self.disp_base.get_feat_preload()) == 1:
+ break
+ self.loop.run_until_complete(asyncio.sleep(1))
+ else:
+ self.fail("didn't preload in time")
+ old_preload = self.disp_base.get_feat_preload()
+ proc = self.loop.run_until_complete(
+ asyncio.create_subprocess_exec("/usr/lib/qubes/preload-dispvm")
+ )
+ self.loop.run_until_complete(asyncio.wait_for(proc.wait(), timeout=30))
+ preload_dispvm = self.disp_base.get_feat_preload()
+ self.assertEqual(len(old_preload), 1)
+ self.assertEqual(len(preload_dispvm), 1)
+ self.assertTrue(
+ set(old_preload).isdisjoint(preload_dispvm),
+ f"old_preload={old_preload} preload_dispvm={preload_dispvm}",
+ )
+
@unittest.skipUnless(
spawn.find_executable("xdotool"), "xdotool not installed"
)
@@ -287,7 +558,10 @@ def _handle_editor(self, winid, copy=False):
["xdotool", "getwindowname", winid], stdout=subprocess.PIPE
).communicate()
window_title = (
- window_title.decode().strip().replace("(", "\(").replace(")", "\)")
+ window_title.decode()
+ .strip()
+ .replace("(", r"\(")
+ .replace(")", r"\)")
)
time.sleep(1)
if (
diff --git a/qubes/tests/run.py b/qubes/tests/run.py
index 576e9c375..bfd1fd0bb 100755
--- a/qubes/tests/run.py
+++ b/qubes/tests/run.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
#
# The Qubes OS Project, https://www.qubes-os.org/
#
@@ -303,10 +304,14 @@ def test_6_unexpected_success(self):
return runner.run(suite).wasSuccessful()
+epilog = """\
+When running only specific tests, write their names like in the log format:
+MODULE+"/"+CLASS+"/"+FUNCTION.
+Example: qubes.tests.basic/TC_00_Basic/test_000_create
+"""
+
parser = argparse.ArgumentParser(
- epilog="""When running only specific tests, write their names like in log,
- in format: MODULE+"/"+CLASS+"/"+FUNCTION. MODULE should omit initial
- "qubes.tests.". Example: basic/TC_00_Basic/test_000_create"""
+ formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog
)
parser.add_argument(
diff --git a/qubes/tests/vm/dispvm.py b/qubes/tests/vm/dispvm.py
index aae58ddfa..118899014 100644
--- a/qubes/tests/vm/dispvm.py
+++ b/qubes/tests/vm/dispvm.py
@@ -23,6 +23,7 @@
import asyncio
+import qubes.events
import qubes.vm.dispvm
import qubes.vm.appvm
import qubes.vm.templatevm
@@ -60,6 +61,7 @@ def setUp(self):
self.template = self.app.add_new_vm(
qubes.vm.templatevm.TemplateVM, name="test-template", label="red"
)
+ self.template.features["qrexec"] = True
self.appvm = self.app.add_new_vm(
qubes.vm.appvm.AppVM,
name="test-vm",
@@ -69,6 +71,11 @@ def setUp(self):
self.app.domains[self.appvm.name] = self.appvm
self.app.domains[self.appvm] = self.appvm
self.addCleanup(self.cleanup_dispvm)
+ self.emitter = qubes.tests.TestEmitter()
+
+ def tearDown(self):
+ del self.emitter
+ super(TC_00_DispVM, self).tearDown()
def cleanup_dispvm(self):
if hasattr(self, "dispvm"):
@@ -84,6 +91,18 @@ def cleanup_dispvm(self):
async def mock_coro(self, *args, **kwargs):
pass
+ def _test_event_handler(
+ self, vm, event, *args, **kwargs
+ ): # pylint: disable=unused-argument
+ if not hasattr(self, "event_handler"):
+ self.event_handler = {}
+ self.event_handler.setdefault(vm.name, {})[event] = True
+
+ def _test_event_was_handled(self, vm, event):
+ if not hasattr(self, "event_handler"):
+ self.event_handler = {}
+ return self.event_handler.get(vm, {}).get(event)
+
@mock.patch("os.symlink")
@mock.patch("os.makedirs")
@mock.patch("qubes.storage.Storage")
@@ -107,13 +126,97 @@ def test_000_from_appvm(self, mock_storage, mock_makedirs, mock_symlink):
self.assertEqual(dispvm.name, "disp42")
self.assertEqual(dispvm.template, self.appvm)
self.assertEqual(dispvm.label, self.appvm.label)
- self.assertEqual(dispvm.label, self.appvm.label)
self.assertEqual(dispvm.auto_cleanup, True)
mock_makedirs.assert_called_once_with(
"/var/lib/qubes/appvms/" + dispvm.name, mode=0o775, exist_ok=True
)
mock_symlink.assert_not_called()
+ @mock.patch("qubes.storage.Storage")
+ def test_000_from_appvm_preload_reject_max(self, mock_storage):
+ mock_storage.return_value.create.side_effect = self.mock_coro
+ self.appvm.template_for_dispvms = True
+ orig_getitem = self.app.domains.__getitem__
+ self.appvm.features["supported-rpc.qubes.WaitForRunningSystem"] = True
+ self.appvm.features["preload-dispvm-max"] = "0"
+ with mock.patch.object(
+ self.app, "domains", wraps=self.app.domains
+ ) as mock_domains:
+ mock_domains.configure_mock(
+ **{
+ "get_new_unused_dispid": mock.Mock(return_value=42),
+ "__getitem__.side_effect": orig_getitem,
+ }
+ )
+ self.loop.run_until_complete(
+ qubes.vm.dispvm.DispVM.from_appvm(self.appvm, preload=True)
+ )
+ mock_domains.get_new_unused_dispid.assert_not_called()
+
+ @mock.patch("qubes.vm.qubesvm.QubesVM.start")
+ @mock.patch("os.symlink")
+ @mock.patch("os.makedirs")
+ @mock.patch("qubes.storage.Storage")
+ def test_000_from_appvm_preload_use(
+ self,
+ mock_storage,
+ mock_makedirs,
+ mock_symlink,
+ mock_start,
+ ):
+ mock_storage.return_value.create.side_effect = self.mock_coro
+ mock_start.side_effect = self.mock_coro
+ self.appvm.template_for_dispvms = True
+
+ self.appvm.features["supported-rpc.qubes.WaitForRunningSystem"] = True
+ self.appvm.features["preload-dispvm-max"] = "1"
+ orig_getitem = self.app.domains.__getitem__
+ with mock.patch.object(
+ self.app, "domains", wraps=self.app.domains
+ ) as mock_domains:
+ mock_qube = mock.Mock()
+ mock_qube.template = self.appvm
+ mock_qube.qrexec_timeout = self.appvm.qrexec_timeout
+ mock_qube.preload_complete = mock.Mock(spec=asyncio.Event)
+ mock_qube.preload_complete.is_set.return_value = True
+ mock_qube.preload_complete.set = self.mock_coro
+ mock_qube.preload_complete.clear = self.mock_coro
+ mock_qube.preload_complete.wait = self.mock_coro
+ mock_domains.configure_mock(
+ **{
+ "get_new_unused_dispid": mock.Mock(return_value=42),
+ "__contains__.return_value": True,
+ "__getitem__.side_effect": lambda key: (
+ mock_qube if key == "disp42" else orig_getitem(key)
+ ),
+ }
+ )
+ dispvm = self.loop.run_until_complete(
+ qubes.vm.dispvm.DispVM.from_appvm(self.appvm, preload=True)
+ )
+ self.assertEqual(self.appvm.get_feat_preload(), ["disp42"])
+ self.assertTrue(dispvm.is_preload)
+ self.assertTrue(dispvm.features.get("internal", False))
+ self.assertEqual(dispvm.name, "disp42")
+ self.assertEqual(dispvm.template, self.appvm)
+ self.assertEqual(dispvm.label, self.appvm.label)
+ self.assertEqual(dispvm.auto_cleanup, True)
+ mock_qube.name = dispvm.name
+ mock_qube.features = dispvm.features
+ mock_qube.unpause = self.mock_coro
+ fresh_dispvm = self.loop.run_until_complete(
+ qubes.vm.dispvm.DispVM.from_appvm(self.appvm)
+ )
+ mock_domains.get_new_unused_dispid.assert_called_once_with()
+ mock_start.assert_called_once_with()
+ mock_makedirs.assert_called_once_with(
+ "/var/lib/qubes/appvms/" + dispvm.name, mode=0o775, exist_ok=True
+ )
+ mock_symlink.assert_not_called()
+ # Marking as preloaded is done on integration tests, checking if we
+ # can use the same qube that was preloaded is enough for unit tests.
+ self.assertEqual(dispvm.name, fresh_dispvm.name)
+
def test_001_from_appvm_reject_not_allowed(self):
with self.assertRaises(qubes.exc.QubesException):
dispvm = self.loop.run_until_complete(
@@ -287,7 +390,8 @@ def test_020_copy_storage_pool(self, mock_makedirs, mock_symlink):
dispvm.volumes["root"].pool, self.appvm.volumes["root"].pool
)
self.assertIs(
- dispvm.volumes["volatile"].pool, self.appvm.volumes["volatile"].pool
+ dispvm.volumes["volatile"].pool,
+ self.appvm.volumes["volatile"].pool,
)
self.assertFalse(dispvm.volumes["volatile"].ephemeral)
@@ -336,7 +440,8 @@ def test_021_storage_template_change(self):
self.appvm.volume_config["root"]["source"],
)
self.assertIs(
- vm.volume_config["private"]["source"], self.appvm.volumes["private"]
+ vm.volume_config["private"]["source"],
+ self.appvm.volumes["private"],
)
def test_022_storage_app_change(self):
@@ -394,7 +499,8 @@ def test_022_storage_app_change(self):
self.appvm.volumes["root"].source,
)
self.assertNotEqual(
- vm.volume_config["private"]["source"], self.appvm.volumes["private"]
+ vm.volume_config["private"]["source"],
+ self.appvm.volumes["private"],
)
self.assertIs(
vm.volume_config["root"]["source"], template2.volumes["root"]
diff --git a/qubes/tests/vm/mix/dvmtemplate.py b/qubes/tests/vm/mix/dvmtemplate.py
new file mode 100755
index 000000000..652fa100b
--- /dev/null
+++ b/qubes/tests/vm/mix/dvmtemplate.py
@@ -0,0 +1,208 @@
+# pylint: disable=protected-access
+
+#
+# The Qubes OS Project, https://www.qubes-os.org/
+#
+# Copyright (C) 2025 Benjamin Grande M. S.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <https://www.gnu.org/licenses/>.
+#
+
+from unittest import mock
+
+import qubes
+import qubes.vm.qubesvm
+
+import qubes.tests
+import qubes.tests.vm
+import qubes.tests.vm.appvm
+import qubes.tests.vm.qubesvm
+import qubes.vm.mix.dvmtemplate
+
+
+class TestApp(qubes.tests.vm.TestApp):
+ def __init__(self):
+ super(TestApp, self).__init__()
+ self.qid_counter = 1
+
+ def add_new_vm(self, cls, **kwargs):
+ qid = self.qid_counter
+ self.qid_counter += 1
+ vm = cls(self, None, qid=qid, **kwargs)
+ self.domains[vm.name] = vm
+ self.domains[vm] = vm
+ return vm
+
+
+class TC_00_DVMTemplateMixin(
+ qubes.tests.vm.qubesvm.QubesVMTestsMixin,
+ qubes.tests.QubesTestCase,
+):
+ def setUp(self):
+ super(TC_00_DVMTemplateMixin, self).setUp()
+ self.app = TestApp()
+ self.app.save = mock.Mock()
+ self.app.pools["default"] = qubes.tests.vm.appvm.TestPool(
+ name="default"
+ )
+ self.app.pools["linux-kernel"] = qubes.tests.vm.appvm.TestPool(
+ name="linux-kernel"
+ )
+ self.app.vmm.offline_mode = True
+ self.template = self.app.add_new_vm(
+ qubes.vm.templatevm.TemplateVM, name="test-template", label="red"
+ )
+ self.appvm = self.app.add_new_vm(
+ qubes.vm.appvm.AppVM,
+ name="test-vm",
+ template=self.template,
+ label="red",
+ )
+ self.appvm.template_for_dispvms = True
+ self.app.domains[self.appvm.name] = self.appvm
+ self.app.domains[self.appvm] = self.appvm
+ self.addCleanup(self.cleanup_dispvm)
+ self.emitter = qubes.tests.TestEmitter()
+
+ def tearDown(self):
+ del self.emitter
+ super(TC_00_DVMTemplateMixin, self).tearDown()
+
+ def cleanup_dispvm(self):
+ if hasattr(self, "dispvm"):
+ self.dispvm.close()
+ del self.dispvm
+ self.template.close()
+ self.appvm.close()
+ del self.template
+ del self.appvm
+ self.app.domains.clear()
+ self.app.pools.clear()
+
+ async def mock_coro(self, *args, **kwargs):
+ pass
+
+ def test_010_dvm_preload_get_max(self):
+ self.appvm.features["qrexec"] = True
+ self.appvm.features["gui"] = False
+ self.appvm.features["supported-rpc.qubes.WaitForRunningSystem"] = True
+ cases = [
+ (None, 0),
+ (False, 0),
+ ("0", 0),
+ ("2", 2),
+ ("10000", 10000),
+ ]
+ self.assertEqual(self.appvm.get_feat_preload_max(), 0)
+ for value, expected_value in cases:
+ with self.subTest(value=value, expected_value=expected_value):
+ self.appvm.features["preload-dispvm-max"] = value
+ self.assertEqual(
+ self.appvm.get_feat_preload_max(), expected_value
+ )
+
+ self.appvm.features["qrexec"] = False
+ with self.assertRaises(qubes.exc.QubesValueError):
+ self.appvm.features["preload-dispvm-max"] = "1"
+ self.appvm.features["qrexec"] = True
+ self.appvm.features["gui"] = False
+ self.appvm.features["supported-rpc.qubes.WaitForRunningSystem"] = False
+ with self.assertRaises(qubes.exc.QubesValueError):
+ self.appvm.features["preload-dispvm-max"] = "1"
+ self.appvm.features["supported-rpc.qubes.WaitForRunningSystem"] = True
+ self.appvm.features["preload-dispvm-max"] = "1"
+ cases_invalid = ["a", "-1", "1 1"]
+ for value in cases_invalid:
+ with self.subTest(value=value):
+ with self.assertRaises(qubes.exc.QubesValueError):
+ self.appvm.features["preload-dispvm-max"] = value
+
+ @mock.patch("os.symlink")
+ @mock.patch("os.makedirs")
+ @mock.patch("qubes.storage.Storage")
+ def test_010_dvm_preload_get_list(
+ self, mock_storage, mock_makedirs, mock_symlink
+ ):
+ mock_storage.return_value.create.side_effect = self.mock_coro
+ mock_makedirs.return_value.create.side_effect = self.mock_coro
+ mock_symlink.return_value.create.side_effect = self.mock_coro
+ self.appvm.features["qrexec"] = True
+ self.appvm.features["gui"] = False
+ self.appvm.features["supported-rpc.qubes.WaitForRunningSystem"] = True
+ self.assertEqual(self.appvm.get_feat_preload(), [])
+ orig_getitem = self.app.domains.__getitem__
+ with mock.patch.object(
+ self.app, "domains", wraps=self.app.domains
+ ) as mock_domains:
+ mock_qube = mock.Mock()
+ mock_qube.template = self.appvm
+ mock_domains.configure_mock(
+ **{
+ "get_new_unused_dispid": mock.Mock(return_value=42),
+ "__contains__.return_value": True,
+ "__getitem__.side_effect": lambda key: (
+ mock_qube if key == "disp42" else orig_getitem(key)
+ ),
+ }
+ )
+ self.appvm.features["preload-dispvm-max"] = "0"
+ dispvm = self.loop.run_until_complete(
+ qubes.vm.dispvm.DispVM.from_appvm(self.appvm)
+ )
+ with self.assertRaises(qubes.exc.QubesValueError):
+ # over max
+ self.appvm.features["preload-dispvm"] = f"{dispvm.name}"
+ self.appvm.features["preload-dispvm-max"] = "2"
+ cases_invalid = [
+ f"{self.appvm}", # not derived from wanted appvm
+ f"{dispvm.name} {dispvm.name}", # duplicate
+ ]
+ for value in cases_invalid:
+ with self.subTest(value=value):
+ with self.assertRaises(qubes.exc.QubesValueError):
+ self.appvm.features["preload-dispvm"] = value
+
+ cases = [
+ (None, []),
+ (False, []),
+ ("", []),
+ (f"{dispvm.name}", [dispvm.name]),
+ ]
+ for value, expected_value in cases:
+ with self.subTest(value=value, expected_value=expected_value):
+ self.appvm.features["preload-dispvm"] = value
+ self.assertEqual(
+ self.appvm.get_feat_preload(), expected_value
+ )
+
+ def test_010_dvm_preload_can(self):
+ self.appvm.features["qrexec"] = True
+ self.appvm.features["gui"] = False
+ self.appvm.features["supported-rpc.qubes.WaitForRunningSystem"] = True
+ self.assertFalse(self.appvm.can_preload())
+ self.appvm.features["preload-dispvm-max"] = 1
+ cases = [
+ ("", "", False),
+ (0, "", False),
+ (1, "", True),
+ ]
+ for preload_max, preload_list, expected_value in cases:
+ with self.subTest(
+ preload_max=preload_max,
+ preload_list=preload_list,
+ expected_value=expected_value,
+ ):
+ self.appvm.features["preload-dispvm-max"] = preload_max
+ self.appvm.features["preload-dispvm"] = preload_list
+ self.assertEqual(self.appvm.can_preload(), expected_value)
diff --git a/qubes/tests/vm/qubesvm.py b/qubes/tests/vm/qubesvm.py
index 886c940fe..11e6fa2e0 100644
--- a/qubes/tests/vm/qubesvm.py
+++ b/qubes/tests/vm/qubesvm.py
@@ -2381,6 +2381,7 @@ def test_620_qdb_standalone(
"/qubes-vm-updateable": "True",
"/qubes-block-devices": "",
"/qubes-usb-devices": "",
+ "/qubes-gui-enabled": "False",
"/qubes-iptables": "reload",
"/qubes-iptables-error": "",
"/qubes-iptables-header": iptables_header,
@@ -2476,6 +2477,7 @@ def test_621_qdb_vm_with_network(
"/qubes-ip": "10.137.0.3",
"/qubes-netmask": "255.255.255.255",
"/qubes-gateway": "10.137.0.2",
+ "/qubes-gui-enabled": "False",
"/qubes-primary-dns": "10.139.1.1",
"/qubes-secondary-dns": "10.139.1.2",
"/connected-ips": "",
@@ -2604,6 +2606,7 @@ def test_622_qdb_guivm_keyboard_layout(
"/default-user": "user",
"/keyboard-layout": "fr++",
"/qubes-vm-type": "AppVM",
+ "/qubes-gui-enabled": "True",
"/qubes-gui-domain-xid": "{}".format(guivm.xid),
"/qubes-debug-mode": "0",
"/qubes-base-template": "test-inst-template",
@@ -2664,6 +2667,7 @@ def test_623_qdb_audiovm(self, mock_qubesdb, mock_urandom, mock_timezone):
"/qubes-vm-type": "AppVM",
"/qubes-audio-domain-xid": "{}".format(audiovm.xid),
"/qubes-debug-mode": "0",
+ "/qubes-gui-enabled": "False",
"/qubes-base-template": "test-inst-template",
"/qubes-timezone": "UTC",
"/qubes-random-seed": base64.b64encode(b"A" * 64),
@@ -2733,6 +2737,7 @@ def test_624_qdb_audiovm_change_to_new_and_none(
"/qubes-vm-type": "AppVM",
"/qubes-audio-domain-xid": "{}".format(audiovm.xid),
"/qubes-debug-mode": "0",
+ "/qubes-gui-enabled": "False",
"/qubes-base-template": "test-inst-template",
"/qubes-timezone": "UTC",
"/qubes-random-seed": base64.b64encode(b"A" * 64),
@@ -2836,6 +2841,7 @@ def test_626_qdb_keyboard_layout_change(
"/default-user": "user",
"/keyboard-layout": "fr++",
"/qubes-vm-type": "AppVM",
+ "/qubes-gui-enabled": "True",
"/qubes-gui-domain-xid": "{}".format(guivm.xid),
"/qubes-debug-mode": "0",
"/qubes-base-template": "test-inst-template",
diff --git a/qubes/vm/dispvm.py b/qubes/vm/dispvm.py
index 189373cb0..4a388e9e5 100644
--- a/qubes/vm/dispvm.py
+++ b/qubes/vm/dispvm.py
@@ -18,13 +18,15 @@
# License along with this library; if not, see .
#
-""" A disposable vm implementation """
+"""A disposable vm implementation"""
+import asyncio
import copy
+import subprocess
-import qubes.vm.qubesvm
-import qubes.vm.appvm
import qubes.config
+import qubes.vm.appvm
+import qubes.vm.qubesvm
def _setter_template(self, prop, value):
@@ -38,8 +40,111 @@ def _setter_template(self, prop, value):
return value
+def get_preload_templates(domains) -> list:
+ return [
+ qube
+ for qube in domains
+ if int(qube.features.get("preload-dispvm-max", 0) or 0) > 0
+ and qube.klass == "AppVM"
+ and getattr(qube, "template_for_dispvms", False)
+ ]
+
+
class DispVM(qubes.vm.qubesvm.QubesVM):
- """Disposable VM"""
+ """Disposable VM
+
+ Preloading
+ ----------
+ Preloaded disposables are started in the background and kept hidden from the
+ user when not in use. They are interrupted (paused or suspended, as
+ appropriate) and resumed (transparently) when a disposable qube is requested
+ by the user.
+
+ **Goals**:
+
+ - **Fast**: Usage must be always instantaneous from user perspective when
+ requesting the use of disposables. Pause/suspend must be skipped if qube
+ is requested before the interrupt can be performed.
+
+ - **Easy-to-use**: Preloading requires a single qube feature
+ (*preload-dispvm-max*), and its use must be transparent, indistinguishable
+ from working with normal (non-preloaded) unnamed disposable qubes.
+
+ - **Reliable**:
+
+ - Avoid race conditions: Marking a qube as preloaded or marking the
+ preloaded as used must be synchronous.
+
+ - Recovery from failed or incomplete preload: The system must attempt to
+ preload qubes even if previous preloading attempts failed due to errors,
+ qubesd restart or lack of available memory, regardless of whether
+ preloaded disposable qubes have been requested on this instance. If
+ current qube list is invalid, it must be cleaned up before being used.
+
+ - Avoid copy of invalid attributes: Qube operation (in particular cloning,
+ renaming or creating a standalone based on a template) must not result
+ in properties that are invalid on the target.
+
+ - Full start: Preloaded disposable must only be interrupted
+ (paused/suspended) or used after all basic services in it have been
+ started. Autostarted applications allow user interaction before they
+ should; that is a bug.
+
+ - **Prevents accidental tampering**:
+
+ - Preloaded qubes have the *internal* feature set when they are created.
+ This feature hides the qube from GUI tools and discourages user
+ tampering. It is unset when the qube is marked as used. Remember to
+ validate if all GUI applications correctly react to setting and removing
+ the *internal* feature (optionally, the *is_preload* property can be
+ helpful). GUI applications may react to *domain-add* before the
+ *internal* feature is set and the qube entry may briefly appear on some
+ GUI applications, that is a bug because features cannot be set before
+ that event.
+
+ - Preloaded qubes must be marked as used prior to being
+ unpaused/resumed, even if it was not requested. The goal of
+ pause/suspend in case of preloaded disposables is mostly detecting
+ whether a qube was used or not, and not managing resource consumption;
+ thus, even with abundant system resources, they should not be
+ unpaused/resumed without being requested.
+
+ **Features and properties relationship on stages**:
+
+ - Properties indicate the runtime stage of preloaded qubes and are
+ intentionally lost on qubesd restart.
+ - Features indicate that a preloaded qube has reached certain stage at any
+ qubesd cycle.
+ - Comparing the value of certain features and properties can indicate that
+ there were qubes being preloaded or requested but qubesd restarted between
+ the stages, interrupting the process. The only stage that should conserve
+ the preloaded qubes is a qube that has completed preloading but has not
+ been requested.
+
+ **Stages**:
+
+ - **Preload**: The qube is created and marked as preloaded. Qube is not
+ visible in GUI applications.
+
+ - **Startup**: Begins qube startup, start basic services in it and attempt
+ to interrupt (suspend/pause).
+
+ - **Request**: The qube is removed from the preload list. If *startup* has
+ not yet reached interrupt, the latter is skipped.
+
+ - **Used**: The qube is marked as used and may be unpaused/resumed (if
+ applicable). Only in this phase, GUI applications treat the qube as any
+ other unnamed disposable and the qube object is returned to the caller if
+ requested.
+
+ **Outstanding bugs**:
+
+ - GUI applications set to autostart can appear on the screen and be
+ interactive for a brief moment before the qube is allowed to be used
+ followed by a sudden freeze.
+ - Can't interrupt qubes before the GUI session has started if the qube's
+ usage will require a GUI (GUI daemon cannot handle an interrupted qube).
+ """
template = qubes.VMProperty(
"template",
@@ -112,6 +217,7 @@ class DispVM(qubes.vm.qubesvm.QubesVM):
def __init__(self, app, xml, *args, **kwargs):
self.volume_config = copy.deepcopy(self.default_volume_config)
template = kwargs.get("template", None)
+ self.preload_complete = asyncio.Event()
if xml is None:
assert template is not None
@@ -184,14 +290,130 @@ def __init__(self, app, xml, *args, **kwargs):
)
self.firewall.clone(template.firewall)
- self.features.update(template.features)
+ self.features.update(
+ [
+ (key, value)
+ for key, value in template.features.items()
+ if not key.startswith("preload-dispvm")
+ ]
+ )
self.tags.update(template.tags)
+ @property
+ def preload_requested(self):
+ if not hasattr(self, "_preload_requested"):
+ return None
+ return self._preload_requested
+
+ @preload_requested.setter
+ def preload_requested(self, value):
+ self._preload_requested = value
+ self.fire_event("property-reset:is_preload", name="is_preload")
+
+ @preload_requested.deleter
+ def preload_requested(self):
+ del self._preload_requested
+ self.fire_event("property-reset:is_preload", name="is_preload")
+
+ @qubes.stateless_property
+ def is_preload(self) -> bool:
+ """Returns True if qube is a preloaded disposable."""
+ appvm = self.template
+ preload_dispvm = appvm.get_feat_preload()
+ if self.name in preload_dispvm or self.preload_requested:
+ return True
+ return False
+
@qubes.events.handler("domain-load")
def on_domain_loaded(self, event):
"""When domain is loaded assert that this vm has a template.""" # pylint: disable=unused-argument
assert self.template
+ @qubes.events.handler("domain-start")
+ async def on_domain_started_dispvm(
+ self,
+ event,
+ **kwargs,
+ ): # pylint: disable=unused-argument
+ """
+ Awaits for basic services to be started on preloaded domains and
+ interrupts the domain if the qube has not been requested yet.
+ """
+ if not self.is_preload:
+ return
+ # TODO: pause is late for autostarted GUI applications
+ # https://github.com/QubesOS/qubes-issues/issues/9907
+ timeout = self.qrexec_timeout
+ gui = bool(self.guivm and self.features.get("gui", True))
+ service = "qubes.WaitForSession"
+ if not gui:
+ # https://github.com/QubesOS/qubes-issues/issues/9964
+ # service = "qubes.WaitForRunningSystem"
+ rpc = "qubes.WaitForRunningSystem"
+ path = "/run/qubes-rpc:/usr/local/etc/qubes-rpc:/etc/qubes-rpc"
+ service = '$(PATH="' + path + '" command -v ' + rpc + ")"
+ try:
+ self.log.info(
+ "Preload startup waiting '%s' with '%d' seconds timeout",
+ service,
+ timeout,
+ )
+ runner = self.run_service_for_stdio if gui else self.run_for_stdio
+ await asyncio.wait_for(
+ runner(
+ service,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ ),
+ timeout=timeout,
+ )
+ except asyncio.TimeoutError:
+ # TODO: if pause occurs before the GUI session starts (on boot
+ # before login manager), results in an unusable GUI for the qube:
+ # https://github.com/QubesOS/qubes-issues/issues/9940
+ raise qubes.exc.QubesException(
+ "Timed out Qrexec call to '%s' after '%d' seconds during "
+ "preload startup" % (service, timeout)
+ )
+ except (subprocess.CalledProcessError, qubes.exc.QubesException):
+ raise qubes.exc.QubesException(
+ "Error on Qrexec call to '%s' during preload startup" % service
+ )
+
+ if not self.preload_requested:
+ await self.pause()
+ self.log.info("Preloading finished")
+ self.features["preload-dispvm-completed"] = True
+ if not self.preload_requested:
+ self.features["preload-dispvm-in-progress"] = False
+ self.app.save()
+ self.preload_complete.set()
+
+ @qubes.events.handler("domain-paused")
+ def on_domain_paused(
+ self, event, **kwargs
+ ): # pylint: disable=unused-argument
+ """Log preloaded domains when paused."""
+ if self.is_preload:
+ self.log.info("Paused preloaded qube")
+
+ @qubes.events.handler("domain-pre-unpaused")
+ def on_domain_pre_unpaused(
+ self, event, **kwargs
+ ): # pylint: disable=unused-argument
+ """Mark preloaded domains as used before being unpaused."""
+ # Qube start triggers unpause via 'libvirt_domain.resume()'.
+ if self.is_preload and self.is_fully_usable():
+ self.log.info("Unpaused preloaded qube will be marked as used")
+ self.use_preload()
+
+ @qubes.events.handler("domain-shutdown")
+ async def on_domain_shutdown(
+ self, _event, **_kwargs
+ ): # pylint: disable=invalid-overridden-method
+ """Do auto cleanup if enabled"""
+ await self._auto_cleanup()
+
@qubes.events.handler("property-pre-reset:template")
def on_property_pre_reset_template(self, event, name, oldvalue=None):
"""Forbid deleting template of VM""" # pylint: disable=unused-argument
@@ -214,25 +436,13 @@ def on_property_set_template(self, event, name, newvalue, oldvalue=None):
""" # pylint: disable=unused-argument
qubes.vm.appvm.template_changed_update_storage(self)
- @qubes.events.handler("domain-shutdown")
- async def on_domain_shutdown(
- self, _event, **_kwargs
- ): # pylint: disable=invalid-overridden-method
- await self._auto_cleanup()
-
- async def _auto_cleanup(self):
- """Do auto cleanup if enabled"""
- if self.auto_cleanup and self in self.app.domains:
- del self.app.domains[self]
- await self.remove_from_disk()
- self.app.save()
-
@classmethod
- async def from_appvm(cls, appvm, **kwargs):
+ async def from_appvm(cls, appvm, preload=False, **kwargs):
"""Create a new instance from given AppVM
:param qubes.vm.appvm.AppVM appvm: template from which the VM should \
be created
+ :param bool preload: Whether to preload a disposable
:returns: new disposable vm
*kwargs* are passed to the newly created VM
@@ -243,7 +453,8 @@ async def from_appvm(cls, appvm, **kwargs):
>>> dispvm.cleanup()
This method modifies :file:`qubes.xml` file.
- The qube returned is not started.
+ The qube returned is not started unless the ``preload`` argument is
+ ``True``.
"""
if not getattr(appvm, "template_for_dispvms", False):
raise qubes.exc.QubesException(
@@ -251,32 +462,142 @@ async def from_appvm(cls, appvm, **kwargs):
"template_for_dispvms=False"
)
app = appvm.app
+
+ if preload and not appvm.can_preload():
+ # Using an exception clutters the log when 'used' event is
+ # simultaneously called.
+ appvm.log.warning(
+ "Failed to create preloaded disposable, limit reached"
+ )
+ return
+
+ if not preload and appvm.can_preload():
+ # Not necessary to await for this event as its intent is to fill
+ # gaps and not relevant for this run.
+ asyncio.ensure_future(
+ appvm.fire_event_async("domain-preload-dispvm-start")
+ )
+
+ if not preload and (preload_dispvm := appvm.get_feat_preload()):
+ dispvm = app.domains[preload_dispvm[0]]
+ dispvm.log.info("Requesting preloaded qube")
+ # The property "preload_requested" offloads "preload-dispvm" and
+ # thus avoids various race conditions:
+ # - Decreasing maximum feature will not remove the qube;
+ # - Another request to this function will not return the same qube.
+ dispvm.features["preload-dispvm-in-progress"] = True
+ appvm.remove_preload_from_list([dispvm.name])
+ dispvm.preload_requested = True
+ app.save()
+ timeout = int(dispvm.qrexec_timeout * 1.2)
+ try:
+ if not dispvm.features.get("preload-dispvm-completed", False):
+ dispvm.log.info(
+ "Waiting for preload completion with '%s' seconds timeout",
+ timeout,
+ )
+ async with asyncio.timeout(timeout):
+ await dispvm.preload_complete.wait()
+ if dispvm.is_paused():
+ await dispvm.unpause()
+ else:
+ dispvm.use_preload()
+ app.save()
+ return dispvm
+ except asyncio.TimeoutError:
+ dispvm.log.warning(
+ "Requested preloaded qube but failed to finish preloading "
+ "after '%d' seconds, falling back to normal disposable",
+ int(timeout),
+ )
+ asyncio.ensure_future(dispvm.cleanup())
+
dispvm = app.add_new_vm(
cls, template=appvm, auto_cleanup=True, **kwargs
)
+
+ if preload:
+ dispvm.log.info("Marking preloaded qube")
+ dispvm.features["preload-dispvm-in-progress"] = True
+ preload_dispvm = appvm.get_feat_preload()
+ preload_dispvm.append(dispvm.name)
+ appvm.features["preload-dispvm"] = " ".join(preload_dispvm or [])
+ dispvm.features["internal"] = True
+ app.save()
await dispvm.create_on_disk()
+ if preload:
+ await dispvm.start()
app.save()
return dispvm
+ def use_preload(self):
+ """
+ Marks preloaded DispVM as used (tainted).
+
+ :return:
+ """
+ if not self.is_preload:
+ raise qubes.exc.QubesException("DispVM is not preloaded")
+ appvm = self.template
+ if self.preload_requested:
+ self.log.info("Using preloaded qube")
+ if not appvm.features.get("internal", None):
+ del self.features["internal"]
+ self.preload_requested = None
+ del self.features["preload-dispvm-in-progress"]
+ else:
+ # Happens when unpause/resume occurs without qube being requested.
+ self.log.warning("Using a preloaded qube before requesting it")
+ if not appvm.features.get("internal", None):
+ del self.features["internal"]
+ appvm.remove_preload_from_list([self.name])
+ self.features["preload-dispvm-in-progress"] = False
+ self.app.save()
+ asyncio.ensure_future(
+ appvm.fire_event_async("domain-preload-dispvm-used", dispvm=self)
+ )
+
+ async def _bare_cleanup(self):
+ """Cleanup bare DispVM objects."""
+ if self in self.app.domains:
+ del self.app.domains[self]
+ await self.remove_from_disk()
+ self.app.save()
+
+ def _preload_cleanup(self):
+ """Cleanup preload from list"""
+ if self.name in self.template.get_feat_preload():
+ self.log.info("Automatic cleanup removes qube from preload list")
+ self.template.remove_preload_from_list([self.name])
+
async def cleanup(self):
"""Clean up after the DispVM
This stops the disposable qube and removes it from the store.
This method modifies :file:`qubes.xml` file.
"""
+ if self not in self.app.domains:
+ return
try:
await self.kill()
except qubes.exc.QubesVMNotStartedError:
pass
- # if auto_cleanup is set, this will be done automatically
+ # This will be done automatically if event 'domain-shutdown' is
+ # triggered and 'auto_cleanup' evaluates to 'True'.
if not self.auto_cleanup:
- del self.app.domains[self]
- await self.remove_from_disk()
- self.app.save()
+ self._preload_cleanup()
+ if self in self.app.domains:
+ await self._bare_cleanup()
+
+ async def _auto_cleanup(self):
+ """Do auto cleanup if enabled"""
+ if self.auto_cleanup:
+ self._preload_cleanup()
+ if self in self.app.domains:
+ await self._bare_cleanup()
async def start(self, **kwargs):
# pylint: disable=arguments-differ
-
try:
# sanity check, if template_for_dispvm got changed in the meantime
if not self.template.template_for_dispvms:
@@ -284,10 +605,13 @@ async def start(self, **kwargs):
"template for DispVM ({}) needs to have "
"template_for_dispvms=True".format(self.template.name)
)
-
await super().start(**kwargs)
except:
# Cleanup also on failed startup
+ try:
+ await self.kill()
+ except qubes.exc.QubesVMNotStartedError:
+ pass
await self._auto_cleanup()
raise
diff --git a/qubes/vm/mix/dvmtemplate.py b/qubes/vm/mix/dvmtemplate.py
index 6f8330b6d..c6cc1f908 100644
--- a/qubes/vm/mix/dvmtemplate.py
+++ b/qubes/vm/mix/dvmtemplate.py
@@ -18,7 +18,12 @@
# You should have received a copy of the GNU General Public License along
# with this program; if not, see .
+import asyncio
+from typing import Optional
+
+import qubes.config
import qubes.events
+import qubes.vm.dispvm
class DVMTemplateMixin(qubes.events.Emitter):
@@ -34,6 +39,157 @@ class DVMTemplateMixin(qubes.events.Emitter):
doc="Should this VM be allowed to start as Disposable VM",
)
+ @property
+ def dispvms(self):
+ """Returns a generator containing all Disposable VMs based on the
+ current AppVM.
+ """
+ for vm in self.app.domains:
+ if getattr(vm, "template", None) == self:
+ yield vm
+
+ @qubes.events.handler("domain-load")
+ def on_domain_loaded(self, event): # pylint: disable=unused-argument
+ """Cleanup invalid preloaded qubes when domain is loaded."""
+ changes = False
+ # Preloading began and host rebooted and autostart event didn't run yet.
+ old_preload = self.get_feat_preload()
+ clean_preload = old_preload.copy()
+ for unwanted_disp in old_preload:
+ if unwanted_disp not in self.app.domains:
+ clean_preload.remove(unwanted_disp)
+ if absent := list(set(old_preload) - set(clean_preload)):
+ changes = True
+ self.log.info(
+ "Removing absent preloaded qube(s): '%s'",
+ ", ".join(absent),
+ )
+ self.features["preload-dispvm"] = " ".join(clean_preload or [])
+
+ # Preloading was in progress (either preloading but not completed or
+ # requested but not delivered) and qubesd stopped.
+ preload_in_progress = [
+ qube
+ for qube in self.dispvms
+ if qube.features.get("preload-dispvm-in-progress", False)
+ ]
+ if preload_in_progress:
+ changes = True
+ self.log.info(
+ "Removing in progress preloaded qube(s): '%s'",
+ ", ".join(map(str, preload_in_progress)),
+ )
+ self.remove_preload_from_list(
+ [qube.name for qube in preload_in_progress]
+ )
+ for dispvm in preload_in_progress:
+ asyncio.ensure_future(dispvm.cleanup())
+ if changes:
+ self.app.save()
+
+ @qubes.events.handler("domain-feature-delete:preload-dispvm-max")
+ def on_feature_delete_preload_dispvm_max(
+ self, event, feature
+ ): # pylint: disable=unused-argument
+ self.remove_preload_excess(0)
+
+ @qubes.events.handler("domain-feature-pre-set:preload-dispvm-max")
+ def on_feature_pre_set_preload_dispvm_max(
+ self, event, feature, value, oldvalue=None
+ ): # pylint: disable=unused-argument
+ if not self.features.check_with_template("qrexec", None):
+ raise qubes.exc.QubesValueError("Qube does not support qrexec")
+
+ gui = bool(self.guivm and self.features.get("gui", True))
+ if gui:
+ service = "qubes.WaitForSession"
+ else:
+ service = "qubes.WaitForRunningSystem"
+ supported_service = "supported-rpc." + service
+ if not self.features.check_with_template(supported_service, False):
+ raise qubes.exc.QubesValueError(
+ "Qube GUI is '%s' and does not support the RPC '%s'"
+ % (gui, service)
+ )
+
+ value = value or "0"
+ if not value.isdigit():
+ raise qubes.exc.QubesValueError(
+ "Invalid preload-dispvm-max value: not a digit"
+ )
+
+ @qubes.events.handler("domain-feature-set:preload-dispvm-max")
+ def on_feature_set_preload_dispvm_max(
+ self, event, feature, value, oldvalue=None
+ ): # pylint: disable=unused-argument
+ asyncio.ensure_future(
+ self.fire_event_async("domain-preload-dispvm-start")
+ )
+
+ @qubes.events.handler("domain-feature-pre-set:preload-dispvm")
+ def on_feature_pre_set_preload_dispvm(
+ self, event, feature, value, oldvalue=None
+ ): # pylint: disable=unused-argument
+ preload_dispvm_max = self.get_feat_preload_max()
+ old_list = oldvalue.split(" ") if oldvalue else []
+ new_list = value.split(" ") if value else []
+ old_len, new_len = len(old_list), len(new_list)
+ error_prefix = "Invalid preload-dispvm value:"
+
+ if sorted(new_list) == sorted(old_list):
+ return
+ if not new_list:
+ return
+
+ # New value can be bigger than maximum permitted as long as it does
+ # not exceed its old value.
+ if new_len > max(preload_dispvm_max, old_len):
+ raise qubes.exc.QubesValueError(
+ f"{error_prefix} can't increment: qube count ({new_len}) is "
+ f"bigger than both old count ({old_len}) and "
+ f"preload-dispvm-max ({preload_dispvm_max})"
+ )
+
+ if new_len != len(set(new_list)):
+ duplicates = [
+ qube for qube in set(new_list) if new_list.count(qube) > 1
+ ]
+ raise qubes.exc.QubesValueError(
+ f"{error_prefix} contain duplicates: '{', '.join(duplicates)}'"
+ )
+
+ new_list_diff = list(set(new_list) - set(old_list))
+ nonqube = [
+ qube for qube in new_list_diff if qube not in self.app.domains
+ ]
+ if nonqube:
+ raise qubes.exc.QubesValueError(
+ f"{error_prefix} non qube(s): '{', '.join(nonqube)}'"
+ )
+
+ nonderived = [
+ qube
+ for qube in new_list_diff
+ if getattr(self.app.domains[qube], "template", None) != self
+ ]
+ if nonderived:
+ raise qubes.exc.QubesValueError(
+ f"{error_prefix} qube(s) not based on {self.name}: "
+ f"'{', '.join(nonderived)}'"
+ )
+
+ @qubes.events.handler("domain-feature-set:preload-dispvm")
+ def on_feature_set_preload_dispvm(
+ self, event, feature, value, oldvalue=None
+ ): # pylint: disable=unused-argument
+ value = value.split(" ") if value else []
+ oldvalue = oldvalue.split(" ") if oldvalue else []
+ exclusive = list(set(oldvalue).symmetric_difference(value))
+ for qube in exclusive:
+ if qube in self.app.domains:
+ qube = self.app.domains[qube]
+ qube.fire_event("property-reset:is_preload", name="is_preload")
+
@qubes.events.handler("property-pre-set:template_for_dispvms")
def __on_pre_set_dvmtemplate(self, event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument
@@ -68,11 +224,123 @@ def __on_property_set_template(self, event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument
pass
- @property
- def dispvms(self):
- """Returns a generator containing all Disposable VMs based on the
- current AppVM.
+ @qubes.events.handler(
+ "domain-preload-dispvm-used",
+ "domain-preload-dispvm-autostart",
+ "domain-preload-dispvm-start",
+ )
+ async def on_domain_preload_dispvm_used(
+ self, event, **kwargs
+ ): # pylint: disable=unused-argument
"""
- for vm in self.app.domains:
- if getattr(vm, "template", None) == self:
- yield vm
+ Preloads on vacancy and offloads on excess. If the event suffix is
+ ``autostart``, the preloaded list is emptied before preloading.
+
+ :param event: event which was fired
+ """
+ event = event.removeprefix("domain-preload-dispvm-")
+ event_log = "Received preload event '%s'" % str(event)
+ if event == "used":
+ event_log += " for dispvm '%s'" % str(kwargs.get("dispvm"))
+ self.log.info(event_log)
+
+ if event == "autostart":
+ self.remove_preload_excess(0)
+ elif not self.can_preload():
+ self.remove_preload_excess()
+ # Absent qubes might be removed above.
+ if not self.can_preload():
+ return
+ max_preload = self.get_feat_preload_max()
+ want_preload = max_preload - len(self.get_feat_preload())
+ if want_preload <= 0:
+ self.log.info("Not preloading due to limit hit")
+ return
+
+ avail_mem_file = qubes.config.qmemman_avail_mem_file
+ available_memory = None
+ try:
+ with open(avail_mem_file, "r", encoding="ascii") as file:
+ available_memory = int(file.read())
+ except (FileNotFoundError, ValueError):
+ can_preload = want_preload
+ if available_memory is not None:
+ memory = getattr(self, "memory", 0) * 1024 * 1024
+ unrestricted_preload = int(available_memory / memory) if memory else 0
+ can_preload = min(unrestricted_preload, want_preload)
+ if skip_preload := want_preload - can_preload:
+ self.log.warning(
+ "Not preloading '%d' disposable(s) due to insufficient "
+ "memory",
+ skip_preload,
+ )
+ if can_preload == 0:
+ # The gap is filled when consuming a preloaded qube or
+ # requesting a disposable.
+ return
+
+ self.log.info("Preloading '%d' qube(s)", can_preload)
+ async with asyncio.TaskGroup() as task_group:
+ for _ in range(can_preload):
+ task_group.create_task(
+ qubes.vm.dispvm.DispVM.from_appvm(self, preload=True)
+ )
+
+ def get_feat_preload(self) -> list[str]:
+ """Get the ``preload-dispvm`` feature as a list."""
+ feature = "preload-dispvm"
+ assert isinstance(self, qubes.vm.BaseVM)
+ value = self.features.get(feature, "")
+ return value.split(" ") if value else []
+
+ def get_feat_preload_max(self) -> int:
+ """Get the ``preload-dispvm-max`` feature as an integer."""
+ feature = "preload-dispvm-max"
+ assert isinstance(self, qubes.vm.BaseVM)
+ value = self.features.get(feature, 0)
+ return int(value) if value else 0
+
+ def can_preload(self) -> bool:
+ """Returns ``True`` if there is preload vacancy."""
+ preload_dispvm_max = self.get_feat_preload_max()
+ preload_dispvm = self.get_feat_preload()
+ if len(preload_dispvm) < preload_dispvm_max:
+ return True
+ return False
+
+ def remove_preload_from_list(self, disposables: list[str]) -> None:
+ """Removes the given disposable qubes from the preload list.
+
+ :param disposables: disposable names to remove from the preloaded list.
+ """
+ assert isinstance(self, qubes.vm.BaseVM)
+ old_preload = self.get_feat_preload()
+ preload_dispvm = [
+ qube for qube in old_preload if qube not in disposables
+ ]
+ if dispose := list(set(old_preload) - set(preload_dispvm)):
+ self.log.info(
+ "Removing qube(s) from preloaded list: '%s'",
+ ", ".join(dispose),
+ )
+ self.features["preload-dispvm"] = " ".join(preload_dispvm or [])
+
+ def remove_preload_excess(self, max_preload: Optional[int] = None) -> None:
+ """Removes preloaded qubes that exceed the maximum."""
+ assert isinstance(self, qubes.vm.BaseVM)
+ if max_preload is None:
+ max_preload = self.get_feat_preload_max()
+ old_preload = self.get_feat_preload()
+ if not old_preload:
+ return
+ new_preload = old_preload[:max_preload]
+ if excess := old_preload[max_preload:]:
+ self.log.info(
+ "Removing excess qube(s) from preloaded list: '%s'",
+ ", ".join(excess),
+ )
+ self.features["preload-dispvm"] = " ".join(new_preload or [])
+ for unwanted_disp in excess:
+ if unwanted_disp in self.app.domains:
+ dispvm = self.app.domains[unwanted_disp]
+ asyncio.ensure_future(dispvm.cleanup())
diff --git a/qubes/vm/qubesvm.py b/qubes/vm/qubesvm.py
index fea53b797..9e05ce37d 100644
--- a/qubes/vm/qubesvm.py
+++ b/qubes/vm/qubesvm.py
@@ -406,6 +406,16 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.LocalVM):
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-start-failed'``)
+ .. event:: domain-pre-paused (subject, event)
+
+ Fired at the beginning of :py:meth:`pause` method and before
+ ``libvirt_domain.suspend()``.
+
+ Handler for this event may be asynchronous.
+
+ :param subject: Event emitter (the qube object)
+ :param event: Event name (``'domain-pre-paused'``)
+
.. event:: domain-paused (subject, event)
Fired when the domain has been paused.
@@ -413,6 +423,16 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.LocalVM):
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-paused'``)
+ .. event:: domain-pre-unpaused (subject, event)
+
+ Fired at the beginning of :py:meth:`unpause` method and before
+ ``libvirt_domain.resume()``.
+
+ Handler for this event may be asynchronous.
+
+ :param subject: Event emitter (the qube object)
+ :param event: Event name (``'domain-pre-unpaused'``)
+
.. event:: domain-unpaused (subject, event)
Fired when the domain has been unpaused.
@@ -736,6 +756,7 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.LocalVM):
"memory",
type=int,
setter=_setter_positive_int,
+ # fmt: off
default=_default_with_template(
"memory",
lambda self: qubes.config.defaults[
@@ -746,6 +767,7 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.LocalVM):
)
],
),
+ # fmt: on
doc="Memory currently available for this VM. TemplateBasedVMs use its "
"template's value by default.",
)
@@ -1365,7 +1387,7 @@ async def start(
f"Qube start is prohibited. Rationale: {prohibit_rationale}"
)
- self.log.info("Starting {}".format(self.name))
+ self.log.info("Starting qube {}".format(self.name))
try:
await self.fire_event_async(
@@ -1484,7 +1506,7 @@ async def start(
"domain-spawn", start_guid=start_guid
)
- self.log.info("Setting Qubes DB info for the VM")
+ self.log.info("Setting Qubes DB info for the qube")
await self.start_qubesdb()
if self.untrusted_qdb is None:
# this can happen if vm.is_running() is False
@@ -1494,7 +1516,10 @@ async def start(
self.create_qdb_entries()
self.start_qdb_watch()
- self.log.warning("Activating the {} VM".format(self.name))
+ self.log.info("Activating qube")
+ await self.fire_event_async(
+ "domain-pre-unpaused", pre_event=True
+ )
self.libvirt_domain.resume()
if (
@@ -1583,9 +1608,7 @@ async def on_domain_stopped(self, _event, **_kwargs):
try:
await self.storage.stop()
except qubes.exc.StoragePoolException:
- self.log.exception(
- "Failed to stop storage for domain %s", self.name
- )
+ self.log.exception("Failed to stop storage")
self._qdb_connection = None
self.fire_event("property-reset:xid", name="xid")
self.fire_event("property-reset:stubdom_xid", name="stubdom_xid")
@@ -1678,17 +1701,15 @@ async def suspend(self):
)
except subprocess.CalledProcessError as e:
self.log.warning(
- "qubes.SuspendPre for %s failed with %d (stderr: %s), "
- "suspending anyway",
- self.name,
+ "qubes.SuspendPre failed with %d (stderr: %s), suspending "
+ "anyway",
e.returncode,
qubes.utils.sanitize_stderr_for_log(e.stderr),
)
except asyncio.TimeoutError:
self.log.warning(
- "qubes.SuspendPre for %s timed out after %d seconds, "
- "suspending anyway",
- self.name,
+ "qubes.SuspendPre timed out after %d seconds, suspending "
+ "anyway",
qubes.config.suspend_timeout,
)
try:
@@ -1698,9 +1719,10 @@ async def suspend(self):
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_OPERATION_UNSUPPORTED:
# OS inside doesn't support full suspend, just pause it
+ await self.fire_event_async("domain-pre-paused", pre_event=True)
self.libvirt_domain.suspend()
else:
- self.log.warning("Failed to suspend '%s'", self.name)
+ self.log.warning("Failed to suspend qube")
raise
return self
@@ -1711,6 +1733,7 @@ async def pause(self):
if not self.is_running():
raise qubes.exc.QubesVMNotRunningError(self)
+ await self.fire_event_async("domain-pre-paused", pre_event=True)
self.libvirt_domain.suspend()
return self
@@ -1736,15 +1759,13 @@ async def resume(self):
)
except subprocess.CalledProcessError as e:
self.log.warning(
- "qubes.SuspendPost for %s failed with %d (stderr: %s)",
- self.name,
+ "qubes.SuspendPost failed with %d (stderr: %s)",
e.returncode,
qubes.utils.sanitize_stderr_for_log(e.stderr),
)
except asyncio.TimeoutError:
self.log.warning(
- "qubes.SuspendPost for %s timed out after %d seconds",
- self.name,
+ "qubes.SuspendPost timed out after %d seconds",
qubes.config.suspend_timeout,
)
else:
@@ -1757,6 +1778,7 @@ async def unpause(self):
if not self.is_paused():
raise qubes.exc.QubesVMNotPausedError(self)
+ await self.fire_event_async("domain-pre-unpaused", pre_event=True)
self.libvirt_domain.resume()
return self
@@ -2181,7 +2203,7 @@ async def create_on_disk(self, pool=None, pools=None):
os.rmdir(self.dir_path)
except: # pylint: disable=bare-except
self.log.exception(
- "failed to cleanup {} after failed VM "
+ "failed to cleanup {} after failed qube "
"creation".format(self.dir_path)
)
raise
diff --git a/rpm_spec/core-dom0.spec.in b/rpm_spec/core-dom0.spec.in
index 76aa79a69..26b96bd51 100644
--- a/rpm_spec/core-dom0.spec.in
+++ b/rpm_spec/core-dom0.spec.in
@@ -368,6 +368,7 @@ done
/usr/bin/qubes-*
/usr/bin/qmemmand
/usr/bin/qubesd*
+/etc/xdg/autostart/qubes-preload-dispvm.desktop
%{_mandir}/man1/qubes*.1*
@@ -507,6 +508,7 @@ done
%dir %{python3_sitelib}/qubes/tests/vm/mix/__pycache__
%{python3_sitelib}/qubes/tests/vm/mix/__pycache__/*
%{python3_sitelib}/qubes/tests/vm/mix/__init__.py
+%{python3_sitelib}/qubes/tests/vm/mix/dvmtemplate.py
%{python3_sitelib}/qubes/tests/vm/mix/net.py
%dir %{python3_sitelib}/qubes/tests/tools
@@ -555,6 +557,7 @@ done
%{python3_sitelib}/qubes/qmemman/domainstate.py
%{python3_sitelib}/qubes/qmemman/systemstate.py
+/usr/lib/qubes/preload-dispvm
/usr/lib/qubes/cleanup-dispvms
/usr/lib/qubes/fix-dir-perms.sh
/usr/lib/qubes/startup-misc.sh
@@ -565,6 +568,7 @@ done
%{_unitdir}/qubes-qmemman.service
%{_unitdir}/qubes-vm@.service
%{_unitdir}/qubesd.service
+%{_unitdir}/qubes-preload-dispvm.service
%attr(2770,root,qubes) %dir /var/lib/qubes
%attr(2770,root,qubes) %dir /var/lib/qubes/vm-templates
%attr(2770,root,qubes) %dir /var/lib/qubes/appvms
diff --git a/run-tests b/run-tests
index 5284e1c94..0ea4980c0 100755
--- a/run-tests
+++ b/run-tests
@@ -7,11 +7,25 @@ install_rpm_deps () {
local applications
applications=(lvm2 python3-docutils python3-pyyaml python3-jinja2
python3-lxml btrfs-progs vim-common python3-coverage python3-inotify cryptsetup)
+
+ if test -f /etc/qubes-release; then
+ sudo qubes-dom0-update "${applications[@]}" || :
+ return
+ fi
rpm -q --quiet "${applications[@]}" ||
sudo dnf install "${applications[@]}" ||
: # we don’t actually care if this succeeds
}
+install_apt_deps(){
+ local applications
+ applications=(lvm2 python3-docutils python3-yaml python3-jinja2
+ python3-lxml btrfs-progs python3-coverage python3-inotify
+ python3-libvirt cryptsetup)
+ sudo apt --no-install-recommends -- install "${applications[@]}" ||
+ : # we don’t actually care if this succeeds
+}
if { command -pv rpm && command -pv dnf; }>/dev/null; then install_rpm_deps; fi
+if { command -pv dpkg && command -pv apt; }>/dev/null; then install_apt_deps; fi
CLEANUP_LVM=
name=$(dirname "$0")
if sudo --non-interactive "$name/ci/lvm-manage" setup-lvm vg$$/pool; then
@@ -22,9 +36,12 @@ fi
: "${PYTHON:=python3}"
: "${TESTPYTHONPATH:=test-packages}"
-if [ -d ../core-qrexec/qrexec ]; then
- PYTHONPATH=${PYTHONPATH+"$PYTHONPATH:"}:../core-qrexec
-fi
+for dir in ../core-qrexec/qrexec ../qubes-core-qrexec/qrexec; do
+ if [ -d "$dir" ]; then
+ PYTHONPATH="${PYTHONPATH+"$PYTHONPATH:"}:$dir"
+ fi
+done
+unset dir
PYTHONPATH=${TESTPYTHONPATH}${PYTHONPATH+":${PYTHONPATH}"}
export PYTHONPATH
@@ -33,6 +50,6 @@ export PYTHONPATH
"${PYTHON}" -m coverage run --rcfile=ci/coveragerc -m qubes.tests.run "$@"
retcode=$?
if [ -n "$CLEANUP_LVM" ]; then
- sudo --non-interactive $(dirname "$0")/ci/lvm-manage cleanup-lvm "$DEFAULT_LVM_POOL"
+ sudo --non-interactive "$(dirname "$0")/ci/lvm-manage" cleanup-lvm "$DEFAULT_LVM_POOL"
fi
exit $retcode
diff --git a/test-packages/libvirt.py b/test-packages/libvirt.py
index d68f5eb16..daa655fe4 100644
--- a/test-packages/libvirt.py
+++ b/test-packages/libvirt.py
@@ -24,6 +24,7 @@ def openReadOnly(*args, **kwargs):
def registerErrorHandler(f, ctx):
pass
+VIR_DOMAIN_START_PAUSED = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_PAUSED = 3