-rw-r--r--  .gitlab-ci.yml                       |  92
-rw-r--r--  archinstall/__init__.py              |   4
-rw-r--r--  archinstall/lib/disk.py              |  19
-rw-r--r--  archinstall/lib/general.py           |   4
-rw-r--r--  archinstall/lib/hardware.py          |  61
-rw-r--r--  archinstall/lib/installer.py         |  11
-rw-r--r--  archinstall/lib/networking.py        |   2
-rw-r--r--  archinstall/lib/profiles.py          |  12
-rw-r--r--  archinstall/lib/user_interaction.py  |   4
-rw-r--r--  docs/conf.py                         |   2
-rw-r--r--  examples/guided.py                   |   9
-rw-r--r--  profiles/cutefish.py                 |  42
12 files changed, 240 insertions(+), 22 deletions(-)
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 00000000..ca54c552
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,92 @@
+# This file contains GitLab CI/CD configuration for the ArchInstall project.
+# It defines several jobs that get run when a new commit is made, and is comparable to the GitHub workflows.
+# There is an expectation that a runner exists that has the --privileged flag enabled for the build ISO job to run correctly.
+# These jobs should leverage the same tag as that runner. If necessary, change the tag from 'docker' to the one it uses.
+# All jobs will be run in the official archlinux container image, so we will declare that here.
+
+image: archlinux:latest
+
+# This can be used to handle common actions. In this case, we do a pacman -Sy to make sure repos are ready to use.
+before_script:
+ - pacman -Sy
+
+stages:
+ - lint
+ - test
+ - build
+ - publish
+
+mypy:
+ stage: lint
+ tags:
+ - docker
+ script:
+ - pacman --noconfirm -Syu python mypy
+ - mypy . --ignore-missing-imports || exit 0
+
+flake8:
+ stage: lint
+ tags:
+ - docker
+ script:
+ - pacman --noconfirm -Syu python python-pip
+ - python -m pip install --upgrade pip
+ - pip install flake8
+ - flake8 . --count --select=E9,F63,F7 --show-source --statistics
+ - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+
+# We currently do not have unit tests implemented but this stage is written in anticipation of their future usage.
+# When a stage name is preceded by a '.' it's treated as "disabled" by GitLab and is not executed, so it's fine for it to be declared.
+.pytest:
+ stage: test
+ tags:
+ - docker
+ script:
+ - pacman --noconfirm -Syu python python-pip
+ - python -m pip install --upgrade pip
+ - pip install pytest
+ - pytest
+
+# This stage might fail with exit code 137 on a shared runner. This is probably due to the CPU/memory consumption needed to run the build.
+build_iso:
+ stage: build
+ tags:
+ - docker
+ script:
+ - pwd
+ - find .
+ - cat /etc/os-release
+ - mkdir -p /tmp/archlive/airootfs/root/archinstall-git; cp -r . /tmp/archlive/airootfs/root/archinstall-git
+ - echo "pip uninstall archinstall -y; cd archinstall-git; python setup.py install" > /tmp/archlive/airootfs/root/.zprofile
+ - echo "echo \"This is an unofficial ISO for development and testing of archinstall. No support will be provided.\"" >> /tmp/archlive/airootfs/root/.zprofile
+ - echo "echo \"This ISO was built from Git SHA $CI_COMMIT_SHA\"" >> /tmp/archlive/airootfs/root/.zprofile
+ - echo "echo \"Type archinstall to launch the installer.\"" >> /tmp/archlive/airootfs/root/.zprofile
+ - cat /tmp/archlive/airootfs/root/.zprofile
+ - pacman --noconfirm -S git archiso
+ - cp -r /usr/share/archiso/configs/releng/* /tmp/archlive
+ - echo -e "git\npython\npython-pip\npython-setuptools" >> /tmp/archlive/packages.x86_64
+ - find /tmp/archlive
+ - cd /tmp/archlive; mkarchiso -v -w work/ -o out/ ./
+ artifacts:
+ name: "Arch Live ISO"
+ paths:
+ - /tmp/archlive/out/*.iso
+ expire_in: 1 week
+
+## This job only runs when a tag is created on the master branch. This is because we do not want to try to publish to PyPI every time we commit.
+## The following CI/CD variables need to be set to the PyPI username and password in the GitLab project's settings for this stage to work.
+# * FLIT_USERNAME
+# * FLIT_PASSWORD
+publish_pypi:
+ stage: publish
+ tags:
+ - docker
+ script:
+ - pacman --noconfirm -S python python-pip
+ - python -m pip install --upgrade pip
+ - pip install setuptools wheel flit
+ - flit
+ only:
+ - tags
+ except:
+ - branches
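
For readability, the chain of echo redirections in the build_iso job assembles a root .zprofile on the live ISO. Since the outer quotes are double quotes, the runner expands $CI_COMMIT_SHA when the job runs, so the generated file ends up looking roughly like this (SHA shown as a placeholder):

    pip uninstall archinstall -y; cd archinstall-git; python setup.py install
    echo "This is an unofficial ISO for development and testing of archinstall. No support will be provided."
    echo "This ISO was built from Git SHA <commit SHA substituted at build time>"
    echo "Type archinstall to launch the installer."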
diff --git a/archinstall/__init__.py b/archinstall/__init__.py
index fcd9a706..0b799d5b 100644
--- a/archinstall/__init__.py
+++ b/archinstall/__init__.py
@@ -61,6 +61,10 @@ def initialize_arguments():
arguments = initialize_arguments()
+storage['arguments'] = arguments
+if arguments.get('debug'):
+ log(f"Warning: --debug mode will write certain credentials to {storage['LOG_PATH']}/{storage['LOG_FILE']}!", fg="red", level=logging.WARNING)
+
from .lib.plugins import plugins, load_plugin # This initiates the plugin loading ceremony
if arguments.get('plugin', None):
diff --git a/archinstall/lib/disk.py b/archinstall/lib/disk.py
index f4936d73..de39bafd 100644
--- a/archinstall/lib/disk.py
+++ b/archinstall/lib/disk.py
@@ -254,7 +254,8 @@ class Partition:
return None
def has_content(self):
- if not get_filesystem_type(self.path):
+ fs_type = get_filesystem_type(self.path)
+ if not fs_type or "swap" in fs_type:
return False
temporary_mountpoint = '/tmp/' + hashlib.md5(bytes(f"{time.time()}", 'UTF-8') + os.urandom(12)).hexdigest()
@@ -447,16 +448,16 @@ class Filesystem:
if self.blockdevice.keep_partitions is False:
log(f'Wiping {self.blockdevice} by using partition format {self.mode}', level=logging.DEBUG)
if self.mode == GPT:
- if self.raw_parted(f'{self.blockdevice.device} mklabel gpt').exit_code == 0:
+ if self.parted_mklabel(self.blockdevice.device, "gpt"):
self.blockdevice.flush_cache()
return self
else:
- raise DiskError('Problem setting the partition format to GPT:', f'/usr/bin/parted -s {self.blockdevice.device} mklabel gpt')
+ raise DiskError('Problem setting the disk label type to GPT:', f'/usr/bin/parted -s {self.blockdevice.device} mklabel gpt')
elif self.mode == MBR:
- if SysCommand(f'/usr/bin/parted -s {self.blockdevice.device} mklabel msdos').exit_code == 0:
+ if self.parted_mklabel(self.blockdevice.device, "msdos"):
return self
else:
- raise DiskError('Problem setting the partition format to MBR:', f'/usr/bin/parted -s {self.blockdevice.device} mklabel msdos')
+ raise DiskError('Problem setting the disk label type to msdos:', f'/usr/bin/parted -s {self.blockdevice.device} mklabel msdos')
else:
raise DiskError(f'Unknown mode selected to format in: {self.mode}')
@@ -551,6 +552,14 @@ class Filesystem:
def set(self, partition: int, string: str):
return self.parted(f'{self.blockdevice.device} set {partition + 1} {string}') == 0
+ def parted_mklabel(self, device: str, disk_label: str):
+ # Try to unmount devices before attempting to run mklabel
+ try:
+ SysCommand(f'bash -c "umount {device}?"')
+ except:
+ pass
+ return self.raw_parted(f'{device} mklabel {disk_label}').exit_code == 0
+
def device_state(name, *args, **kwargs):
# Based out of: https://askubuntu.com/questions/528690/how-to-get-list-of-all-non-removable-disk-device-names-ssd-hdd-and-sata-ide-onl/528709#528709
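
Outside the installer, the behaviour of the new parted_mklabel() helper boils down to the following minimal standalone sketch, using subprocess in place of archinstall's SysCommand wrapper; the device path passed in is illustrative:

    import subprocess

    def relabel_disk(device: str, disk_label: str = "gpt") -> bool:
        # Best-effort unmount of any mounted partitions (same "device?" glob as above)
        # before rewriting the partition table; failures here are deliberately ignored.
        subprocess.run(f'umount {device}?', shell=True, check=False)
        # parted -s <device> mklabel <gpt|msdos>; a zero return code means success.
        return subprocess.run(['parted', '-s', device, 'mklabel', disk_label]).returncode == 0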
diff --git a/archinstall/lib/general.py b/archinstall/lib/general.py
index 9711382f..831b5451 100644
--- a/archinstall/lib/general.py
+++ b/archinstall/lib/general.py
@@ -13,7 +13,7 @@ from typing import Union
from .exceptions import *
from .output import log
-
+from .storage import storage
def gen_uid(entropy_length=256):
return hashlib.sha512(os.urandom(entropy_length)).hexdigest()
@@ -244,6 +244,8 @@ class SysCommandWorker:
if not self.pid:
try:
os.execve(self.cmd[0], self.cmd, {**os.environ, **self.environment_vars})
+ if storage['arguments'].get('debug'):
+ log(f"Executing: {self.cmd}", level=logging.DEBUG)
except FileNotFoundError:
log(f"{self.cmd[0]} does not exist.", level=logging.ERROR, fg="red")
self.exit_code = 1
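
Worth noting when reading this hunk: os.execve() replaces the current process image on success and raises on failure, so code placed directly after it is effectively unreachable; logging before the exec avoids that. A small sketch (function and parameter names are illustrative, not archinstall API):

    import logging, os

    def exec_with_debug(cmd: list, environment_vars: dict, debug: bool = False) -> None:
        if debug:
            # Log first: after a successful execve() there is no Python left to run this line.
            logging.debug(f"Executing: {cmd}")
        os.execve(cmd[0], cmd, {**os.environ, **environment_vars})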
diff --git a/archinstall/lib/hardware.py b/archinstall/lib/hardware.py
index 6a3b166d..a63155f5 100644
--- a/archinstall/lib/hardware.py
+++ b/archinstall/lib/hardware.py
@@ -48,10 +48,12 @@ AVAILABLE_GFX_DRIVERS = {
"intel-media-driver",
"vulkan-intel",
],
- "Nvidia": {
- "open-source": ["mesa", "xf86-video-nouveau", "libva-mesa-driver"],
- "proprietary": ["nvidia"],
- },
+ "Nvidia (open-source)": [
+ "mesa",
+ "xf86-video-nouveau",
+ "libva-mesa-driver"
+ ],
+ "Nvidia (proprietary)": ["nvidia"],
"VMware / VirtualBox (open-source)": ["mesa", "xf86-video-vmware"],
}
@@ -79,22 +81,22 @@ def has_uefi() -> bool:
def graphics_devices() -> dict:
cards = {}
for line in SysCommand("lspci"):
- if b' VGA ' in line:
+ if b' VGA ' in line or b' 3D ' in line:
_, identifier = line.split(b': ', 1)
- cards[identifier.strip().lower().decode('UTF-8')] = line
+ cards[identifier.strip().decode('UTF-8')] = line
return cards
def has_nvidia_graphics() -> bool:
- return any('nvidia' in x for x in graphics_devices())
+ return any('nvidia' in x.lower() for x in graphics_devices())
def has_amd_graphics() -> bool:
- return any('amd' in x for x in graphics_devices())
+ return any('amd' in x.lower() for x in graphics_devices())
def has_intel_graphics() -> bool:
- return any('intel' in x for x in graphics_devices())
+ return any('intel' in x.lower() for x in graphics_devices())
def cpu_vendor() -> Optional[str]:
@@ -107,6 +109,47 @@ def cpu_vendor() -> Optional[str]:
return None
+def cpu_model() -> Optional[str]:
+ cpu_info_raw = SysCommand("lscpu -J")
+ cpu_info = json.loads(b"".join(cpu_info_raw).decode('UTF-8'))['lscpu']
+
+ for info in cpu_info:
+ if info.get('field', None) == "Model name:":
+ return info.get('data', None)
+ return None
+
+
+def sys_vendor() -> Optional[str]:
+ with open(f"/sys/devices/virtual/dmi/id/sys_vendor") as vendor:
+ return vendor.read().strip()
+
+
+def product_name() -> Optional[str]:
+ with open(f"/sys/devices/virtual/dmi/id/product_name") as product:
+ return product.read().strip()
+
+
+def mem_info():
+ # This implementation is from https://stackoverflow.com/a/28161352
+ return dict((i.split()[0].rstrip(':'), int(i.split()[1])) for i in open('/proc/meminfo').readlines())
+
+
+def mem_available() -> Optional[str]:
+ return mem_info()['MemAvailable']
+
+
+def mem_free() -> Optional[str]:
+ return mem_info()['MemFree']
+
+
+def mem_total() -> Optional[str]:
+ return mem_info()['MemTotal']
+
+
+def virtualization() -> Optional[str]:
+ return str(SysCommand("systemd-detect-virt")).strip('\r\n')
+
+
def is_vm() -> bool:
try:
# systemd-detect-virt issues a non-zero exit code if it is not on a virtual machine
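
The new mem_info() one-liner is dense; an equivalent, more explicit sketch of the same /proc/meminfo parsing (values are in kiB, as reported by the kernel):

    def read_meminfo() -> dict:
        info = {}
        with open('/proc/meminfo') as f:
            for line in f:
                key, value = line.split()[:2]       # e.g. 'MemTotal:' '16323192'
                info[key.rstrip(':')] = int(value)  # strip the trailing colon, keep the kiB value
        return info

    print(read_meminfo()['MemTotal'])  # the same figure mem_total() reports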
diff --git a/archinstall/lib/installer.py b/archinstall/lib/installer.py
index c2db8337..da6f6a9b 100644
--- a/archinstall/lib/installer.py
+++ b/archinstall/lib/installer.py
@@ -2,8 +2,8 @@ from .disk import *
from .hardware import *
from .locale_helpers import verify_keyboard_layout, verify_x11_keyboard_layout
from .mirrors import *
-from .storage import storage
from .plugins import plugins
+from .storage import storage
from .user_interaction import *
# Any package that the Installer() is responsible for (optional and the default ones)
@@ -54,7 +54,6 @@ class Installer:
for kernel in kernels:
self.base_packages.append(kernel)
-
self.post_base_install = []
storage['session'] = self
@@ -442,6 +441,10 @@ class Installer:
# Fallback, try creating the boot loader without touching the EFI variables
SysCommand(f'/usr/bin/arch-chroot {self.target} bootctl --no-variables --path=/boot install')
+ # Ensure that the /boot/loader directory exists before we try to create files in it
+ if not os.path.exists(f'{self.target}/boot/loader'):
+ os.makedirs(f'{self.target}/boot/loader')
+
# Modify or create a loader.conf
if os.path.isfile(f'{self.target}/boot/loader/loader.conf'):
with open(f'{self.target}/boot/loader/loader.conf', 'r') as loader:
@@ -462,6 +465,10 @@ class Installer:
else:
loader.write(f"{line}\n")
+ # Ensure that the /boot/loader/entries directory exists before we try to create files in it
+ if not os.path.exists(f'{self.target}/boot/loader/entries'):
+ os.makedirs(f'{self.target}/boot/loader/entries')
+
for kernel in self.kernels:
# Setup the loader entry
with open(f'{self.target}/boot/loader/entries/{self.init_time}_{kernel}.conf', 'w') as entry:
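
The two new directory checks could also be collapsed into a single call; a small sketch, with the mount target hard-coded purely for illustration:

    import os

    def ensure_loader_dirs(target: str = '/mnt') -> None:
        # exist_ok=True makes makedirs() a no-op when the path already exists,
        # covering both /boot/loader and /boot/loader/entries in one call.
        os.makedirs(os.path.join(target, 'boot/loader/entries'), exist_ok=True)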
diff --git a/archinstall/lib/networking.py b/archinstall/lib/networking.py
index 0643c9cf..49970ec4 100644
--- a/archinstall/lib/networking.py
+++ b/archinstall/lib/networking.py
@@ -31,7 +31,7 @@ def list_interfaces(skip_loopback=True):
def check_mirror_reachable():
if (exit_code := SysCommand("pacman -Sy").exit_code) == 0:
return True
- elif exit_code == 256:
+ elif os.geteuid() != 0:
log("check_mirror_reachable() uses 'pacman -Sy' which requires root.", level=logging.ERROR, fg="red")
return False
diff --git a/archinstall/lib/profiles.py b/archinstall/lib/profiles.py
index d4913e7e..8434a0ab 100644
--- a/archinstall/lib/profiles.py
+++ b/archinstall/lib/profiles.py
@@ -228,6 +228,18 @@ class Profile(Script):
# since developers like less code - omitting it should assume they want to present it.
return True
+ def get_profile_description(self):
+ with open(self.path, 'r') as source:
+ source_data = source.read()
+
+ if '__description__' in source_data:
+ with self.load_instructions(namespace=f"{self.namespace}.py") as imported:
+ if hasattr(imported, '__description__'):
+ return imported.__description__
+
+ # Default to this string if the profile does not have a description.
+ return "This profile does not have the __description__ attribute set."
+
@property
def packages(self) -> Optional[list]:
"""
diff --git a/archinstall/lib/user_interaction.py b/archinstall/lib/user_interaction.py
index 79919658..b52267d9 100644
--- a/archinstall/lib/user_interaction.py
+++ b/archinstall/lib/user_interaction.py
@@ -575,11 +575,11 @@ def select_profile():
if len(shown_profiles) >= 1:
for index, profile in enumerate(shown_profiles):
- print(f"{index}: {profile}")
+ description = Profile(None, profile).get_profile_description()
+ print(f"{index}: {profile}: {description}")
print(' -- The above list is a set of pre-programmed profiles. --')
print(' -- They might make it easier to install things like desktop environments. --')
- print(' -- The desktop profile will let you select a DE/WM profile, e.g gnome, kde, sway --')
print(' -- (Leave blank and hit enter to skip this step and continue) --')
selected_profile = generic_select(actual_profiles_raw, 'Enter a pre-programmed profile name if you want to install one: ', options_output=False)
diff --git a/docs/conf.py b/docs/conf.py
index 375ff434..add1c5e7 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -41,7 +41,7 @@ copyright = '2020, Anton Hvornum'
author = 'Anton Hvornum'
# The full version, including alpha/beta/rc tags
-release = 'v2.1.0'
+release = 'v2.3.0.dev0'
# -- General configuration ---------------------------------------------------
diff --git a/examples/guided.py b/examples/guided.py
index 42429370..56c054fc 100644
--- a/examples/guided.py
+++ b/examples/guided.py
@@ -5,7 +5,7 @@ import time
import archinstall
from archinstall.lib.general import run_custom_user_commands
-from archinstall.lib.hardware import has_uefi, AVAILABLE_GFX_DRIVERS
+from archinstall.lib.hardware import *
from archinstall.lib.networking import check_mirror_reachable
from archinstall.lib.profiles import Profile
@@ -16,6 +16,13 @@ if os.getuid() != 0:
print("Archinstall requires root privileges to run. See --help for more.")
exit(1)
+# Log various information about hardware before starting the installation. This might assist in troubleshooting
+archinstall.log(f"Hardware model detected: {archinstall.sys_vendor()} {archinstall.product_name()}; UEFI mode: {archinstall.has_uefi()}", level=logging.DEBUG)
+archinstall.log(f"Processor model detected: {archinstall.cpu_model()}", level=logging.DEBUG)
+archinstall.log(f"Memory statistics: {archinstall.mem_available()} available out of {archinstall.mem_total()} total installed", level=logging.DEBUG)
+archinstall.log(f"Virtualization detected: {archinstall.virtualization()}; is VM: {archinstall.is_vm()}", level=logging.DEBUG)
+archinstall.log(f"Graphics devices detected: {archinstall.graphics_devices().keys()}", level=logging.DEBUG)
+
# For support reasons, we'll log the disk layout pre installation to match against post-installation layout
archinstall.log(f"Disk states before installing: {archinstall.disk_layouts()}", level=logging.DEBUG)
diff --git a/profiles/cutefish.py b/profiles/cutefish.py
new file mode 100644
index 00000000..1df2467a
--- /dev/null
+++ b/profiles/cutefish.py
@@ -0,0 +1,42 @@
+# A desktop environment using "Cutefish"
+
+import archinstall
+
+is_top_level_profile = False
+
+__packages__ = [
+ "cutefish",
+ "noto-fonts",
+ "konsole",
+ "sddm"
+]
+
+
+def _prep_function(*args, **kwargs):
+ """
+ Magic function called by the importing installer
+ before continuing any further. It also avoids executing any
+ other code in this stage. So it's a safe way to ask the user
+ for more input before any other installer steps start.
+ """
+
+ # Cutefish requires a functional xorg installation.
+ profile = archinstall.Profile(None, "xorg")
+ with profile.load_instructions(namespace="xorg.py") as imported:
+ if hasattr(imported, "_prep_function"):
+ return imported._prep_function()
+ else:
+ print("Deprecated (??): xorg profile has no _prep_function() anymore")
+
+
+# Ensures that this code only gets executed if executed
+# through importlib.util.spec_from_file_location("cutefish", "/somewhere/cutefish.py")
+# or through conventional import cutefish
+if __name__ == "cutefish":
+ # Install dependency profiles
+ archinstall.storage["installation_session"].install_profile("xorg")
+
+ # Install the Cutefish packages
+ archinstall.storage["installation_session"].add_additional_packages(__packages__)
+
+ archinstall.storage["installation_session"].enable_service("sddm")
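
As the comment above the guard notes, profiles are loaded by file path, which is why __name__ is "cutefish" rather than "__main__" when the body runs. A minimal sketch of that loading mechanism (the path is illustrative):

    import importlib.util

    spec = importlib.util.spec_from_file_location("cutefish", "/somewhere/cutefish.py")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # executes the profile body, triggering the __name__ == "cutefish" guard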