diff --git a/machines/sleeper-service/ZFS_SETUP.md b/machines/sleeper-service/ZFS_SETUP.md
deleted file mode 100644
index 507ed94..0000000
--- a/machines/sleeper-service/ZFS_SETUP.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# ZFS Setup for sleeper-service
-
-## Overview
-sleeper-service now uses ZFS for enhanced data integrity, snapshots, and efficient storage management for the file server role.
-
-## ZFS Pool Structure
-
-### `filepool` - Main ZFS Pool
-This pool contains all system and storage datasets:
-
-```
-filepool/root     # Root filesystem (/)
-filepool/nix      # Nix store (/nix)
-filepool/var      # Variable data (/var)
-filepool/storage  # NFS export storage (/mnt/storage)
-```
-
-## Storage Layout
-
-### System Datasets
-- **filepool/root**: System root filesystem with snapshots for rollback
-- **filepool/nix**: Nix store, can be excluded from frequent snapshots
-- **filepool/var**: System logs and variable data
-
-### Storage Dataset
-- **filepool/storage**: Primary NFS export point containing:
-  - `media/` - Media files shared via NFS
-  - `downloads/` - Download directory (for Transmission when re-enabled)
-  - `backups/` - Backup storage
-  - `shares/` - General file shares
-
-## ZFS Features Enabled
-
-### Automatic Services
-- **Auto-scrub**: Weekly integrity checks of all data
-- **TRIM**: SSD optimization for supported drives
-- **Snapshots**: Automatic snapshots for data protection (to be configured)
-
-### Benefits for File Server
-1. **Data Integrity**: Checksumming protects against bit rot
-2. **Snapshots**: Point-in-time recovery for user data
-3. **Compression**: Efficient storage usage
-4. **Send/Receive**: Efficient backup to other ZFS systems
-5. **Share Management**: Native NFS sharing support
-
-## Deployment Notes
-
-### Before First Boot
-The actual ZFS pool creation needs to be done during installation:
-
-```bash
-# Example pool creation (adjust device names)
-zpool create -f filepool /dev/sda
-zfs create filepool/root
-zfs create filepool/nix
-zfs create filepool/var
-zfs create filepool/storage
-
-# Set mount points
-zfs set mountpoint=/ filepool/root
-zfs set mountpoint=/nix filepool/nix
-zfs set mountpoint=/var filepool/var
-zfs set mountpoint=/mnt/storage filepool/storage
-
-# Enable compression for storage dataset
-zfs set compression=lz4 filepool/storage
-```
-
-### Network Storage Integration
-The `/mnt/storage` ZFS dataset is exported via NFS to the home lab network (10.0.0.0/24), replacing the previous "files.home" server functionality.
-
-## Migration from Existing Setup
-When deploying to the physical server:
-1. Backup existing data from current file server
-2. Create ZFS pool on target drives
-3. Restore data to `/mnt/storage`
-4. Update client machines to mount from new IP (10.0.0.8)
-
-## Culture Reference
-Like the GSV *Sleeper Service*, this configuration operates quietly in the background, providing reliable storage services with the redundancy and self-healing capabilities that ZFS brings to the table.
diff --git a/machines/sleeper-service/configuration.nix b/machines/sleeper-service/configuration.nix
index 5fae8e6..3f491ed 100644
--- a/machines/sleeper-service/configuration.nix
+++ b/machines/sleeper-service/configuration.nix
@@ -14,21 +14,14 @@
     ../../modules/users/sma.nix
   ];
 
-  # Boot configuration with ZFS support
+  # Boot configuration
   boot.loader.grub = {
     enable = true;
-    zfsSupport = true;
     efiSupport = true;
     efiInstallAsRemovable = true;
     devices = [ "nodev" ];
   };
 
-  # ZFS services for file server
-  services.zfs = {
-    autoScrub.enable = true;
-    trim.enable = true;
-  };
-
   # Time and locale
   time.timeZone = "Europe/Oslo";
   i18n.defaultLocale = "en_US.UTF-8";
diff --git a/machines/sleeper-service/hardware-configuration.nix b/machines/sleeper-service/hardware-configuration.nix
index 601f42e..1acfca1 100644
--- a/machines/sleeper-service/hardware-configuration.nix
+++ b/machines/sleeper-service/hardware-configuration.nix
@@ -13,31 +13,14 @@
   boot.kernelModules = [ "kvm-intel" ];
   boot.extraModulePackages = [ ];
 
-  # Enable ZFS support for storage pool only
-  boot.supportedFilesystems = [ "zfs" ];
-  boot.initrd.supportedFilesystems = [ "zfs" ];
-
-  # ZFS Configuration - only for storage pool
-  boot.zfs.extraPools = [ "storage" ];
-  services.zfs.autoScrub.enable = true;
-  services.zfs.trim.enable = true;
-
-  # OS remains on ext4
   fileSystems."/" =
-    { device = "/dev/disk/by-uuid/e7fc0e32-b9e5-4080-859e-fe9dea60823d";
+    { device = "/dev/disk/by-uuid/12345678-1234-1234-1234-123456789abc";
       fsType = "ext4";
     };
 
-  # ZFS storage pool mounted for NFS exports
-  fileSystems."/mnt/storage" =
-    { device = "storage";
-      fsType = "zfs";
-    };
-
   fileSystems."/boot" =
-    { device = "/dev/disk/by-uuid/2C7A-9F08";
+    { device = "/dev/disk/by-uuid/ABCD-1234";
       fsType = "vfat";
-      options = [ "fmask=0022" "dmask=0022" ];
     };
 
   swapDevices = [ ];
diff --git a/modules/network/network-sleeper-service.nix b/modules/network/network-sleeper-service.nix
index 77dc49b..9614a24 100644
--- a/modules/network/network-sleeper-service.nix
+++ b/modules/network/network-sleeper-service.nix
@@ -10,7 +10,6 @@
   # Machine-specific network configuration
   networking = {
     hostName = "sleeper-service";
-    hostId = "a1b2c3d4"; # Required for ZFS support
 
     # Enable systemd-networkd for static networking
     useNetworkd = true;
diff --git a/modules/services/nfs.nix b/modules/services/nfs.nix
index cf7b4fc..3be6bac 100644
--- a/modules/services/nfs.nix
+++ b/modules/services/nfs.nix
@@ -6,7 +6,7 @@
   # NFS server configuration
   services.nfs.server = {
     enable = true;
-    # Export the storage directory (ZFS dataset)
+    # Export the storage directory
     exports = ''
       /mnt/storage 10.0.0.0/24(rw,sync,no_subtree_check,no_root_squash)
     '';
@@ -14,12 +14,12 @@
     createMountPoints = true;
   };
 
-  # Ensure the storage subdirectories exist (ZFS dataset is mounted at /mnt/storage)
+  # Ensure the storage directory exists
   systemd.tmpfiles.rules = [
-    "d /mnt/storage/media 0755 sma users -"
-    "d /mnt/storage/downloads 0755 sma users -"
-    "d /mnt/storage/backups 0755 sma users -"
-    "d /mnt/storage/shares 0755 sma users -"
+    "d /mnt/storage 0755 geir users -"
+    "d /mnt/storage/media 0755 geir users -"
+    "d /mnt/storage/downloads 0755 geir users -"
+    "d /mnt/storage/backups 0755 geir users -"
   ];
 
   # Required packages for NFS
diff --git a/modules/system/transmission.nix b/modules/system/transmission.nix
index 51bfdd5..9dee4d8 100644
--- a/modules/system/transmission.nix
+++ b/modules/system/transmission.nix
@@ -5,7 +5,7 @@
   # Will re-enable once package is stable
   services.transmission = {
     enable = false;
false; - user = "sma"; # Using admin user for server processes + user = "geir"; group = "users"; settings.rpc-port = 9091; settings.rpc-bind-address = "0.0.0.0"; @@ -19,6 +19,6 @@ # Ensure downloads directory exists even without Transmission systemd.tmpfiles.rules = [ - "d /mnt/storage/downloads 0755 sma users -" + "d /mnt/storage/downloads 0755 geir users -" ]; } diff --git a/scripts/setup-zfs-sleeper-service.sh b/scripts/setup-zfs-sleeper-service.sh deleted file mode 100644 index 98f0d5d..0000000 --- a/scripts/setup-zfs-sleeper-service.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env bash -# ZFS Setup Script for sleeper-service -# This script configures the existing ZFS storage pool for NFS exports - -set -euo pipefail - -echo "=== ZFS Setup for sleeper-service ===" -echo "This script will configure the existing 'storage' pool for NFS exports" -echo "OS will remain on ext4 - only storage pool will be used for media/NFS" -echo "" -echo "Current ZFS pool status:" -zpool status storage -echo "" -echo "Current datasets:" -zfs list -echo "" -echo "The existing storage/media dataset with 903GB of data will be preserved" -echo "We'll set up proper mount points for NFS exports" -echo "" -read -p "Are you sure you want to proceed? (yes/no): " confirm - -if [[ "$confirm" != "yes" ]]; then - echo "Aborted." - exit 1 -fi - -echo "" -echo "=== Step 1: Verifying ZFS tools ===" -if ! command -v zpool &> /dev/null; then - echo "ERROR: ZFS tools not found!" - exit 1 -fi - -echo "" -echo "=== Step 2: Checking existing pool ===" -if ! zpool status storage &> /dev/null; then - echo "ERROR: Storage pool not found!" - exit 1 -fi - -echo "Storage pool found. GUID: $(zpool get -H -o value guid storage)" - -echo "" -echo "=== Step 3: Setting up storage mount points ===" - -# Create mount point directory -echo "Creating /mnt/storage directory..." -mkdir -p /mnt/storage - -# Set proper mount point for storage pool -echo "Setting mount point for storage pool..." -zfs set mountpoint=/mnt/storage storage - -# Ensure media dataset has proper mountpoint -echo "Setting mount point for media dataset..." -zfs set mountpoint=/mnt/storage/media storage/media - -# Create additional directories if needed -echo "Creating additional storage directories..." -mkdir -p /mnt/storage/{downloads,backups,shares} - -# Set proper ownership for sma user -echo "Setting ownership for sma user..." -chown sma:users /mnt/storage/{media,downloads,backups,shares} - -echo "" -echo "=== Step 4: Summary ===" -echo "ZFS storage setup complete!" -echo "" -echo "Storage pool: $(zpool get -H -o value guid storage)" -echo "Mount point: /mnt/storage" -echo "Media data: /mnt/storage/media (preserved)" -echo "Additional directories: downloads, backups, shares" -echo "" -echo "The existing 903GB of media data has been preserved." -echo "NFS exports can now use /mnt/storage/* paths." -echo "" -echo "Next: Deploy NixOS configuration to enable ZFS on boot" - -echo "" -echo "=== ZFS Setup Complete! ===" -echo "Pool status:" -zpool status storage -echo "" -echo "Datasets:" -zfs list -echo "" -echo "You can now deploy the new NixOS configuration that uses ZFS." -echo "Note: The system will need to be rebooted after the deployment." -echo "" -echo "Next steps:" -echo "1. Copy the new Home-lab configuration to the server" -echo "2. Run: sudo nixos-rebuild boot --flake .#sleeper-service" -echo "3. Reboot the system to activate ZFS support"