backup_to_external_m.2/old_scripts/lvm_backup_gui.py
root 72f9838f55 cleanup: Archive old complex scripts and documentation
- Move all old complex backup scripts to old_scripts/
- Archive previous documentation versions
- Clean up temporary files and debian packages
- Update README to focus on new simple system
- Keep only the enhanced simple backup system in main directory

Main directory now contains only:
- simple_backup_gui.py (GUI interface)
- enhanced_simple_backup.sh (CLI interface)
- list_drives.sh (helper)
- simple_backup.sh (basic CLI)
- SIMPLE_BACKUP_README.md (detailed docs)
- README.md (project overview)
2025-10-09 00:30:03 +02:00

1722 lines
83 KiB
Python

#!/usr/bin/env python3
"""
LVM Backup GUI - Professional interface for LVM snapshot backups
Creates block-level clones of LVM volumes with progress monitoring
"""
import tkinter as tk
from tkinter import ttk, messagebox, filedialog
import subprocess
import threading
import datetime
import os
import queue
import json
import re
class BorgConfigDialog(tk.Toplevel):
def __init__(self, parent, callback):
super().__init__(parent)
self.title("Borg Backup Configuration")
self.geometry("550x280")
self.callback = callback
self.repo_path = tk.StringVar(self)
self.passphrase = tk.StringVar(self)
self.create_new = tk.BooleanVar(self)
self.use_target_drive = tk.BooleanVar(self)
self.parent_gui = parent
# Load saved settings
self.load_saved_values()
self.init_ui()
def load_saved_values(self):
"""Load previously saved values"""
settings = self.parent_gui.settings
if settings.get("last_borg_repo"):
self.repo_path.set(settings["last_borg_repo"])
if settings.get("last_passphrase"):
self.passphrase.set(settings["last_passphrase"])
self.create_new.set(settings.get("create_new_default", True))
def init_ui(self):
frame = ttk.Frame(self)
frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)
# Repository path section
ttk.Label(frame, text="Borg Repository:").grid(row=0, column=0, sticky=tk.W)
entry = ttk.Entry(frame, textvariable=self.repo_path, width=40)
entry.grid(row=0, column=1, sticky=tk.W)
ttk.Button(frame, text="Browse", command=self.browse_repo).grid(row=0, column=2, padx=5)
# Option to use target drive as base path
target_frame = ttk.Frame(frame)
target_frame.grid(row=1, column=1, sticky=tk.W, pady=5)
ttk.Checkbutton(target_frame, text="Use target drive as repository base",
variable=self.use_target_drive, command=self.toggle_target_drive).pack(side=tk.LEFT)
ttk.Checkbutton(frame, text="Create new repository if it doesn't exist",
variable=self.create_new).grid(row=2, column=1, sticky=tk.W)
# Passphrase section
ttk.Label(frame, text="Borg Passphrase:").grid(row=3, column=0, sticky=tk.W, pady=(10,0))
ttk.Entry(frame, textvariable=self.passphrase, show='*', width=40).grid(row=3, column=1, sticky=tk.W, pady=(10,0))
# Buttons
btn_frame = ttk.Frame(frame)
btn_frame.grid(row=4, column=1, pady=15, sticky=tk.E)
ttk.Button(btn_frame, text="Cancel", command=self.destroy).pack(side=tk.RIGHT, padx=5)
ttk.Button(btn_frame, text="OK", command=self.on_ok).pack(side=tk.RIGHT)
def toggle_target_drive(self):
"""Toggle between manual path and target drive path"""
if self.use_target_drive.get():
# Get target drive from parent GUI
target_vg = self.parent_gui.target_vg.get()
if target_vg and target_vg != "Select Volume Group":
vg_name = target_vg.split()[0]
# Mount target drive and use it as base path
try:
# Create mount point
mount_point = f"/mnt/{vg_name}-borg"
subprocess.run(["mkdir", "-p", mount_point], check=True)
subprocess.run(["mount", f"/dev/{vg_name}/home", mount_point], check=True)
self.repo_path.set(f"{mount_point}/borg-repo")
except Exception:
messagebox.showwarning("Warning", "Could not mount target drive. Please select path manually.")
self.use_target_drive.set(False)
else:
messagebox.showwarning("Warning", "Please select a target drive first in the main window.")
self.use_target_drive.set(False)
def browse_repo(self):
if self.use_target_drive.get():
messagebox.showinfo("Info", "Path is set to target drive. Uncheck the option to browse manually.")
return
path = filedialog.askdirectory(title="Select Borg Repository Location")
if path:
self.repo_path.set(path)
def on_ok(self):
repo = self.repo_path.get().strip()
pw = self.passphrase.get().strip()
create = self.create_new.get()
if not repo:
messagebox.showerror("Error", "Please select a repository path.")
return
# Save settings for next time
self.parent_gui.settings["last_borg_repo"] = repo
self.parent_gui.settings["last_passphrase"] = pw
self.parent_gui.settings["create_new_default"] = create
self.parent_gui.save_settings()
self.callback(repo, pw, create)
self.destroy()
class LVMBackupGUI(tk.Tk):
def __init__(self, root=None):
super().__init__()
self.title("LVM Backup Manager")
self.geometry("900x700")
self.resizable(True, True)
# Configure style
self.setup_styles()
# Variables
self.source_vg = tk.StringVar()
self.target_vg = tk.StringVar()
self.backup_running = False
self.backup_process = None
# Settings file path
self.settings_file = os.path.expanduser("~/.lvm_backup_gui_settings.json")
self.settings = self.load_settings()
# Apply saved geometry
if "window_geometry" in self.settings:
try:
self.geometry(self.settings["window_geometry"])
except:
pass # Use default if geometry is invalid
# Save settings on window close
self.protocol("WM_DELETE_WINDOW", self.on_closing)
# Thread-safe logging queue
self.log_queue = queue.Queue()
# Create GUI
self.create_widgets()
self.refresh_drives()
# Start queue processing
self.process_log_queue()
def setup_styles(self):
"""Configure modern styling"""
style = ttk.Style()
# Configure colors and fonts
self.colors = {
'primary': '#2196F3',
'secondary': '#FFC107',
'success': '#4CAF50',
'danger': '#F44336',
'warning': '#FF9800',
'light': '#F5F5F5',
'dark': '#333333'
}
style.configure('Title.TLabel', font=('Arial', 16, 'bold'))
style.configure('Heading.TLabel', font=('Arial', 12, 'bold'))
style.configure('Info.TLabel', font=('Arial', 10))
def load_settings(self):
"""Load settings from file"""
default_settings = {
"last_borg_repo": "",
"last_passphrase": "",
"create_new_default": True,
"window_geometry": "900x700"
}
try:
if os.path.exists(self.settings_file):
with open(self.settings_file, 'r') as f:
settings = json.load(f)
# Merge with defaults for any missing keys
for key, value in default_settings.items():
if key not in settings:
settings[key] = value
return settings
except Exception as e:
self.log(f"Could not load settings: {e}")
return default_settings
def save_settings(self):
"""Save current settings to file"""
try:
with open(self.settings_file, 'w') as f:
json.dump(self.settings, f, indent=2)
except Exception as e:
self.log(f"Could not save settings: {e}")
def update_window_geometry(self):
"""Update stored window geometry"""
self.settings["window_geometry"] = self.geometry()
self.save_settings()
"""Process messages from background threads"""
try:
while True:
message = self.log_queue.get_nowait()
self.log_text.insert(tk.END, f"[{datetime.datetime.now().strftime('%H:%M:%S')}] {message}\n")
self.log_text.see(tk.END)
self.update_idletasks()
except queue.Empty:
pass
# Schedule next check
self.after(100, self.process_log_queue)
def thread_log(self, message):
"""Thread-safe logging method"""
self.log_queue.put(message)
def process_log_queue(self):
"""Process messages from background threads"""
try:
while True:
message = self.log_queue.get_nowait()
self.log_text.insert(tk.END, f"[{datetime.datetime.now().strftime('%H:%M:%S')}] {message}\n")
self.log_text.see(tk.END)
self.update_idletasks()
except queue.Empty:
pass
# Schedule next check
self.after(100, self.process_log_queue)
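# Note: log() and thread_log() only enqueue messages; process_log_queue() drains
# the queue on the Tk main loop (rescheduled every 100 ms via after()), so worker
# threads never touch Tk widgets directly.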
def on_closing(self):
"""Handle window closing"""
self.update_window_geometry()
self.destroy()
def create_widgets(self):
"""Create the main GUI interface"""
# Main container with padding
main_frame = ttk.Frame(self, padding="20")
main_frame.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))
# Configure grid weights
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
main_frame.columnconfigure(1, weight=1)
# Title
title_label = ttk.Label(main_frame, text="🛡️ LVM Backup Manager", style='Title.TLabel')
title_label.grid(row=0, column=0, columnspan=3, pady=(0, 20))
# Source drive selection
self.create_drive_selection_frame(main_frame, "Source Drive", 1, self.source_vg, True)
# Arrow
arrow_label = ttk.Label(main_frame, text="⬇️", font=('Arial', 20))
arrow_label.grid(row=2, column=1, pady=10)
# Target drive selection
self.create_drive_selection_frame(main_frame, "Target Drive", 3, self.target_vg, False)
# Backup info frame
self.create_backup_info_frame(main_frame, 4)
# Control buttons
self.create_control_frame(main_frame, 5)
# Progress frame
self.create_progress_frame(main_frame, 6)
# Log frame
self.create_log_frame(main_frame, 7)
def create_drive_selection_frame(self, parent, title, row, var, is_source):
"""Create drive selection section"""
frame = ttk.LabelFrame(parent, text=title, padding="10")
frame.grid(row=row, column=0, columnspan=3, sticky=(tk.W, tk.E), pady=5)
frame.columnconfigure(1, weight=1)
# Drive dropdown
ttk.Label(frame, text="Volume Group:").grid(row=0, column=0, sticky=tk.W, padx=(0, 10))
combo = ttk.Combobox(frame, textvariable=var, state='readonly', width=30)
combo.grid(row=0, column=1, sticky=(tk.W, tk.E), padx=(0, 10))
combo.bind('<<ComboboxSelected>>', lambda e: self.on_selection_change(var, combo))
combo.bind('<Button-1>', lambda e: self.on_combobox_click(combo), add='+')
combo.bind('<Button-1>', lambda e: self.on_combo_clicked(e, var), add='+') # add='+' so both handlers fire instead of the second bind replacing the first
refresh_btn = ttk.Button(frame, text="🔄 Refresh", command=self.refresh_drives)
refresh_btn.grid(row=0, column=2)
# Drive info labels
info_frame = ttk.Frame(frame)
info_frame.grid(row=1, column=0, columnspan=3, sticky=(tk.W, tk.E), pady=(10, 0))
info_frame.columnconfigure(1, weight=1)
# Store references for updating
setattr(self, f"{title.lower().replace(' ', '_')}_info", info_frame)
if is_source:
self.source_combo = combo
else:
self.target_combo = combo
def create_backup_info_frame(self, parent, row):
"""Create backup information display"""
frame = ttk.LabelFrame(parent, text="📊 Backup Information", padding="10")
frame.grid(row=row, column=0, columnspan=3, sticky=(tk.W, tk.E), pady=5)
frame.columnconfigure(1, weight=1)
self.backup_info_labels = {}
info_items = [
("Total Size:", "total_size"),
("Estimated Time:", "est_time"),
("Transfer Speed:", "speed"),
("Status:", "status")
]
for i, (label, key) in enumerate(info_items):
ttk.Label(frame, text=label).grid(row=i//2, column=(i%2)*2, sticky=tk.W, padx=(0, 10), pady=2)
value_label = ttk.Label(frame, text="Not calculated", style='Info.TLabel')
value_label.grid(row=i//2, column=(i%2)*2+1, sticky=tk.W, padx=(0, 20), pady=2)
self.backup_info_labels[key] = value_label
def create_control_frame(self, parent, row):
"""Create control buttons"""
frame = ttk.Frame(parent)
frame.grid(row=row, column=0, columnspan=3, pady=20)
self.start_btn = ttk.Button(frame, text="▶️ Start Backup", command=self.start_backup, style='Accent.TButton')
self.start_btn.pack(side=tk.LEFT, padx=(0, 10))
self.stop_btn = ttk.Button(frame, text="⏹️ Stop Backup", command=self.stop_backup, state='disabled')
self.stop_btn.pack(side=tk.LEFT, padx=(0, 10))
self.verify_btn = ttk.Button(frame, text="✅ Verify Backup", command=self.verify_backup)
self.verify_btn.pack(side=tk.LEFT, padx=(0, 10))
borg_btn = ttk.Button(frame, text="Borg Backup", command=self.open_borg_config)
borg_btn.pack(side=tk.LEFT, padx=5)
def create_progress_frame(self, parent, row):
"""Create progress monitoring"""
frame = ttk.LabelFrame(parent, text="📈 Progress", padding="10")
frame.grid(row=row, column=0, columnspan=3, sticky=(tk.W, tk.E), pady=5)
frame.columnconfigure(0, weight=1)
# Overall progress
ttk.Label(frame, text="Overall Progress:").grid(row=0, column=0, sticky=tk.W)
self.overall_progress = ttk.Progressbar(frame, mode='determinate', length=400)
self.overall_progress.grid(row=1, column=0, sticky=(tk.W, tk.E), pady=(5, 10))
# Current operation
self.current_operation = ttk.Label(frame, text="Ready to start backup", style='Info.TLabel')
self.current_operation.grid(row=2, column=0, sticky=tk.W)
# Time remaining
self.time_remaining = ttk.Label(frame, text="", style='Info.TLabel')
self.time_remaining.grid(row=3, column=0, sticky=tk.W)
def create_log_frame(self, parent, row):
"""Create log output"""
frame = ttk.LabelFrame(parent, text="📝 Log Output", padding="10")
frame.grid(row=row, column=0, columnspan=3, sticky=(tk.W, tk.E, tk.N, tk.S), pady=5)
frame.columnconfigure(0, weight=1)
frame.rowconfigure(0, weight=1)
parent.rowconfigure(row, weight=1)
self.log_text = tk.Text(frame, height=12, width=80, font=('Consolas', 9))
self.log_text.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))
# Add initial message
self.log("LVM Backup Manager initialized")
self.log("Select source and target drives to begin")
def log(self, message):
"""Add message to log with timestamp - thread-safe"""
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log_message = f"[{timestamp}] {message}"
self.log_queue.put(log_message)
def refresh_drives(self):
"""Scan for available LVM volume groups"""
try:
self.log("Scanning for LVM volume groups...")
# Get volume groups
result = subprocess.run(['sudo', 'vgs', '--noheadings', '-o', 'vg_name,vg_size,vg_free'],
capture_output=True, text=True, check=True)
vgs = []
for line in result.stdout.strip().split('\n'):
if line.strip():
parts = line.strip().split()
if len(parts) >= 3:
vg_name = parts[0]
vg_size = parts[1]
vg_free = parts[2]
vgs.append(f"{vg_name} ({vg_size} total, {vg_free} free)")
# Update comboboxes
self.source_combo['values'] = vgs
self.target_combo['values'] = vgs
# Set default selection for the first available VG if none selected
if vgs and not self.source_vg.get():
self.source_combo.set(vgs[0])
self.source_vg.set(vgs[0])
self.log(f"DEBUG: After refresh - source_vg = '{self.source_vg.get()}'")
self.log(f"DEBUG: Available VGs: {vgs}")
if vgs:
self.log(f"Found {len(vgs)} volume groups")
else:
self.log("No LVM volume groups found")
except subprocess.CalledProcessError as e:
self.log(f"Error scanning drives: {e}")
messagebox.showerror("Error", "Failed to scan for LVM volume groups. Make sure you have LVM installed and proper permissions.")
def on_combo_clicked(self, event, var):
"""Handle combobox click events"""
self.log(f"DEBUG: Combo clicked, current value: '{var.get()}'")
def on_combo_selected(self, event, var):
"""Handle combobox selection events"""
widget = event.widget
selection = widget.get()
var.set(selection)
self.log(f"DEBUG: Combo selected: '{selection}', var now: '{var.get()}'")
self.update_drive_info()
def on_selection_change(self, var, combo):
"""Handle combobox selection change"""
selected = combo.get()
var.set(selected)
self.log(f"DEBUG: Selected {selected} for {var}")
self.update_drive_info()
def on_combobox_click(self, combo):
"""Handle combobox click to ensure it's focused"""
combo.focus_set()
def update_drive_info(self):
"""Update drive information when selection changes"""
if not self.source_vg.get() or not self.target_vg.get():
return
try:
source_vg = self.source_vg.get().split()[0]
target_vg = self.target_vg.get().split()[0]
# Get detailed volume information
source_info = self.get_vg_details(source_vg)
target_info = self.get_vg_details(target_vg)
# Calculate backup information
self.calculate_backup_info(source_info, target_info)
except Exception as e:
self.log(f"Error updating drive info: {e}")
def get_vg_details(self, vg_name):
"""Get detailed information about a volume group"""
try:
# Get VG info
vg_result = subprocess.run(['sudo', 'vgs', vg_name, '--noheadings', '-o', 'vg_size,vg_free,vg_uuid'],
capture_output=True, text=True, check=True)
vg_parts = vg_result.stdout.strip().split()
# Get LV info
lv_result = subprocess.run(['sudo', 'lvs', vg_name, '--noheadings', '-o', 'lv_name,lv_size'],
capture_output=True, text=True, check=True)
volumes = []
total_lv_size = 0
for line in lv_result.stdout.strip().split('\n'):
if line.strip():
parts = line.strip().split()
if len(parts) >= 2:
lv_name = parts[0]
lv_size = parts[1]
volumes.append((lv_name, lv_size))
# Convert size to bytes for calculation
size_bytes = self.parse_size_to_bytes(lv_size)
total_lv_size += size_bytes
return {
'name': vg_name,
'total_size': vg_parts[0],
'free_size': vg_parts[1],
'uuid': vg_parts[2],
'volumes': volumes,
'total_lv_size_bytes': total_lv_size
}
except subprocess.CalledProcessError:
return None
def parse_size_to_bytes(self, size_str):
"""Parse LVM size string to bytes"""
size_str = size_str.strip()
multipliers = {'B': 1, 'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4}
# Extract number and unit (handle German locale with comma)
if size_str[-1].upper() in multipliers:
number_str = size_str[:-1].replace(',', '.')
number = float(number_str)
unit = size_str[-1].upper()
else:
number_str = size_str.replace(',', '.')
number = float(number_str)
unit = 'B'
return int(number * multipliers.get(unit, 1))
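# Illustrative examples (German-locale commas are normalized to dots):
#   parse_size_to_bytes("1,00G") -> 1073741824
#   parse_size_to_bytes("512M")  -> 536870912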
def calculate_backup_info(self, source_info, target_info):
"""Calculate and display backup information"""
if not source_info or not target_info:
return
# Calculate total size to backup
total_bytes = source_info['total_lv_size_bytes']
total_gb = total_bytes / (1024**3)
# Estimate time (based on typical speeds: 200-400 MB/s)
avg_speed_mbs = 250 # MB/s
est_seconds = total_bytes / (avg_speed_mbs * 1024 * 1024)
est_time = str(datetime.timedelta(seconds=int(est_seconds)))
# Update labels
self.backup_info_labels['total_size'].config(text=f"{total_gb:.1f} GB")
self.backup_info_labels['est_time'].config(text=est_time)
self.backup_info_labels['speed'].config(text=f"~{avg_speed_mbs} MB/s")
self.backup_info_labels['status'].config(text="Ready")
self.log(f"Backup calculation: {total_gb:.1f} GB, estimated {est_time}")
def start_backup(self):
"""Start the backup process"""
if not self.source_vg.get() or not self.target_vg.get():
messagebox.showerror("Error", "Please select both source and target drives")
return
source_vg = self.source_vg.get().split()[0]
target_vg = self.target_vg.get().split()[0]
if source_vg == target_vg:
messagebox.showerror("Error", "Source and target cannot be the same drive")
return
# Confirm backup
if not messagebox.askyesno("Confirm Backup",
f"This will overwrite all data on {target_vg}.\n\nAre you sure you want to continue?"):
return
# Update UI state
self.backup_running = True
self.start_btn.config(state='disabled')
self.stop_btn.config(state='normal')
self.overall_progress.config(value=0)
self.backup_info_labels['status'].config(text="🔄 Running... (Check external drive LED)")
# Start backup in thread
self.backup_thread = threading.Thread(target=self.run_backup, args=(source_vg, target_vg))
self.backup_thread.daemon = True
self.backup_thread.start()
def run_backup(self, source_vg, target_vg):
"""Run the actual backup process"""
try:
self.log(f"🚀 Starting backup: {source_vg}{target_vg}")
# Create backup script content directly
script_content = f'''#!/bin/bash
set -e
SOURCE_VG="{source_vg}"
TARGET_VG="{target_vg}"
SNAPSHOT_SIZE="2G"
log() {{
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}}
# Clean up any existing snapshots
lvremove -f $SOURCE_VG/root-backup-snap 2>/dev/null || true
lvremove -f $SOURCE_VG/home-backup-snap 2>/dev/null || true
lvremove -f $SOURCE_VG/boot-backup-snap 2>/dev/null || true
log "Creating LVM snapshots..."
lvcreate -L $SNAPSHOT_SIZE -s -n root-backup-snap $SOURCE_VG/root
lvcreate -L $SNAPSHOT_SIZE -s -n home-backup-snap $SOURCE_VG/home
lvcreate -L 1G -s -n boot-backup-snap $SOURCE_VG/boot
log "SUCCESS Snapshots created"
log "Unmounting target volumes..."
umount /dev/$TARGET_VG/home 2>/dev/null || true
umount /dev/$TARGET_VG/root 2>/dev/null || true
umount /dev/$TARGET_VG/boot 2>/dev/null || true
log "Cloning root volume..."
dd if=/dev/$SOURCE_VG/root-backup-snap of=/dev/$TARGET_VG/root bs=64M status=progress 2>&1
log "SUCCESS Root volume cloned"
log "Cloning home volume..."
dd if=/dev/$SOURCE_VG/home-backup-snap of=/dev/$TARGET_VG/home bs=64M status=progress 2>&1
log "SUCCESS Home volume cloned"
log "Cloning boot volume..."
dd if=/dev/$SOURCE_VG/boot-backup-snap of=/dev/$TARGET_VG/boot bs=64M status=progress 2>&1
log "SUCCESS Boot volume cloned"
log "Cleaning up snapshots..."
lvremove -f $SOURCE_VG/root-backup-snap
lvremove -f $SOURCE_VG/home-backup-snap
lvremove -f $SOURCE_VG/boot-backup-snap
log "SUCCESS Backup completed successfully!"
'''
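# The generated script mirrors the manual procedure: drop any stale snapshots,
# snapshot root/home/boot, unmount the target VG's volumes, dd each snapshot
# block-for-block onto the matching target LV, then remove the snapshots.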
# Write script to temp file
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.sh', delete=False) as f:
f.write(script_content)
script_path = f.name
# Make script executable
os.chmod(script_path, 0o755)
# Update UI - starting
pass # UI update disabled (threading)
# Execute the backup script
self.backup_process = subprocess.Popen(
['sudo', 'bash', script_path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
universal_newlines=True,
bufsize=1
)
# Monitor output in real-time
for line in iter(self.backup_process.stdout.readline, ''):
if not self.backup_running:
break
line = line.strip()
if line:
# Update UI from main thread
self.log(line)
# Update progress based on output
if 'SUCCESS' in line:
if 'Snapshots created' in line:
pass # UI update disabled (threading)
pass # UI update disabled (threading)
elif 'Root volume cloned' in line:
pass # UI update disabled (threading)
pass # UI update disabled (threading)
elif 'Home volume cloned' in line:
pass # UI update disabled (threading)
pass # UI update disabled (threading)
elif 'Boot volume cloned' in line:
pass # UI update disabled (threading)
pass # UI update disabled (threading)
elif 'Backup completed' in line:
pass # UI update disabled (threading)
pass # UI update disabled (threading)
elif 'Cloning' in line:
if 'root' in line.lower():
pass # UI update disabled (threading)
pass # UI update disabled (threading)
elif 'home' in line.lower():
pass # UI update disabled (threading)
pass # UI update disabled (threading)
elif 'boot' in line.lower():
pass # UI update disabled (threading)
pass # UI update disabled (threading)
elif any(unit in line for unit in ['MB/s', 'GB/s']):
# Parse dd progress for speed
speed_match = re.search(r'(\d+(?:\.\d+)?)\s*(MB|GB)/s', line)
if speed_match:
speed = speed_match.group(1)
unit = speed_match.group(2)
pass # UI update disabled (threading)
# Wait for process to complete
return_code = self.backup_process.wait()
# Clean up temp script
try:
os.unlink(script_path)
except:
pass
# Update UI based on result
success = return_code == 0
pass # backup_finished disabled (threading)
except Exception as e:
self.log(f"❌ Backup failed: {e}")
self.log("Backup failed or was interrupted")
pass # backup_finished disabled (threading)
def create_temp_script(self, original_script, source_vg, target_vg):
"""Create a temporary script with modified VG names"""
temp_script = '/tmp/lvm_backup_gui_temp.sh'
with open(original_script, 'r') as f:
content = f.read()
# Replace VG names
content = content.replace('SOURCE_VG="internal-vg"', f'SOURCE_VG="{source_vg}"')
content = content.replace('TARGET_VG="migration-vg"', f'TARGET_VG="{target_vg}"')
# Make it auto-answer 'y' to confirmation
content = content.replace('read -p "Are you sure you want to proceed?', 'echo "Auto-confirmed by GUI"; confirm="y"; #read -p "Are you sure you want to proceed?')
with open(temp_script, 'w') as f:
f.write(content)
os.chmod(temp_script, 0o755)
return temp_script
def monitor_backup_progress(self):
"""Monitor backup progress and update UI"""
if not self.backup_running or not self.backup_process:
return
try:
# Read output
line = self.backup_process.stdout.readline()
if line:
line = line.strip()
self.log(line)
# Parse progress from dd output
if 'kopiert' in line or 'copied' in line:
# Show disk activity in log for dd progress
self.log(f"💾⚡ {line} (External drive LED should be flashing)")
self.parse_dd_progress(line)
elif 'SUCCESS' in line:
if 'Root volume cloned' in line:
pass # Widget update disabled (threading)
pass # Widget update disabled (threading)
elif 'Home volume cloned' in line:
pass # Widget update disabled (threading)
pass # Widget update disabled (threading)
elif 'Boot volume cloned' in line:
pass # Widget update disabled (threading)
pass # Widget update disabled (threading)
elif 'Cloning' in line:
if 'root' in line.lower():
pass # Widget update disabled (threading)
elif 'home' in line.lower():
pass # Widget update disabled (threading)
elif 'boot' in line.lower():
pass # Widget update disabled (threading)
# Check if process is still running
if self.backup_process.poll() is None:
# Schedule next check
self.after(100, self.monitor_backup_progress)
else:
# Process finished
success = self.backup_process.returncode == 0
self.log("Backup completed successfully" if success else "Backup failed or was interrupted")
except Exception as e:
self.log(f"Error monitoring progress: {e}")
self.log("Backup failed or was interrupted")
def parse_dd_progress(self, line):
"""Parse dd progress output"""
try:
# Look for speed information
if 'MB/s' in line:
speed_match = re.search(r'(\d+(?:\.\d+)?)\s*MB/s', line)
if speed_match:
speed = speed_match.group(1)
self.backup_info_labels['speed'].config(text=f"{speed} MB/s")
except:
pass
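# A dd "status=progress" line looks roughly like:
#   1073741824 bytes (1,1 GB, 1,0 GiB) copied, 4 s, 268 MB/s
# (a German locale prints "Bytes ... kopiert", which monitor_backup_progress also matches)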
def backup_finished(self, success):
"""Handle backup completion"""
self.backup_running = False
self.start_btn.config(state='normal')
self.stop_btn.config(state='disabled')
if success:
self.overall_progress.config(value=100)
self.current_operation.config(text="✅ Backup completed successfully!")
self.backup_info_labels['status'].config(text="Completed")
self.log("🎉 Backup completed successfully!")
messagebox.showinfo("Success", "Backup completed successfully!")
else:
self.current_operation.config(text="❌ Backup failed")
self.backup_info_labels['status'].config(text="Failed")
self.log("❌ Backup failed")
messagebox.showerror("Error", "Backup failed. Check the log for details.")
# Clean up
if hasattr(self, 'backup_process'):
self.backup_process = None
def stop_backup(self):
"""Stop the running backup"""
if self.backup_process:
self.log("Stopping backup...")
self.backup_process.terminate()
self.backup_finished(False)
def verify_backup(self):
"""Verify the backup integrity"""
if not self.target_vg.get():
messagebox.showerror("Error", "Please select a target drive to verify")
return
target_vg = self.target_vg.get().split()[0]
self.log(f"Verifying backup on {target_vg}...")
def verify_thread():
try:
# Run filesystem checks
result = subprocess.run(['sudo', 'fsck', '-n', f'/dev/{target_vg}/root'],
capture_output=True, text=True)
if result.returncode == 0:
self.log("✅ Root filesystem verification passed")
else:
self.log("⚠️ Root filesystem verification issues detected")
result = subprocess.run(['sudo', 'fsck', '-n', f'/dev/{target_vg}/boot'],
capture_output=True, text=True)
if result.returncode == 0:
self.log("✅ Boot filesystem verification passed")
else:
self.log("⚠️ Boot filesystem verification issues detected")
self.log("Verification completed")
messagebox.showinfo("Verification", "Backup verification completed. Check log for details.")
except Exception as e:
self.log(f"Verification error: {e}")
messagebox.showerror("Error", f"Verification failed: {e}")
thread = threading.Thread(target=verify_thread)
thread.daemon = True
thread.start()
def open_borg_config(self):
"""Open Borg repository configuration dialog"""
# Check if source is selected
source_vg = self.source_vg.get()
self.log(f"DEBUG: source_vg value = '{source_vg}'")
self.log(f"DEBUG: source_combo current = '{self.source_combo.get()}'")
# Try to get value directly from combobox if StringVar is empty
if not source_vg and self.source_combo.get():
source_vg = self.source_combo.get()
self.source_vg.set(source_vg)
self.log(f"DEBUG: Fixed source_vg to '{source_vg}'")
if not source_vg or source_vg == "Select Volume Group":
messagebox.showerror("Error", "Please select a source drive first.")
return
BorgConfigDialog(self, self.start_borg_backup_with_config)
def start_borg_backup_with_config(self, repo_path, passphrase, create_new):
"""Start Borg backup with configuration from dialog"""
self.log(f"Starting Borg backup: repo={repo_path}, create_new={create_new}")
self.borg_repo_path = repo_path
self.borg_passphrase = passphrase
self.borg_create_new = create_new
# Start the backup process
threading.Thread(target=self._run_borg_backup, args=(repo_path, passphrase, create_new), daemon=True).start()
def start_borg_backup(self):
"""Start Borg backup of LVM snapshots to Nextcloud"""
# Check if borg is installed
try:
subprocess.run(['borg', '--version'], capture_output=True, check=True)
except (subprocess.CalledProcessError, FileNotFoundError):
messagebox.showerror("Borg Not Found",
"BorgBackup is not installed.\n\n" +
"Install with: sudo apt install borgbackup")
return
# Check if snapshots exist
source_vg = self.source_vg.get()
if not source_vg:
messagebox.showerror("Error", "Please select source drive first")
return
source_vg = source_vg.split()[0]
snapshots = self.check_snapshots_exist(source_vg)
if not snapshots:
# Create snapshots if they don't exist
response = messagebox.askyesno("Create Snapshots",
"No snapshots found. Create them for Borg backup?")
if response:
self.create_snapshots_for_borg(source_vg)
else:
return
# Setup Borg repository
self.setup_borg_repo()
# Start Borg backup in thread
thread = threading.Thread(target=self.run_borg_backup, args=(source_vg,))
thread.daemon = True
thread.start()
def check_snapshots_exist(self, vg_name):
"""Check if snapshots exist for Borg backup"""
try:
result = subprocess.run(['lvs', vg_name], capture_output=True, text=True)
snapshots = []
for line in result.stdout.split('\n'):
if 'backup-snap' in line:
snapshots.append(line.split()[0])
return snapshots
except:
return []
def create_snapshots_for_borg(self, vg_name):
"""Create snapshots specifically for Borg backup"""
self.log("🔄 Creating snapshots for Borg backup...")
volumes = ['root', 'home', 'boot']
for vol in volumes:
try:
cmd = ['lvcreate', '-L', '2G', '-s', '-n', f'{vol}-backup-snap', f'{vg_name}/{vol}']
subprocess.run(cmd, check=True, capture_output=True)
self.log(f"✅ Created snapshot: {vol}-backup-snap")
except subprocess.CalledProcessError as e:
# Remove if exists and try again
subprocess.run(['lvremove', '-f', f'{vg_name}/{vol}-backup-snap'],
capture_output=True)
try:
subprocess.run(cmd, check=True, capture_output=True)
self.log(f"✅ Created snapshot: {vol}-backup-snap")
except:
self.log(f"❌ Failed to create snapshot: {vol}-backup-snap")
def setup_borg_repo(self):
"""Setup Borg repository in Nextcloud"""
repo_path = "/home/rwiegand/Nextcloud/backups/borg-repo"
# Create directory if it doesn't exist
os.makedirs(repo_path, exist_ok=True)
# Check if repo is initialized
if not os.path.exists(os.path.join(repo_path, "README")):
self.log("🔧 Initializing Borg repository...")
# Prompt for passphrase
from tkinter import simpledialog
passphrase = simpledialog.askstring("Borg Passphrase",
"Enter passphrase for Borg repository:",
show='*')
if not passphrase:
messagebox.showerror("Error", "Passphrase required for Borg repository")
return False
# Initialize repo
env = os.environ.copy()
env['BORG_PASSPHRASE'] = passphrase
try:
subprocess.run(['borg', 'init', '--encryption=repokey', repo_path],
env=env, check=True, capture_output=True)
self.log("✅ Borg repository initialized")
return True
except subprocess.CalledProcessError as e:
self.log(f"❌ Failed to initialize Borg repo: {e}")
return False
else:
self.log("✅ Borg repository already exists")
return True
def _run_borg_backup(self, repo_path, passphrase, create_new):
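"""Snapshot every sizeable LV in the selected VG and archive the snapshots into a Borg repository. Runs on a worker thread; all UI feedback goes through thread_log()."""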
# Check if source is selected
source_vg = self.source_vg.get()
if not source_vg or source_vg == "Select Volume Group":
self.thread_log("Please select a source drive first")
return
self.thread_log(f"Starting Borg backup from {source_vg} to: {repo_path}")
# Set up environment for Borg
env = os.environ.copy()
if passphrase:
env['BORG_PASSPHRASE'] = passphrase
# Create temporary snapshot for backup
vg_name = source_vg.split()[0] # Extract VG name from "vg-name (size info)"
snapshot_name = "borg-backup-persistent" # Use persistent name
snapshot_lv = f"/dev/{vg_name}/{snapshot_name}"
# Debug: Show VG structure
try:
vg_info = subprocess.run(['vgs', '--noheadings', '-o', 'vg_name,vg_size,vg_free', vg_name],
capture_output=True, text=True, check=True)
self.thread_log(f"VG Info: {vg_info.stdout.strip()}")
lv_info = subprocess.run(['lvs', '--noheadings', '-o', 'lv_name,lv_size', vg_name],
capture_output=True, text=True, check=True)
self.thread_log(f"LVs in {vg_name}:")
for line in lv_info.stdout.strip().split('\n'):
if line.strip():
self.thread_log(f" {line.strip()}")
except:
pass
# Clean up any existing backup snapshots to free space
self.thread_log("Cleaning up existing backup snapshots...")
try:
existing_snaps = subprocess.run(['lvs', '--noheadings', '-o', 'lv_name', vg_name],
capture_output=True, text=True, check=True)
freed_space = 0
for line in existing_snaps.stdout.strip().split('\n'):
lv_name = line.strip()
if ('backup-snap' in lv_name or 'borg-backup' in lv_name) and lv_name != snapshot_name:
try:
subprocess.run(['lvremove', '-f', f'/dev/{vg_name}/{lv_name}'],
capture_output=True, check=True)
self.thread_log(f"Removed old snapshot: {lv_name}")
freed_space += 1
except subprocess.CalledProcessError:
pass
if freed_space > 0:
self.thread_log(f"Freed up space by removing {freed_space} old snapshots")
except subprocess.CalledProcessError:
pass
# Check if persistent snapshot already exists
snapshot_exists = False
existing_snapshot_path = None
try:
result = subprocess.run(['lvs', snapshot_lv], capture_output=True, check=True)
# Check snapshot size - if it's less than 8GB, remove it for recreation
size_result = subprocess.run(['lvs', '--noheadings', '-o', 'lv_size', '--units', 'g', snapshot_lv],
capture_output=True, text=True, check=True)
size_gb = float(size_result.stdout.strip().replace('g', '').replace(',', '.'))
if size_gb < 8.0: # Less than 8GB - too small for disaster recovery
self.thread_log(f"Existing snapshot too small ({size_gb:.1f}GB) - removing for multi-LV backup")
subprocess.run(['lvremove', '-f', snapshot_lv], capture_output=True, check=True)
snapshot_exists = False
else:
snapshot_exists = True
existing_snapshot_path = snapshot_lv
self.thread_log(f"Found existing persistent snapshot: {snapshot_lv} ({size_gb:.1f}GB)")
# If we have large existing snapshot, we'll use it as single source for now
backup_sources = [snapshot_lv]
created_snapshots = [snapshot_name]
except subprocess.CalledProcessError:
pass
# Clean up OTHER backup snapshots (but preserve our persistent one if it exists)
if not snapshot_exists: # Only cleanup if we need to create a new snapshot
self.thread_log("Cleaning up old backup snapshots to free space...")
try:
existing_snaps = subprocess.run(['lvs', '--noheadings', '-o', 'lv_name', vg_name],
capture_output=True, text=True, check=True)
removed_count = 0
for line in existing_snaps.stdout.strip().split('\n'):
lv_name = line.strip()
# Remove old backup snapshots but NOT our persistent snapshot
if lv_name and ('backup-snap' in lv_name or lv_name.endswith('-snap')) and lv_name != snapshot_name:
try:
subprocess.run(['lvremove', '-f', f'/dev/{vg_name}/{lv_name}'],
capture_output=True, check=True)
self.thread_log(f"Removed old snapshot: {lv_name}")
removed_count += 1
except subprocess.CalledProcessError:
pass
if removed_count > 0:
self.thread_log(f"Freed up space by removing {removed_count} old snapshots")
except subprocess.CalledProcessError:
self.thread_log("Could not list existing snapshots")
# Initialize variables for multi-LV backup (only if no existing snapshot)
if not snapshot_exists:
backup_sources = []
created_snapshots = []
if not snapshot_exists:
try:
# Create snapshot of root volume with dynamic sizing
self.thread_log("Creating temporary snapshot for Borg backup...")
# Get available space in VG
vg_result = subprocess.run(['vgs', '--noheadings', '-o', 'vg_free', '--units', 'm', vg_name],
capture_output=True, text=True, check=True)
free_mb = float(vg_result.stdout.strip().replace('m', '').replace(',', '.'))
# Find the largest LV to determine appropriate snapshot size
lvs_result = subprocess.run(['lvs', '--noheadings', '-o', 'lv_name,lv_size', '--units', 'b', vg_name],
capture_output=True, text=True, check=True)
largest_lv = None
largest_size = 0
for line in lvs_result.stdout.strip().split('\n'):
if line.strip():
parts = line.strip().split()
if len(parts) >= 2:
lv_name = parts[0]
lv_size = int(parts[1].replace('B', ''))
if lv_size > largest_size:
largest_size = lv_size
largest_lv = lv_name
if not largest_lv:
self.thread_log("No logical volumes found")
return
largest_lv_size_gb = largest_size / (1024**3)
# Use larger snapshot sizes for big LVs
if largest_lv_size_gb >= 100: # 100GB+
if free_mb >= 10240: # 10GB available
snapshot_size = "10G"
elif free_mb >= 5120: # 5GB available
snapshot_size = "5G"
elif free_mb >= 2048: # 2GB available
snapshot_size = "2G"
elif free_mb >= 1024: # 1GB available
snapshot_size = "1G"
else:
self.thread_log(f"Not enough free space for large LV snapshot. Available: {free_mb:.0f}MB, need at least 1GB")
return
else:
# For smaller LVs, use the original logic
if free_mb >= 2048:
snapshot_size = "2G"
elif free_mb >= 1024:
snapshot_size = "1G"
elif free_mb >= 512:
snapshot_size = "512M"
elif free_mb >= 256:
snapshot_size = "256M"
else:
self.thread_log(f"Not enough free space for snapshot. Available: {free_mb:.0f}MB, need at least 256MB")
return
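# Sizing summary: when the largest LV is >= 100GB the snapshot gets 10G/5G/2G/1G
# depending on free space (at least 1GB required); otherwise it falls back to
# 2G/1G/512M/256M (at least 256MB required).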
self.thread_log(f"Creating {snapshot_size} snapshot (available: {free_mb:.0f}MB)")
# Backup ALL important LVs for complete disaster recovery
important_lvs = []
total_lv_size = 0
for line in lvs_result.stdout.strip().split('\n'):
if line.strip():
parts = line.strip().split()
if len(parts) >= 2:
lv_name = parts[0]
lv_size = int(parts[1].replace('B', ''))
# Include all LVs except snapshots and swap
if ('snap' not in lv_name.lower() and 'swap' not in lv_name.lower() and
lv_size > 100 * 1024 * 1024): # > 100MB
important_lvs.append((lv_name, lv_size))
total_lv_size += lv_size
if not important_lvs:
self.thread_log("No suitable logical volumes found for backup")
return
self.thread_log(f"Complete disaster recovery backup - {len(important_lvs)} LVs:")
for lv_name, lv_size in important_lvs:
self.thread_log(f" {lv_name}: {lv_size / (1024**3):.1f}GB")
self.thread_log(f"Total data: {total_lv_size / (1024**3):.1f}GB")
# Calculate snapshot requirements
snapshots_needed = []
total_snapshot_mb = 0
# Check if we have enough space for minimum viable backup
min_required_mb = len(important_lvs) * 256 # Absolute minimum 256MB per LV
available_mb = free_mb * 0.95
if available_mb < min_required_mb:
self.thread_log(f"❌ Insufficient space for disaster recovery backup!")
self.thread_log(f" Available: {available_mb:.0f}MB")
self.thread_log(f" Minimum required: {min_required_mb}MB")
self.thread_log(f"💡 Suggestions:")
self.thread_log(f" • Free up at least {min_required_mb - available_mb:.0f}MB in volume group")
self.thread_log(f" • Use external storage for backup")
self.thread_log(f" • Consider file-level backup instead of block-level")
return
for lv_name, lv_size in important_lvs:
# Use enhanced snapshot sizing with space-aware allocation
lv_size_gb = lv_size / (1024**3)
# Calculate proportional allocation of available space
remaining_lvs = len(important_lvs) - len(snapshots_needed)
if remaining_lvs > 0:
remaining_space = available_mb - total_snapshot_mb
proportional_share = remaining_space / remaining_lvs
# Apply intelligent sizing with space constraints
if lv_size_gb > 300: # Large partitions - need adequate space but respect limits
min_viable = 1024 # 1GB minimum for large partitions
max_reasonable = min(8192, remaining_space * 0.7) # Max 8GB or 70% of remaining space
if remaining_space >= min_viable and proportional_share >= min_viable:
snap_mb = int(min(proportional_share, max_reasonable))
elif remaining_space >= min_viable:
snap_mb = min_viable
else:
# Critical space shortage - need to trigger home shrinking later
snap_mb = max(256, int(remaining_space * 0.9))
elif lv_size_gb > 50:
min_viable = 512 # 512MB minimum for medium partitions
max_reasonable = min(2048, remaining_space * 0.5)
snap_mb = max(min_viable, int(min(proportional_share, max_reasonable)))
else:
min_viable = 256 # 256MB minimum for small partitions
max_reasonable = min(1024, remaining_space * 0.3)
snap_mb = max(min_viable, int(min(proportional_share, max_reasonable)))
# Convert to size string
if snap_mb >= 1024:
snap_size = f"{snap_mb // 1024}G"
else:
snap_size = f"{snap_mb}M"
else:
# Fallback
snap_size = "256M"
snap_mb = 256
snapshots_needed.append((lv_name, lv_size, snap_size, snap_mb))
total_snapshot_mb += snap_mb
# Check if we need more space and can temporarily shrink home LV
shrunk_home = False
original_home_size_gb = None
# Final space check and warnings
self.thread_log(f"📊 Total snapshots needed: {total_snapshot_mb}MB ({total_snapshot_mb/1024:.1f}GB)")
self.thread_log(f"📊 Available space: {free_mb}MB ({free_mb/1024:.1f}GB)")
if total_snapshot_mb > free_mb:
space_shortage_mb = total_snapshot_mb - free_mb
self.thread_log(f"⚠️ Need {space_shortage_mb}MB ({space_shortage_mb/1024:.1f}GB) more space for snapshots")
# Check if space is critically low
if free_mb < 5120: # Less than 5GB available - raised threshold
self.thread_log(f"🚨 CRITICAL: Low free space ({free_mb}MB)!")
self.thread_log(f" Large home partition (404GB) needs substantial snapshot space")
# For large home partitions, aggressive shrinking is needed
minimum_needed_gb = max(15, total_snapshot_mb // 1024 + 8) # At least 15GB or calculated need + 8GB buffer
self.thread_log(f" Minimum {minimum_needed_gb}GB needed for viable snapshots of large partitions")
# Check if we can shrink home LV temporarily (more aggressive for large home)
home_found = False
for lv_name, lv_size in important_lvs:
if lv_name == 'home':
home_found = True
home_size_gb = lv_size // (1024**3)
self.thread_log(f"🏠 Home LV found: {home_size_gb}GB")
if lv_size > 30 * (1024**3): # home > 30GB
# For very large home partitions (>400GB), be very aggressive
if home_size_gb > 400:
base_shrink = 40 # Start with 40GB shrink for huge home
max_shrink = 80 # Up to 80GB max
elif home_size_gb > 200:
base_shrink = 25 # 25GB for large home
max_shrink = 50
else:
base_shrink = 15 # 15GB for medium home
max_shrink = 30
if free_mb < 5120: # Very low space - aggressive shrinking
shrink_gb = min(max_shrink, max(base_shrink, (space_shortage_mb + 8192) // 1024)) # Add 8GB buffer
else:
shrink_gb = min(max_shrink // 2, (space_shortage_mb + 2048) // 1024) # Normal: Add 2GB buffer
self.thread_log(f"🔧 Attempting to temporarily shrink {home_size_gb}GB home LV by {shrink_gb}GB...")
try:
original_home_size_gb = home_size_gb
new_size_gb = original_home_size_gb - shrink_gb
if new_size_gb < 50: # Safety check - don't go below 50GB
self.thread_log(f"⚠️ Cannot shrink home below 50GB (would be {new_size_gb}GB)")
continue
self.thread_log(f"📝 Home LV: {original_home_size_gb}GB → {new_size_gb}GB (temporary for backup)")
# Shrink the LV (reduce logical volume size, not filesystem)
cmd = ["lvreduce", "-L", f"{new_size_gb}G", f"/dev/{vg_name}/home", "--yes"]
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
shrunk_home = True
self.thread_log(f"✅ Home LV temporarily shrunk from {original_home_size_gb}GB to {new_size_gb}GB")
self.thread_log(f"🔓 This freed up {shrink_gb}GB for larger snapshots")
# Recalculate free space and snapshots
vgs_result = subprocess.run(['vgs', '--noheadings', '-o', 'vg_free', '--units', 'b', vg_name],
capture_output=True, text=True, check=True)
new_free_mb = int(vgs_result.stdout.strip().replace('B', '')) // (1024*1024)
self.thread_log(f"📊 New available space: {new_free_mb}MB ({new_free_mb/1024:.1f}GB)")
# Update the free space for the rest of the calculation
free_mb = new_free_mb
# Recalculate snapshot sizes with much more generous allocation
self.thread_log(f"🔄 Recalculating snapshot allocation with {shrink_gb}GB additional space...")
snapshots_needed = []
total_snapshot_mb = 0
for lv_name2, lv_size2 in important_lvs:
lv_size_gb2 = lv_size2 // (1024**3)
# Much more generous allocation with freed space
if lv_name2 == 'home':
# For large home, allocate much more space
snap_mb = min(new_free_mb * 0.75, 20480) # Up to 75% or 20GB max for home
self.thread_log(f"🏠 Home gets generous allocation: {snap_mb}MB ({snap_mb/1024:.1f}GB)")
elif lv_name2 == 'root':
snap_mb = min(new_free_mb * 0.15, 4096) # Up to 15% or 4GB for root
else:
snap_mb = min(new_free_mb * 0.1, 2048) # Up to 10% or 2GB for others
snap_size = f"{int(snap_mb)}M" # lvcreate -L expects a size string (e.g. "2048M"), not a byte count
self.thread_log(f"📝 {lv_name2}: allocated {snap_mb}MB ({snap_mb/1024:.1f}GB) snapshot")
snapshots_needed.append((lv_name2, lv_size2, snap_size, snap_mb))
total_snapshot_mb += snap_mb
break
except subprocess.CalledProcessError as e:
self.thread_log(f"⚠️ Could not shrink home LV: {e.stderr.decode() if e.stderr else str(e)}")
self.thread_log("Proceeding with available space...")
else:
self.thread_log(f"⚠️ Cannot shrink home LV (size: {home_size_gb}GB, minimum 30GB required)")
break
if not home_found:
self.thread_log(f"⚠️ No home LV found for shrinking")
# Special handling for large home partitions - always check if more space needed
has_large_home = False
for lv_name, lv_size in important_lvs:
if lv_name == 'home' and lv_size > 400 * (1024**3): # 400GB+ home
has_large_home = True
self.thread_log(f"🏠 Large home partition detected: {lv_size // (1024**3)}GB")
break
# For large home partitions, we need more space even if basic calculation fits
space_adequate = True
if has_large_home and free_mb < 15360: # Less than 15GB for large home
space_adequate = False
self.thread_log(f"🚨 Large home partition needs more snapshot space (have {free_mb}MB, recommend 15GB+)")
# Final space validation (enhanced for large partitions)
if total_snapshot_mb > free_mb or not space_adequate:
if total_snapshot_mb <= free_mb:
# We have basic space but not adequate for large partitions
space_shortage_mb = 15360 - free_mb # Need 15GB for large home
self.thread_log(f"⚠️ Basic space available but need {space_shortage_mb}MB ({space_shortage_mb/1024:.1f}GB) more for large partition reliability")
else:
# We don't even have basic space
space_shortage_mb = total_snapshot_mb - free_mb
self.thread_log(f"⚠️ Need {space_shortage_mb}MB ({space_shortage_mb/1024:.1f}GB) more space for snapshots")
# Try to shrink home LV to create adequate space
shrinking_attempted = False
self.thread_log(f"🔍 DEBUG: Starting shrinking attempt loop")
for lv_name, lv_size in important_lvs:
self.thread_log(f"🔍 DEBUG: Checking LV {lv_name}, size {lv_size // (1024**3)}GB")
if lv_name == 'home' and lv_size > 30 * (1024**3): # home > 30GB
shrinking_attempted = True
home_size_gb = lv_size // (1024**3)
self.thread_log(f"🏠 Home LV found: {home_size_gb}GB")
# For very large home partitions (>400GB), be very aggressive
if home_size_gb > 400:
base_shrink = 40 # Start with 40GB shrink for huge home
max_shrink = 80 # Up to 80GB max
elif home_size_gb > 200:
base_shrink = 25 # 25GB for large home
max_shrink = 50
else:
base_shrink = 15 # 15GB for medium home
max_shrink = 30
# Calculate shrink amount based on need
if not space_adequate: # Large partition needs more space
shrink_gb = min(max_shrink, max(base_shrink, (space_shortage_mb + 5120) // 1024)) # Add 5GB buffer
else:
shrink_gb = min(max_shrink // 2, (space_shortage_mb + 2048) // 1024) # Normal case
self.thread_log(f"🔧 Attempting to temporarily shrink {home_size_gb}GB home LV by {shrink_gb}GB...")
try:
original_home_size_gb = home_size_gb
new_size_gb = original_home_size_gb - shrink_gb
if new_size_gb < 50: # Safety check - don't go below 50GB
self.thread_log(f"⚠️ Cannot shrink home below 50GB (would be {new_size_gb}GB)")
break
self.thread_log(f"📝 Home LV: {original_home_size_gb}GB → {new_size_gb}GB (temporary for backup)")
# Shrink the LV (reduce logical volume size, not filesystem)
cmd = ["lvreduce", "-L", f"{new_size_gb}G", f"/dev/{vg_name}/home", "--yes"]
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
shrunk_home = True
self.thread_log(f"✅ Home LV temporarily shrunk from {original_home_size_gb}GB to {new_size_gb}GB")
self.thread_log(f"🔓 This freed up {shrink_gb}GB for larger snapshots")
# Recalculate free space and snapshots
vgs_result = subprocess.run(['vgs', '--noheadings', '-o', 'vg_free', '--units', 'b', vg_name],
capture_output=True, text=True, check=True)
new_free_mb = int(vgs_result.stdout.strip().replace('B', '')) // (1024*1024)
self.thread_log(f"📊 New available space: {new_free_mb}MB ({new_free_mb/1024:.1f}GB)")
# Update the free space for the rest of the calculation
free_mb = new_free_mb
# Recalculate snapshot sizes with much more generous allocation
self.thread_log(f"🔄 Recalculating snapshot allocation with {shrink_gb}GB additional space...")
snapshots_needed = []
total_snapshot_mb = 0
for lv_name2, lv_size2 in important_lvs:
lv_size_gb2 = lv_size2 // (1024**3)
# Much more generous allocation with freed space
if lv_name2 == 'home':
# For large home, allocate much more space
snap_mb = min(new_free_mb * 0.75, 20480) # Up to 75% or 20GB max for home
self.thread_log(f"🏠 Home gets generous allocation: {snap_mb}MB ({snap_mb/1024:.1f}GB)")
elif lv_name2 == 'root':
snap_mb = min(new_free_mb * 0.15, 4096) # Up to 15% or 4GB for root
else:
snap_mb = min(new_free_mb * 0.1, 2048) # Up to 10% or 2GB for others
snap_size = f"{int(snap_mb)}M" # lvcreate -L expects a size string (e.g. "2048M"), not a byte count
self.thread_log(f"📝 {lv_name2}: allocated {snap_mb}MB ({snap_mb/1024:.1f}GB) snapshot")
snapshots_needed.append((lv_name2, lv_size2, snap_size, snap_mb))
total_snapshot_mb += snap_mb
break
except subprocess.CalledProcessError as e:
self.thread_log(f"⚠️ Could not shrink home LV: {e.stderr.decode() if e.stderr else str(e)}")
self.thread_log("Proceeding with available space...")
shrunk_home = False
break # Only break after processing home LV
# If shrinking wasn't attempted or failed, show final error
if not shrinking_attempted or not shrunk_home:
shortage = total_snapshot_mb - free_mb if total_snapshot_mb > free_mb else space_shortage_mb
self.thread_log(f"❌ Could not create adequate snapshot space.")
self.thread_log(f"📊 Need: {total_snapshot_mb}MB ({total_snapshot_mb/1024:.1f}GB)")
self.thread_log(f"📊 Have: {free_mb}MB ({free_mb/1024:.1f}GB)")
if shortage > 0:
self.thread_log(f"📊 Short: {shortage}MB ({shortage/1024:.1f}GB)")
self.thread_log(f"💡 Consider freeing up more space in volume group or reducing system activity")
return
self.thread_log(f"✅ Space validation passed: {total_snapshot_mb}MB needed, {free_mb}MB available")
# Show space allocation summary
self.thread_log(f"📊 Snapshot space allocation: {total_snapshot_mb}MB of {free_mb}MB available ({total_snapshot_mb/free_mb*100:.1f}%)")
# Create snapshots for all LVs
created_snapshots = []
backup_sources = []
for lv_name, lv_size, snap_size, snap_mb in snapshots_needed:
snap_name = f"borg-backup-{lv_name}"
snap_path = f"/dev/{vg_name}/{snap_name}"
try:
cmd = ["lvcreate", "-L", snap_size, "-s", "-n", snap_name, f"/dev/{vg_name}/{lv_name}"]
subprocess.run(cmd, check=True, capture_output=True)
created_snapshots.append(snap_name)
backup_sources.append(snap_path)
self.thread_log(f"Created {snap_size} snapshot: {snap_name} -> {snap_path}")
except subprocess.CalledProcessError as e:
self.thread_log(f"Failed to create snapshot for {lv_name}: {e.stderr.decode() if e.stderr else str(e)}")
# Clean up any snapshots created so far
for cleanup_snap in created_snapshots:
try:
subprocess.run(["lvremove", "-f", f"/dev/{vg_name}/{cleanup_snap}"], capture_output=True)
except:
pass
return
self.thread_log(f"All snapshots created successfully! Ready for disaster recovery backup.")
except subprocess.CalledProcessError as e:
self.thread_log(f"Failed to create snapshot: {e.stderr.decode() if e.stderr else str(e)}")
return
# Get total size for verification
total_backup_size = 0
for source in backup_sources:
try:
lv_result = subprocess.run(['lvs', '--noheadings', '-o', 'lv_size', '--units', 'b', source],
capture_output=True, text=True, check=True)
lv_size_bytes = int(lv_result.stdout.strip().replace('B', ''))
total_backup_size += lv_size_bytes
except subprocess.CalledProcessError:
pass
total_backup_gb = total_backup_size / (1024**3)
self.thread_log(f"Disaster recovery backup: {len(backup_sources)} LVs, ~{total_backup_gb:.1f}GB total")
# For resume capability, use date-based archive name but check for interrupted archives
today = datetime.datetime.now().strftime('%Y%m%d')
base_archive_name = f"lvm-backup-{vg_name}-{today}"
# Check for interrupted archive from today that can be resumed
resume_archive = None
try:
list_result = subprocess.run(['borg', 'list', '--short', repo_path], env=env, capture_output=True, text=True, check=True)
archives = list_result.stdout.strip().split('\n')
# Look for today's archive
for archive in archives:
if archive.startswith(base_archive_name):
# Check if archive is complete or interrupted
info_result = subprocess.run(['borg', 'info', f'{repo_path}::{archive}'],
env=env, capture_output=True, text=True)
if 'This archive:' in info_result.stdout:
resume_archive = archive
self.thread_log(f"Found today's archive: {archive} - will create new version")
break
except subprocess.CalledProcessError:
pass
# Create unique archive name
if resume_archive:
# Create next version
version = 1
while f"{base_archive_name}-v{version}" in archives:
version += 1
archive_name = f"{base_archive_name}-v{version}"
else:
archive_name = base_archive_name
env = os.environ.copy()
if passphrase:
env['BORG_PASSPHRASE'] = passphrase
# Optionally initialize repo
if create_new and not os.path.exists(os.path.join(repo_path, 'config')):
self.thread_log("Initializing new Borg repository...")
init_cmd = ["borg", "init", "--encryption=repokey", repo_path]
try:
subprocess.run(init_cmd, env=env, check=True, capture_output=True)
self.thread_log("Repository initialized.")
except subprocess.CalledProcessError as e:
self.thread_log(f"Failed to initialize repo: {e.stderr.decode()}")
return
# Check if archive already exists (for resume)
try:
list_result = subprocess.run(['borg', 'list', repo_path], env=env, capture_output=True, text=True)
if archive_name in list_result.stdout:
self.thread_log(f"Archive {archive_name} exists - this will resume or create new version")
else:
self.thread_log(f"Creating new archive: {archive_name}")
except subprocess.CalledProcessError:
self.thread_log("Could not check existing archives - proceeding with backup")
# Run borg create with all snapshot sources for complete disaster recovery
borg_cmd = [
"borg", "create", f"{repo_path}::{archive_name}"
] + backup_sources + [
"--progress", "--stats", "--read-special",
"--checkpoint-interval", "300", # Save checkpoint every 5 minutes
"--chunker-params", "19,23,21,4095" # Optimized for large files/devices
]
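# Illustrative resulting command line (repo path, VG name and date vary):
#   borg create REPO::lvm-backup-VG-YYYYMMDD /dev/VG/borg-backup-root /dev/VG/borg-backup-home ... \
#       --progress --stats --read-special --checkpoint-interval 300 --chunker-params 19,23,21,4095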
self.thread_log(f"Running: {' '.join(borg_cmd)}")
io_error_detected = False
try:
proc = subprocess.Popen(borg_cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
for line in proc.stdout:
self.thread_log(line.strip())
# Detect I/O errors that might indicate snapshot is too small
if "Input/output error" in line or "read: [Errno 5]" in line:
io_error_detected = True
self.thread_log("🚨 I/O error detected - snapshot may be too small for active filesystem")
proc.wait()
if proc.returncode == 0:
self.thread_log("✅ Disaster recovery backup completed successfully!")
# Fix ownership for Nextcloud access
self.thread_log("Fixing repository ownership for Nextcloud access...")
try:
# Get the original user (not root)
original_user = os.environ.get('SUDO_USER', 'rwiegand')
# Change ownership of entire repository to original user
subprocess.run(["chown", "-R", f"{original_user}:{original_user}", repo_path],
capture_output=True, check=True)
self.thread_log(f"Repository ownership changed to {original_user}")
except subprocess.CalledProcessError as e:
self.thread_log(f"Warning: Could not fix ownership: {e}")
# Only remove snapshots on successful completion
try:
self.thread_log("Removing all snapshots after successful backup...")
for snap_name in created_snapshots:
try:
subprocess.run(["lvremove", "-f", f"/dev/{vg_name}/{snap_name}"], capture_output=True, check=True)
self.thread_log(f"Removed snapshot: {snap_name}")
except subprocess.CalledProcessError as e:
self.thread_log(f"Warning: Could not remove snapshot {snap_name}: {e}")
# Restore home LV to original size if it was shrunk
if shrunk_home and original_home_size_gb:
self.thread_log(f"🔧 Restoring home LV to original size ({original_home_size_gb}GB)...")
try:
cmd = ["lvextend", "-L", f"{original_home_size_gb}G", f"/dev/{vg_name}/home"]
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
self.thread_log(f"✅ Home LV restored to {original_home_size_gb}GB")
except subprocess.CalledProcessError as e:
self.thread_log(f"⚠️ Could not restore home LV size: {e.stderr.decode() if e.stderr else str(e)}")
self.thread_log(f"You may need to manually restore with: lvextend -L {original_home_size_gb}G /dev/{vg_name}/home")
except Exception as e:
self.thread_log(f"Warning during snapshot cleanup: {e}")
else:
if io_error_detected:
self.thread_log("❌ Backup failed due to I/O errors - likely snapshots too small")
self.thread_log("💡 Recommendation: Increase available space in volume group or reduce system activity during backup")
self.thread_log("📋 Current snapshots will be kept for manual cleanup or retry")
else:
self.thread_log("❌ Disaster recovery backup failed - keeping snapshots for resume")
except Exception as e:
self.thread_log(f"Exception: {e}")
self.thread_log("Keeping snapshot for potential resume")
# Note: Snapshot is intentionally NOT removed on failure to allow resume
def main():
"""Main entry point"""
# Check if running as root
if os.geteuid() != 0:
# No Tk root exists yet, so create a hidden one for the error dialog
hidden_root = tk.Tk()
hidden_root.withdraw()
messagebox.showerror("Permission Error",
"This application requires root privileges.\n\n" +
"Please run with: sudo python3 lvm_backup_gui.py")
return
# LVMBackupGUI subclasses tk.Tk, so it is its own root window
app = LVMBackupGUI()
app.mainloop()
if __name__ == "__main__":
main()