aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/src-tauri
diff options
context:
space:
mode:
authorHsiangNianian <i@jyunko.cn>2026-01-18 13:43:12 +0800
committerHsiangNianian <i@jyunko.cn>2026-01-18 13:43:12 +0800
commit17e8dd78ca5b7aae9baa4f86d38fa755c8af21c5 (patch)
tree1fc3c9adb05264b12ed0fa473b997bfca14716e6 /src-tauri
parent02520ca62ac5e508e8748b2445171be64f459b6c (diff)
downloadDropOut-17e8dd78ca5b7aae9baa4f86d38fa755c8af21c5.tar.gz
DropOut-17e8dd78ca5b7aae9baa4f86d38fa755c8af21c5.zip
feat(migration): implement shared cache migration with SHA1 dedup
- Add migrate_to_shared_caches() with hard link preference
- SHA1-based deduplication across all instances
- Copy fallback for cross-filesystem scenarios
- Auto-enable use_shared_caches after successful migration
- UI shows statistics: moved files, hardlinks/copies, MB saved
Diffstat (limited to 'src-tauri')
-rw-r--r--src-tauri/src/core/instance.rs224
-rw-r--r--src-tauri/src/main.rs52
2 files changed, 275 insertions, 1 deletions
diff --git a/src-tauri/src/core/instance.rs b/src-tauri/src/core/instance.rs
index 738dbd8..183e1cc 100644
--- a/src-tauri/src/core/instance.rs
+++ b/src-tauri/src/core/instance.rs
@@ -6,6 +6,7 @@
//! - Support for instance switching and isolation
use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::Mutex;
@@ -344,3 +345,226 @@ pub fn migrate_legacy_data(
Ok(())
}
+
+/// Migrate instance caches to shared global caches
+///
+/// This function deduplicates versions, libraries, and assets from all instances
+/// into a global shared cache. It prefers hard links (instant, zero-copy) and
+/// falls back to copying if hard links are not supported.
+///
+/// # Arguments
+/// * `app_handle` - Tauri app handle
+/// * `instance_state` - Instance state management
+///
+/// # Returns
+/// * `Ok((moved_count, hardlink_count, copy_count, saved_bytes))` on success
+/// * `Err(String)` on failure
+pub fn migrate_to_shared_caches(
+ app_handle: &AppHandle,
+ instance_state: &InstanceState,
+) -> Result<(usize, usize, usize, u64), String> {
+ let app_dir = app_handle.path().app_data_dir().unwrap();
+
+ // Global shared cache directories
+ let global_versions = app_dir.join("versions");
+ let global_libraries = app_dir.join("libraries");
+ let global_assets = app_dir.join("assets");
+
+ // Create global cache directories
+ std::fs::create_dir_all(&global_versions).map_err(|e| e.to_string())?;
+ std::fs::create_dir_all(&global_libraries).map_err(|e| e.to_string())?;
+ std::fs::create_dir_all(&global_assets).map_err(|e| e.to_string())?;
+
+ let mut total_moved = 0;
+ let mut hardlink_count = 0;
+ let mut copy_count = 0;
+ let mut saved_bytes = 0u64;
+
+ // Get all instances
+ let instances = instance_state.list_instances();
+
+ for instance in instances {
+ let instance_versions = instance.game_dir.join("versions");
+ let instance_libraries = instance.game_dir.join("libraries");
+ let instance_assets = instance.game_dir.join("assets");
+
+ // Migrate versions
+ if instance_versions.exists() {
+ let (moved, hardlinks, copies, bytes) =
+ deduplicate_directory(&instance_versions, &global_versions)?;
+ total_moved += moved;
+ hardlink_count += hardlinks;
+ copy_count += copies;
+ saved_bytes += bytes;
+ }
+
+ // Migrate libraries
+ if instance_libraries.exists() {
+ let (moved, hardlinks, copies, bytes) =
+ deduplicate_directory(&instance_libraries, &global_libraries)?;
+ total_moved += moved;
+ hardlink_count += hardlinks;
+ copy_count += copies;
+ saved_bytes += bytes;
+ }
+
+ // Migrate assets
+ if instance_assets.exists() {
+ let (moved, hardlinks, copies, bytes) =
+ deduplicate_directory(&instance_assets, &global_assets)?;
+ total_moved += moved;
+ hardlink_count += hardlinks;
+ copy_count += copies;
+ saved_bytes += bytes;
+ }
+ }
+
+ Ok((total_moved, hardlink_count, copy_count, saved_bytes))
+}
+
+/// Deduplicate a directory tree into a global cache
+///
+/// Recursively processes all files, checking SHA1 hashes for deduplication.
+/// Returns (total_moved, hardlink_count, copy_count, saved_bytes)
+fn deduplicate_directory(
+ source_dir: &Path,
+ dest_dir: &Path,
+) -> Result<(usize, usize, usize, u64), String> {
+ let mut moved = 0;
+ let mut hardlinks = 0;
+ let mut copies = 0;
+ let mut saved_bytes = 0u64;
+
+ // Build a hash map of existing files in dest (hash -> path)
+ let mut dest_hashes: HashMap<String, PathBuf> = HashMap::new();
+ if dest_dir.exists() {
+ index_directory_hashes(dest_dir, dest_dir, &mut dest_hashes)?;
+ }
+
+ // Process source directory
+ process_directory_for_migration(
+ source_dir,
+ source_dir,
+ dest_dir,
+ &dest_hashes,
+ &mut moved,
+ &mut hardlinks,
+ &mut copies,
+ &mut saved_bytes,
+ )?;
+
+ Ok((moved, hardlinks, copies, saved_bytes))
+}
+
+/// Index all files in a directory by their SHA1 hash
+fn index_directory_hashes(
+ dir: &Path,
+ base: &Path,
+ hashes: &mut HashMap<String, PathBuf>,
+) -> Result<(), String> {
+ if !dir.is_dir() {
+ return Ok(());
+ }
+
+ for entry in std::fs::read_dir(dir).map_err(|e| e.to_string())? {
+ let entry = entry.map_err(|e| e.to_string())?;
+ let path = entry.path();
+
+ if path.is_dir() {
+ index_directory_hashes(&path, base, hashes)?;
+ } else if path.is_file() {
+ let hash = compute_file_sha1(&path)?;
+ hashes.insert(hash, path);
+ }
+ }
+
+ Ok(())
+}
+
+/// Process directory for migration (recursive)
+fn process_directory_for_migration(
+ current: &Path,
+ source_base: &Path,
+ dest_base: &Path,
+ dest_hashes: &HashMap<String, PathBuf>,
+ moved: &mut usize,
+ hardlinks: &mut usize,
+ copies: &mut usize,
+ saved_bytes: &mut u64,
+) -> Result<(), String> {
+ if !current.is_dir() {
+ return Ok(());
+ }
+
+ for entry in std::fs::read_dir(current).map_err(|e| e.to_string())? {
+ let entry = entry.map_err(|e| e.to_string())?;
+ let source_path = entry.path();
+
+ // Compute relative path
+ let rel_path = source_path
+ .strip_prefix(source_base)
+ .map_err(|e| e.to_string())?;
+ let dest_path = dest_base.join(rel_path);
+
+ if source_path.is_dir() {
+ // Recurse into subdirectory
+ process_directory_for_migration(
+ &source_path,
+ source_base,
+ dest_base,
+ dest_hashes,
+ moved,
+ hardlinks,
+ copies,
+ saved_bytes,
+ )?;
+ } else if source_path.is_file() {
+ let file_size = std::fs::metadata(&source_path)
+ .map(|m| m.len())
+ .unwrap_or(0);
+
+ // Compute file hash
+ let source_hash = compute_file_sha1(&source_path)?;
+
+ // Check if file already exists in dest with same hash
+ if let Some(_existing) = dest_hashes.get(&source_hash) {
+ // File exists, delete source (already deduplicated)
+ std::fs::remove_file(&source_path).map_err(|e| e.to_string())?;
+ *saved_bytes += file_size;
+ *moved += 1;
+ } else {
+ // File doesn't exist, move it
+ // Create parent directory in dest
+ if let Some(parent) = dest_path.parent() {
+ std::fs::create_dir_all(parent).map_err(|e| e.to_string())?;
+ }
+
+ // Try hard link first
+ if std::fs::hard_link(&source_path, &dest_path).is_ok() {
+ // Hard link succeeded, remove source
+ std::fs::remove_file(&source_path).map_err(|e| e.to_string())?;
+ *hardlinks += 1;
+ *moved += 1;
+ } else {
+ // Hard link failed (different filesystem?), copy instead
+ std::fs::copy(&source_path, &dest_path).map_err(|e| e.to_string())?;
+ std::fs::remove_file(&source_path).map_err(|e| e.to_string())?;
+ *copies += 1;
+ *moved += 1;
+ }
+ }
+ }
+ }
+
+ Ok(())
+}
+
+/// Compute SHA1 hash of a file
+fn compute_file_sha1(path: &Path) -> Result<String, String> {
+ use sha1::{Digest, Sha1};
+
+ let data = std::fs::read(path).map_err(|e| e.to_string())?;
+ let mut hasher = Sha1::new();
+ hasher.update(&data);
+ Ok(hex::encode(hasher.finalize()))
+}
diff --git a/src-tauri/src/main.rs b/src-tauri/src/main.rs
index 6a230c9..a506713 100644
--- a/src-tauri/src/main.rs
+++ b/src-tauri/src/main.rs
@@ -2373,6 +2373,55 @@ async fn assistant_chat_stream(
.await
}
/// Statistics returned by the shared-cache migration command.
#[derive(Serialize)]
struct MigrationResult {
    /// Total files removed from instance directories (deduplicated or moved).
    moved_files: usize,
    /// Files migrated into the global cache via hard link (zero-copy).
    hardlinks: usize,
    /// Files migrated via the copy fallback (e.g. cross-filesystem).
    copies: usize,
    /// Bytes reclaimed through deduplication.
    saved_bytes: u64,
    /// `saved_bytes` expressed in MB, precomputed for UI display.
    saved_mb: f64,
}
+
+#[tauri::command]
+async fn migrate_shared_caches(
+ window: Window,
+ instance_state: State<'_, core::instance::InstanceState>,
+ config_state: State<'_, core::config::ConfigState>,
+) -> Result<MigrationResult, String> {
+ emit_log!(window, "Starting migration to shared caches...".to_string());
+
+ let app_handle = window.app_handle();
+ let (moved, hardlinks, copies, saved_bytes) =
+ core::instance::migrate_to_shared_caches(app_handle, &instance_state)?;
+
+ let saved_mb = saved_bytes as f64 / (1024.0 * 1024.0);
+
+ emit_log!(
+ window,
+ format!(
+ "Migration complete: {} files moved ({} hardlinks, {} copies), {:.2} MB saved",
+ moved, hardlinks, copies, saved_mb
+ )
+ );
+
+ // Automatically enable shared caches config
+ let mut config = config_state.config.lock().unwrap().clone();
+ config.use_shared_caches = true;
+ drop(config);
+ *config_state.config.lock().unwrap() = config_state.config.lock().unwrap().clone();
+ config_state.config.lock().unwrap().use_shared_caches = true;
+ config_state.save()?;
+
+ Ok(MigrationResult {
+ moved_files: moved,
+ hardlinks,
+ copies,
+ saved_bytes,
+ saved_mb,
+ })
+}
+
fn main() {
tauri::Builder::default()
.plugin(tauri_plugin_fs::init())
@@ -2479,7 +2528,8 @@ fn main() {
get_instance,
set_active_instance,
get_active_instance,
- duplicate_instance
+ duplicate_instance,
+ migrate_shared_caches
])
.run(tauri::generate_context!())
.expect("error while running tauri application");