diff --git a/src/NexusMods.DataModel/ArchiveContents/ArchivedFile.cs b/src/NexusMods.DataModel/ArchiveContents/ArchivedFile.cs
new file mode 100644
index 0000000000..0457f64b82
--- /dev/null
+++ b/src/NexusMods.DataModel/ArchiveContents/ArchivedFile.cs
@@ -0,0 +1,80 @@
+using NexusMods.Archives.Nx.Headers.Managed;
+using NexusMods.DataModel.Attributes;
+using NexusMods.Hashing.xxHash64;
+using NexusMods.MnemonicDB.Abstractions;
+using NexusMods.MnemonicDB.Abstractions.Attributes;
+using NexusMods.MnemonicDB.Abstractions.Models;
+// ReSharper disable MemberHidesStaticFromOuterClass
+
+namespace NexusMods.DataModel.ArchiveContents;
+
+///
+/// A metadata entry for an archived file entry. These are the items stored inside the .nx archives. Each
+/// entry contains the hash, the decompressed size, and a reference to the container. In the case of Nx containers
+/// it also contains the Nx internal header data for the entry so that we can do point lookups of files.
+///
+public static class ArchivedFile
+{
+ private const string Namespace = "NexusMods.DataModel.ArchivedFile";
+
+ ///
+ /// The compressed container (.nx archive) that contains the file, the entity referenced
+ /// here should have the relative path to the file.
+ ///
+ public static readonly ReferenceAttribute Container = new(Namespace, nameof(Container));
+
+ ///
+ /// The hash of the file entry
+ ///
+ public static readonly HashAttribute Hash = new(Namespace, nameof(Hash)) {IsIndexed = true};
+
+ ///
+ /// The file entry data for the NX block offset data
+ ///
+ public static readonly NxFileEntryAttribute NxFileEntry = new(Namespace, nameof(NxFileEntry));
+
+
+ ///
+ /// Model for the archived file entry.
+ ///
+ public class Model(ITransaction tx) : AEntity(tx)
+ {
+
+ ///
+ /// Id of the containing archive.
+ ///
+ public EntityId ContainerId
+ {
+ get => ArchivedFile.Container.Get(this);
+ set => ArchivedFile.Container.Add(this, value);
+ }
+
+ ///
+ /// The container that contains this file.
+ ///
+ public ArchivedFileContainer.Model Container
+ {
+ get => Db.Get(ContainerId);
+ set => ContainerId = value.Id;
+ }
+
+ ///
+ /// Hash of the file entry
+ ///
+ public Hash Hash
+ {
+ get => ArchivedFile.Hash.Get(this);
+ set => ArchivedFile.Hash.Add(this, value);
+ }
+
+ ///
+ /// The Nx file entry data for the NX block offset data
+ ///
+ public FileEntry NxFileEntry
+ {
+ get => ArchivedFile.NxFileEntry.Get(this);
+ set => ArchivedFile.NxFileEntry.Add(this, value);
+ }
+ }
+
+}
diff --git a/src/NexusMods.DataModel/ArchiveContents/ArchivedFileContainer.cs b/src/NexusMods.DataModel/ArchiveContents/ArchivedFileContainer.cs
new file mode 100644
index 0000000000..9b672cad98
--- /dev/null
+++ b/src/NexusMods.DataModel/ArchiveContents/ArchivedFileContainer.cs
@@ -0,0 +1,43 @@
+using NexusMods.DataModel.Attributes;
+using NexusMods.MnemonicDB.Abstractions;
+using NexusMods.MnemonicDB.Abstractions.IndexSegments;
+using NexusMods.MnemonicDB.Abstractions.Models;
+using NexusMods.Paths;
+
+namespace NexusMods.DataModel.ArchiveContents;
+
+///
+/// Represents a container for archived files.
+///
+public static class ArchivedFileContainer
+{
+ private const string Namespace = "NexusMods.DataModel.ArchiveContents.ArchivedFileContainer";
+
+ ///
+ /// The name of the container on-disk. This will be relative to some archive root path.
+ ///
+ public static readonly RelativePathAttribute Path = new(Namespace, nameof(Path));
+
+ ///
+ /// Model for the archived file container.
+ ///
+ ///
+ public class Model(ITransaction tx) : AEntity(tx)
+ {
+ ///
+ /// The name of the container on-disk. This will be relative to some archive root path.
+ ///
+ public RelativePath Path
+ {
+ get => ArchivedFileContainer.Path.Get(this);
+ set => ArchivedFileContainer.Path.Add(this, value);
+ }
+
+ ///
+ /// The file entries contained in this container.
+ ///
+ public Entities Contents
+ => GetReverse(ArchivedFile.Container);
+ }
+
+}
diff --git a/src/NexusMods.DataModel/ArchiveContents/FileContainedInEx.cs b/src/NexusMods.DataModel/ArchiveContents/FileContainedInEx.cs
deleted file mode 100644
index 2992a5b527..0000000000
--- a/src/NexusMods.DataModel/ArchiveContents/FileContainedInEx.cs
+++ /dev/null
@@ -1,25 +0,0 @@
-using NexusMods.Abstractions.Serialization.Attributes;
-using NexusMods.Abstractions.Serialization.DataModel;
-using NexusMods.Paths;
-
-namespace NexusMods.DataModel.ArchiveContents;
-
-///
-/// Information about what archives a file is contained in, so this is a back-index of the entity.
-///
-[JsonName("NexusMods.DataModel.ArchiveContents.ArchivedFiles")]
-public record ArchivedFiles : Entity
-{
- ///
- public override EntityCategory Category => EntityCategory.ArchivedFiles;
-
- ///
- /// Name of the archive this file is contained in.
- ///
- public required RelativePath File { get; init; }
-
- ///
- /// The file entry data for the NX block offset
- ///
- public required byte[] FileEntryData { get; init; }
-}
diff --git a/src/NexusMods.DataModel/ArchiveContents/NxFileEntryAttribute.cs b/src/NexusMods.DataModel/ArchiveContents/NxFileEntryAttribute.cs
new file mode 100644
index 0000000000..5aa633f493
--- /dev/null
+++ b/src/NexusMods.DataModel/ArchiveContents/NxFileEntryAttribute.cs
@@ -0,0 +1,37 @@
+using NexusMods.Archives.Nx.Headers.Managed;
+using NexusMods.Archives.Nx.Headers.Native;
+using NexusMods.Archives.Nx.Utilities;
+using NexusMods.MnemonicDB.Abstractions.Attributes;
+using NexusMods.MnemonicDB.Abstractions.ElementComparers;
+
+namespace NexusMods.DataModel.ArchiveContents;
+
+///
+/// Stores an Nx archive file entry as a blob.
+///
+public class NxFileEntryAttribute(string ns, string name) : BlobAttribute(ns, name)
+{
+ ///
+ protected override unsafe FileEntry FromLowLevel(ReadOnlySpan value, ValueTags tag)
+ {
+ fixed (byte* ptr = value)
+ {
+ var reader = new LittleEndianReader(ptr);
+ FileEntry tmpEntry = default;
+ tmpEntry.FromReaderV1(ref reader);
+ return tmpEntry;
+ }
+ }
+
+ ///
+ protected override unsafe void WriteValue(FileEntry value, TWriter writer)
+ {
+ var buffer = writer.GetSpan(sizeof(FileEntry));
+ fixed (byte* ptr = buffer)
+ {
+ var interWriter = new LittleEndianWriter(ptr);
+ value.WriteAsV1(ref interWriter);
+ writer.Advance(sizeof(FileEntry));
+ }
+ }
+}
diff --git a/src/NexusMods.DataModel/Attributes/HashAttribute.cs b/src/NexusMods.DataModel/Attributes/HashAttribute.cs
new file mode 100644
index 0000000000..ff1c895d25
--- /dev/null
+++ b/src/NexusMods.DataModel/Attributes/HashAttribute.cs
@@ -0,0 +1,23 @@
+using NexusMods.Hashing.xxHash64;
+using NexusMods.MnemonicDB.Abstractions.Attributes;
+using NexusMods.MnemonicDB.Abstractions.ElementComparers;
+
+namespace NexusMods.DataModel.Attributes;
+
+///
+/// Stores a <see cref="Hash"/> as a <see cref="ulong"/>.
+///
+public class HashAttribute(string ns, string name) : ScalarAttribute(ValueTags.UInt64, ns, name)
+{
+ ///
+ protected override ulong ToLowLevel(Hash value)
+ {
+ return value.Value;
+ }
+
+ ///
+ protected override Hash FromLowLevel(ulong value, ValueTags tags)
+ {
+ return Hash.From(value);
+ }
+}
diff --git a/src/NexusMods.DataModel/Attributes/RelativePathAttribute.cs b/src/NexusMods.DataModel/Attributes/RelativePathAttribute.cs
new file mode 100644
index 0000000000..ab72afa43e
--- /dev/null
+++ b/src/NexusMods.DataModel/Attributes/RelativePathAttribute.cs
@@ -0,0 +1,23 @@
+using NexusMods.MnemonicDB.Abstractions.Attributes;
+using NexusMods.MnemonicDB.Abstractions.ElementComparers;
+using NexusMods.Paths;
+
+namespace NexusMods.DataModel.Attributes;
+
+///
+/// Represents a relative path.
+///
+public class RelativePathAttribute(string ns, string name) : ScalarAttribute(ValueTags.Utf8Insensitive, ns, name)
+{
+ ///
+ protected override string ToLowLevel(RelativePath value)
+ {
+ return value.Path;
+ }
+
+ ///
+ protected override RelativePath FromLowLevel(string value, ValueTags tags)
+ {
+ return RelativePath.FromUnsanitizedInput(value);
+ }
+}
diff --git a/src/NexusMods.DataModel/NxFileStore.cs b/src/NexusMods.DataModel/NxFileStore.cs
index c7295f0aa4..f504b87031 100644
--- a/src/NexusMods.DataModel/NxFileStore.cs
+++ b/src/NexusMods.DataModel/NxFileStore.cs
@@ -1,16 +1,11 @@
using System.Buffers;
-using System.Buffers.Binary;
using System.Runtime.CompilerServices;
using Microsoft.Extensions.Logging;
using NexusMods.Abstractions.IO;
-using NexusMods.Abstractions.Serialization;
-using NexusMods.Abstractions.Serialization.DataModel;
-using NexusMods.Abstractions.Serialization.DataModel.Ids;
using NexusMods.Abstractions.Settings;
using NexusMods.Archives.Nx.FileProviders;
using NexusMods.Archives.Nx.Headers;
using NexusMods.Archives.Nx.Headers.Managed;
-using NexusMods.Archives.Nx.Headers.Native;
using NexusMods.Archives.Nx.Interfaces;
using NexusMods.Archives.Nx.Packing;
using NexusMods.Archives.Nx.Structs;
@@ -18,9 +13,9 @@
using NexusMods.DataModel.ArchiveContents;
using NexusMods.DataModel.ChunkedStreams;
using NexusMods.Hashing.xxHash64;
+using NexusMods.MnemonicDB.Abstractions;
using NexusMods.Paths;
using NexusMods.Paths.Utilities;
-using Reloaded.Memory.Extensions;
namespace NexusMods.DataModel;
@@ -30,7 +25,7 @@ namespace NexusMods.DataModel;
public class NxFileStore : IFileStore
{
private readonly AbsolutePath[] _archiveLocations;
- private readonly IDataStore _store;
+ private readonly IConnection _conn;
private readonly ILogger _logger;
///
@@ -38,7 +33,7 @@ public class NxFileStore : IFileStore
///
public NxFileStore(
ILogger logger,
- IDataStore store,
+ IConnection conn,
ISettingsManager settingsManager,
IFileSystem fileSystem)
{
@@ -52,13 +47,14 @@ public NxFileStore(
}
_logger = logger;
- _store = store;
+ _conn = conn;
}
///
public ValueTask HaveFile(Hash hash)
{
- return ValueTask.FromResult(TryGetLocation(hash, out _, out _));
+ var db = _conn.Db;
+ return ValueTask.FromResult(TryGetLocation(db, hash, out _, out _));
}
///
@@ -96,54 +92,39 @@ public async Task BackupFiles(IEnumerable backups, Cancellati
await outputPath.MoveToAsync(finalPath, token: token);
await using var os = finalPath.Read();
var unpacker = new NxUnpacker(new FromStreamProvider(os));
- UpdateIndexes(unpacker, guid, finalPath);
+ await UpdateIndexes(unpacker, finalPath);
}
- private unsafe void UpdateIndexes(NxUnpacker unpacker, Guid guid,
- AbsolutePath finalPath)
+ private async Task UpdateIndexes(NxUnpacker unpacker, AbsolutePath finalPath)
{
+ using var tx = _conn.BeginTransaction();
+
+ var container = new ArchivedFileContainer.Model(tx)
+ {
+ Path = finalPath.Name,
+ };
+
var entries = unpacker.GetPathedFileEntries();
- var items = GC.AllocateUninitializedArray<(IId, ArchivedFiles)>(entries.Length);
- Span buffer = stackalloc byte[sizeof(NativeFileEntryV1)];
- for (var x = 0; x < entries.Length; x++)
+ foreach (var entry in entries)
{
- var entry = entries[x];
- fixed (byte* ptr = buffer)
+ _ = new ArchivedFile.Model(tx)
{
- var writer = new LittleEndianWriter(ptr);
- entry.Entry.WriteAsV1(ref writer);
-
- var hash = Hash.FromHex(entry.FileName);
- var dbId = IdFor(hash, guid);
- var dbEntry = new ArchivedFiles
- {
- File = finalPath.FileName,
- FileEntryData = buffer.ToArray()
- };
-
- // TODO: Consider a bulk-put operation here
- items[x] = (dbId, dbEntry);
- }
+ Hash = Hash.FromHex(entry.FileName),
+ NxFileEntry = entry.Entry,
+ Container = container,
+ };
}
- _store.PutAll(items.AsSpan());
- }
-
- [SkipLocalsInit]
- private IId IdFor(Hash hash, Guid guid)
- {
- Span buffer = stackalloc byte[24];
- BinaryPrimitives.WriteUInt64BigEndian(buffer, hash.Value);
- guid.TryWriteBytes(buffer.SliceFast(8));
- return IId.FromSpan(EntityCategory.ArchivedFiles, buffer);
+ await tx.Commit();
}
-
+
///
public async Task ExtractFiles(IEnumerable<(Hash Src, AbsolutePath Dest)> files, CancellationToken token = default)
{
+ var db = _conn.Db;
var grouped = files.Distinct()
- .Select(input => TryGetLocation(input.Src, out var archivePath, out var fileEntry)
+ .Select(input => TryGetLocation(db, input.Src, out var archivePath, out var fileEntry)
? (true, Hash: input.Src, ArchivePath: archivePath, FileEntry: fileEntry, input.Dest)
: default)
.Where(x => x.Item1)
@@ -186,10 +167,11 @@ public async Task ExtractFiles(IEnumerable<(Hash Src, AbsolutePath Dest)> files,
///
public Task> ExtractFiles(IEnumerable files, CancellationToken token = default)
{
+ var db = _conn.Db;
var results = new Dictionary();
var grouped = files.Distinct()
- .Select(hash => TryGetLocation(hash, out var archivePath, out var fileEntry)
+ .Select(hash => TryGetLocation(db, hash, out var archivePath, out var fileEntry)
? (true, Hash: hash, ArchivePath: archivePath, FileEntry: fileEntry)
: default)
.Where(x => x.Item1)
@@ -226,7 +208,7 @@ public Task GetFileStream(Hash hash, CancellationToken token = default)
{
if (hash == Hash.Zero)
throw new ArgumentNullException(nameof(hash));
- if (!TryGetLocation(hash, out var archivePath, out var entry))
+ if (!TryGetLocation(_conn.Db, hash, out var archivePath, out var entry))
throw new Exception($"Missing archive for {hash.ToHex()}");
var file = archivePath.Read();
@@ -239,19 +221,14 @@ public Task GetFileStream(Hash hash, CancellationToken token = default)
}
///
- public unsafe HashSet GetFileHashes()
+ public HashSet GetFileHashes()
{
// Build a Hash Table of all currently known files. We do this to deduplicate files between downloads.
var fileHashes = new HashSet();
- foreach (var arcFile in _store.GetAll(EntityCategory.ArchivedFiles)!)
- {
- fixed (byte* ptr = arcFile.FileEntryData.AsSpan())
- {
- var reader = new LittleEndianReader(ptr);
- fileHashes.Add(reader.ReadUlongAtOffset(8)); // Hash. Offset 8 in V1 header, per spec.
- }
- }
-
+
+        // Replace this once we redo the IFileStore. Instead, that can likely query MnemonicDB directly.
+ fileHashes.AddRange(_conn.Db.Find(ArchivedFile.Hash).Select(f => f.Value));
+
return fileHashes;
}
@@ -425,32 +402,17 @@ internal struct ExtractableBlock
public required int DecompressSize { get; set; }
}
- private unsafe bool TryGetLocation(Hash hash, out AbsolutePath archivePath, out FileEntry fileEntry)
+ private bool TryGetLocation(IDb db, Hash hash, out AbsolutePath archivePath, out FileEntry fileEntry)
{
- var prefix = new Id64(EntityCategory.ArchivedFiles, (ulong)hash);
- foreach (var entry in _store.GetByPrefix(prefix))
- {
- foreach (var location in _archiveLocations)
- {
- var path = location.Combine(entry.File);
- if (!path.FileExists) continue;
-
- archivePath = path;
-
- fixed (byte* ptr = entry.FileEntryData.AsSpan())
- {
- var reader = new LittleEndianReader(ptr);
- FileEntry tmpEntry = default;
-
- tmpEntry.FromReaderV1(ref reader);
- fileEntry = tmpEntry;
- return true;
- }
- }
- }
-
- archivePath = default;
- fileEntry = default;
- return false;
+ var result = false;
+ var entries = from id in db.FindIndexed(hash, ArchivedFile.Hash)
+ let entry = db.Get(id)
+ from location in _archiveLocations
+ let combined = location.Combine(entry.Container.Path)
+ where combined.FileExists
+ select (combined, entry.NxFileEntry, true);
+
+ (archivePath, fileEntry, result) = entries.FirstOrDefault();
+ return result;
}
}
diff --git a/src/NexusMods.DataModel/Services.cs b/src/NexusMods.DataModel/Services.cs
index 37051d7b14..43ca7446f3 100644
--- a/src/NexusMods.DataModel/Services.cs
+++ b/src/NexusMods.DataModel/Services.cs
@@ -11,6 +11,7 @@
using NexusMods.Abstractions.Messaging;
using NexusMods.Abstractions.Serialization;
using NexusMods.Abstractions.Serialization.ExpressionGenerator;
+using NexusMods.DataModel.ArchiveContents;
using NexusMods.DataModel.Attributes;
using NexusMods.DataModel.CommandLine.Verbs;
using NexusMods.DataModel.Diagnostics;
@@ -91,6 +92,10 @@ public static IServiceCollection AddDataModel(this IServiceCollection coll)
coll.AddAllSingleton();
+
+ // File Store
+ coll.AddAttributeCollection(typeof(ArchivedFileContainer));
+ coll.AddAttributeCollection(typeof(ArchivedFile));
coll.AddAllSingleton();
coll.AddSingleton(typeof(IFingerprintCache<,>), typeof(DataStoreFingerprintCache<,>));
diff --git a/src/NexusMods.DataModel/ZipFileStore.cs b/src/NexusMods.DataModel/ZipFileStore.cs
deleted file mode 100644
index 0656eb8cc2..0000000000
--- a/src/NexusMods.DataModel/ZipFileStore.cs
+++ /dev/null
@@ -1,212 +0,0 @@
-using System.Buffers;
-using System.Buffers.Binary;
-using System.IO.Compression;
-using NexusMods.Abstractions.IO;
-using NexusMods.Abstractions.Serialization;
-using NexusMods.Abstractions.Serialization.DataModel;
-using NexusMods.Abstractions.Serialization.DataModel.Ids;
-using NexusMods.Abstractions.Settings;
-using NexusMods.DataModel.ArchiveContents;
-using NexusMods.DataModel.ChunkedStreams;
-using NexusMods.Hashing.xxHash64;
-using NexusMods.Paths;
-using NexusMods.Paths.Utilities;
-using Reloaded.Memory.Extensions;
-
-namespace NexusMods.DataModel;
-
-///
-/// File Store that uses zip files instead of the Nexus Mods archive format. This is used for testing and
-/// to remove any dependency on the Nexus Mods archive format.
-///
-public class ZipFileStore : IFileStore
-{
- private readonly AbsolutePath[] _archiveLocations;
- private readonly IDataStore _store;
-
- private const long _chunkSize = 1024 * 1024;
-
- ///
- /// Constructor.
- ///
- public ZipFileStore(IDataStore store, ISettingsManager settingsManager, IFileSystem fileSystem)
- {
- var settings = settingsManager.Get();
-
- _archiveLocations = settings.ArchiveLocations.Select(f => f.ToPath(fileSystem)).ToArray();
- foreach (var location in _archiveLocations)
- {
- if (!location.DirectoryExists())
- location.CreateDirectory();
- }
- _store = store;
-
- }
-
- ///
- public ValueTask HaveFile(Hash hash)
- {
- return ValueTask.FromResult(TryGetLocation(hash, out _));
- }
-
- ///
- public async Task BackupFiles(IEnumerable backups, CancellationToken token = default)
- {
- var archiveId = ArchiveId.From(Guid.NewGuid());
- var id = archiveId.Value.ToString();
- var backupsList = backups.ToList();
- var distinct = backupsList.DistinctBy(d => d.Hash).ToList();
-
- using var buffer = MemoryPool.Shared.Rent((int)_chunkSize);
- var outputPath = _archiveLocations.First().Combine(id).AppendExtension(KnownExtensions.Tmp);
- {
- await using var archiveStream = outputPath.Create();
- using var builder = new ZipArchive(archiveStream, ZipArchiveMode.Create, true, System.Text.Encoding.UTF8);
-
- foreach (var backup in distinct)
- {
- await using var srcStream = await backup.StreamFactory.GetStreamAsync();
- var chunkCount = (int)(backup.Size.Value / _chunkSize);
- if (backup.Size.Value % _chunkSize > 0)
- chunkCount++;
-
- var hexName = backup.Hash.ToHex();
- for (var chunkIdx = 0; chunkIdx < chunkCount; chunkIdx++)
- {
- var entry = builder.CreateEntry($"{hexName}_{chunkIdx}", CompressionLevel.Optimal);
- await using var entryStream = entry.Open();
-
- var toCopy = (int)Math.Min(_chunkSize, (long)backup.Size.Value - (chunkIdx * _chunkSize));
- await srcStream.ReadExactlyAsync(buffer.Memory[..toCopy], token);
- await entryStream.WriteAsync(buffer.Memory[..toCopy], token);
- await entryStream.FlushAsync(token);
- }
- }
- }
-
- var finalPath = outputPath.ReplaceExtension(KnownExtensions.Zip);
-
- await outputPath.MoveToAsync(finalPath, token: token);
- UpdateReverseIndexes(distinct, archiveId, finalPath);
- }
-
- private void UpdateReverseIndexes(IEnumerable distinct, ArchiveId archiveId,
- AbsolutePath finalPath)
- {
- foreach (var entry in distinct)
- {
- var dbId = IdFor(entry.Hash, archiveId);
-
- var dbEntry = new ArchivedFiles
- {
- File = finalPath.FileName,
- FileEntryData = Array.Empty()
- };
-
- // TODO: Consider a bulk-put operation here
- _store.Put(dbId, dbEntry);
- }
- }
-
- private IId IdFor(Hash hash, ArchiveId archiveId)
- {
- Span buffer = stackalloc byte[24];
- BinaryPrimitives.WriteUInt64BigEndian(buffer, hash.Value);
- archiveId.Value.TryWriteBytes(buffer.SliceFast(8));
- return IId.FromSpan(EntityCategory.ArchivedFiles, buffer);
- }
-
- ///
- public async Task ExtractFiles(IEnumerable<(Hash Src, AbsolutePath Dest)> files, CancellationToken token = default)
- {
- foreach (var (src, dest) in files)
- {
- await using var srcStream = await GetFileStream(src, token);
- dest.Parent.CreateDirectory();
- await using var destStream = dest.Create();
- await srcStream.CopyToAsync(destStream, token);
- }
- }
-
- ///
- public async Task> ExtractFiles(IEnumerable files, CancellationToken token = default)
- {
- var results = new Dictionary();
-
- foreach (var hash in files.Distinct())
- {
- await using var srcStream = await GetFileStream(hash, token);
- await using var destStream = new MemoryStream();
- await srcStream.CopyToAsync(destStream, token);
- results.Add(hash, destStream.ToArray());
- }
-
- return results;
- }
-
- ///
- public async Task GetFileStream(Hash hash, CancellationToken token = default)
- {
- if (!TryGetLocation(hash, out var archivePath))
- throw new Exception($"Missing archive for {hash.ToHex()}");
-
- var file = archivePath.Read();
- var archive = new ZipArchive(file, ZipArchiveMode.Read, true, System.Text.Encoding.UTF8);
-
- return new ChunkedStream(new ChunkedArchiveStream(archive, hash));
- }
-
- ///
- public HashSet GetFileHashes() => throw new NotImplementedException("Implement this if we ever decide to use this again. Been dead code for a while.");
-
- private class ChunkedArchiveStream : IChunkedStreamSource
- {
- private readonly ZipArchiveEntry[] _entries;
-
- public ChunkedArchiveStream(ZipArchive archive, Hash hash)
- {
- var prefix = hash.ToHex() + "_";
- _entries = archive.Entries.Where(entry => entry.Name.StartsWith(prefix))
- .OrderBy(a => a.Name)
- .ToArray();
- Size = Size.FromLong(_entries.Sum(e => e.Length));
- ChunkSize = Size.FromLong(_chunkSize);
- ChunkCount = (ulong)_entries.Length;
- }
-
- public Size Size { get; }
- public Size ChunkSize { get; }
- public ulong ChunkCount { get; }
- public async Task ReadChunkAsync(Memory buffer, ulong chunkIndex, CancellationToken token = default)
- {
- await using var stream = _entries[chunkIndex].Open();
- await stream.ReadAtLeastAsync(buffer, buffer.Length, false, token);
- }
-
- public void ReadChunk(Span buffer, ulong chunkIndex)
- {
- using var stream = _entries[chunkIndex].Open();
- stream.ReadAtLeast(buffer, buffer.Length, false);
- }
- }
-
- private bool TryGetLocation(Hash hash, out AbsolutePath archivePath)
- {
- var prefix = new Id64(EntityCategory.ArchivedFiles, (ulong)hash);
- foreach (var entry in _store.GetByPrefix(prefix))
- {
- foreach (var location in _archiveLocations)
- {
- var path = location.Combine(entry.File);
- if (!path.FileExists) continue;
-
- archivePath = path;
- return true;
- }
-
- }
-
- archivePath = default;
- return false;
- }
-}