diff --git a/CHANGELOG.md b/CHANGELOG.md
index c353bf02..43343cd7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,13 @@
+## v1.0.4
+#### 🐛 Fixes
+
+- Fix download function (PR: #51)
+
+#### 📦 General
+
+- Added how settings impact generation to the readme (PR: #49)
+
+
 ## v1.0.3
 #### 🐛 Fixes
diff --git a/CHANGELOG.release.md b/CHANGELOG.release.md
index fa8ef110..2ab8b5f6 100644
--- a/CHANGELOG.release.md
+++ b/CHANGELOG.release.md
@@ -1,5 +1,8 @@
 ### 🐛 Fixes
 
-- fix slash in windows paths (PR: #42)
-- Fix chmod when deploying from windows (PR: #43)
+- Fix download function (PR: #51)
+
+### 📦 General
+
+- Added how settings impact generation to the readme (PR: #49)
diff --git a/README.md b/README.md
index 0c3eab0f..6f6fb997 100644
--- a/README.md
+++ b/README.md
@@ -268,13 +268,13 @@ If it is not selected, the full reply from the model is received in one go
 - `Model` the model being used (inside the Assets/StreamingAssets folder)
 - `Lora` the LORA model being used (inside the Assets/StreamingAssets folder)
 - Advanced options:
-  - `Context Size` Size of the prompt context (0 = context size of the model)
-  - `Batch Size` Batch size for prompt processing (default: 512)
-  - `Seed` seed for reproducibility. For random results every time select -1
-  - `Temperature` LLM temperature, lower values give more deterministic answers
-  - `Top K` top-k sampling (default: 40, 0 = disabled)
-  - `Top P` top-p sampling (default: 0.9, 1.0 = disabled)
-  - `Num Predict` number of tokens to predict (default: 256, -1 = infinity, -2 = until context filled)
+  - `Context Size` Size of the prompt context (0 = context size of the model)
+  - `Batch Size` Batch size for prompt processing (default: 512)
+  - `Seed` seed for reproducibility. For random results every time select -1
+  - <details><summary>`Temperature` LLM temperature, lower values give more deterministic answers</summary>The temperature setting adjusts how random the generated responses are. Turning it up makes the choices more varied and unpredictable; turning it down makes the responses more predictable and focused on the most likely options.</details>
+  - <details><summary>`Top K` top-k sampling (default: 40, 0 = disabled)</summary>The top k value restricts sampling to the k most probable tokens at each step of generation. This can help fine-tune the output and make it adhere to specific patterns or constraints.</details>
+  - <details><summary>`Top P` top-p sampling (default: 0.9, 1.0 = disabled)</summary>The top p value controls the cumulative probability of the tokens considered: at each step the model samples only from the most probable tokens until this threshold (p) is reached. Lowering this value encourages more focused, less diverse output.</details>
+  - <details><summary>`Num Predict` number of tokens to predict (default: 256, -1 = infinity, -2 = until context filled)</summary>This is the maximum number of tokens the model will predict. When Num Predict is reached the model stops generating, so words or sentences might not get finished if this value is too low.</details>
 
 #### :left_speech_bubble: Chat Settings
 - `Player Name` the name of the player
diff --git a/Runtime/LLM.cs b/Runtime/LLM.cs
index 5bc98e85..32ff3aec 100644
--- a/Runtime/LLM.cs
+++ b/Runtime/LLM.cs
@@ -65,7 +65,7 @@ private static async Task SetupBinaries()
             binariesDone += 1;
             if (!File.Exists(server))
             {
-                string serverZip = Path.Combine(Application.dataPath, "llamafile.zip");
+                string serverZip = Path.Combine(Application.temporaryCachePath, "llamafile.zip");
                 if (!File.Exists(serverZip)) await LLMUnitySetup.DownloadFile(serverZipUrl, serverZip, false, null, SetBinariesProgress);
                 binariesDone += 1;
                 LLMUnitySetup.ExtractZip(serverZip, GetAssetPath());
diff --git a/Runtime/LLMUnitySetup.cs b/Runtime/LLMUnitySetup.cs
index 5143dc3c..0cf16a62 100644
--- a/Runtime/LLMUnitySetup.cs
+++ b/Runtime/LLMUnitySetup.cs
@@ -2,11 +2,11 @@
 using System.Diagnostics;
 using System.IO;
 using UnityEngine;
-using UnityEngine.Networking;
 using Debug = UnityEngine.Debug;
 using System.Threading.Tasks;
 using System.Collections.Generic;
 using System.IO.Compression;
+using System.Net;
 
 namespace LLMUnity
 {
@@ -74,10 +74,24 @@ public static void makeExecutable(string path)
         }
 
 #if UNITY_EDITOR
+        public class DownloadStatus
+        {
+            Callback<float> progresscallback;
+
+            public DownloadStatus(Callback<float> progresscallback = null)
+            {
+                this.progresscallback = progresscallback;
+            }
+
+            public void DownloadProgressChanged(object sender, DownloadProgressChangedEventArgs e)
+            {
+                progresscallback?.Invoke(e.ProgressPercentage / 100.0f);
+            }
+        }
+
        public static async Task DownloadFile(
            string fileUrl, string savePath, bool executable = false,
-            TaskCallback<string> callback = null, Callback<float> progresscallback = null,
-            long chunkSize = 10 * 1024 * 1024)
+            TaskCallback<string> callback = null, Callback<float> progresscallback = null)
        {
            // download a file to the specified path
            if (File.Exists(savePath))
@@ -87,59 +101,23 @@ public static async Task DownloadFile(
            else
            {
                Debug.Log($"Downloading {fileUrl}...");
+                string tmpPath = Path.Combine(Application.temporaryCachePath, Path.GetFileName(savePath));
 
-                UnityWebRequest www = UnityWebRequest.Head(fileUrl);
-                UnityWebRequestAsyncOperation asyncOperation = www.SendWebRequest();
-
-                while (!asyncOperation.isDone)
-                {
-                    await Task.Delay(100); // Adjust the delay as needed
-                }
-
-                if (www.result != UnityWebRequest.Result.Success)
-                    throw new System.Exception("Failed to get file size. Error: " + www.error);
+                WebClient client = new WebClient();
+                DownloadStatus downloadStatus = new DownloadStatus(progresscallback);
+                client.DownloadProgressChanged += downloadStatus.DownloadProgressChanged;
+                await client.DownloadFileTaskAsync(fileUrl, tmpPath);
+                if (executable) makeExecutable(tmpPath);
 
-                long fileSize = long.Parse(www.GetResponseHeader("Content-Length"));
                AssetDatabase.StartAssetEditing();
                Directory.CreateDirectory(Path.GetDirectoryName(savePath));
-                using (FileStream fs = new FileStream(savePath, FileMode.Create, FileAccess.Write))
-                {
-                    long chunks = (long) Mathf.Ceil((float)fileSize / chunkSize);
-                    for (long i = 0; i < chunks; i++)
-                    {
-                        long startByte = i * chunkSize;
-                        long endByte = startByte + chunkSize - 1;
-                        if (endByte > fileSize - 1) endByte = fileSize - 1;
-
-                        using (UnityWebRequest wwwChunk = UnityWebRequest.Get(fileUrl))
-                        {
-                            wwwChunk.SetRequestHeader("Range", "bytes=" + startByte + "-" + endByte);
-
-                            asyncOperation = wwwChunk.SendWebRequest();
-
-                            while (!asyncOperation.isDone)
-                            {
-                                await Task.Delay(1000); // Adjust the delay as needed
-                            }
-
-                            if (wwwChunk.result != UnityWebRequest.Result.Success)
-                                throw new System.Exception("Download failed. Error: " + wwwChunk.error);
-
-                            fs.Write(wwwChunk.downloadHandler.data, 0, wwwChunk.downloadHandler.data.Length);
-
-                            int progressPercentage = Mathf.FloorToInt((float) i / chunks * 100);
-                            if (progressPercentage % 1 == 0)
-                                progresscallback((float)progressPercentage / 100);
-                        }
-                    }
-                }
-
-                if (executable) makeExecutable(savePath);
+                File.Move(tmpPath, savePath);
                AssetDatabase.StopAssetEditing();
                Debug.Log($"Download complete!");
+
+                progresscallback?.Invoke(1f);
+                callback?.Invoke(savePath);
            }
-
-            progresscallback(1f);
-            callback?.Invoke(savePath);
        }
 
        public static async Task AddAsset(string assetPath, string basePath)
diff --git a/VERSION b/VERSION
index e946d6bb..3e7bcf08 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v1.0.3
+v1.0.4
diff --git a/package.json b/package.json
index fb134178..3e66cb0a 100644
--- a/package.json
+++ b/package.json
@@ -1,11 +1,11 @@
 {
     "name": "ai.undream.llmunity",
-    "version": "1.0.3",
+    "version": "1.0.4",
     "displayName": "LLMUnity",
     "description": "LLMUnity allows to run and distribute LLM models in the Unity engine.",
     "unity": "2022.3",
     "unityRelease": "16f1",
-    "documentationUrl": "https://github.com/amakropoulos/LLMUnity",
+    "documentationUrl": "https://github.com/undreamai/LLMUnity",
     "keywords": [
         "llm",
         "large language model",