diff --git a/.gitignore b/.gitignore index 36fa6bc..c8d8847 100644 --- a/.gitignore +++ b/.gitignore @@ -375,3 +375,5 @@ healthchecksdb /FirClient/Assets/Plugins/Editor/JetBrains /FirClient/Assets/Plugins/Editor/JetBrains /FirClient/Assets/Plugins/Editor/JetBrains +/FirClient/PersistentData +/FirClient/Build diff --git a/FirClient/3rd/Debugger/.gitignore b/FirClient/3rd/Debugger/.gitignore deleted file mode 100644 index e31b6a5..0000000 --- a/FirClient/3rd/Debugger/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -[Ll]ibrary/ -[Tt]emp/ -[Oo]bj/ -[Bb]uild/ - -# Autogenerated VS/MD solution and project files -*.csproj -*.unityproj -*.sln -*.suo -*.tmp -*.user -*.userprefs -*.pidb -*.booproj - -# Unity3D generated meta files -*.pidb.meta - -# Unity3D Generated File On Crash Reports -sysinfo.txt diff --git a/FirClient/3rd/Debugger/Debugger/CString.dll b/FirClient/3rd/Debugger/Debugger/CString.dll deleted file mode 100644 index 9c67e23..0000000 Binary files a/FirClient/3rd/Debugger/Debugger/CString.dll and /dev/null differ diff --git a/FirClient/3rd/Debugger/Debugger/ConstStringTable.cs b/FirClient/3rd/Debugger/Debugger/ConstStringTable.cs deleted file mode 100644 index 0fb04ed..0000000 --- a/FirClient/3rd/Debugger/Debugger/ConstStringTable.cs +++ /dev/null @@ -1,40 +0,0 @@ -using System; - -public static class ConstStringTable -{ - static string[] secDict = new string[100]; - static string[] tenDict = new string[10]; - - static ConstStringTable() - { - for (int i = 0; i < 100; i++) - { - secDict[i] = string.Intern(i.ToString("00")); - } - - for (int i = 0; i < 10; i++) - { - tenDict[i] = string.Intern(i.ToString()); - } - } - - static public string GetTimeIntern(int time) - { - if (time < 0 || time > 99) - { - return time.ToString(); - } - - return secDict[time]; - } - - static public string GetNumIntern(int num) - { - if (num < 0 || num > 99) - { - return num.ToString(); - } - - return num < 10 ? 
tenDict[num] : secDict[num]; - } -} diff --git a/FirClient/3rd/Debugger/Debugger/Debugger.cs b/FirClient/3rd/Debugger/Debugger/Debugger.cs deleted file mode 100644 index 08e837e..0000000 --- a/FirClient/3rd/Debugger/Debugger/Debugger.cs +++ /dev/null @@ -1,234 +0,0 @@ -using UnityEngine; -using System; -using System.Text; - -namespace UnityEngine -{ - public static class Debugger - { - public static bool useLog = true; - public static string threadStack = string.Empty; - public static IULogger logger = null; - - private static CString sb = new CString(256); - - static Debugger() - { - for (int i = 24; i < 70; i++) - { - StringPool.PreAlloc(i, 2); - } - } - - //减少gc alloc - static string GetLogFormat(string str) - { - DateTime time = DateTime.Now; - //StringBuilder sb = StringBuilderCache.Acquire(); - - //sb.Append(ConstStringTable.GetTimeIntern(time.Hour)) - // .Append(":") - // .Append(ConstStringTable.GetTimeIntern(time.Minute)) - // .Append(":") - // .Append(ConstStringTable.GetTimeIntern(time.Second)) - // .Append(".") - // .Append(time.Millisecond) - // .Append("-") - // .Append(Time.frameCount % 999) - // .Append(": ") - // .Append(str); - - //return StringBuilderCache.GetStringAndRelease(sb); - - sb.Clear(); - sb.Append(ConstStringTable.GetTimeIntern(time.Hour)) - .Append(":") - .Append(ConstStringTable.GetTimeIntern(time.Minute)) - .Append(":") - .Append(ConstStringTable.GetTimeIntern(time.Second)) - .Append(".") - .Append(time.Millisecond) - .Append("-") - .Append(Time.frameCount % 999) - .Append(": ") - .Append(str); - - String dest = StringPool.Alloc(sb.Length); - sb.CopyToString(dest); - return dest; - } - - public static void Log(string str) - { - str = GetLogFormat(str); - - if (useLog) - { - Debug.Log(str); - } - else if (logger != null) - { - //普通log节省一点记录堆栈性能和避免调用手机系统log函数 - logger.Log(str, string.Empty, LogType.Log); - } - - StringPool.Collect(str); - } - - public static void Log(object message) - { - Log(message.ToString()); - } - - public static void Log(string str, object arg0) - { - string s = string.Format(str, arg0); - Log(s); - } - - public static void Log(string str, object arg0, object arg1) - { - string s = string.Format(str, arg0, arg1); - Log(s); - } - - public static void Log(string str, object arg0, object arg1, object arg2) - { - string s = string.Format(str, arg0, arg1, arg2); - Log(s); - } - - public static void Log(string str, params object[] param) - { - string s = string.Format(str, param); - Log(s); - } - - public static void LogWarning(string str) - { - str = GetLogFormat(str); - - if (useLog) - { - Debug.LogWarning(str); - } - else if (logger != null) - { - string stack = StackTraceUtility.ExtractStackTrace(); - logger.Log(str, stack, LogType.Warning); - } - - StringPool.Collect(str); - } - - public static void LogWarning(object message) - { - LogWarning(message.ToString()); - } - - public static void LogWarning(string str, object arg0) - { - string s = string.Format(str, arg0); - LogWarning(s); - } - - public static void LogWarning(string str, object arg0, object arg1) - { - string s = string.Format(str, arg0, arg1); - LogWarning(s); - } - - public static void LogWarning(string str, object arg0, object arg1, object arg2) - { - string s = string.Format(str, arg0, arg1, arg2); - LogWarning(s); - } - - public static void LogWarning(string str, params object[] param) - { - string s = string.Format(str, param); - LogWarning(s); - } - - public static void LogError(string str) - { - str = GetLogFormat(str); - - if (useLog) - { - Debug.LogError(str); - } - 
else if (logger != null) - { - string stack = StackTraceUtility.ExtractStackTrace(); - logger.Log(str, stack, LogType.Error); - } - - StringPool.Collect(str); - } - - public static void LogError(object message) - { - LogError(message.ToString()); - } - - public static void LogError(string str, object arg0) - { - string s = string.Format(str, arg0); - LogError(s); - } - - public static void LogError(string str, object arg0, object arg1) - { - string s = string.Format(str, arg0, arg1); - LogError(s); - } - - public static void LogError(string str, object arg0, object arg1, object arg2) - { - string s = string.Format(str, arg0, arg1, arg2); - LogError(s); - } - - public static void LogError(string str, params object[] param) - { - string s = string.Format(str, param); - LogError(s); - } - - - public static void LogException(Exception e) - { - threadStack = e.StackTrace; - string str = GetLogFormat(e.Message); - - if (useLog) - { - Debug.LogError(str); - } - else if (logger != null) - { - logger.Log(str, threadStack, LogType.Exception); - } - - StringPool.Collect(str); - } - - public static void LogException(string str, Exception e) - { - threadStack = e.StackTrace; - str = GetLogFormat(str + e.Message); - - if (useLog) - { - Debug.LogError(str); - } - else if (logger != null) - { - logger.Log(str, threadStack, LogType.Exception); - } - - StringPool.Collect(str); - } - } -} diff --git a/FirClient/3rd/Debugger/Debugger/ExtendMethods.cs b/FirClient/3rd/Debugger/Debugger/ExtendMethods.cs deleted file mode 100644 index f1770ca..0000000 --- a/FirClient/3rd/Debugger/Debugger/ExtendMethods.cs +++ /dev/null @@ -1,22 +0,0 @@ -using UnityEngine; -using System; -using System.Text; - -namespace UnityEngine -{ - public static partial class StringBuilderExtensionMethods - { - /*到.net4.0 stringbuild 会有Clear 函数,到时可以删掉这个函数*/ - public static void Clear(this StringBuilder sb) - { - sb.Length = 0; - } - - public static void AppendLineEx(this StringBuilder sb, string str = "") - { - sb.Append(str).Append("\r\n"); - } - } -} - - diff --git a/FirClient/3rd/Debugger/Debugger/ILog.cs b/FirClient/3rd/Debugger/Debugger/ILog.cs deleted file mode 100644 index 2d0e4f0..0000000 --- a/FirClient/3rd/Debugger/Debugger/ILog.cs +++ /dev/null @@ -1,19 +0,0 @@ -using UnityEngine; - -namespace UnityEngine -{ - public interface ICmd - { - void Log(string msg); - void LogWarning(string msg); - void LogError(string msg); - void Show(bool flag); - } - - public interface IULogger - { - void Log(string msg, string stack, LogType type); - } -} - - diff --git a/FirClient/3rd/Debugger/Debugger/Properties/AssemblyInfo.cs b/FirClient/3rd/Debugger/Debugger/Properties/AssemblyInfo.cs deleted file mode 100644 index a37833e..0000000 --- a/FirClient/3rd/Debugger/Debugger/Properties/AssemblyInfo.cs +++ /dev/null @@ -1,37 +0,0 @@ -using System.Resources; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; - -// 有关程序集的常规信息通过以下 -// 特性集控制。更改这些特性值可修改 -// 与程序集关联的信息。 -[assembly: AssemblyTitle("Debugger")] -[assembly: AssemblyDescription("")] -[assembly: AssemblyConfiguration("")] -[assembly: AssemblyCompany("ToLua")] -[assembly: AssemblyProduct("Debugger")] -[assembly: AssemblyCopyright("Copyright © 2015")] -[assembly: AssemblyTrademark("")] -[assembly: AssemblyCulture("")] - -// 将 ComVisible 设置为 false 使此程序集中的类型 -// 对 COM 组件不可见。如果需要从 COM 访问此程序集中的类型, -// 则将该类型上的 ComVisible 特性设置为 true。 -[assembly: ComVisible(false)] - -// 如果此项目向 COM 公开,则下列 GUID 用于类型库的 ID -[assembly: 
Guid("e7e6c9ef-9a4b-44cb-b1c9-fc312f273cda")] - -// 程序集的版本信息由下面四个值组成: -// -// 主版本 -// 次版本 -// 生成号 -// 修订号 -// -// 可以指定所有这些值,也可以使用“生成号”和“修订号”的默认值, -// 方法是按如下所示使用“*”: -// [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("1.0.0.0")] -[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/FirClient/3rd/Debugger/Debugger/StringBuilderCache.cs b/FirClient/3rd/Debugger/Debugger/StringBuilderCache.cs deleted file mode 100644 index 23e7b70..0000000 --- a/FirClient/3rd/Debugger/Debugger/StringBuilderCache.cs +++ /dev/null @@ -1,44 +0,0 @@ -/*copy from .net by topameng*/ - -using System; -using System.Reflection; -using System.Text; - -namespace UnityEngine -{ - public static class StringBuilderCache - { - [ThreadStatic] - static StringBuilder _cache = new StringBuilder(256); - private const int MAX_BUILDER_SIZE = 512; - - public static StringBuilder Acquire(int capacity = 256) - { - StringBuilder sb = _cache; - - if (sb != null && sb.Capacity >= capacity) - { - _cache = null; - sb.Clear(); - return sb; - } - - return new StringBuilder(capacity); - } - - public static string GetStringAndRelease(StringBuilder sb) - { - string str = sb.ToString(); - Release(sb); - return str; - } - - public static void Release(StringBuilder sb) - { - if (sb.Capacity <= MAX_BUILDER_SIZE) - { - _cache = sb; - } - } - } -} diff --git a/FirClient/3rd/Debugger/LICENSE b/FirClient/3rd/Debugger/LICENSE deleted file mode 100644 index 730e200..0000000 --- a/FirClient/3rd/Debugger/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 topameng - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/FirClient/3rd/Debugger/README.md b/FirClient/3rd/Debugger/README.md deleted file mode 100644 index b70e620..0000000 --- a/FirClient/3rd/Debugger/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Debugger - -just a log system for tolua#. 
(details below) - -A logger system that also includes some extension methods. - -Unity's Debug.Log can be replaced at runtime through a custom interface, cutting down unnecessary stack-trace capture. - -Why this is not kept inside the Unity project: - -Wrapping Debugger.Log into a dll prevents the editor from mistakenly jumping into Debugger.Log itself when a log entry is double-clicked. - -When the lua stack leaves the game process, it also helps, to some extent, to keep the Unity log from locking up. (See the tolua# wiki for details.) diff --git a/FirClient/Assets/Editor/AtlasBuilder.cs b/FirClient/Assets/Editor/AtlasBuilder.cs index 5589f7f..c6e0b1e 100644 --- a/FirClient/Assets/Editor/AtlasBuilder.cs +++ b/FirClient/Assets/Editor/AtlasBuilder.cs @@ -32,6 +32,7 @@ static void CreateOrUpdateAtlas() string texturePath = GetSelectedPathOrFallback(); if (!texturePath.Contains(texPrefix) || texturePath.EndsWith(texPrefix)) { + Debug.LogError("Texture Asset Path not found!!!"); return; } string atlasName = Path.GetFileNameWithoutExtension(texturePath); diff --git a/FirClient/Assets/Editor/BaseEditor.cs b/FirClient/Assets/Editor/BaseEditor.cs index c000175..678bfd3 100644 --- a/FirClient/Assets/Editor/BaseEditor.cs +++ b/FirClient/Assets/Editor/BaseEditor.cs @@ -161,20 +161,6 @@ public static long FileSize(string filePath) return new FileInfo(filePath).Length; } - [MenuItem("Assets/Create/Game/Game Settings", false, 82)] - static void CreateGameSettings() - { - string gameSettingPath = AppConst.GameSettingPath + ".asset"; - string fullPath = AppDataWithoutAssetPath + "/" + gameSettingPath; - if (!File.Exists(fullPath)) - { - var scriptObj = ScriptableObject.CreateInstance(); - AssetDatabase.CreateAsset(scriptObj, gameSettingPath); - AssetDatabase.Refresh(); - Debug.LogWarning("Create GameSettings OK!!"); - } - } - public static string GetSelectedPathOrFallback() { string path = "Assets"; diff --git a/FirClient/Assets/Editor/CustomSettings.cs b/FirClient/Assets/Editor/CustomSettings.cs index ef47ffd..9296f68 100644 --- a/FirClient/Assets/Editor/CustomSettings.cs +++ b/FirClient/Assets/Editor/CustomSettings.cs @@ -19,7 +19,7 @@ using System.IO; using FirClient.View; using FirClient.Extensions; -using LiteNetLib.Utils; +using TMPro; public static class CustomSettings { @@ -30,6 +30,13 @@ public static class CustomSettings public static string baseLuaDir = FrameworkPath + "/ToLua/Lua"; public static string injectionFilesPath = Application.dataPath + "/ToLua/Injection/"; + //redirection targets for lua print or error + public const int PRINTLOGLINE = 208; //line of the Debugger.Log call inside ToLua.Print + public const int PCALLERRORLINE = 810; //line of the throw inside LuaState.Pcall + public const int LUADLLERRORLINE = 803; //line of the throw inside LuaDLL.luaL_argerror + + public const string LUAJIT_CMD_OPTION = "-b -g"; //command-line options for the luajit.exe compiler + //types forced to be exported as static classes (note: the type must also be added to customTypeList before it can be exported) //some Unity classes are sealed classes that are in fact completely equivalent to static classes public static List<Type> staticClassTypes = new List<Type> @@ -95,7 +102,6 @@ public static class CustomSettings _GT(typeof(Screen)), _GT(typeof(AudioClip)), _GT(typeof(Animator)), - _GT(typeof(MeshRenderer)), _GT(typeof(Animation)), _GT(typeof(Resources)), _GT(typeof(Debug)), @@ -155,6 +161,7 @@ public static class CustomSettings _GT(typeof(CLuaComponent)), _GT(typeof(CPrefabVar)), _GT(typeof(VarData)), + _GT(typeof(TMP_InputField)), }; public static List<Type> dynamicList = new List<Type>() diff --git a/FirClient/Assets/Editor/FixChecker.cs b/FirClient/Assets/Editor/FixChecker.cs index 6122ed3..6279f88 100644 --- a/FirClient/Assets/Editor/FixChecker.cs +++ b/FirClient/Assets/Editor/FixChecker.cs @@ -38,25 +38,31 @@ public static void ClearItemBoxAsset() [MenuItem("FixChecker/Encode Lua File with UTF-8")] public static void EncodeAllLuaFile() { - var luaPath = AppDataPath + "/Scripts/Lua"; - var files = Directory.GetFiles(luaPath, "*.lua", SearchOption.AllDirectories); - 
foreach(var file in files) - { - Utf8Encode(file); - } + EncodeDir(AppDataPath + "/Scripts/Lua", "*.lua"); } - static void Utf8Encode(string filename) + [MenuItem("FixChecker/Encode CS File with UTF-8")] + public static void EncodeAllCSFile() { - if (!File.Exists(filename)) - { - return; - } - string text = File.ReadAllText(filename, Encoding.UTF8); - using (var sw = new StreamWriter(filename, false, new UTF8Encoding(false))) + EncodeDir(AppDataPath + "/Scripts", "*.cs"); + } + + static void EncodeDir(string scriptPath, string extName) + { + var files = Directory.GetFiles(scriptPath, extName, SearchOption.AllDirectories); + foreach (var file in files) { - sw.Write(text); - sw.Close(); + if (!File.Exists(file)) + { + continue; + } + string text = File.ReadAllText(file, Encoding.UTF8); + using (var sw = new StreamWriter(file, false, new UTF8Encoding(false))) + { + sw.Write(text); + sw.Close(); + } } + AssetDatabase.Refresh(); } } diff --git a/FirClient/Assets/Editor/Importer/TexturePreImporter.cs b/FirClient/Assets/Editor/Importer/TexturePreImporter.cs index 95b9d79..ec4f23e 100644 --- a/FirClient/Assets/Editor/Importer/TexturePreImporter.cs +++ b/FirClient/Assets/Editor/Importer/TexturePreImporter.cs @@ -2,14 +2,12 @@ public static class TexturePreImporter { - static BuildTarget activePlatform = EditorUserBuildSettings.activeBuildTarget; - public static void ProcTexture(string assetPath, ref TextureImporter importer) { importer.mipmapEnabled = false; importer.compressionQuality = 50; importer.textureType = TextureImporterType.Sprite; - + if (assetPath.StartsWith("Assets/res/Atlas")) { importer.spriteImportMode = SpriteImportMode.Multiple; @@ -18,37 +16,29 @@ public static void ProcTexture(string assetPath, ref TextureImporter importer) { importer.textureType = TextureImporterType.Default; } - var settings = new TextureImporterPlatformSettings(); - settings.compressionQuality = 50; + var andSettings = new TextureImporterPlatformSettings(); + var iosSettings = new TextureImporterPlatformSettings(); + andSettings.name = "Android"; + iosSettings.name = "iPhone"; + andSettings.overridden = true; + iosSettings.overridden = true; + andSettings.compressionQuality = iosSettings.compressionQuality = 50; var info = GetTexCompressInfo(assetPath); if (info == null) { - switch (activePlatform) - { - case BuildTarget.Android: - settings.format = TextureImporterFormat.ETC2_RGBA8; - settings.androidETC2FallbackOverride = AndroidETC2FallbackOverride.Quality32Bit; - break; - case BuildTarget.iOS: - settings.format = TextureImporterFormat.ASTC_6x6; - break; - } - settings.maxTextureSize = 1024; + andSettings.format = TextureImporterFormat.ETC2_RGBA8; + andSettings.androidETC2FallbackOverride = AndroidETC2FallbackOverride.Quality32Bit; + iosSettings.format = TextureImporterFormat.ASTC_6x6; + andSettings.maxTextureSize = iosSettings.maxTextureSize = 1024; } else { - switch (activePlatform) - { - case BuildTarget.Android: - settings.format = info.androidFormat; - break; - case BuildTarget.iOS: - settings.format = info.iosFormat; - break; - } - settings.maxTextureSize = GetTextureSize(info.textureSize); + andSettings.format = info.androidFormat; + iosSettings.format = info.iosFormat; + andSettings.maxTextureSize = iosSettings.maxTextureSize = GetTextureSize(info.textureSize); } - importer.SetPlatformTextureSettings(settings); + importer.SetPlatformTextureSettings(andSettings); + importer.SetPlatformTextureSettings(iosSettings); } static int 
GetTextureSize(TextureSize type) static TextureCompressInfo GetTexCompressInfo(string assetPath) { TextureCompressInfo info = null; - var list = BaseEditor.gameSettings.atlasSettings; - if (list != null) + if (BaseEditor.gameSettings != null) { - foreach (var item in list) + var list = BaseEditor.gameSettings.atlasSettings; + if (list != null) { - var path = "Assets/" + item.assetPath; - if (assetPath.Contains(path)) + foreach (var item in list) { - return item; + var path = "Assets/" + item.assetPath; + if (assetPath.Contains(path)) + { + return item; + } } } } diff --git a/FirClient/Assets/Editor/PrefabVarEditor.cs b/FirClient/Assets/Editor/PrefabVarEditor.cs index 4bde5c0..5f68da8 100644 --- a/FirClient/Assets/Editor/PrefabVarEditor.cs +++ b/FirClient/Assets/Editor/PrefabVarEditor.cs @@ -20,6 +20,16 @@ public override void OnInspectorGUI() base.OnInspectorGUI(); serializedObject.Update(); mReordList.DoLayoutList(); + if (GUILayout.Button("Auto bind")) + { + Undo.RecordObject(mPrefabVar, "Auto Bind"); + mPrefabVar.AutoBind(); + } + if (GUILayout.Button("Clear bind")) + { + Undo.RecordObject(mPrefabVar, "Clear Bind"); + mPrefabVar.varData.Clear(); + } serializedObject.ApplyModifiedProperties(); } @@ -87,7 +97,7 @@ void OnReorderItem(ReorderableList list) e.FindPropertyRelative(str).objectReferenceValue = null; } } - var varName = mPrefabVar.GetVarNameByType(typeid); + var varName = mPrefabVar.GetVarNameByType((VarType)typeid); EditorGUI.PropertyField(new Rect(rect.x + 320, rect.y, rect.width - 320, EditorGUIUtility.singleLineHeight), e.FindPropertyRelative(varName), GUIContent.none); }; diff --git a/FirClient/Assets/Editor/ResPackager.cs b/FirClient/Assets/Editor/ResPackager.cs index 2c53c53..d2e4e7d 100644 --- a/FirClient/Assets/Editor/ResPackager.cs +++ b/FirClient/Assets/Editor/ResPackager.cs @@ -8,27 +8,48 @@ using FirClient.Component; using FirClient.Define; using Debug = UnityEngine.Debug; -using System.Text; public class ResPackager : BaseEditor { static List<AssetBundleBuild> maps = new List<AssetBundleBuild>(); - [MenuItem("GameAsset/Build All Assets", false, 102)] - public static void BuildAllResource() + [MenuItem("GameAsset/Open Persistent Dir", false, 10)] + static void OpenPersistentPath() { - if (gameSettings.debugMode) + var path = Path.GetDirectoryName(AppDataPath) + "/PersistentData"; + if (!Directory.Exists(path)) { - Debug.LogError("BuildAssetResource cannot run DebugMode!!!"); + Debug.LogError("Persistent DataPath not found!!:>" + path); return; } - if (Directory.Exists(Util.DataPath)) + EditorUtility.RevealInFinder(path); + } + + [MenuItem("GameAsset/Clear Persistent Data", false, 11)] + static void ClearPersistentData() + { + var persistentPath = Path.GetDirectoryName(AppDataPath) + "/PersistentData"; + if (Directory.Exists(persistentPath)) + { + Directory.Delete(persistentPath, true); + Debug.Log("Delete Res Directory:>" + persistentPath); + } + var dataDir = new DirectoryInfo(Application.persistentDataPath); + if (dataDir.Exists) { - Directory.Delete(Util.DataPath, true); + dataDir.Delete(true); + Debug.Log("Delete Persistent DataPath:>" + Application.persistentDataPath); } - if (Directory.Exists(StreamDir)) + } + + [MenuItem("GameAsset/Build All Assets", false, 102)] + public static void BuildAllResource() + { + if (gameSettings.debugMode) { - Directory.Delete(StreamDir, true); + Debug.LogError("BuildAssetResource cannot run in DebugMode!!!"); + Selection.activeObject = Util.LoadGameSettings(); + return; } BuildAssetBundles(); BuildScriptWithDatas(); //build scripts + config data } @@ -103,7 +124,7 @@ static void 
PrintAssetBundleList() { foreach(var map in maps) { - Debug.LogError("abName: " + map.assetBundleName); + Debug.Log("abName: " + map.assetBundleName); for(int i = 0; i < map.assetNames.Length; i++) { Debug.Log("(" + i + ")--->" + map.assetNames[i]); @@ -178,12 +199,12 @@ static void PackDatasFromGameSettings() static void PackLuaFiles() { string resPath = AppDataPath + "/StreamingAssets/"; - string luaPath = resPath + "/lua/"; + string luaPath = resPath + "lua/"; //----------复制Lua文件---------------- if (Directory.Exists(luaPath)) { - Directory.Delete(luaPath); + Directory.Delete(luaPath, true); } Directory.CreateDirectory(luaPath); string[] luaPaths = { AppDataPath + "/Scripts/lua/", @@ -359,7 +380,7 @@ static void PackCompressDir(string dirName, string fileType) var files = Directory.GetFiles(srcPath, "*.*", SearchOption.AllDirectories) .Where(file => allowedExtensions.Any(file.ToLower().EndsWith)).ToList(); - string zipName = dirName.ToLower() + "_" + files.Count() + ".zip"; + string zipName = dirName.ToLower().Remove(0, 4) + "_" + files.Count() + ".zip"; string zipPath = AppDataPath + "/StreamingAssets/" + zipName; var zipExtNames = fileType.Replace("*.", ""); diff --git a/FirClient/Assets/Libraries/DOTween/Editor/Imgs/DOTweenIcon.png.meta b/FirClient/Assets/Libraries/DOTween/Editor/Imgs/DOTweenIcon.png.meta index 8f50dc1..324c848 100644 --- a/FirClient/Assets/Libraries/DOTween/Editor/Imgs/DOTweenIcon.png.meta +++ b/FirClient/Assets/Libraries/DOTween/Editor/Imgs/DOTweenIcon.png.meta @@ -3,7 +3,7 @@ guid: 94008168eb5e72543a89c29e505f5944 TextureImporter: internalIDToNameTable: [] externalObjects: {} - serializedVersion: 10 + serializedVersion: 11 mipmaps: mipMapMode: 0 enableMipMap: 0 @@ -57,6 +57,7 @@ TextureImporter: maxTextureSizeSet: 0 compressionQualitySet: 0 textureFormatSet: 0 + applyGammaDecoding: 1 platformSettings: - serializedVersion: 3 buildTarget: DefaultTexturePlatform @@ -70,6 +71,30 @@ TextureImporter: overridden: 0 androidETC2FallbackOverride: 0 forceMaximumCompressionQuality_BC6H_BC7: 0 + - serializedVersion: 3 + buildTarget: Android + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 47 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + androidETC2FallbackOverride: 1 + forceMaximumCompressionQuality_BC6H_BC7: 0 + - serializedVersion: 3 + buildTarget: iPhone + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 50 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + androidETC2FallbackOverride: 0 + forceMaximumCompressionQuality_BC6H_BC7: 0 spriteSheet: serializedVersion: 2 sprites: [] diff --git a/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Footer.png.meta b/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Footer.png.meta index 16e3f14..01f7d49 100644 --- a/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Footer.png.meta +++ b/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Footer.png.meta @@ -3,7 +3,7 @@ guid: 099554a8274a7c54da3b0ad99e247638 TextureImporter: internalIDToNameTable: [] externalObjects: {} - serializedVersion: 10 + serializedVersion: 11 mipmaps: mipMapMode: 0 enableMipMap: 0 @@ -57,6 +57,7 @@ TextureImporter: maxTextureSizeSet: 0 compressionQualitySet: 0 textureFormatSet: 0 + applyGammaDecoding: 1 platformSettings: - serializedVersion: 3 buildTarget: DefaultTexturePlatform @@ -70,6 +71,30 @@ TextureImporter: overridden: 0 androidETC2FallbackOverride: 0 forceMaximumCompressionQuality_BC6H_BC7: 0 + - 
serializedVersion: 3 + buildTarget: Android + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 47 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + androidETC2FallbackOverride: 1 + forceMaximumCompressionQuality_BC6H_BC7: 0 + - serializedVersion: 3 + buildTarget: iPhone + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 50 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + androidETC2FallbackOverride: 0 + forceMaximumCompressionQuality_BC6H_BC7: 0 spriteSheet: serializedVersion: 2 sprites: [] diff --git a/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Footer_dark.png.meta b/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Footer_dark.png.meta index 7d2180c..7bdf479 100644 --- a/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Footer_dark.png.meta +++ b/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Footer_dark.png.meta @@ -3,7 +3,7 @@ guid: 9670710e99b75494b9ea5a0f3654ab89 TextureImporter: internalIDToNameTable: [] externalObjects: {} - serializedVersion: 10 + serializedVersion: 11 mipmaps: mipMapMode: 0 enableMipMap: 0 @@ -57,6 +57,7 @@ TextureImporter: maxTextureSizeSet: 0 compressionQualitySet: 0 textureFormatSet: 0 + applyGammaDecoding: 1 platformSettings: - serializedVersion: 3 buildTarget: DefaultTexturePlatform @@ -70,6 +71,30 @@ TextureImporter: overridden: 0 androidETC2FallbackOverride: 0 forceMaximumCompressionQuality_BC6H_BC7: 0 + - serializedVersion: 3 + buildTarget: Android + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 47 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + androidETC2FallbackOverride: 1 + forceMaximumCompressionQuality_BC6H_BC7: 0 + - serializedVersion: 3 + buildTarget: iPhone + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 50 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + androidETC2FallbackOverride: 0 + forceMaximumCompressionQuality_BC6H_BC7: 0 spriteSheet: serializedVersion: 2 sprites: [] diff --git a/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Header.jpg.meta b/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Header.jpg.meta index 9cd059b..a06b4ee 100644 --- a/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Header.jpg.meta +++ b/FirClient/Assets/Libraries/DOTween/Editor/Imgs/Header.jpg.meta @@ -3,7 +3,7 @@ guid: 7791d50ec315fcf4e90870ff4ee3ca37 TextureImporter: internalIDToNameTable: [] externalObjects: {} - serializedVersion: 10 + serializedVersion: 11 mipmaps: mipMapMode: 0 enableMipMap: 0 @@ -57,6 +57,7 @@ TextureImporter: maxTextureSizeSet: 0 compressionQualitySet: 0 textureFormatSet: 0 + applyGammaDecoding: 1 platformSettings: - serializedVersion: 3 buildTarget: DefaultTexturePlatform @@ -70,6 +71,30 @@ TextureImporter: overridden: 0 androidETC2FallbackOverride: 0 forceMaximumCompressionQuality_BC6H_BC7: 0 + - serializedVersion: 3 + buildTarget: Android + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 47 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + androidETC2FallbackOverride: 1 + forceMaximumCompressionQuality_BC6H_BC7: 0 + - serializedVersion: 3 + buildTarget: iPhone + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 50 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + 
androidETC2FallbackOverride: 0 + forceMaximumCompressionQuality_BC6H_BC7: 0 spriteSheet: serializedVersion: 2 sprites: [] diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib.meta new file mode 100644 index 0000000..545e4cd --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 43665f6ad61374f42b529302c4d69507 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2.meta new file mode 100644 index 0000000..0ba4943 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: ef59fcc5115dc46b7a580f880f1b737f +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2.cs new file mode 100644 index 0000000..4bd48b0 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2.cs @@ -0,0 +1,79 @@ +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.BZip2 +{ + /// + /// An example class to demonstrate compression and decompression of BZip2 streams. + /// + public static class BZip2 + { + /// + /// Decompress the input writing + /// uncompressed data to the output stream + /// + /// The readable stream containing data to decompress. + /// The output stream to receive the decompressed data. + /// Both streams are closed on completion if true. + public static void Decompress(Stream inStream, Stream outStream, bool isStreamOwner) + { + if (inStream == null) + throw new ArgumentNullException(nameof(inStream)); + + if (outStream == null) + throw new ArgumentNullException(nameof(outStream)); + + try + { + using (BZip2InputStream bzipInput = new BZip2InputStream(inStream)) + { + bzipInput.IsStreamOwner = isStreamOwner; + Core.StreamUtils.Copy(bzipInput, outStream, new byte[4096]); + } + } + finally + { + if (isStreamOwner) + { + // inStream is closed by the BZip2InputStream if stream owner + outStream.Dispose(); + } + } + } + + /// + /// Compress the input stream sending + /// result data to output stream + /// + /// The readable stream to compress. + /// The output stream to receive the compressed data. + /// Both streams are closed on completion if true. + /// Block size acts as compression level (1 to 9) with 1 giving + /// the lowest compression and 9 the highest. 
+ public static void Compress(Stream inStream, Stream outStream, bool isStreamOwner, int level) + { + if (inStream == null) + throw new ArgumentNullException(nameof(inStream)); + + if (outStream == null) + throw new ArgumentNullException(nameof(outStream)); + + try + { + using (BZip2OutputStream bzipOutput = new BZip2OutputStream(outStream, level)) + { + bzipOutput.IsStreamOwner = isStreamOwner; + Core.StreamUtils.Copy(inStream, bzipOutput, new byte[4096]); + } + } + finally + { + if (isStreamOwner) + { + // outStream is closed by the BZip2OutputStream if stream owner + inStream.Dispose(); + } + } + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2.cs.meta new file mode 100644 index 0000000..1627174 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 2916c893b7ffc4c8e905f8f95e63bea1 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Constants.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Constants.cs new file mode 100644 index 0000000..146e0a0 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Constants.cs @@ -0,0 +1,121 @@ +namespace ICSharpCode.SharpZipLib.BZip2 +{ + /// + /// Defines internal values for both compression and decompression + /// + internal sealed class BZip2Constants + { + /// + /// Random numbers used to randomise repetitive blocks + /// + public readonly static int[] RandomNumbers = { + 619, 720, 127, 481, 931, 816, 813, 233, 566, 247, + 985, 724, 205, 454, 863, 491, 741, 242, 949, 214, + 733, 859, 335, 708, 621, 574, 73, 654, 730, 472, + 419, 436, 278, 496, 867, 210, 399, 680, 480, 51, + 878, 465, 811, 169, 869, 675, 611, 697, 867, 561, + 862, 687, 507, 283, 482, 129, 807, 591, 733, 623, + 150, 238, 59, 379, 684, 877, 625, 169, 643, 105, + 170, 607, 520, 932, 727, 476, 693, 425, 174, 647, + 73, 122, 335, 530, 442, 853, 695, 249, 445, 515, + 909, 545, 703, 919, 874, 474, 882, 500, 594, 612, + 641, 801, 220, 162, 819, 984, 589, 513, 495, 799, + 161, 604, 958, 533, 221, 400, 386, 867, 600, 782, + 382, 596, 414, 171, 516, 375, 682, 485, 911, 276, + 98, 553, 163, 354, 666, 933, 424, 341, 533, 870, + 227, 730, 475, 186, 263, 647, 537, 686, 600, 224, + 469, 68, 770, 919, 190, 373, 294, 822, 808, 206, + 184, 943, 795, 384, 383, 461, 404, 758, 839, 887, + 715, 67, 618, 276, 204, 918, 873, 777, 604, 560, + 951, 160, 578, 722, 79, 804, 96, 409, 713, 940, + 652, 934, 970, 447, 318, 353, 859, 672, 112, 785, + 645, 863, 803, 350, 139, 93, 354, 99, 820, 908, + 609, 772, 154, 274, 580, 184, 79, 626, 630, 742, + 653, 282, 762, 623, 680, 81, 927, 626, 789, 125, + 411, 521, 938, 300, 821, 78, 343, 175, 128, 250, + 170, 774, 972, 275, 999, 639, 495, 78, 352, 126, + 857, 956, 358, 619, 580, 124, 737, 594, 701, 612, + 669, 112, 134, 694, 363, 992, 809, 743, 168, 974, + 944, 375, 748, 52, 600, 747, 642, 182, 862, 81, + 344, 805, 988, 739, 511, 655, 814, 334, 249, 515, + 897, 955, 664, 981, 649, 113, 974, 459, 893, 228, + 433, 837, 553, 268, 926, 240, 102, 654, 459, 51, + 686, 754, 806, 760, 493, 403, 415, 394, 687, 700, + 946, 670, 656, 610, 738, 392, 760, 799, 887, 653, + 978, 321, 576, 617, 626, 502, 894, 679, 243, 440, + 680, 879, 194, 572, 640, 
724, 926, 56, 204, 700, + 707, 151, 457, 449, 797, 195, 791, 558, 945, 679, + 297, 59, 87, 824, 713, 663, 412, 693, 342, 606, + 134, 108, 571, 364, 631, 212, 174, 643, 304, 329, + 343, 97, 430, 751, 497, 314, 983, 374, 822, 928, + 140, 206, 73, 263, 980, 736, 876, 478, 430, 305, + 170, 514, 364, 692, 829, 82, 855, 953, 676, 246, + 369, 970, 294, 750, 807, 827, 150, 790, 288, 923, + 804, 378, 215, 828, 592, 281, 565, 555, 710, 82, + 896, 831, 547, 261, 524, 462, 293, 465, 502, 56, + 661, 821, 976, 991, 658, 869, 905, 758, 745, 193, + 768, 550, 608, 933, 378, 286, 215, 979, 792, 961, + 61, 688, 793, 644, 986, 403, 106, 366, 905, 644, + 372, 567, 466, 434, 645, 210, 389, 550, 919, 135, + 780, 773, 635, 389, 707, 100, 626, 958, 165, 504, + 920, 176, 193, 713, 857, 265, 203, 50, 668, 108, + 645, 990, 626, 197, 510, 357, 358, 850, 858, 364, + 936, 638 + }; + + /// + /// When multiplied by compression parameter (1-9) gives the block size for compression + /// 9 gives the best compression but uses the most memory. + /// + public const int BaseBlockSize = 100000; + + /// + /// Backend constant + /// + public const int MaximumAlphaSize = 258; + + /// + /// Backend constant + /// + public const int MaximumCodeLength = 23; + + /// + /// Backend constant + /// + public const int RunA = 0; + + /// + /// Backend constant + /// + public const int RunB = 1; + + /// + /// Backend constant + /// + public const int GroupCount = 6; + + /// + /// Backend constant + /// + public const int GroupSize = 50; + + /// + /// Backend constant + /// + public const int NumberOfIterations = 4; + + /// + /// Backend constant + /// + public const int MaximumSelectors = (2 + (900000 / GroupSize)); + + /// + /// Backend constant + /// + public const int OvershootBytes = 20; + + private BZip2Constants() + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Constants.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Constants.cs.meta new file mode 100644 index 0000000..67f2e00 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Constants.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 6ebe4502a92124336b25d9fa20d0cc0a +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Exception.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Exception.cs new file mode 100644 index 0000000..111d21c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Exception.cs @@ -0,0 +1,54 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib.BZip2 +{ + /// + /// BZip2Exception represents exceptions specific to BZip2 classes and code. + /// + [Serializable] + public class BZip2Exception : SharpZipBaseException + { + /// + /// Initialise a new instance of . + /// + public BZip2Exception() + { + } + + /// + /// Initialise a new instance of with its message string. + /// + /// A that describes the error. + public BZip2Exception(string message) + : base(message) + { + } + + /// + /// Initialise a new instance of . + /// + /// A that describes the error. + /// The that caused this exception. + public BZip2Exception(string message, Exception innerException) + : base(message, innerException) + { + } + + /// + /// Initializes a new instance of the BZip2Exception class with serialized data. 
+ /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination. + /// + protected BZip2Exception(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Exception.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Exception.cs.meta new file mode 100644 index 0000000..66c101c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2Exception.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: e5ec591629af04ec59e9a410fc8e5fd0 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2InputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2InputStream.cs new file mode 100644 index 0000000..e639bc1 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2InputStream.cs @@ -0,0 +1,1028 @@ +using ICSharpCode.SharpZipLib.Checksum; +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.BZip2 +{ + /// + /// An input stream that decompresses files in the BZip2 format + /// + public class BZip2InputStream : Stream + { + #region Constants + + private const int START_BLOCK_STATE = 1; + private const int RAND_PART_A_STATE = 2; + private const int RAND_PART_B_STATE = 3; + private const int RAND_PART_C_STATE = 4; + private const int NO_RAND_PART_A_STATE = 5; + private const int NO_RAND_PART_B_STATE = 6; + private const int NO_RAND_PART_C_STATE = 7; + + #endregion Constants + + #region Instance Fields + + /*-- + index of the last char in the block, so + the block size == last + 1. + --*/ + private int last; + + /*-- + index in zptr[] of original string after sorting. + --*/ + private int origPtr; + + /*-- + always: in the range 0 .. 9. + The current block size is 100000 * this number. + --*/ + private int blockSize100k; + + private bool blockRandomised; + + private int bsBuff; + private int bsLive; + private IChecksum mCrc = new BZip2Crc(); + + private bool[] inUse = new bool[256]; + private int nInUse; + + private byte[] seqToUnseq = new byte[256]; + private byte[] unseqToSeq = new byte[256]; + + private byte[] selector = new byte[BZip2Constants.MaximumSelectors]; + private byte[] selectorMtf = new byte[BZip2Constants.MaximumSelectors]; + + private int[] tt; + private byte[] ll8; + + /*-- + freq table collected to save a pass over the data + during decompression. 
+ --*/ + private int[] unzftab = new int[256]; + + private int[][] limit = new int[BZip2Constants.GroupCount][]; + private int[][] baseArray = new int[BZip2Constants.GroupCount][]; + private int[][] perm = new int[BZip2Constants.GroupCount][]; + private int[] minLens = new int[BZip2Constants.GroupCount]; + + private readonly Stream baseStream; + private bool streamEnd; + + private int currentChar = -1; + + private int currentState = START_BLOCK_STATE; + + private int storedBlockCRC, storedCombinedCRC; + private int computedBlockCRC; + private uint computedCombinedCRC; + + private int count, chPrev, ch2; + private int tPos; + private int rNToGo; + private int rTPos; + private int i2, j2; + private byte z; + + #endregion Instance Fields + + /// + /// Construct instance for reading from stream + /// + /// Data source + public BZip2InputStream(Stream stream) + { + if (stream == null) + throw new ArgumentNullException(nameof(stream)); + // init arrays + for (int i = 0; i < BZip2Constants.GroupCount; ++i) + { + limit[i] = new int[BZip2Constants.MaximumAlphaSize]; + baseArray[i] = new int[BZip2Constants.MaximumAlphaSize]; + perm[i] = new int[BZip2Constants.MaximumAlphaSize]; + } + + baseStream = stream; + bsLive = 0; + bsBuff = 0; + Initialize(); + InitBlock(); + SetupBlock(); + } + + /// + /// Get/set flag indicating ownership of underlying stream. + /// When the flag is true will close the underlying stream also. + /// + public bool IsStreamOwner { get; set; } = true; + + #region Stream Overrides + + /// + /// Gets a value indicating if the stream supports reading + /// + public override bool CanRead + { + get + { + return baseStream.CanRead; + } + } + + /// + /// Gets a value indicating whether the current stream supports seeking. + /// + public override bool CanSeek + { + get + { + return false; + } + } + + /// + /// Gets a value indicating whether the current stream supports writing. + /// This property always returns false + /// + public override bool CanWrite + { + get + { + return false; + } + } + + /// + /// Gets the length in bytes of the stream. + /// + public override long Length + { + get + { + return baseStream.Length; + } + } + + /// + /// Gets the current position of the stream. + /// Setting the position is not supported and will throw a NotSupportException. + /// + /// Any attempt to set the position. + public override long Position + { + get + { + return baseStream.Position; + } + set + { + throw new NotSupportedException("BZip2InputStream position cannot be set"); + } + } + + /// + /// Flushes the stream. + /// + public override void Flush() + { + baseStream.Flush(); + } + + /// + /// Set the streams position. This operation is not supported and will throw a NotSupportedException + /// + /// A byte offset relative to the parameter. + /// A value of type indicating the reference point used to obtain the new position. + /// The new position of the stream. + /// Any access + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException("BZip2InputStream Seek not supported"); + } + + /// + /// Sets the length of this stream to the given value. + /// This operation is not supported and will throw a NotSupportedExceptionortedException + /// + /// The new length for the stream. + /// Any access + public override void SetLength(long value) + { + throw new NotSupportedException("BZip2InputStream SetLength not supported"); + } + + /// + /// Writes a block of bytes to this stream using data from a buffer. 
+ /// This operation is not supported and will throw a NotSupportedException + /// + /// The buffer to source data from. + /// The offset to start obtaining data from. + /// The number of bytes of data to write. + /// Any access + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException("BZip2InputStream Write not supported"); + } + + /// + /// Writes a byte to the current position in the file stream. + /// This operation is not supported and will throw a NotSupportedException + /// + /// The value to write. + /// Any access + public override void WriteByte(byte value) + { + throw new NotSupportedException("BZip2InputStream WriteByte not supported"); + } + + /// + /// Read a sequence of bytes and advances the read position by one byte. + /// + /// Array of bytes to store values in + /// Offset in array to begin storing data + /// The maximum number of bytes to read + /// The total number of bytes read into the buffer. This might be less + /// than the number of bytes requested if that number of bytes are not + /// currently available or zero if the end of the stream is reached. + /// + public override int Read(byte[] buffer, int offset, int count) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + for (int i = 0; i < count; ++i) + { + int rb = ReadByte(); + if (rb == -1) + { + return i; + } + buffer[offset + i] = (byte)rb; + } + return count; + } + + /// + /// Closes the stream, releasing any associated resources. + /// + protected override void Dispose(bool disposing) + { + if (disposing && IsStreamOwner) + { + baseStream.Dispose(); + } + } + + /// + /// Read a byte from stream advancing position + /// + /// byte read or -1 on end of stream + public override int ReadByte() + { + if (streamEnd) + { + return -1; // ok + } + + int retChar = currentChar; + switch (currentState) + { + case RAND_PART_B_STATE: + SetupRandPartB(); + break; + + case RAND_PART_C_STATE: + SetupRandPartC(); + break; + + case NO_RAND_PART_B_STATE: + SetupNoRandPartB(); + break; + + case NO_RAND_PART_C_STATE: + SetupNoRandPartC(); + break; + + case START_BLOCK_STATE: + case NO_RAND_PART_A_STATE: + case RAND_PART_A_STATE: + break; + } + return retChar; + } + + #endregion Stream Overrides + + private void MakeMaps() + { + nInUse = 0; + for (int i = 0; i < 256; ++i) + { + if (inUse[i]) + { + seqToUnseq[nInUse] = (byte)i; + unseqToSeq[i] = (byte)nInUse; + nInUse++; + } + } + } + + private void Initialize() + { + char magic1 = BsGetUChar(); + char magic2 = BsGetUChar(); + + char magic3 = BsGetUChar(); + char magic4 = BsGetUChar(); + + if (magic1 != 'B' || magic2 != 'Z' || magic3 != 'h' || magic4 < '1' || magic4 > '9') + { + streamEnd = true; + return; + } + + SetDecompressStructureSizes(magic4 - '0'); + computedCombinedCRC = 0; + } + + private void InitBlock() + { + char magic1 = BsGetUChar(); + char magic2 = BsGetUChar(); + char magic3 = BsGetUChar(); + char magic4 = BsGetUChar(); + char magic5 = BsGetUChar(); + char magic6 = BsGetUChar(); + + if (magic1 == 0x17 && magic2 == 0x72 && magic3 == 0x45 && magic4 == 0x38 && magic5 == 0x50 && magic6 == 0x90) + { + Complete(); + return; + } + + if (magic1 != 0x31 || magic2 != 0x41 || magic3 != 0x59 || magic4 != 0x26 || magic5 != 0x53 || magic6 != 0x59) + { + BadBlockHeader(); + streamEnd = true; + return; + } + + storedBlockCRC = BsGetInt32(); + + blockRandomised = (BsR(1) == 1); + + GetAndMoveToFrontDecode(); + + mCrc.Reset(); + currentState = START_BLOCK_STATE; + } + + private void EndBlock() + { 
+ computedBlockCRC = (int)mCrc.Value; + + // -- A bad CRC is considered a fatal error. -- + if (storedBlockCRC != computedBlockCRC) + { + CrcError(); + } + + // 1528150659 + computedCombinedCRC = ((computedCombinedCRC << 1) & 0xFFFFFFFF) | (computedCombinedCRC >> 31); + computedCombinedCRC = computedCombinedCRC ^ (uint)computedBlockCRC; + } + + private void Complete() + { + storedCombinedCRC = BsGetInt32(); + if (storedCombinedCRC != (int)computedCombinedCRC) + { + CrcError(); + } + + streamEnd = true; + } + + private void FillBuffer() + { + int thech = 0; + + try + { + thech = baseStream.ReadByte(); + } + catch (Exception) + { + CompressedStreamEOF(); + } + + if (thech == -1) + { + CompressedStreamEOF(); + } + + bsBuff = (bsBuff << 8) | (thech & 0xFF); + bsLive += 8; + } + + private int BsR(int n) + { + while (bsLive < n) + { + FillBuffer(); + } + + int v = (bsBuff >> (bsLive - n)) & ((1 << n) - 1); + bsLive -= n; + return v; + } + + private char BsGetUChar() + { + return (char)BsR(8); + } + + private int BsGetIntVS(int numBits) + { + return BsR(numBits); + } + + private int BsGetInt32() + { + int result = BsR(8); + result = (result << 8) | BsR(8); + result = (result << 8) | BsR(8); + result = (result << 8) | BsR(8); + return result; + } + + private void RecvDecodingTables() + { + char[][] len = new char[BZip2Constants.GroupCount][]; + for (int i = 0; i < BZip2Constants.GroupCount; ++i) + { + len[i] = new char[BZip2Constants.MaximumAlphaSize]; + } + + bool[] inUse16 = new bool[16]; + + //--- Receive the mapping table --- + for (int i = 0; i < 16; i++) + { + inUse16[i] = (BsR(1) == 1); + } + + for (int i = 0; i < 16; i++) + { + if (inUse16[i]) + { + for (int j = 0; j < 16; j++) + { + inUse[i * 16 + j] = (BsR(1) == 1); + } + } + else + { + for (int j = 0; j < 16; j++) + { + inUse[i * 16 + j] = false; + } + } + } + + MakeMaps(); + int alphaSize = nInUse + 2; + + //--- Now the selectors --- + int nGroups = BsR(3); + int nSelectors = BsR(15); + + for (int i = 0; i < nSelectors; i++) + { + int j = 0; + while (BsR(1) == 1) + { + j++; + } + selectorMtf[i] = (byte)j; + } + + //--- Undo the MTF values for the selectors. 
--- + byte[] pos = new byte[BZip2Constants.GroupCount]; + for (int v = 0; v < nGroups; v++) + { + pos[v] = (byte)v; + } + + for (int i = 0; i < nSelectors; i++) + { + int v = selectorMtf[i]; + byte tmp = pos[v]; + while (v > 0) + { + pos[v] = pos[v - 1]; + v--; + } + pos[0] = tmp; + selector[i] = tmp; + } + + //--- Now the coding tables --- + for (int t = 0; t < nGroups; t++) + { + int curr = BsR(5); + for (int i = 0; i < alphaSize; i++) + { + while (BsR(1) == 1) + { + if (BsR(1) == 0) + { + curr++; + } + else + { + curr--; + } + } + len[t][i] = (char)curr; + } + } + + //--- Create the Huffman decoding tables --- + for (int t = 0; t < nGroups; t++) + { + int minLen = 32; + int maxLen = 0; + for (int i = 0; i < alphaSize; i++) + { + maxLen = Math.Max(maxLen, len[t][i]); + minLen = Math.Min(minLen, len[t][i]); + } + HbCreateDecodeTables(limit[t], baseArray[t], perm[t], len[t], minLen, maxLen, alphaSize); + minLens[t] = minLen; + } + } + + private void GetAndMoveToFrontDecode() + { + byte[] yy = new byte[256]; + int nextSym; + + int limitLast = BZip2Constants.BaseBlockSize * blockSize100k; + origPtr = BsGetIntVS(24); + + RecvDecodingTables(); + int EOB = nInUse + 1; + int groupNo = -1; + int groupPos = 0; + + /*-- + Setting up the unzftab entries here is not strictly + necessary, but it does save having to do it later + in a separate pass, and so saves a block's worth of + cache misses. + --*/ + for (int i = 0; i <= 255; i++) + { + unzftab[i] = 0; + } + + for (int i = 0; i <= 255; i++) + { + yy[i] = (byte)i; + } + + last = -1; + + if (groupPos == 0) + { + groupNo++; + groupPos = BZip2Constants.GroupSize; + } + + groupPos--; + int zt = selector[groupNo]; + int zn = minLens[zt]; + int zvec = BsR(zn); + int zj; + + while (zvec > limit[zt][zn]) + { + if (zn > 20) + { // the longest code + throw new BZip2Exception("Bzip data error"); + } + zn++; + while (bsLive < 1) + { + FillBuffer(); + } + zj = (bsBuff >> (bsLive - 1)) & 1; + bsLive--; + zvec = (zvec << 1) | zj; + } + if (zvec - baseArray[zt][zn] < 0 || zvec - baseArray[zt][zn] >= BZip2Constants.MaximumAlphaSize) + { + throw new BZip2Exception("Bzip data error"); + } + nextSym = perm[zt][zvec - baseArray[zt][zn]]; + + while (true) + { + if (nextSym == EOB) + { + break; + } + + if (nextSym == BZip2Constants.RunA || nextSym == BZip2Constants.RunB) + { + int s = -1; + int n = 1; + do + { + if (nextSym == BZip2Constants.RunA) + { + s += (0 + 1) * n; + } + else if (nextSym == BZip2Constants.RunB) + { + s += (1 + 1) * n; + } + + n <<= 1; + + if (groupPos == 0) + { + groupNo++; + groupPos = BZip2Constants.GroupSize; + } + + groupPos--; + + zt = selector[groupNo]; + zn = minLens[zt]; + zvec = BsR(zn); + + while (zvec > limit[zt][zn]) + { + zn++; + while (bsLive < 1) + { + FillBuffer(); + } + zj = (bsBuff >> (bsLive - 1)) & 1; + bsLive--; + zvec = (zvec << 1) | zj; + } + nextSym = perm[zt][zvec - baseArray[zt][zn]]; + } while (nextSym == BZip2Constants.RunA || nextSym == BZip2Constants.RunB); + + s++; + byte ch = seqToUnseq[yy[0]]; + unzftab[ch] += s; + + while (s > 0) + { + last++; + ll8[last] = ch; + s--; + } + + if (last >= limitLast) + { + BlockOverrun(); + } + continue; + } + else + { + last++; + if (last >= limitLast) + { + BlockOverrun(); + } + + byte tmp = yy[nextSym - 1]; + unzftab[seqToUnseq[tmp]]++; + ll8[last] = seqToUnseq[tmp]; + + for (int j = nextSym - 1; j > 0; --j) + { + yy[j] = yy[j - 1]; + } + yy[0] = tmp; + + if (groupPos == 0) + { + groupNo++; + groupPos = BZip2Constants.GroupSize; + } + + groupPos--; + zt = selector[groupNo]; + zn = 
minLens[zt]; + zvec = BsR(zn); + while (zvec > limit[zt][zn]) + { + zn++; + while (bsLive < 1) + { + FillBuffer(); + } + zj = (bsBuff >> (bsLive - 1)) & 1; + bsLive--; + zvec = (zvec << 1) | zj; + } + nextSym = perm[zt][zvec - baseArray[zt][zn]]; + continue; + } + } + } + + private void SetupBlock() + { + int[] cftab = new int[257]; + + cftab[0] = 0; + Array.Copy(unzftab, 0, cftab, 1, 256); + + for (int i = 1; i <= 256; i++) + { + cftab[i] += cftab[i - 1]; + } + + for (int i = 0; i <= last; i++) + { + byte ch = ll8[i]; + tt[cftab[ch]] = i; + cftab[ch]++; + } + + cftab = null; + + tPos = tt[origPtr]; + + count = 0; + i2 = 0; + ch2 = 256; /*-- not a char and not EOF --*/ + + if (blockRandomised) + { + rNToGo = 0; + rTPos = 0; + SetupRandPartA(); + } + else + { + SetupNoRandPartA(); + } + } + + private void SetupRandPartA() + { + if (i2 <= last) + { + chPrev = ch2; + ch2 = ll8[tPos]; + tPos = tt[tPos]; + if (rNToGo == 0) + { + rNToGo = BZip2Constants.RandomNumbers[rTPos]; + rTPos++; + if (rTPos == 512) + { + rTPos = 0; + } + } + rNToGo--; + ch2 ^= (int)((rNToGo == 1) ? 1 : 0); + i2++; + + currentChar = ch2; + currentState = RAND_PART_B_STATE; + mCrc.Update(ch2); + } + else + { + EndBlock(); + InitBlock(); + SetupBlock(); + } + } + + private void SetupNoRandPartA() + { + if (i2 <= last) + { + chPrev = ch2; + ch2 = ll8[tPos]; + tPos = tt[tPos]; + i2++; + + currentChar = ch2; + currentState = NO_RAND_PART_B_STATE; + mCrc.Update(ch2); + } + else + { + EndBlock(); + InitBlock(); + SetupBlock(); + } + } + + private void SetupRandPartB() + { + if (ch2 != chPrev) + { + currentState = RAND_PART_A_STATE; + count = 1; + SetupRandPartA(); + } + else + { + count++; + if (count >= 4) + { + z = ll8[tPos]; + tPos = tt[tPos]; + if (rNToGo == 0) + { + rNToGo = BZip2Constants.RandomNumbers[rTPos]; + rTPos++; + if (rTPos == 512) + { + rTPos = 0; + } + } + rNToGo--; + z ^= (byte)((rNToGo == 1) ? 
1 : 0); + j2 = 0; + currentState = RAND_PART_C_STATE; + SetupRandPartC(); + } + else + { + currentState = RAND_PART_A_STATE; + SetupRandPartA(); + } + } + } + + private void SetupRandPartC() + { + if (j2 < (int)z) + { + currentChar = ch2; + mCrc.Update(ch2); + j2++; + } + else + { + currentState = RAND_PART_A_STATE; + i2++; + count = 0; + SetupRandPartA(); + } + } + + private void SetupNoRandPartB() + { + if (ch2 != chPrev) + { + currentState = NO_RAND_PART_A_STATE; + count = 1; + SetupNoRandPartA(); + } + else + { + count++; + if (count >= 4) + { + z = ll8[tPos]; + tPos = tt[tPos]; + currentState = NO_RAND_PART_C_STATE; + j2 = 0; + SetupNoRandPartC(); + } + else + { + currentState = NO_RAND_PART_A_STATE; + SetupNoRandPartA(); + } + } + } + + private void SetupNoRandPartC() + { + if (j2 < (int)z) + { + currentChar = ch2; + mCrc.Update(ch2); + j2++; + } + else + { + currentState = NO_RAND_PART_A_STATE; + i2++; + count = 0; + SetupNoRandPartA(); + } + } + + private void SetDecompressStructureSizes(int newSize100k) + { + if (!(0 <= newSize100k && newSize100k <= 9 && 0 <= blockSize100k && blockSize100k <= 9)) + { + throw new BZip2Exception("Invalid block size"); + } + + blockSize100k = newSize100k; + + if (newSize100k == 0) + { + return; + } + + int n = BZip2Constants.BaseBlockSize * newSize100k; + ll8 = new byte[n]; + tt = new int[n]; + } + + private static void CompressedStreamEOF() + { + throw new EndOfStreamException("BZip2 input stream end of compressed stream"); + } + + private static void BlockOverrun() + { + throw new BZip2Exception("BZip2 input stream block overrun"); + } + + private static void BadBlockHeader() + { + throw new BZip2Exception("BZip2 input stream bad block header"); + } + + private static void CrcError() + { + throw new BZip2Exception("BZip2 input stream crc error"); + } + + private static void HbCreateDecodeTables(int[] limit, int[] baseArray, int[] perm, char[] length, int minLen, int maxLen, int alphaSize) + { + int pp = 0; + + for (int i = minLen; i <= maxLen; ++i) + { + for (int j = 0; j < alphaSize; ++j) + { + if (length[j] == i) + { + perm[pp] = j; + ++pp; + } + } + } + + for (int i = 0; i < BZip2Constants.MaximumCodeLength; i++) + { + baseArray[i] = 0; + } + + for (int i = 0; i < alphaSize; i++) + { + ++baseArray[length[i] + 1]; + } + + for (int i = 1; i < BZip2Constants.MaximumCodeLength; i++) + { + baseArray[i] += baseArray[i - 1]; + } + + for (int i = 0; i < BZip2Constants.MaximumCodeLength; i++) + { + limit[i] = 0; + } + + int vec = 0; + + for (int i = minLen; i <= maxLen; i++) + { + vec += (baseArray[i + 1] - baseArray[i]); + limit[i] = vec - 1; + vec <<= 1; + } + + for (int i = minLen + 1; i <= maxLen; i++) + { + baseArray[i] = ((limit[i - 1] + 1) << 1) - baseArray[i]; + } + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2InputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2InputStream.cs.meta new file mode 100644 index 0000000..2fb29ea --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2InputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 224e16fdfcb0f4ceb91caf14479a0e54 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2OutputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2OutputStream.cs new file mode 100644 index 
0000000..f331ec6 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2OutputStream.cs @@ -0,0 +1,2033 @@ +using ICSharpCode.SharpZipLib.Checksum; +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.BZip2 +{ + /// + /// An output stream that compresses into the BZip2 format + /// including file header chars into another stream. + /// + public class BZip2OutputStream : Stream + { + #region Constants + + private const int SETMASK = (1 << 21); + private const int CLEARMASK = (~SETMASK); + private const int GREATER_ICOST = 15; + private const int LESSER_ICOST = 0; + private const int SMALL_THRESH = 20; + private const int DEPTH_THRESH = 10; + + /*-- + If you are ever unlucky/improbable enough + to get a stack overflow whilst sorting, + increase the following constant and try + again. In practice I have never seen the + stack go above 27 elems, so the following + limit seems very generous. + --*/ + private const int QSORT_STACK_SIZE = 1000; + + /*-- + Knuth's increments seem to work better + than Incerpi-Sedgewick here. Possibly + because the number of elems to sort is + usually small, typically <= 20. + --*/ + + private readonly int[] increments = { + 1, 4, 13, 40, 121, 364, 1093, 3280, + 9841, 29524, 88573, 265720, + 797161, 2391484 + }; + + #endregion Constants + + #region Instance Fields + + /*-- + index of the last char in the block, so + the block size == last + 1. + --*/ + private int last; + + /*-- + index in zptr[] of original string after sorting. + --*/ + private int origPtr; + + /*-- + always: in the range 0 .. 9. + The current block size is 100000 * this number. + --*/ + private int blockSize100k; + + private bool blockRandomised; + + private int bytesOut; + private int bsBuff; + private int bsLive; + private IChecksum mCrc = new BZip2Crc(); + + private bool[] inUse = new bool[256]; + private int nInUse; + + private char[] seqToUnseq = new char[256]; + private char[] unseqToSeq = new char[256]; + + private char[] selector = new char[BZip2Constants.MaximumSelectors]; + private char[] selectorMtf = new char[BZip2Constants.MaximumSelectors]; + + private byte[] block; + private int[] quadrant; + private int[] zptr; + private short[] szptr; + private int[] ftab; + + private int nMTF; + + private int[] mtfFreq = new int[BZip2Constants.MaximumAlphaSize]; + + /* + * Used when sorting. If too many long comparisons + * happen, we stop sorting, randomise the block + * slightly, and try again. + */ + private int workFactor; + private int workDone; + private int workLimit; + private bool firstAttempt; + private int nBlocksRandomised; + + private int currentChar = -1; + private int runLength; + private uint blockCRC, combinedCRC; + private int allowableBlockSize; + private readonly Stream baseStream; + private bool disposed_; + + #endregion Instance Fields + + /// + /// Construct a default output stream with maximum block size + /// + /// The stream to write BZip data onto. + public BZip2OutputStream(Stream stream) : this(stream, 9) + { + } + + /// + /// Initialise a new instance of the + /// for the specified stream, using the given blocksize. + /// + /// The stream to write compressed data to. + /// The block size to use. + /// + /// Valid block sizes are in the range 1..9, with 1 giving + /// the lowest compression and 9 the highest. 
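+ /// A minimal round-trip sketch (editorial illustration, not part of the
+ /// patched sources), pairing this class with the BZip2InputStream added
+ /// earlier in this diff; "data" stands for any byte array:
+ /// var compressed = new MemoryStream();
+ /// using (var bz = new BZip2OutputStream(compressed, 9)) // 900k blocks
+ /// {
+ ///     bz.IsStreamOwner = false;      // keep the MemoryStream open
+ ///     bz.Write(data, 0, data.Length);
+ /// }                                  // Dispose flushes block + trailer
+ /// compressed.Position = 0;
+ /// var restored = new MemoryStream();
+ /// using (var bzIn = new BZip2InputStream(compressed))
+ ///     bzIn.CopyTo(restored);         // restored now equals data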
+ /// + public BZip2OutputStream(Stream stream, int blockSize) + { + if (stream == null) + throw new ArgumentNullException(nameof(stream)); + + baseStream = stream; + bsLive = 0; + bsBuff = 0; + bytesOut = 0; + + workFactor = 50; + if (blockSize > 9) + { + blockSize = 9; + } + + if (blockSize < 1) + { + blockSize = 1; + } + blockSize100k = blockSize; + AllocateCompressStructures(); + Initialize(); + InitBlock(); + } + + /// + /// Ensures that resources are freed and other cleanup operations + /// are performed when the garbage collector reclaims the BZip2OutputStream. + /// + ~BZip2OutputStream() + { + Dispose(false); + } + + /// + /// Gets or sets a flag indicating ownership of underlying stream. + /// When the flag is true will close the underlying stream also. + /// + /// The default value is true. + public bool IsStreamOwner { get; set; } = true; + + /// + /// Gets a value indicating whether the current stream supports reading + /// + public override bool CanRead + { + get + { + return false; + } + } + + /// + /// Gets a value indicating whether the current stream supports seeking + /// + public override bool CanSeek + { + get + { + return false; + } + } + + /// + /// Gets a value indicating whether the current stream supports writing + /// + public override bool CanWrite + { + get + { + return baseStream.CanWrite; + } + } + + /// + /// Gets the length in bytes of the stream + /// + public override long Length + { + get + { + return baseStream.Length; + } + } + + /// + /// Gets or sets the current position of this stream. + /// + public override long Position + { + get + { + return baseStream.Position; + } + set + { + throw new NotSupportedException("BZip2OutputStream position cannot be set"); + } + } + + /// + /// Sets the current position of this stream to the given value. + /// + /// The point relative to the offset from which to being seeking. + /// The reference point from which to begin seeking. + /// The new position in the stream. + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException("BZip2OutputStream Seek not supported"); + } + + /// + /// Sets the length of this stream to the given value. + /// + /// The new stream length. + public override void SetLength(long value) + { + throw new NotSupportedException("BZip2OutputStream SetLength not supported"); + } + + /// + /// Read a byte from the stream advancing the position. + /// + /// The byte read cast to an int; -1 if end of stream. + public override int ReadByte() + { + throw new NotSupportedException("BZip2OutputStream ReadByte not supported"); + } + + /// + /// Read a block of bytes + /// + /// The buffer to read into. + /// The offset in the buffer to start storing data at. + /// The maximum number of bytes to read. + /// The total number of bytes read. This might be less than the number of bytes + /// requested if that number of bytes are not currently available, or zero + /// if the end of the stream is reached. + public override int Read(byte[] buffer, int offset, int count) + { + throw new NotSupportedException("BZip2OutputStream Read not supported"); + } + + /// + /// Write a block of bytes to the stream + /// + /// The buffer containing data to write. + /// The offset of the first byte to write. + /// The number of bytes to write. 
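+ /// Reviewer note (editorial): Write below only validates its arguments
+ /// and feeds every byte through WriteByte, which implements bzip2's
+ /// first-stage run-length encoding: a run of 4..255 equal bytes becomes
+ /// four literals plus one count byte. The rule in isolation, assuming
+ /// hypothetical locals "data" (byte[]) and "output" (List&lt;byte&gt;):
+ /// int i = 0;
+ /// while (i < data.Length)
+ /// {
+ ///     int run = 1;
+ ///     while (i + run < data.Length && data[i + run] == data[i] && run < 255)
+ ///         run++;
+ ///     for (int k = 0; k < Math.Min(run, 4); k++) output.Add(data[i]);
+ ///     if (run >= 4) output.Add((byte)(run - 4)); // 0..251 extra repeats
+ ///     i += run;
+ /// }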
+ public override void Write(byte[] buffer, int offset, int count) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (offset < 0) + { + throw new ArgumentOutOfRangeException(nameof(offset)); + } + + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + if (buffer.Length - offset < count) + { + throw new ArgumentException("Offset/count out of range"); + } + + for (int i = 0; i < count; ++i) + { + WriteByte(buffer[offset + i]); + } + } + + /// + /// Write a byte to the stream. + /// + /// The byte to write to the stream. + public override void WriteByte(byte value) + { + int b = (256 + value) % 256; + if (currentChar != -1) + { + if (currentChar == b) + { + runLength++; + if (runLength > 254) + { + WriteRun(); + currentChar = -1; + runLength = 0; + } + } + else + { + WriteRun(); + runLength = 1; + currentChar = b; + } + } + else + { + currentChar = b; + runLength++; + } + } + + private void MakeMaps() + { + nInUse = 0; + for (int i = 0; i < 256; i++) + { + if (inUse[i]) + { + seqToUnseq[nInUse] = (char)i; + unseqToSeq[i] = (char)nInUse; + nInUse++; + } + } + } + + /// + /// Get the number of bytes written to output. + /// + private void WriteRun() + { + if (last < allowableBlockSize) + { + inUse[currentChar] = true; + for (int i = 0; i < runLength; i++) + { + mCrc.Update(currentChar); + } + + switch (runLength) + { + case 1: + last++; + block[last + 1] = (byte)currentChar; + break; + + case 2: + last++; + block[last + 1] = (byte)currentChar; + last++; + block[last + 1] = (byte)currentChar; + break; + + case 3: + last++; + block[last + 1] = (byte)currentChar; + last++; + block[last + 1] = (byte)currentChar; + last++; + block[last + 1] = (byte)currentChar; + break; + + default: + inUse[runLength - 4] = true; + last++; + block[last + 1] = (byte)currentChar; + last++; + block[last + 1] = (byte)currentChar; + last++; + block[last + 1] = (byte)currentChar; + last++; + block[last + 1] = (byte)currentChar; + last++; + block[last + 1] = (byte)(runLength - 4); + break; + } + } + else + { + EndBlock(); + InitBlock(); + WriteRun(); + } + } + + /// + /// Get the number of bytes written to the output. + /// + public int BytesWritten + { + get { return bytesOut; } + } + + /// + /// Releases the unmanaged resources used by the and optionally releases the managed resources. + /// + /// true to release both managed and unmanaged resources; false to release only unmanaged resources. + override protected void Dispose(bool disposing) + { + try + { + try + { + base.Dispose(disposing); + if (!disposed_) + { + disposed_ = true; + + if (runLength > 0) + { + WriteRun(); + } + + currentChar = -1; + EndBlock(); + EndCompression(); + Flush(); + } + } + finally + { + if (disposing) + { + if (IsStreamOwner) + { + baseStream.Dispose(); + } + } + } + } + catch + { + } + } + + /// + /// Flush output buffers + /// + public override void Flush() + { + baseStream.Flush(); + } + + private void Initialize() + { + bytesOut = 0; + nBlocksRandomised = 0; + + /*--- Write header `magic' bytes indicating file-format == huffmanised, + followed by a digit indicating blockSize100k. 
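+ (Editorial note: with the default constructor this makes every stream
+ begin with the four ASCII bytes 'B', 'Z', 'h', '9'.)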
+ ---*/ + + BsPutUChar('B'); + BsPutUChar('Z'); + + BsPutUChar('h'); + BsPutUChar('0' + blockSize100k); + + combinedCRC = 0; + } + + private void InitBlock() + { + mCrc.Reset(); + last = -1; + + for (int i = 0; i < 256; i++) + { + inUse[i] = false; + } + + /*--- 20 is just a paranoia constant ---*/ + allowableBlockSize = BZip2Constants.BaseBlockSize * blockSize100k - 20; + } + + private void EndBlock() + { + if (last < 0) + { // dont do anything for empty files, (makes empty files compatible with original Bzip) + return; + } + + blockCRC = unchecked((uint)mCrc.Value); + combinedCRC = (combinedCRC << 1) | (combinedCRC >> 31); + combinedCRC ^= blockCRC; + + /*-- sort the block and establish position of original string --*/ + DoReversibleTransformation(); + + /*-- + A 6-byte block header, the value chosen arbitrarily + as 0x314159265359 :-). A 32 bit value does not really + give a strong enough guarantee that the value will not + appear by chance in the compressed datastream. Worst-case + probability of this event, for a 900k block, is about + 2.0e-3 for 32 bits, 1.0e-5 for 40 bits and 4.0e-8 for 48 bits. + For a compressed file of size 100Gb -- about 100000 blocks -- + only a 48-bit marker will do. NB: normal compression/ + decompression do *not* rely on these statistical properties. + They are only important when trying to recover blocks from + damaged files. + --*/ + BsPutUChar(0x31); + BsPutUChar(0x41); + BsPutUChar(0x59); + BsPutUChar(0x26); + BsPutUChar(0x53); + BsPutUChar(0x59); + + /*-- Now the block's CRC, so it is in a known place. --*/ + unchecked + { + BsPutint((int)blockCRC); + } + + /*-- Now a single bit indicating randomisation. --*/ + if (blockRandomised) + { + BsW(1, 1); + nBlocksRandomised++; + } + else + { + BsW(1, 0); + } + + /*-- Finally, block's contents proper. --*/ + MoveToFrontCodeAndSend(); + } + + private void EndCompression() + { + /*-- + Now another magic 48-bit number, 0x177245385090, to + indicate the end of the last block. (sqrt(pi), if + you want to know. I did want to use e, but it contains + too much repetition -- 27 18 28 18 28 46 -- for me + to feel statistically comfortable. Call me paranoid.) 
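+ (Editorial aside: BsW(n, v) keeps bits left-justified in the 32-bit
+ bsBuff accumulator -- after BsPutUChar(0x17) on an empty buffer,
+ bsBuff == 0x17000000 and bsLive == 8 -- and BsFinishedWithStream then
+ zero-pads whatever partial byte is left at the very end.)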
+ --*/ + BsPutUChar(0x17); + BsPutUChar(0x72); + BsPutUChar(0x45); + BsPutUChar(0x38); + BsPutUChar(0x50); + BsPutUChar(0x90); + + unchecked + { + BsPutint((int)combinedCRC); + } + + BsFinishedWithStream(); + } + + private void BsFinishedWithStream() + { + while (bsLive > 0) + { + int ch = (bsBuff >> 24); + baseStream.WriteByte((byte)ch); // write 8-bit + bsBuff <<= 8; + bsLive -= 8; + bytesOut++; + } + } + + private void BsW(int n, int v) + { + while (bsLive >= 8) + { + int ch = (bsBuff >> 24); + unchecked { baseStream.WriteByte((byte)ch); } // write 8-bit + bsBuff <<= 8; + bsLive -= 8; + ++bytesOut; + } + bsBuff |= (v << (32 - bsLive - n)); + bsLive += n; + } + + private void BsPutUChar(int c) + { + BsW(8, c); + } + + private void BsPutint(int u) + { + BsW(8, (u >> 24) & 0xFF); + BsW(8, (u >> 16) & 0xFF); + BsW(8, (u >> 8) & 0xFF); + BsW(8, u & 0xFF); + } + + private void BsPutIntVS(int numBits, int c) + { + BsW(numBits, c); + } + + private void SendMTFValues() + { + char[][] len = new char[BZip2Constants.GroupCount][]; + for (int i = 0; i < BZip2Constants.GroupCount; ++i) + { + len[i] = new char[BZip2Constants.MaximumAlphaSize]; + } + + int gs, ge, totc, bt, bc, iter; + int nSelectors = 0, alphaSize, minLen, maxLen, selCtr; + int nGroups; + + alphaSize = nInUse + 2; + for (int t = 0; t < BZip2Constants.GroupCount; t++) + { + for (int v = 0; v < alphaSize; v++) + { + len[t][v] = (char)GREATER_ICOST; + } + } + + /*--- Decide how many coding tables to use ---*/ + if (nMTF <= 0) + { + Panic(); + } + + if (nMTF < 200) + { + nGroups = 2; + } + else if (nMTF < 600) + { + nGroups = 3; + } + else if (nMTF < 1200) + { + nGroups = 4; + } + else if (nMTF < 2400) + { + nGroups = 5; + } + else + { + nGroups = 6; + } + + /*--- Generate an initial set of coding tables ---*/ + int nPart = nGroups; + int remF = nMTF; + gs = 0; + while (nPart > 0) + { + int tFreq = remF / nPart; + int aFreq = 0; + ge = gs - 1; + while (aFreq < tFreq && ge < alphaSize - 1) + { + ge++; + aFreq += mtfFreq[ge]; + } + + if (ge > gs && nPart != nGroups && nPart != 1 && ((nGroups - nPart) % 2 == 1)) + { + aFreq -= mtfFreq[ge]; + ge--; + } + + for (int v = 0; v < alphaSize; v++) + { + if (v >= gs && v <= ge) + { + len[nPart - 1][v] = (char)LESSER_ICOST; + } + else + { + len[nPart - 1][v] = (char)GREATER_ICOST; + } + } + + nPart--; + gs = ge + 1; + remF -= aFreq; + } + + int[][] rfreq = new int[BZip2Constants.GroupCount][]; + for (int i = 0; i < BZip2Constants.GroupCount; ++i) + { + rfreq[i] = new int[BZip2Constants.MaximumAlphaSize]; + } + + int[] fave = new int[BZip2Constants.GroupCount]; + short[] cost = new short[BZip2Constants.GroupCount]; + /*--- + Iterate up to N_ITERS times to improve the tables. + ---*/ + for (iter = 0; iter < BZip2Constants.NumberOfIterations; ++iter) + { + for (int t = 0; t < nGroups; ++t) + { + fave[t] = 0; + } + + for (int t = 0; t < nGroups; ++t) + { + for (int v = 0; v < alphaSize; ++v) + { + rfreq[t][v] = 0; + } + } + + nSelectors = 0; + totc = 0; + gs = 0; + while (true) + { + /*--- Set group start & end marks. --*/ + if (gs >= nMTF) + { + break; + } + ge = gs + BZip2Constants.GroupSize - 1; + if (ge >= nMTF) + { + ge = nMTF - 1; + } + + /*-- + Calculate the cost of this group as coded + by each of the coding tables. 
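+ (Editorial aside: with GroupSize = 50, the MTF stream is scored fifty
+ symbols at a time, and the cost of coding group [gs..ge] with table t
+ is simply the sum of len[t][szptr[i]] over i in gs..ge -- the exact
+ number of output bits that table would spend on the group.)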
+ --*/ + for (int t = 0; t < nGroups; t++) + { + cost[t] = 0; + } + + if (nGroups == 6) + { + short cost0, cost1, cost2, cost3, cost4, cost5; + cost0 = cost1 = cost2 = cost3 = cost4 = cost5 = 0; + for (int i = gs; i <= ge; ++i) + { + short icv = szptr[i]; + cost0 += (short)len[0][icv]; + cost1 += (short)len[1][icv]; + cost2 += (short)len[2][icv]; + cost3 += (short)len[3][icv]; + cost4 += (short)len[4][icv]; + cost5 += (short)len[5][icv]; + } + cost[0] = cost0; + cost[1] = cost1; + cost[2] = cost2; + cost[3] = cost3; + cost[4] = cost4; + cost[5] = cost5; + } + else + { + for (int i = gs; i <= ge; ++i) + { + short icv = szptr[i]; + for (int t = 0; t < nGroups; t++) + { + cost[t] += (short)len[t][icv]; + } + } + } + + /*-- + Find the coding table which is best for this group, + and record its identity in the selector table. + --*/ + bc = 999999999; + bt = -1; + for (int t = 0; t < nGroups; ++t) + { + if (cost[t] < bc) + { + bc = cost[t]; + bt = t; + } + } + totc += bc; + fave[bt]++; + selector[nSelectors] = (char)bt; + nSelectors++; + + /*-- + Increment the symbol frequencies for the selected table. + --*/ + for (int i = gs; i <= ge; ++i) + { + ++rfreq[bt][szptr[i]]; + } + + gs = ge + 1; + } + + /*-- + Recompute the tables based on the accumulated frequencies. + --*/ + for (int t = 0; t < nGroups; ++t) + { + HbMakeCodeLengths(len[t], rfreq[t], alphaSize, 20); + } + } + + rfreq = null; + fave = null; + cost = null; + + if (!(nGroups < 8)) + { + Panic(); + } + + if (!(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZip2Constants.GroupSize)))) + { + Panic(); + } + + /*--- Compute MTF values for the selectors. ---*/ + char[] pos = new char[BZip2Constants.GroupCount]; + char ll_i, tmp2, tmp; + + for (int i = 0; i < nGroups; i++) + { + pos[i] = (char)i; + } + + for (int i = 0; i < nSelectors; i++) + { + ll_i = selector[i]; + int j = 0; + tmp = pos[j]; + while (ll_i != tmp) + { + j++; + tmp2 = tmp; + tmp = pos[j]; + pos[j] = tmp2; + } + pos[0] = tmp; + selectorMtf[i] = (char)j; + } + + int[][] code = new int[BZip2Constants.GroupCount][]; + + for (int i = 0; i < BZip2Constants.GroupCount; ++i) + { + code[i] = new int[BZip2Constants.MaximumAlphaSize]; + } + + /*--- Assign actual codes for the tables. --*/ + for (int t = 0; t < nGroups; t++) + { + minLen = 32; + maxLen = 0; + for (int i = 0; i < alphaSize; i++) + { + if (len[t][i] > maxLen) + { + maxLen = len[t][i]; + } + if (len[t][i] < minLen) + { + minLen = len[t][i]; + } + } + if (maxLen > 20) + { + Panic(); + } + if (minLen < 1) + { + Panic(); + } + HbAssignCodes(code[t], len[t], minLen, maxLen, alphaSize); + } + + /*--- Transmit the mapping table. ---*/ + bool[] inUse16 = new bool[16]; + for (int i = 0; i < 16; ++i) + { + inUse16[i] = false; + for (int j = 0; j < 16; ++j) + { + if (inUse[i * 16 + j]) + { + inUse16[i] = true; + } + } + } + + for (int i = 0; i < 16; ++i) + { + if (inUse16[i]) + { + BsW(1, 1); + } + else + { + BsW(1, 0); + } + } + + for (int i = 0; i < 16; ++i) + { + if (inUse16[i]) + { + for (int j = 0; j < 16; ++j) + { + if (inUse[i * 16 + j]) + { + BsW(1, 1); + } + else + { + BsW(1, 0); + } + } + } + } + + /*--- Now the selectors. ---*/ + BsW(3, nGroups); + BsW(15, nSelectors); + for (int i = 0; i < nSelectors; ++i) + { + for (int j = 0; j < selectorMtf[i]; ++j) + { + BsW(1, 1); + } + BsW(1, 0); + } + + /*--- Now the coding tables. 
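+ (Editorial aside: each table is transmitted delta-coded -- 5 bits for
+ the first code length, then per symbol a run of 2-bit adjustments,
+ binary 10 for +1 and 11 for -1, closed by a single 0 bit. This mirrors
+ the reader loop in RecvDecodingTables on the input-stream side.)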
---*/ + for (int t = 0; t < nGroups; ++t) + { + int curr = len[t][0]; + BsW(5, curr); + for (int i = 0; i < alphaSize; ++i) + { + while (curr < len[t][i]) + { + BsW(2, 2); + curr++; /* 10 */ + } + while (curr > len[t][i]) + { + BsW(2, 3); + curr--; /* 11 */ + } + BsW(1, 0); + } + } + + /*--- And finally, the block data proper ---*/ + selCtr = 0; + gs = 0; + while (true) + { + if (gs >= nMTF) + { + break; + } + ge = gs + BZip2Constants.GroupSize - 1; + if (ge >= nMTF) + { + ge = nMTF - 1; + } + + for (int i = gs; i <= ge; i++) + { + BsW(len[selector[selCtr]][szptr[i]], code[selector[selCtr]][szptr[i]]); + } + + gs = ge + 1; + ++selCtr; + } + if (!(selCtr == nSelectors)) + { + Panic(); + } + } + + private void MoveToFrontCodeAndSend() + { + BsPutIntVS(24, origPtr); + GenerateMTFValues(); + SendMTFValues(); + } + + private void SimpleSort(int lo, int hi, int d) + { + int i, j, h, bigN, hp; + int v; + + bigN = hi - lo + 1; + if (bigN < 2) + { + return; + } + + hp = 0; + while (increments[hp] < bigN) + { + hp++; + } + hp--; + + for (; hp >= 0; hp--) + { + h = increments[hp]; + + i = lo + h; + while (true) + { + /*-- copy 1 --*/ + if (i > hi) + break; + v = zptr[i]; + j = i; + while (FullGtU(zptr[j - h] + d, v + d)) + { + zptr[j] = zptr[j - h]; + j = j - h; + if (j <= (lo + h - 1)) + break; + } + zptr[j] = v; + i++; + + /*-- copy 2 --*/ + if (i > hi) + { + break; + } + v = zptr[i]; + j = i; + while (FullGtU(zptr[j - h] + d, v + d)) + { + zptr[j] = zptr[j - h]; + j = j - h; + if (j <= (lo + h - 1)) + { + break; + } + } + zptr[j] = v; + i++; + + /*-- copy 3 --*/ + if (i > hi) + { + break; + } + v = zptr[i]; + j = i; + while (FullGtU(zptr[j - h] + d, v + d)) + { + zptr[j] = zptr[j - h]; + j = j - h; + if (j <= (lo + h - 1)) + { + break; + } + } + zptr[j] = v; + i++; + + if (workDone > workLimit && firstAttempt) + { + return; + } + } + } + } + + private void Vswap(int p1, int p2, int n) + { + int temp = 0; + while (n > 0) + { + temp = zptr[p1]; + zptr[p1] = zptr[p2]; + zptr[p2] = temp; + p1++; + p2++; + n--; + } + } + + private void QSort3(int loSt, int hiSt, int dSt) + { + int unLo, unHi, ltLo, gtHi, med, n, m; + int lo, hi, d; + + StackElement[] stack = new StackElement[QSORT_STACK_SIZE]; + + int sp = 0; + + stack[sp].ll = loSt; + stack[sp].hh = hiSt; + stack[sp].dd = dSt; + sp++; + + while (sp > 0) + { + if (sp >= QSORT_STACK_SIZE) + { + Panic(); + } + + sp--; + lo = stack[sp].ll; + hi = stack[sp].hh; + d = stack[sp].dd; + + if (hi - lo < SMALL_THRESH || d > DEPTH_THRESH) + { + SimpleSort(lo, hi, d); + if (workDone > workLimit && firstAttempt) + { + return; + } + continue; + } + + med = Med3(block[zptr[lo] + d + 1], + block[zptr[hi] + d + 1], + block[zptr[(lo + hi) >> 1] + d + 1]); + + unLo = ltLo = lo; + unHi = gtHi = hi; + + while (true) + { + while (true) + { + if (unLo > unHi) + { + break; + } + n = ((int)block[zptr[unLo] + d + 1]) - med; + if (n == 0) + { + int temp = zptr[unLo]; + zptr[unLo] = zptr[ltLo]; + zptr[ltLo] = temp; + ltLo++; + unLo++; + continue; + } + if (n > 0) + { + break; + } + unLo++; + } + + while (true) + { + if (unLo > unHi) + { + break; + } + n = ((int)block[zptr[unHi] + d + 1]) - med; + if (n == 0) + { + int temp = zptr[unHi]; + zptr[unHi] = zptr[gtHi]; + zptr[gtHi] = temp; + gtHi--; + unHi--; + continue; + } + if (n < 0) + { + break; + } + unHi--; + } + + if (unLo > unHi) + { + break; + } + + { + int temp = zptr[unLo]; + zptr[unLo] = zptr[unHi]; + zptr[unHi] = temp; + unLo++; + unHi--; + } + } + + if (gtHi < ltLo) + { + stack[sp].ll = lo; + stack[sp].hh = hi; + 
stack[sp].dd = d + 1; + sp++; + continue; + } + + n = ((ltLo - lo) < (unLo - ltLo)) ? (ltLo - lo) : (unLo - ltLo); + Vswap(lo, unLo - n, n); + m = ((hi - gtHi) < (gtHi - unHi)) ? (hi - gtHi) : (gtHi - unHi); + Vswap(unLo, hi - m + 1, m); + + n = lo + unLo - ltLo - 1; + m = hi - (gtHi - unHi) + 1; + + stack[sp].ll = lo; + stack[sp].hh = n; + stack[sp].dd = d; + sp++; + + stack[sp].ll = n + 1; + stack[sp].hh = m - 1; + stack[sp].dd = d + 1; + sp++; + + stack[sp].ll = m; + stack[sp].hh = hi; + stack[sp].dd = d; + sp++; + } + } + + private void MainSort() + { + int i, j, ss, sb; + int[] runningOrder = new int[256]; + int[] copy = new int[256]; + bool[] bigDone = new bool[256]; + int c1, c2; + int numQSorted; + + /*-- + In the various block-sized structures, live data runs + from 0 to last+NUM_OVERSHOOT_BYTES inclusive. First, + set up the overshoot area for block. + --*/ + + // if (verbosity >= 4) fprintf ( stderr, " sort initialise ...\n" ); + for (i = 0; i < BZip2Constants.OvershootBytes; i++) + { + block[last + i + 2] = block[(i % (last + 1)) + 1]; + } + for (i = 0; i <= last + BZip2Constants.OvershootBytes; i++) + { + quadrant[i] = 0; + } + + block[0] = (byte)(block[last + 1]); + + if (last < 4000) + { + /*-- + Use simpleSort(), since the full sorting mechanism + has quite a large constant overhead. + --*/ + for (i = 0; i <= last; i++) + { + zptr[i] = i; + } + firstAttempt = false; + workDone = workLimit = 0; + SimpleSort(0, last, 0); + } + else + { + numQSorted = 0; + for (i = 0; i <= 255; i++) + { + bigDone[i] = false; + } + for (i = 0; i <= 65536; i++) + { + ftab[i] = 0; + } + + c1 = block[0]; + for (i = 0; i <= last; i++) + { + c2 = block[i + 1]; + ftab[(c1 << 8) + c2]++; + c1 = c2; + } + + for (i = 1; i <= 65536; i++) + { + ftab[i] += ftab[i - 1]; + } + + c1 = block[1]; + for (i = 0; i < last; i++) + { + c2 = block[i + 2]; + j = (c1 << 8) + c2; + c1 = c2; + ftab[j]--; + zptr[ftab[j]] = i; + } + + j = ((block[last + 1]) << 8) + (block[1]); + ftab[j]--; + zptr[ftab[j]] = last; + + /*-- + Now ftab contains the first loc of every small bucket. + Calculate the running order, from smallest to largest + big bucket. + --*/ + + for (i = 0; i <= 255; i++) + { + runningOrder[i] = i; + } + + int vv; + int h = 1; + do + { + h = 3 * h + 1; + } while (h <= 256); + do + { + h = h / 3; + for (i = h; i <= 255; i++) + { + vv = runningOrder[i]; + j = i; + while ((ftab[((runningOrder[j - h]) + 1) << 8] - ftab[(runningOrder[j - h]) << 8]) > (ftab[((vv) + 1) << 8] - ftab[(vv) << 8])) + { + runningOrder[j] = runningOrder[j - h]; + j = j - h; + if (j <= (h - 1)) + { + break; + } + } + runningOrder[j] = vv; + } + } while (h != 1); + + /*-- + The main sorting loop. + --*/ + for (i = 0; i <= 255; i++) + { + /*-- + Process big buckets, starting with the least full. + --*/ + ss = runningOrder[i]; + + /*-- + Complete the big bucket [ss] by quicksorting + any unsorted small buckets [ss, j]. Hopefully + previous pointer-scanning phases have already + completed many of the small buckets [ss, j], so + we don't have to sort them at all. + --*/ + for (j = 0; j <= 255; j++) + { + sb = (ss << 8) + j; + if (!((ftab[sb] & SETMASK) == SETMASK)) + { + int lo = ftab[sb] & CLEARMASK; + int hi = (ftab[sb + 1] & CLEARMASK) - 1; + if (hi > lo) + { + QSort3(lo, hi, 2); + numQSorted += (hi - lo + 1); + if (workDone > workLimit && firstAttempt) + { + return; + } + } + ftab[sb] |= SETMASK; + } + } + + /*-- + The ss big bucket is now done. Record this fact, + and update the quadrant descriptors. 
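+ (Editorial aside: quadrant[k] caches a coarse rank -- the position's
+ index within its fully sorted big bucket, right-shifted so it fits in
+ 16 bits -- which lets FullGtU resolve long tie comparisons in a few
+ lookups instead of walking the block byte by byte.)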
Remember to + update quadrants in the overshoot area too, if + necessary. The "if (i < 255)" test merely skips + this updating for the last bucket processed, since + updating for the last bucket is pointless. + --*/ + bigDone[ss] = true; + + if (i < 255) + { + int bbStart = ftab[ss << 8] & CLEARMASK; + int bbSize = (ftab[(ss + 1) << 8] & CLEARMASK) - bbStart; + int shifts = 0; + + while ((bbSize >> shifts) > 65534) + { + shifts++; + } + + for (j = 0; j < bbSize; j++) + { + int a2update = zptr[bbStart + j]; + int qVal = (j >> shifts); + quadrant[a2update] = qVal; + if (a2update < BZip2Constants.OvershootBytes) + { + quadrant[a2update + last + 1] = qVal; + } + } + + if (!(((bbSize - 1) >> shifts) <= 65535)) + { + Panic(); + } + } + + /*-- + Now scan this big bucket so as to synthesise the + sorted order for small buckets [t, ss] for all t != ss. + --*/ + for (j = 0; j <= 255; j++) + { + copy[j] = ftab[(j << 8) + ss] & CLEARMASK; + } + + for (j = ftab[ss << 8] & CLEARMASK; j < (ftab[(ss + 1) << 8] & CLEARMASK); j++) + { + c1 = block[zptr[j]]; + if (!bigDone[c1]) + { + zptr[copy[c1]] = zptr[j] == 0 ? last : zptr[j] - 1; + copy[c1]++; + } + } + + for (j = 0; j <= 255; j++) + { + ftab[(j << 8) + ss] |= SETMASK; + } + } + } + } + + private void RandomiseBlock() + { + int i; + int rNToGo = 0; + int rTPos = 0; + for (i = 0; i < 256; i++) + { + inUse[i] = false; + } + + for (i = 0; i <= last; i++) + { + if (rNToGo == 0) + { + rNToGo = (int)BZip2Constants.RandomNumbers[rTPos]; + rTPos++; + if (rTPos == 512) + { + rTPos = 0; + } + } + rNToGo--; + block[i + 1] ^= (byte)((rNToGo == 1) ? 1 : 0); + // handle 16 bit signed numbers + block[i + 1] &= 0xFF; + + inUse[block[i + 1]] = true; + } + } + + private void DoReversibleTransformation() + { + workLimit = workFactor * last; + workDone = 0; + blockRandomised = false; + firstAttempt = true; + + MainSort(); + + if (workDone > workLimit && firstAttempt) + { + RandomiseBlock(); + workLimit = workDone = 0; + blockRandomised = true; + firstAttempt = false; + MainSort(); + } + + origPtr = -1; + for (int i = 0; i <= last; i++) + { + if (zptr[i] == 0) + { + origPtr = i; + break; + } + } + + if (origPtr == -1) + { + Panic(); + } + } + + private bool FullGtU(int i1, int i2) + { + int k; + byte c1, c2; + int s1, s2; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return c1 > c2; + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return c1 > c2; + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return c1 > c2; + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return c1 > c2; + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return c1 > c2; + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return c1 > c2; + } + i1++; + i2++; + + k = last + 1; + + do + { + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return c1 > c2; + } + s1 = quadrant[i1]; + s2 = quadrant[i2]; + if (s1 != s2) + { + return s1 > s2; + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return c1 > c2; + } + s1 = quadrant[i1]; + s2 = quadrant[i2]; + if (s1 != s2) + { + return s1 > s2; + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return c1 > c2; + } + s1 = quadrant[i1]; + s2 = quadrant[i2]; + if (s1 != s2) + { + return s1 > s2; + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) 
+ { + return c1 > c2; + } + s1 = quadrant[i1]; + s2 = quadrant[i2]; + if (s1 != s2) + { + return s1 > s2; + } + i1++; + i2++; + + if (i1 > last) + { + i1 -= last; + i1--; + } + if (i2 > last) + { + i2 -= last; + i2--; + } + + k -= 4; + ++workDone; + } while (k >= 0); + + return false; + } + + private void AllocateCompressStructures() + { + int n = BZip2Constants.BaseBlockSize * blockSize100k; + block = new byte[(n + 1 + BZip2Constants.OvershootBytes)]; + quadrant = new int[(n + BZip2Constants.OvershootBytes)]; + zptr = new int[n]; + ftab = new int[65537]; + + if (block == null || quadrant == null || zptr == null || ftab == null) + { + // int totalDraw = (n + 1 + NUM_OVERSHOOT_BYTES) + (n + NUM_OVERSHOOT_BYTES) + n + 65537; + // compressOutOfMemory ( totalDraw, n ); + } + + /* + The back end needs a place to store the MTF values + whilst it calculates the coding tables. We could + put them in the zptr array. However, these values + will fit in a short, so we overlay szptr at the + start of zptr, in the hope of reducing the number + of cache misses induced by the multiple traversals + of the MTF values when calculating coding tables. + Seems to improve compression speed by about 1%. + */ + // szptr = zptr; + + szptr = new short[2 * n]; + } + + private void GenerateMTFValues() + { + char[] yy = new char[256]; + int i, j; + char tmp; + char tmp2; + int zPend; + int wr; + int EOB; + + MakeMaps(); + EOB = nInUse + 1; + + for (i = 0; i <= EOB; i++) + { + mtfFreq[i] = 0; + } + + wr = 0; + zPend = 0; + for (i = 0; i < nInUse; i++) + { + yy[i] = (char)i; + } + + for (i = 0; i <= last; i++) + { + char ll_i; + + ll_i = unseqToSeq[block[zptr[i]]]; + + j = 0; + tmp = yy[j]; + while (ll_i != tmp) + { + j++; + tmp2 = tmp; + tmp = yy[j]; + yy[j] = tmp2; + } + yy[0] = tmp; + + if (j == 0) + { + zPend++; + } + else + { + if (zPend > 0) + { + zPend--; + while (true) + { + switch (zPend % 2) + { + case 0: + szptr[wr] = (short)BZip2Constants.RunA; + wr++; + mtfFreq[BZip2Constants.RunA]++; + break; + + case 1: + szptr[wr] = (short)BZip2Constants.RunB; + wr++; + mtfFreq[BZip2Constants.RunB]++; + break; + } + if (zPend < 2) + { + break; + } + zPend = (zPend - 2) / 2; + } + zPend = 0; + } + szptr[wr] = (short)(j + 1); + wr++; + mtfFreq[j + 1]++; + } + } + + if (zPend > 0) + { + zPend--; + while (true) + { + switch (zPend % 2) + { + case 0: + szptr[wr] = (short)BZip2Constants.RunA; + wr++; + mtfFreq[BZip2Constants.RunA]++; + break; + + case 1: + szptr[wr] = (short)BZip2Constants.RunB; + wr++; + mtfFreq[BZip2Constants.RunB]++; + break; + } + if (zPend < 2) + { + break; + } + zPend = (zPend - 2) / 2; + } + } + + szptr[wr] = (short)EOB; + wr++; + mtfFreq[EOB]++; + + nMTF = wr; + } + + private static void Panic() + { + throw new BZip2Exception("BZip2 output stream panic"); + } + + private static void HbMakeCodeLengths(char[] len, int[] freq, int alphaSize, int maxLen) + { + /*-- + Nodes and heap entries run from 1. Entry 0 + for both the heap and nodes is a sentinel. + --*/ + int nNodes, nHeap, n1, n2, j, k; + bool tooLong; + + int[] heap = new int[BZip2Constants.MaximumAlphaSize + 2]; + int[] weight = new int[BZip2Constants.MaximumAlphaSize * 2]; + int[] parent = new int[BZip2Constants.MaximumAlphaSize * 2]; + + for (int i = 0; i < alphaSize; ++i) + { + weight[i + 1] = (freq[i] == 0 ? 
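+ // (Editorial aside: weights are packed as (frequency << 8) | depth, so
+ // equal-frequency ties break toward the shallower subtree; a zero
+ // frequency is bumped to 1 so every live symbol still receives a code,
+ // and whenever some resulting length exceeds maxLen the outer loop
+ // roughly halves all frequencies and rebuilds the tree.)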
1 : freq[i]) << 8; + } + + while (true) + { + nNodes = alphaSize; + nHeap = 0; + + heap[0] = 0; + weight[0] = 0; + parent[0] = -2; + + for (int i = 1; i <= alphaSize; ++i) + { + parent[i] = -1; + nHeap++; + heap[nHeap] = i; + int zz = nHeap; + int tmp = heap[zz]; + while (weight[tmp] < weight[heap[zz >> 1]]) + { + heap[zz] = heap[zz >> 1]; + zz >>= 1; + } + heap[zz] = tmp; + } + if (!(nHeap < (BZip2Constants.MaximumAlphaSize + 2))) + { + Panic(); + } + + while (nHeap > 1) + { + n1 = heap[1]; + heap[1] = heap[nHeap]; + nHeap--; + int zz = 1; + int yy = 0; + int tmp = heap[zz]; + while (true) + { + yy = zz << 1; + if (yy > nHeap) + { + break; + } + if (yy < nHeap && weight[heap[yy + 1]] < weight[heap[yy]]) + { + yy++; + } + if (weight[tmp] < weight[heap[yy]]) + { + break; + } + + heap[zz] = heap[yy]; + zz = yy; + } + heap[zz] = tmp; + n2 = heap[1]; + heap[1] = heap[nHeap]; + nHeap--; + + zz = 1; + yy = 0; + tmp = heap[zz]; + while (true) + { + yy = zz << 1; + if (yy > nHeap) + { + break; + } + if (yy < nHeap && weight[heap[yy + 1]] < weight[heap[yy]]) + { + yy++; + } + if (weight[tmp] < weight[heap[yy]]) + { + break; + } + heap[zz] = heap[yy]; + zz = yy; + } + heap[zz] = tmp; + nNodes++; + parent[n1] = parent[n2] = nNodes; + + weight[nNodes] = (int)((weight[n1] & 0xffffff00) + (weight[n2] & 0xffffff00)) | + (int)(1 + (((weight[n1] & 0x000000ff) > (weight[n2] & 0x000000ff)) ? (weight[n1] & 0x000000ff) : (weight[n2] & 0x000000ff))); + + parent[nNodes] = -1; + nHeap++; + heap[nHeap] = nNodes; + + zz = nHeap; + tmp = heap[zz]; + while (weight[tmp] < weight[heap[zz >> 1]]) + { + heap[zz] = heap[zz >> 1]; + zz >>= 1; + } + heap[zz] = tmp; + } + if (!(nNodes < (BZip2Constants.MaximumAlphaSize * 2))) + { + Panic(); + } + + tooLong = false; + for (int i = 1; i <= alphaSize; ++i) + { + j = 0; + k = i; + while (parent[k] >= 0) + { + k = parent[k]; + j++; + } + len[i - 1] = (char)j; + tooLong |= j > maxLen; + } + + if (!tooLong) + { + break; + } + + for (int i = 1; i < alphaSize; ++i) + { + j = weight[i] >> 8; + j = 1 + (j / 2); + weight[i] = j << 8; + } + } + } + + private static void HbAssignCodes(int[] code, char[] length, int minLen, int maxLen, int alphaSize) + { + int vec = 0; + for (int n = minLen; n <= maxLen; ++n) + { + for (int i = 0; i < alphaSize; ++i) + { + if (length[i] == n) + { + code[i] = vec; + ++vec; + } + } + vec <<= 1; + } + } + + private static byte Med3(byte a, byte b, byte c) + { + byte t; + if (a > b) + { + t = a; + a = b; + b = t; + } + if (b > c) + { + t = b; + b = c; + c = t; + } + if (a > b) + { + b = a; + } + return b; + } + + private struct StackElement + { + public int ll; + public int hh; + public int dd; + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2OutputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2OutputStream.cs.meta new file mode 100644 index 0000000..8380ec7 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/BZip2/BZip2OutputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: e5f5f802681244694a82865f16c16cc7 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum.meta new file mode 100644 index 0000000..5c92728 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum.meta @@ -0,0 
+1,8 @@ +fileFormatVersion: 2 +guid: f0af070aeede646529e2d2eadb4fcc44 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Adler32.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Adler32.cs new file mode 100644 index 0000000..b2a0f15 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Adler32.cs @@ -0,0 +1,163 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Checksum +{ + /// + /// Computes Adler32 checksum for a stream of data. An Adler32 + /// checksum is not as reliable as a CRC32 checksum, but a lot faster to + /// compute. + /// + /// The specification for Adler32 may be found in RFC 1950. + /// ZLIB Compressed Data Format Specification version 3.3) + /// + /// + /// From that document: + /// + /// "ADLER32 (Adler-32 checksum) + /// This contains a checksum value of the uncompressed data + /// (excluding any dictionary data) computed according to Adler-32 + /// algorithm. This algorithm is a 32-bit extension and improvement + /// of the Fletcher algorithm, used in the ITU-T X.224 / ISO 8073 + /// standard. + /// + /// Adler-32 is composed of two sums accumulated per byte: s1 is + /// the sum of all bytes, s2 is the sum of all s1 values. Both sums + /// are done modulo 65521. s1 is initialized to 1, s2 to zero. The + /// Adler-32 checksum is stored as s2*65536 + s1 in most- + /// significant-byte first (network) order." + /// + /// "8.2. The Adler-32 algorithm + /// + /// The Adler-32 algorithm is much faster than the CRC32 algorithm yet + /// still provides an extremely low probability of undetected errors. + /// + /// The modulo on unsigned long accumulators can be delayed for 5552 + /// bytes, so the modulo operation time is negligible. If the bytes + /// are a, b, c, the second sum is 3a + 2b + c + 3, and so is position + /// and order sensitive, unlike the first sum, which is just a + /// checksum. That 65521 is prime is important to avoid a possible + /// large class of two-byte errors that leave the check unchanged. + /// (The Fletcher checksum uses 255, which is not prime and which also + /// makes the Fletcher check insensitive to single byte changes 0 - + /// 255.) + /// + /// The sum s1 is initialized to 1 instead of zero to make the length + /// of the sequence part of s2, so that the length does not have to be + /// checked separately. (Any sequence of zeroes has a Fletcher + /// checksum of zero.)" + /// + /// + /// + public sealed class Adler32 : IChecksum + { + #region Instance Fields + + /// + /// largest prime smaller than 65536 + /// + private static readonly uint BASE = 65521; + + /// + /// The CRC data checksum so far. + /// + private uint checkValue; + + #endregion Instance Fields + + /// + /// Initialise a default instance of + /// + public Adler32() + { + Reset(); + } + + /// + /// Resets the Adler32 data checksum as if no update was ever called. + /// + public void Reset() + { + checkValue = 1; + } + + /// + /// Returns the Adler32 data checksum computed so far. + /// + public long Value + { + get + { + return checkValue; + } + } + + /// + /// Updates the checksum with the byte b. + /// + /// + /// The data value to add. The high byte of the int is ignored. 
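+ /// (Editorial example: the value is easy to spot-check --
+ ///   var adler = new Adler32();
+ ///   adler.Update(System.Text.Encoding.ASCII.GetBytes("Wikipedia"));
+ ///   // adler.Value == 0x11E60398, the textbook Adler-32 reference value.)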
+ /// + public void Update(int bval) + { + // We could make a length 1 byte array and call update again, but I + // would rather not have that overhead + uint s1 = checkValue & 0xFFFF; + uint s2 = checkValue >> 16; + + s1 = (s1 + ((uint)bval & 0xFF)) % BASE; + s2 = (s1 + s2) % BASE; + + checkValue = (s2 << 16) + s1; + } + + /// + /// Updates the Adler32 data checksum with the bytes taken from + /// a block of data. + /// + /// Contains the data to update the checksum with. + public void Update(byte[] buffer) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + Update(new ArraySegment(buffer, 0, buffer.Length)); + } + + /// + /// Update Adler32 data checksum based on a portion of a block of data + /// + /// + /// The chunk of data to add + /// + public void Update(ArraySegment segment) + { + //(By Per Bothner) + uint s1 = checkValue & 0xFFFF; + uint s2 = checkValue >> 16; + var count = segment.Count; + var offset = segment.Offset; + while (count > 0) + { + // We can defer the modulo operation: + // s1 maximally grows from 65521 to 65521 + 255 * 3800 + // s2 maximally grows by 3800 * median(s1) = 2090079800 < 2^31 + int n = 3800; + if (n > count) + { + n = count; + } + count -= n; + while (--n >= 0) + { + s1 = s1 + (uint)(segment.Array[offset++] & 0xff); + s2 = s2 + s1; + } + s1 %= BASE; + s2 %= BASE; + } + checkValue = (s2 << 16) | s1; + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Adler32.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Adler32.cs.meta new file mode 100644 index 0000000..a816b88 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Adler32.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: de98773c037b6419a9b6974c97f96145 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/BZip2Crc.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/BZip2Crc.cs new file mode 100644 index 0000000..16cfda0 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/BZip2Crc.cs @@ -0,0 +1,187 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Checksum +{ + /// + /// CRC-32 with unreversed data and reversed output + /// + /// + /// Generate a table for a byte-wise 32-bit CRC calculation on the polynomial: + /// x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0. + /// + /// Polynomials over GF(2) are represented in binary, one bit per coefficient, + /// with the lowest powers in the most significant bit. Then adding polynomials + /// is just exclusive-or, and multiplying a polynomial by x is a right shift by + /// one. If we call the above polynomial p, and represent a byte as the + /// polynomial q, also with the lowest power in the most significant bit (so the + /// byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p, + /// where a mod b means the remainder after dividing a by b. + /// + /// This calculation is done using the shift-register method of multiplying and + /// taking the remainder. 
The register is initialized to zero, and for each + /// incoming bit, x^32 is added mod p to the register if the bit is a one (where + /// x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by + /// x (which is shifting right by one and adding x^32 mod p if the bit shifted + /// out is a one). We start with the highest power (least significant bit) of + /// q and repeat for all eight bits of q. + /// + /// The table is simply the CRC of all possible eight bit values. This is all + /// the information needed to generate CRC's on data a byte at a time for all + /// combinations of CRC register values and incoming bytes. + /// + public sealed class BZip2Crc : IChecksum + { + #region Instance Fields + + private const uint crcInit = 0xFFFFFFFF; + //const uint crcXor = 0x00000000; + + private static readonly uint[] crcTable = { + 0X00000000, 0X04C11DB7, 0X09823B6E, 0X0D4326D9, + 0X130476DC, 0X17C56B6B, 0X1A864DB2, 0X1E475005, + 0X2608EDB8, 0X22C9F00F, 0X2F8AD6D6, 0X2B4BCB61, + 0X350C9B64, 0X31CD86D3, 0X3C8EA00A, 0X384FBDBD, + 0X4C11DB70, 0X48D0C6C7, 0X4593E01E, 0X4152FDA9, + 0X5F15ADAC, 0X5BD4B01B, 0X569796C2, 0X52568B75, + 0X6A1936C8, 0X6ED82B7F, 0X639B0DA6, 0X675A1011, + 0X791D4014, 0X7DDC5DA3, 0X709F7B7A, 0X745E66CD, + 0X9823B6E0, 0X9CE2AB57, 0X91A18D8E, 0X95609039, + 0X8B27C03C, 0X8FE6DD8B, 0X82A5FB52, 0X8664E6E5, + 0XBE2B5B58, 0XBAEA46EF, 0XB7A96036, 0XB3687D81, + 0XAD2F2D84, 0XA9EE3033, 0XA4AD16EA, 0XA06C0B5D, + 0XD4326D90, 0XD0F37027, 0XDDB056FE, 0XD9714B49, + 0XC7361B4C, 0XC3F706FB, 0XCEB42022, 0XCA753D95, + 0XF23A8028, 0XF6FB9D9F, 0XFBB8BB46, 0XFF79A6F1, + 0XE13EF6F4, 0XE5FFEB43, 0XE8BCCD9A, 0XEC7DD02D, + 0X34867077, 0X30476DC0, 0X3D044B19, 0X39C556AE, + 0X278206AB, 0X23431B1C, 0X2E003DC5, 0X2AC12072, + 0X128E9DCF, 0X164F8078, 0X1B0CA6A1, 0X1FCDBB16, + 0X018AEB13, 0X054BF6A4, 0X0808D07D, 0X0CC9CDCA, + 0X7897AB07, 0X7C56B6B0, 0X71159069, 0X75D48DDE, + 0X6B93DDDB, 0X6F52C06C, 0X6211E6B5, 0X66D0FB02, + 0X5E9F46BF, 0X5A5E5B08, 0X571D7DD1, 0X53DC6066, + 0X4D9B3063, 0X495A2DD4, 0X44190B0D, 0X40D816BA, + 0XACA5C697, 0XA864DB20, 0XA527FDF9, 0XA1E6E04E, + 0XBFA1B04B, 0XBB60ADFC, 0XB6238B25, 0XB2E29692, + 0X8AAD2B2F, 0X8E6C3698, 0X832F1041, 0X87EE0DF6, + 0X99A95DF3, 0X9D684044, 0X902B669D, 0X94EA7B2A, + 0XE0B41DE7, 0XE4750050, 0XE9362689, 0XEDF73B3E, + 0XF3B06B3B, 0XF771768C, 0XFA325055, 0XFEF34DE2, + 0XC6BCF05F, 0XC27DEDE8, 0XCF3ECB31, 0XCBFFD686, + 0XD5B88683, 0XD1799B34, 0XDC3ABDED, 0XD8FBA05A, + 0X690CE0EE, 0X6DCDFD59, 0X608EDB80, 0X644FC637, + 0X7A089632, 0X7EC98B85, 0X738AAD5C, 0X774BB0EB, + 0X4F040D56, 0X4BC510E1, 0X46863638, 0X42472B8F, + 0X5C007B8A, 0X58C1663D, 0X558240E4, 0X51435D53, + 0X251D3B9E, 0X21DC2629, 0X2C9F00F0, 0X285E1D47, + 0X36194D42, 0X32D850F5, 0X3F9B762C, 0X3B5A6B9B, + 0X0315D626, 0X07D4CB91, 0X0A97ED48, 0X0E56F0FF, + 0X1011A0FA, 0X14D0BD4D, 0X19939B94, 0X1D528623, + 0XF12F560E, 0XF5EE4BB9, 0XF8AD6D60, 0XFC6C70D7, + 0XE22B20D2, 0XE6EA3D65, 0XEBA91BBC, 0XEF68060B, + 0XD727BBB6, 0XD3E6A601, 0XDEA580D8, 0XDA649D6F, + 0XC423CD6A, 0XC0E2D0DD, 0XCDA1F604, 0XC960EBB3, + 0XBD3E8D7E, 0XB9FF90C9, 0XB4BCB610, 0XB07DABA7, + 0XAE3AFBA2, 0XAAFBE615, 0XA7B8C0CC, 0XA379DD7B, + 0X9B3660C6, 0X9FF77D71, 0X92B45BA8, 0X9675461F, + 0X8832161A, 0X8CF30BAD, 0X81B02D74, 0X857130C3, + 0X5D8A9099, 0X594B8D2E, 0X5408ABF7, 0X50C9B640, + 0X4E8EE645, 0X4A4FFBF2, 0X470CDD2B, 0X43CDC09C, + 0X7B827D21, 0X7F436096, 0X7200464F, 0X76C15BF8, + 0X68860BFD, 0X6C47164A, 0X61043093, 0X65C52D24, + 0X119B4BE9, 0X155A565E, 0X18197087, 0X1CD86D30, + 0X029F3D35, 0X065E2082, 0X0B1D065B, 
0X0FDC1BEC, + 0X3793A651, 0X3352BBE6, 0X3E119D3F, 0X3AD08088, + 0X2497D08D, 0X2056CD3A, 0X2D15EBE3, 0X29D4F654, + 0XC5A92679, 0XC1683BCE, 0XCC2B1D17, 0XC8EA00A0, + 0XD6AD50A5, 0XD26C4D12, 0XDF2F6BCB, 0XDBEE767C, + 0XE3A1CBC1, 0XE760D676, 0XEA23F0AF, 0XEEE2ED18, + 0XF0A5BD1D, 0XF464A0AA, 0XF9278673, 0XFDE69BC4, + 0X89B8FD09, 0X8D79E0BE, 0X803AC667, 0X84FBDBD0, + 0X9ABC8BD5, 0X9E7D9662, 0X933EB0BB, 0X97FFAD0C, + 0XAFB010B1, 0XAB710D06, 0XA6322BDF, 0XA2F33668, + 0XBCB4666D, 0XB8757BDA, 0XB5365D03, 0XB1F740B4 + }; + + /// + /// The CRC data checksum so far. + /// + private uint checkValue; + + #endregion Instance Fields + + /// + /// Initialise a default instance of + /// + public BZip2Crc() + { + Reset(); + } + + /// + /// Resets the CRC data checksum as if no update was ever called. + /// + public void Reset() + { + checkValue = crcInit; + } + + /// + /// Returns the CRC data checksum computed so far. + /// + /// Reversed Out = true + public long Value + { + get + { + // Technically, the output should be: + //return (long)(~checkValue ^ crcXor); + // but x ^ 0 = x, so there is no point in adding + // the XOR operation + return (long)(~checkValue); + } + } + + /// + /// Updates the checksum with the int bval. + /// + /// + /// the byte is taken as the lower 8 bits of bval + /// + /// Reversed Data = false + public void Update(int bval) + { + checkValue = unchecked(crcTable[(byte)(((checkValue >> 24) & 0xFF) ^ bval)] ^ (checkValue << 8)); + } + + /// + /// Updates the CRC data checksum with the bytes taken from + /// a block of data. + /// + /// Contains the data to update the CRC with. + public void Update(byte[] buffer) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + Update(new ArraySegment(buffer, 0, buffer.Length)); + } + + /// + /// Update CRC data checksum based on a portion of a block of data + /// + /// + /// The chunk of data to add + /// + public void Update(ArraySegment segment) + { + var count = segment.Count; + var offset = segment.Offset; + + while (--count >= 0) + Update(segment.Array[offset++]); + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/BZip2Crc.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/BZip2Crc.cs.meta new file mode 100644 index 0000000..d1ac486 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/BZip2Crc.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: d40682eb91aaf451eb1d42b47ecd5bf9 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Crc32.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Crc32.cs new file mode 100644 index 0000000..9b8ab4b --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Crc32.cs @@ -0,0 +1,176 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Checksum +{ + /// + /// CRC-32 with reversed data and unreversed output + /// + /// + /// Generate a table for a byte-wise 32-bit CRC calculation on the polynomial: + /// x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0. + /// + /// Polynomials over GF(2) are represented in binary, one bit per coefficient, + /// with the lowest powers in the most significant bit. Then adding polynomials + /// is just exclusive-or, and multiplying a polynomial by x is a right shift by + /// one. 
If we call the above polynomial p, and represent a byte as the + /// polynomial q, also with the lowest power in the most significant bit (so the + /// byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p, + /// where a mod b means the remainder after dividing a by b. + /// + /// This calculation is done using the shift-register method of multiplying and + /// taking the remainder. The register is initialized to zero, and for each + /// incoming bit, x^32 is added mod p to the register if the bit is a one (where + /// x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by + /// x (which is shifting right by one and adding x^32 mod p if the bit shifted + /// out is a one). We start with the highest power (least significant bit) of + /// q and repeat for all eight bits of q. + /// + /// The table is simply the CRC of all possible eight bit values. This is all + /// the information needed to generate CRC's on data a byte at a time for all + /// combinations of CRC register values and incoming bytes. + /// + public sealed class Crc32 : IChecksum + { + #region Instance Fields + + private static readonly uint crcInit = 0xFFFFFFFF; + private static readonly uint crcXor = 0xFFFFFFFF; + + private static readonly uint[] crcTable = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, + 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, + 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, + 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, + 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, + 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, + 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, + 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, + 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, + 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, + 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, + 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, + 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, + 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, + 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, + 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, + 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, + 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, + 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, + 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, + 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, + 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, + 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, + 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, + 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, + 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, + 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, + 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 
0x5505262F, 0xC5BA3BBE, + 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, + 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, + 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, + 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, + 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, + 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, + 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, + 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, + 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, + 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, + 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, + 0x2D02EF8D + }; + + /// + /// The CRC data checksum so far. + /// + private uint checkValue; + + #endregion Instance Fields + + internal static uint ComputeCrc32(uint oldCrc, byte bval) + { + return (uint)(Crc32.crcTable[(oldCrc ^ bval) & 0xFF] ^ (oldCrc >> 8)); + } + + /// + /// Initialise a default instance of + /// + public Crc32() + { + Reset(); + } + + /// + /// Resets the CRC data checksum as if no update was ever called. + /// + public void Reset() + { + checkValue = crcInit; + } + + /// + /// Returns the CRC data checksum computed so far. + /// + /// Reversed Out = false + public long Value + { + get + { + return (long)(checkValue ^ crcXor); + } + } + + /// + /// Updates the checksum with the int bval. + /// + /// + /// the byte is taken as the lower 8 bits of bval + /// + /// Reversed Data = true + public void Update(int bval) + { + checkValue = unchecked(crcTable[(checkValue ^ bval) & 0xFF] ^ (checkValue >> 8)); + } + + /// + /// Updates the CRC data checksum with the bytes taken from + /// a block of data. + /// + /// Contains the data to update the CRC with. + public void Update(byte[] buffer) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + Update(new ArraySegment(buffer, 0, buffer.Length)); + } + + /// + /// Update CRC data checksum based on a portion of a block of data + /// + /// + /// The chunk of data to add + /// + public void Update(ArraySegment segment) + { + var count = segment.Count; + var offset = segment.Offset; + + while (--count >= 0) + Update(segment.Array[offset++]); + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Crc32.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Crc32.cs.meta new file mode 100644 index 0000000..e8e9e86 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/Crc32.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 6e2e3dcf8b0cd495890aa1cf56488d35 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/IChecksum.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/IChecksum.cs new file mode 100644 index 0000000..db74a5a --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/IChecksum.cs @@ -0,0 +1,51 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Checksum +{ + /// + /// Interface to compute a data checksum used by checked input/output streams. 
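+ /// (Editorial example: Adler32, BZip2Crc and Crc32 in this patch all
+ /// implement this interface, so they can be spot-checked uniformly --
+ ///   IChecksum crc = new Crc32();
+ ///   crc.Update(System.Text.Encoding.ASCII.GetBytes("123456789"));
+ ///   // crc.Value == 0xCBF43926, the standard CRC-32 check value.)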
+ /// A data checksum can be updated by one byte or with a byte array. After each + /// update the value of the current checksum can be returned by calling + /// getValue. The complete checksum object can also be reset + /// so it can be used again with new data. + /// + public interface IChecksum + { + /// + /// Resets the data checksum as if no update was ever called. + /// + void Reset(); + + /// + /// Returns the data checksum computed so far. + /// + long Value + { + get; + } + + /// + /// Adds one byte to the data checksum. + /// + /// + /// the data value to add. The high byte of the int is ignored. + /// + void Update(int bval); + + /// + /// Updates the data checksum with the bytes taken from the array. + /// + /// + /// buffer an array of bytes + /// + void Update(byte[] buffer); + + /// + /// Adds the byte array to the data checksum. + /// + /// + /// The chunk of data to add + /// + void Update(ArraySegment segment); + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/IChecksum.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/IChecksum.cs.meta new file mode 100644 index 0000000..3b7fe14 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Checksum/IChecksum.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: e1f113882f94543a8aa6d4b9cf770392 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core.meta new file mode 100644 index 0000000..3349ee4 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: de82092d5d69640af9c83fd89cf1c5cd +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions.meta new file mode 100644 index 0000000..9d9b475 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 95be3da657dcb4fd59af31d452881af1 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/SharpZipBaseException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/SharpZipBaseException.cs new file mode 100644 index 0000000..eb14e2d --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/SharpZipBaseException.cs @@ -0,0 +1,58 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib +{ + /// + /// SharpZipBaseException is the base exception class for SharpZipLib. + /// All library exceptions are derived from this. + /// + /// NOTE: Not all exceptions thrown will be derived from this class. + /// A variety of other exceptions are possible for example + [Serializable] + public class SharpZipBaseException : Exception + { + /// + /// Initializes a new instance of the SharpZipBaseException class. + /// + public SharpZipBaseException() + { + } + + /// + /// Initializes a new instance of the SharpZipBaseException class with a specified error message. + /// + /// A message describing the exception. 
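// Hypothetical sketch (not part of this patch) of the IChecksum contract
// defined above: Reset() behaves as if Update() was never called, so a
// single instance can be reused across inputs.
static void ChecksumRoundTrip()
{
    ICSharpCode.SharpZipLib.Checksum.IChecksum crc =
        new ICSharpCode.SharpZipLib.Checksum.Crc32();
    crc.Update(new byte[] { 1, 2, 3 });
    long first = crc.Value;
    crc.Reset();
    crc.Update(new byte[] { 1, 2, 3 });
    System.Diagnostics.Debug.Assert(first == crc.Value); // same data, same CRC
}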
+ public SharpZipBaseException(string message) + : base(message) + { + } + + /// + /// Initializes a new instance of the SharpZipBaseException class with a specified + /// error message and a reference to the inner exception that is the cause of this exception. + /// + /// A message describing the exception. + /// The inner exception + public SharpZipBaseException(string message, Exception innerException) + : base(message, innerException) + { + } + + /// + /// Initializes a new instance of the SharpZipBaseException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination. + /// + protected SharpZipBaseException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/SharpZipBaseException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/SharpZipBaseException.cs.meta new file mode 100644 index 0000000..64a3139 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/SharpZipBaseException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: c95c88dd3476942b88134706b88b73c6 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamDecodingException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamDecodingException.cs new file mode 100644 index 0000000..389b7d0 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamDecodingException.cs @@ -0,0 +1,50 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib +{ + /// + /// Indicates that an error occurred during decoding of an input stream due to corrupt + /// data or (unintentional) library incompatibility. + /// + [Serializable] + public class StreamDecodingException : SharpZipBaseException + { + private const string GenericMessage = "Input stream could not be decoded"; + + /// + /// Initializes a new instance of the StreamDecodingException with a generic message + /// + public StreamDecodingException() : base(GenericMessage) { } + + /// + /// Initializes a new instance of the StreamDecodingException class with a specified error message. + /// + /// A message describing the exception. + public StreamDecodingException(string message) : base(message) { } + + /// + /// Initializes a new instance of the StreamDecodingException class with a specified + /// error message and a reference to the inner exception that is the cause of this exception. + /// + /// A message describing the exception. + /// The inner exception + public StreamDecodingException(string message, Exception innerException) : base(message, innerException) { } + + /// + /// Initializes a new instance of the StreamDecodingException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination.
+ /// + protected StreamDecodingException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamDecodingException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamDecodingException.cs.meta new file mode 100644 index 0000000..febfb1d --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamDecodingException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 77a3ab610444a4a36a8733bda0293be6 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamUnsupportedException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamUnsupportedException.cs new file mode 100644 index 0000000..5827e55 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamUnsupportedException.cs @@ -0,0 +1,49 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib +{ + /// + /// Indicates that the input stream could not be decoded due to known library incompatibility or missing features + /// + [Serializable] + public class StreamUnsupportedException : StreamDecodingException + { + private const string GenericMessage = "Input stream is in an unsupported format"; + + /// + /// Initializes a new instance of the StreamUnsupportedException with a generic message + /// + public StreamUnsupportedException() : base(GenericMessage) { } + + /// + /// Initializes a new instance of the StreamUnsupportedException class with a specified error message. + /// + /// A message describing the exception. + public StreamUnsupportedException(string message) : base(message) { } + + /// + /// Initializes a new instance of the StreamUnsupportedException class with a specified + /// error message and a reference to the inner exception that is the cause of this exception. + /// + /// A message describing the exception. + /// The inner exception + public StreamUnsupportedException(string message, Exception innerException) : base(message, innerException) { } + + /// + /// Initializes a new instance of the StreamUnsupportedException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination.
+ /// + protected StreamUnsupportedException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamUnsupportedException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamUnsupportedException.cs.meta new file mode 100644 index 0000000..a5d84a4 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/StreamUnsupportedException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: b823c2a985ff94afbb82fbb13d7e3747 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/UnexpectedEndOfStreamException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/UnexpectedEndOfStreamException.cs new file mode 100644 index 0000000..a35c49f --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/UnexpectedEndOfStreamException.cs @@ -0,0 +1,49 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib +{ + /// + /// Indicates that the input stream could not be decoded due to the stream ending before enough data had been provided + /// + [Serializable] + public class UnexpectedEndOfStreamException : StreamDecodingException + { + private const string GenericMessage = "Input stream ended unexpectedly"; + + /// + /// Initializes a new instance of the UnexpectedEndOfStreamException with a generic message + /// + public UnexpectedEndOfStreamException() : base(GenericMessage) { } + + /// + /// Initializes a new instance of the UnexpectedEndOfStreamException class with a specified error message. + /// + /// A message describing the exception. + public UnexpectedEndOfStreamException(string message) : base(message) { } + + /// + /// Initializes a new instance of the UnexpectedEndOfStreamException class with a specified + /// error message and a reference to the inner exception that is the cause of this exception. + /// + /// A message describing the exception. + /// The inner exception + public UnexpectedEndOfStreamException(string message, Exception innerException) : base(message, innerException) { } + + /// + /// Initializes a new instance of the UnexpectedEndOfStreamException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination.
+ /// + protected UnexpectedEndOfStreamException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/UnexpectedEndOfStreamException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/UnexpectedEndOfStreamException.cs.meta new file mode 100644 index 0000000..6c91828 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/UnexpectedEndOfStreamException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 3583b8091062c4e7fbfe884d94600026 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/ValueOutOfRangeException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/ValueOutOfRangeException.cs new file mode 100644 index 0000000..d41cf98 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/ValueOutOfRangeException.cs @@ -0,0 +1,66 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib +{ + /// + /// Indicates that a value was outside of the expected range when decoding an input stream + /// + [Serializable] + public class ValueOutOfRangeException : StreamDecodingException + { + /// + /// Initializes a new instance of the ValueOutOfRangeException class naming the causing variable + /// + /// Name of the variable, use: nameof() + public ValueOutOfRangeException(string nameOfValue) + : base($"{nameOfValue} out of range") { } + + /// + /// Initializes a new instance of the ValueOutOfRangeException class naming the causing variable, + /// it's current value and expected range. + /// + /// Name of the variable, use: nameof() + /// The invalid value + /// Expected maximum value + /// Expected minimum value + public ValueOutOfRangeException(string nameOfValue, long value, long maxValue, long minValue = 0) + : this(nameOfValue, value.ToString(), maxValue.ToString(), minValue.ToString()) { } + + /// + /// Initializes a new instance of the ValueOutOfRangeException class naming the causing variable, + /// it's current value and expected range. + /// + /// Name of the variable, use: nameof() + /// The invalid value + /// Expected maximum value + /// Expected minimum value + public ValueOutOfRangeException(string nameOfValue, string value, string maxValue, string minValue = "0") : + base($"{nameOfValue} out of range: {value}, should be {minValue}..{maxValue}") + { } + + private ValueOutOfRangeException() + { + } + + private ValueOutOfRangeException(string message, Exception innerException) : base(message, innerException) + { + } + + /// + /// Initializes a new instance of the ValueOutOfRangeException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination. 
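// Hypothetical sketch (not part of this patch): the message produced by the
// range constructor above, with invented values.
//   new ValueOutOfRangeException(nameof(blockSize), 70000, 65535)
//   => "blockSize out of range: 70000, should be 0..65535"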
+ /// + protected ValueOutOfRangeException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/ValueOutOfRangeException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/ValueOutOfRangeException.cs.meta new file mode 100644 index 0000000..96afa9c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/Exceptions/ValueOutOfRangeException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 0a3c5f8682aae4a88adf0e280a353e1c +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/FileSystemScanner.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/FileSystemScanner.cs new file mode 100644 index 0000000..427e7d8 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/FileSystemScanner.cs @@ -0,0 +1,545 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Core +{ + #region EventArgs + + /// + /// Event arguments for scanning. + /// + public class ScanEventArgs : EventArgs + { + #region Constructors + + /// + /// Initialise a new instance of + /// + /// The file or directory name. + public ScanEventArgs(string name) + { + name_ = name; + } + + #endregion Constructors + + /// + /// The file or directory name for this event. + /// + public string Name + { + get { return name_; } + } + + /// + /// Get set a value indicating if scanning should continue or not. + /// + public bool ContinueRunning + { + get { return continueRunning_; } + set { continueRunning_ = value; } + } + + #region Instance Fields + + private string name_; + private bool continueRunning_ = true; + + #endregion Instance Fields + } + + /// + /// Event arguments during processing of a single file or directory. + /// + public class ProgressEventArgs : EventArgs + { + #region Constructors + + /// + /// Initialise a new instance of + /// + /// The file or directory name if known. + /// The number of bytes processed so far + /// The total number of bytes to process, 0 if not known + public ProgressEventArgs(string name, long processed, long target) + { + name_ = name; + processed_ = processed; + target_ = target; + } + + #endregion Constructors + + /// + /// The name for this event if known. + /// + public string Name + { + get { return name_; } + } + + /// + /// Get set a value indicating whether scanning should continue or not. + /// + public bool ContinueRunning + { + get { return continueRunning_; } + set { continueRunning_ = value; } + } + + /// + /// Get a percentage representing how much of the has been processed + /// + /// 0.0 to 100.0 percent; 0 if target is not known. + public float PercentComplete + { + get + { + float result; + if (target_ <= 0) + { + result = 0; + } + else + { + result = ((float)processed_ / (float)target_) * 100.0f; + } + return result; + } + } + + /// + /// The number of bytes processed so far + /// + public long Processed + { + get { return processed_; } + } + + /// + /// The number of bytes to process. + /// + /// Target may be 0 or negative if the value isnt known. 
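// Hypothetical sketch (not part of this patch): PercentComplete as computed
// above, with invented values.
//   new ProgressEventArgs("data.bin", 512, 2048).PercentComplete == 25f
//   A target of 0 (length unknown) always reports 0.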
+ public long Target + { + get { return target_; } + } + + #region Instance Fields + + private string name_; + private long processed_; + private long target_; + private bool continueRunning_ = true; + + #endregion Instance Fields + } + + /// + /// Event arguments for directories. + /// + public class DirectoryEventArgs : ScanEventArgs + { + #region Constructors + + /// + /// Initialize an instance of . + /// + /// The name for this directory. + /// Flag value indicating if any matching files are contained in this directory. + public DirectoryEventArgs(string name, bool hasMatchingFiles) + : base(name) + { + hasMatchingFiles_ = hasMatchingFiles; + } + + #endregion Constructors + + /// + /// Get a value indicating if the directory contains any matching files or not. + /// + public bool HasMatchingFiles + { + get { return hasMatchingFiles_; } + } + + private readonly + + #region Instance Fields + + bool hasMatchingFiles_; + + #endregion Instance Fields + } + + /// + /// Arguments passed when scan failures are detected. + /// + public class ScanFailureEventArgs : EventArgs + { + #region Constructors + + /// + /// Initialise a new instance of + /// + /// The name to apply. + /// The exception to use. + public ScanFailureEventArgs(string name, Exception e) + { + name_ = name; + exception_ = e; + continueRunning_ = true; + } + + #endregion Constructors + + /// + /// The applicable name. + /// + public string Name + { + get { return name_; } + } + + /// + /// The applicable exception. + /// + public Exception Exception + { + get { return exception_; } + } + + /// + /// Get / set a value indicating whether scanning should continue. + /// + public bool ContinueRunning + { + get { return continueRunning_; } + set { continueRunning_ = value; } + } + + #region Instance Fields + + private string name_; + private Exception exception_; + private bool continueRunning_; + + #endregion Instance Fields + } + + #endregion EventArgs + + #region Delegates + + /// + /// Delegate invoked before starting to process a file. + /// + /// The source of the event + /// The event arguments. + public delegate void ProcessFileHandler(object sender, ScanEventArgs e); + + /// + /// Delegate invoked during processing of a file or directory + /// + /// The source of the event + /// The event arguments. + public delegate void ProgressHandler(object sender, ProgressEventArgs e); + + /// + /// Delegate invoked when a file has been completely processed. + /// + /// The source of the event + /// The event arguments. + public delegate void CompletedFileHandler(object sender, ScanEventArgs e); + + /// + /// Delegate invoked when a directory failure is detected. + /// + /// The source of the event + /// The event arguments. + public delegate void DirectoryFailureHandler(object sender, ScanFailureEventArgs e); + + /// + /// Delegate invoked when a file failure is detected. + /// + /// The source of the event + /// The event arguments. + public delegate void FileFailureHandler(object sender, ScanFailureEventArgs e); + + #endregion Delegates + + /// + /// FileSystemScanner provides facilities scanning of files and directories. + /// + public class FileSystemScanner + { + #region Constructors + + /// + /// Initialise a new instance of + /// + /// The file filter to apply when scanning. + public FileSystemScanner(string filter) + { + fileFilter_ = new PathFilter(filter); + } + + /// + /// Initialise a new instance of + /// + /// The file filter to apply. + /// The directory filter to apply. 
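// Hypothetical sketch (not part of this patch) of driving the scanner with
// the delegate types declared above (the corresponding fields follow below);
// the filter, path, and handler bodies are invented.
static void ScanExample()
{
    var scanner = new FileSystemScanner(@"+\.cs$");
    scanner.ProcessFile = (sender, e) => System.Console.WriteLine(e.Name);
    scanner.FileFailure = (sender, e) =>
    {
        System.Console.WriteLine($"{e.Name} failed: {e.Exception.Message}");
        e.ContinueRunning = true; // skip this file but keep scanning
    };
    scanner.Scan(@"C:\project", true);
}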
+ public FileSystemScanner(string fileFilter, string directoryFilter) + { + fileFilter_ = new PathFilter(fileFilter); + directoryFilter_ = new PathFilter(directoryFilter); + } + + /// + /// Initialise a new instance of + /// + /// The file filter to apply. + public FileSystemScanner(IScanFilter fileFilter) + { + fileFilter_ = fileFilter; + } + + /// + /// Initialise a new instance of + /// + /// The file filter to apply. + /// The directory filter to apply. + public FileSystemScanner(IScanFilter fileFilter, IScanFilter directoryFilter) + { + fileFilter_ = fileFilter; + directoryFilter_ = directoryFilter; + } + + #endregion Constructors + + #region Delegates + + /// + /// Delegate to invoke when a directory is processed. + /// + public event EventHandler ProcessDirectory; + + /// + /// Delegate to invoke when a file is processed. + /// + public ProcessFileHandler ProcessFile; + + /// + /// Delegate to invoke when processing for a file has finished. + /// + public CompletedFileHandler CompletedFile; + + /// + /// Delegate to invoke when a directory failure is detected. + /// + public DirectoryFailureHandler DirectoryFailure; + + /// + /// Delegate to invoke when a file failure is detected. + /// + public FileFailureHandler FileFailure; + + #endregion Delegates + + /// + /// Raise the DirectoryFailure event. + /// + /// The directory name. + /// The exception detected. + private bool OnDirectoryFailure(string directory, Exception e) + { + DirectoryFailureHandler handler = DirectoryFailure; + bool result = (handler != null); + if (result) + { + var args = new ScanFailureEventArgs(directory, e); + handler(this, args); + alive_ = args.ContinueRunning; + } + return result; + } + + /// + /// Raise the FileFailure event. + /// + /// The file name. + /// The exception detected. + private bool OnFileFailure(string file, Exception e) + { + FileFailureHandler handler = FileFailure; + + bool result = (handler != null); + + if (result) + { + var args = new ScanFailureEventArgs(file, e); + FileFailure(this, args); + alive_ = args.ContinueRunning; + } + return result; + } + + /// + /// Raise the ProcessFile event. + /// + /// The file name. + private void OnProcessFile(string file) + { + ProcessFileHandler handler = ProcessFile; + + if (handler != null) + { + var args = new ScanEventArgs(file); + handler(this, args); + alive_ = args.ContinueRunning; + } + } + + /// + /// Raise the complete file event + /// + /// The file name + private void OnCompleteFile(string file) + { + CompletedFileHandler handler = CompletedFile; + + if (handler != null) + { + var args = new ScanEventArgs(file); + handler(this, args); + alive_ = args.ContinueRunning; + } + } + + /// + /// Raise the ProcessDirectory event. + /// + /// The directory name. + /// Flag indicating if the directory has matching files. + private void OnProcessDirectory(string directory, bool hasMatchingFiles) + { + EventHandler handler = ProcessDirectory; + + if (handler != null) + { + var args = new DirectoryEventArgs(directory, hasMatchingFiles); + handler(this, args); + alive_ = args.ContinueRunning; + } + } + + /// + /// Scan a directory. + /// + /// The base directory to scan. + /// True to recurse subdirectories, false to scan a single directory. 
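// Hypothetical sketch (not part of this patch): every event raised above
// copies ContinueRunning back into alive_, so any handler can stop the walk
// that Scan() below starts. The limit and handler body are invented.
static void StopAfter(FileSystemScanner scanner, int limit)
{
    int seen = 0;
    scanner.ProcessFile = (sender, e) =>
    {
        if (++seen >= limit)
            e.ContinueRunning = false; // scanner stops at its next check
    };
}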
+ public void Scan(string directory, bool recurse) + { + alive_ = true; + ScanDir(directory, recurse); + } + + private void ScanDir(string directory, bool recurse) + { + try + { + string[] names = System.IO.Directory.GetFiles(directory); + bool hasMatch = false; + for (int fileIndex = 0; fileIndex < names.Length; ++fileIndex) + { + if (!fileFilter_.IsMatch(names[fileIndex])) + { + names[fileIndex] = null; + } + else + { + hasMatch = true; + } + } + + OnProcessDirectory(directory, hasMatch); + + if (alive_ && hasMatch) + { + foreach (string fileName in names) + { + try + { + if (fileName != null) + { + OnProcessFile(fileName); + if (!alive_) + { + break; + } + } + } + catch (Exception e) + { + if (!OnFileFailure(fileName, e)) + { + throw; + } + } + } + } + } + catch (Exception e) + { + if (!OnDirectoryFailure(directory, e)) + { + throw; + } + } + + if (alive_ && recurse) + { + try + { + string[] names = System.IO.Directory.GetDirectories(directory); + foreach (string fulldir in names) + { + if ((directoryFilter_ == null) || (directoryFilter_.IsMatch(fulldir))) + { + ScanDir(fulldir, true); + if (!alive_) + { + break; + } + } + } + } + catch (Exception e) + { + if (!OnDirectoryFailure(directory, e)) + { + throw; + } + } + } + } + + #region Instance Fields + + /// + /// The file filter currently in use. + /// + private IScanFilter fileFilter_; + + /// + /// The directory filter currently in use. + /// + private IScanFilter directoryFilter_; + + /// + /// Flag indicating if scanning should continue running. + /// + private bool alive_; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/FileSystemScanner.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/FileSystemScanner.cs.meta new file mode 100644 index 0000000..ff51be7 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/FileSystemScanner.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: b0f4da1967c454ea4be9fa4471d13ba1 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/INameTransform.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/INameTransform.cs new file mode 100644 index 0000000..492e2a9 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/INameTransform.cs @@ -0,0 +1,22 @@ +namespace ICSharpCode.SharpZipLib.Core +{ + /// + /// INameTransform defines how file system names are transformed for use with archives, or vice versa. + /// + public interface INameTransform + { + /// + /// Given a file name determine the transformed value. + /// + /// The name to transform. + /// The transformed file name. + string TransformFile(string name); + + /// + /// Given a directory name determine the transformed value. + /// + /// The name to transform. 
+ /// The transformed directory name + string TransformDirectory(string name); + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/INameTransform.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/INameTransform.cs.meta new file mode 100644 index 0000000..8508632 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/INameTransform.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: f1c89fa356eb34568957106d4454e054 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/IScanFilter.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/IScanFilter.cs new file mode 100644 index 0000000..ac07fd1 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/IScanFilter.cs @@ -0,0 +1,15 @@ +namespace ICSharpCode.SharpZipLib.Core +{ + /// + /// Scanning filters support filtering of names. + /// + public interface IScanFilter + { + /// + /// Test a name to see if it 'matches' the filter. + /// + /// The name to test. + /// Returns true if the name matches the filter, false if it does not match. + bool IsMatch(string name); + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/IScanFilter.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/IScanFilter.cs.meta new file mode 100644 index 0000000..fb383a4 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/IScanFilter.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: c3e76335e87e4469ea6dff583ec18ada +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/InvalidNameException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/InvalidNameException.cs new file mode 100644 index 0000000..6647631 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/InvalidNameException.cs @@ -0,0 +1,53 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib.Core +{ + /// + /// InvalidNameException is thrown for invalid names such as directory traversal paths and names with invalid characters + /// + [Serializable] + public class InvalidNameException : SharpZipBaseException + { + /// + /// Initializes a new instance of the InvalidNameException class with a default error message. + /// + public InvalidNameException() : base("An invalid name was specified") + { + } + + /// + /// Initializes a new instance of the InvalidNameException class with a specified error message. + /// + /// A message describing the exception. + public InvalidNameException(string message) : base(message) + { + } + + /// + /// Initializes a new instance of the InvalidNameException class with a specified + /// error message and a reference to the inner exception that is the cause of this exception. + /// + /// A message describing the exception. + /// The inner exception + public InvalidNameException(string message, Exception innerException) : base(message, innerException) + { + } + + /// + /// Initializes a new instance of the InvalidNameException class with serialized data. 
+ /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination. + /// + protected InvalidNameException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/InvalidNameException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/InvalidNameException.cs.meta new file mode 100644 index 0000000..c62a3fd --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/InvalidNameException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 6b27e25b6954d40cab60e7348d8864d1 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/NameFilter.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/NameFilter.cs new file mode 100644 index 0000000..5775189 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/NameFilter.cs @@ -0,0 +1,284 @@ +using System; +using System.Collections.Generic; +using System.Text; +using System.Text.RegularExpressions; + +namespace ICSharpCode.SharpZipLib.Core +{ + /// + /// NameFilter is a string matching class which allows for both positive and negative + /// matching. + /// A filter is a sequence of independent regular expressions separated by semi-colons ';'. + /// To include a semi-colon it may be quoted as in \;. Each expression can be prefixed by a plus '+' sign or + /// a minus '-' sign to denote the expression is intended to include or exclude names. + /// If neither a plus nor a minus sign is found, include is the default. + /// A given name is tested for inclusion before checking exclusions. Only names matching an include spec + /// and not matching an exclude spec are deemed to match the filter. + /// An empty filter matches any name. + /// + /// The following expression includes all names ending in '.dat' with the exception of 'dummy.dat' + /// "+\.dat$;-^dummy\.dat$" + /// + public class NameFilter : IScanFilter + { + #region Constructors + + /// + /// Construct an instance based on the filter expression passed + /// + /// The filter expression. + public NameFilter(string filter) + { + filter_ = filter; + inclusions_ = new List(); + exclusions_ = new List(); + Compile(); + } + + #endregion Constructors + + /// + /// Test a string to see if it is a valid regular expression. + /// + /// The expression to test. + /// True if the expression is a valid regular expression, false otherwise. + public static bool IsValidExpression(string expression) + { + bool result = true; + try + { + var exp = new Regex(expression, RegexOptions.IgnoreCase | RegexOptions.Singleline); + } + catch (ArgumentException) + { + result = false; + } + return result; + } + + /// + /// Test an expression to see if it is valid as a filter. + /// + /// The filter expression to test. + /// True if the expression is valid, false otherwise.
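// Hypothetical sketch (not part of this patch): the grammar from the class
// comment above, exercised on the example filter it gives.
static void FilterExample()
{
    var filter = new NameFilter(@"+\.dat$;-^dummy\.dat$");
    bool a = filter.IsMatch("values.dat"); // true: matches the include
    bool b = filter.IsMatch("dummy.dat");  // false: the exclude wins
    bool c = filter.IsMatch("readme.txt"); // false: no include matches
}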
+ public static bool IsValidFilterExpression(string toTest) + { + bool result = true; + + try + { + if (toTest != null) + { + string[] items = SplitQuoted(toTest); + for (int i = 0; i < items.Length; ++i) + { + if ((items[i] != null) && (items[i].Length > 0)) + { + string toCompile; + + if (items[i][0] == '+') + { + toCompile = items[i].Substring(1, items[i].Length - 1); + } + else if (items[i][0] == '-') + { + toCompile = items[i].Substring(1, items[i].Length - 1); + } + else + { + toCompile = items[i]; + } + + var testRegex = new Regex(toCompile, RegexOptions.IgnoreCase | RegexOptions.Singleline); + } + } + } + } + catch (ArgumentException) + { + result = false; + } + + return result; + } + + /// + /// Split a string into its component pieces + /// + /// The original string + /// Returns an array of values containing the individual filter elements. + public static string[] SplitQuoted(string original) + { + char escape = '\\'; + char[] separators = { ';' }; + + var result = new List(); + + if (!string.IsNullOrEmpty(original)) + { + int endIndex = -1; + var b = new StringBuilder(); + + while (endIndex < original.Length) + { + endIndex += 1; + if (endIndex >= original.Length) + { + result.Add(b.ToString()); + } + else if (original[endIndex] == escape) + { + endIndex += 1; + if (endIndex >= original.Length) + { + throw new ArgumentException("Missing terminating escape character", nameof(original)); + } + // include escape if this is not an escaped separator + if (Array.IndexOf(separators, original[endIndex]) < 0) + b.Append(escape); + + b.Append(original[endIndex]); + } + else + { + if (Array.IndexOf(separators, original[endIndex]) >= 0) + { + result.Add(b.ToString()); + b.Length = 0; + } + else + { + b.Append(original[endIndex]); + } + } + } + } + + return result.ToArray(); + } + + /// + /// Convert this filter to its string equivalent. + /// + /// The string equivalent for this filter. + public override string ToString() + { + return filter_; + } + + /// + /// Test a value to see if it is included by the filter. + /// + /// The value to test. + /// True if the value is included, false otherwise. + public bool IsIncluded(string name) + { + bool result = false; + if (inclusions_.Count == 0) + { + result = true; + } + else + { + foreach (Regex r in inclusions_) + { + if (r.IsMatch(name)) + { + result = true; + break; + } + } + } + return result; + } + + /// + /// Test a value to see if it is excluded by the filter. + /// + /// The value to test. + /// True if the value is excluded, false otherwise. + public bool IsExcluded(string name) + { + bool result = false; + foreach (Regex r in exclusions_) + { + if (r.IsMatch(name)) + { + result = true; + break; + } + } + return result; + } + + #region IScanFilter Members + + /// + /// Test a value to see if it matches the filter. + /// + /// The value to test. + /// True if the value matches, false otherwise. + public bool IsMatch(string name) + { + return (IsIncluded(name) && !IsExcluded(name)); + } + + #endregion IScanFilter Members + + /// + /// Compile this filter. + /// + private void Compile() + { + // TODO: Check to see if combining RE's makes it faster/smaller. + // simple scheme would be to have one RE for inclusion and one for exclusion. 
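// Hypothetical sketch (not part of this patch): Compile() below feeds the
// filter through SplitQuoted(), whose escape rule is defined above.
//   SplitQuoted(@"+\.dat$;-^a\;b$")
//   => { @"+\.dat$",  // backslash kept: '.' is not a separator
//        @"-^a;b$" }  // backslash dropped: ';' was an escaped separator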
+ if (filter_ == null) + { + return; + } + + string[] items = SplitQuoted(filter_); + for (int i = 0; i < items.Length; ++i) + { + if ((items[i] != null) && (items[i].Length > 0)) + { + bool include = (items[i][0] != '-'); + string toCompile; + + if (items[i][0] == '+') + { + toCompile = items[i].Substring(1, items[i].Length - 1); + } + else if (items[i][0] == '-') + { + toCompile = items[i].Substring(1, items[i].Length - 1); + } + else + { + toCompile = items[i]; + } + + // NOTE: Regular expressions can fail to compile here for a number of reasons that cause an exception + // these are left unhandled here as the caller is responsible for ensuring all is valid. + // several functions IsValidFilterExpression and IsValidExpression are provided for such checking + if (include) + { + inclusions_.Add(new Regex(toCompile, RegexOptions.IgnoreCase | RegexOptions.Compiled | RegexOptions.Singleline)); + } + else + { + exclusions_.Add(new Regex(toCompile, RegexOptions.IgnoreCase | RegexOptions.Compiled | RegexOptions.Singleline)); + } + } + } + } + + #region Instance Fields + + private string filter_; + private List inclusions_; + private List exclusions_; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/NameFilter.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/NameFilter.cs.meta new file mode 100644 index 0000000..61149ea --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/NameFilter.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 88ff039cc1277465c9f0208e921d5dd6 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/PathFilter.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/PathFilter.cs new file mode 100644 index 0000000..e70109c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/PathFilter.cs @@ -0,0 +1,318 @@ +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Core +{ + /// + /// PathFilter filters directories and files using a form of regular expressions + /// by full path name. + /// See NameFilter for more detail on filtering. + /// + public class PathFilter : IScanFilter + { + #region Constructors + + /// + /// Initialise a new instance of . + /// + /// The filter expression to apply. + public PathFilter(string filter) + { + nameFilter_ = new NameFilter(filter); + } + + #endregion Constructors + + #region IScanFilter Members + + /// + /// Test a name to see if it matches the filter. + /// + /// The name to test. + /// True if the name matches, false otherwise. + /// is used to get the full path before matching. + public virtual bool IsMatch(string name) + { + bool result = false; + + if (name != null) + { + string cooked = (name.Length > 0) ? Path.GetFullPath(name) : ""; + result = nameFilter_.IsMatch(cooked); + } + return result; + } + + private readonly + + #endregion IScanFilter Members + + #region Instance Fields + + NameFilter nameFilter_; + + #endregion Instance Fields + } + + /// + /// ExtendedPathFilter filters based on name, file size, and the last write time of the file. + /// + /// Provides an example of how to customise filtering. + public class ExtendedPathFilter : PathFilter + { + #region Constructors + + /// + /// Initialise a new instance of ExtendedPathFilter. + /// + /// The filter to apply. 
+ /// The minimum file size to include. + /// The maximum file size to include. + public ExtendedPathFilter(string filter, + long minSize, long maxSize) + : base(filter) + { + MinSize = minSize; + MaxSize = maxSize; + } + + /// + /// Initialise a new instance of ExtendedPathFilter. + /// + /// The filter to apply. + /// The minimum to include. + /// The maximum to include. + public ExtendedPathFilter(string filter, + DateTime minDate, DateTime maxDate) + : base(filter) + { + MinDate = minDate; + MaxDate = maxDate; + } + + /// + /// Initialise a new instance of ExtendedPathFilter. + /// + /// The filter to apply. + /// The minimum file size to include. + /// The maximum file size to include. + /// The minimum to include. + /// The maximum to include. + public ExtendedPathFilter(string filter, + long minSize, long maxSize, + DateTime minDate, DateTime maxDate) + : base(filter) + { + MinSize = minSize; + MaxSize = maxSize; + MinDate = minDate; + MaxDate = maxDate; + } + + #endregion Constructors + + #region IScanFilter Members + + /// + /// Test a filename to see if it matches the filter. + /// + /// The filename to test. + /// True if the filter matches, false otherwise. + /// The doesnt exist + public override bool IsMatch(string name) + { + bool result = base.IsMatch(name); + + if (result) + { + var fileInfo = new FileInfo(name); + result = + (MinSize <= fileInfo.Length) && + (MaxSize >= fileInfo.Length) && + (MinDate <= fileInfo.LastWriteTime) && + (MaxDate >= fileInfo.LastWriteTime) + ; + } + return result; + } + + #endregion IScanFilter Members + + #region Properties + + /// + /// Get/set the minimum size/length for a file that will match this filter. + /// + /// The default value is zero. + /// value is less than zero; greater than + public long MinSize + { + get { return minSize_; } + set + { + if ((value < 0) || (maxSize_ < value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + + minSize_ = value; + } + } + + /// + /// Get/set the maximum size/length for a file that will match this filter. + /// + /// The default value is + /// value is less than zero or less than + public long MaxSize + { + get { return maxSize_; } + set + { + if ((value < 0) || (minSize_ > value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + + maxSize_ = value; + } + } + + /// + /// Get/set the minimum value that will match for this filter. + /// + /// Files with a LastWrite time less than this value are excluded by the filter. + public DateTime MinDate + { + get + { + return minDate_; + } + + set + { + if (value > maxDate_) + { + throw new ArgumentOutOfRangeException(nameof(value), "Exceeds MaxDate"); + } + + minDate_ = value; + } + } + + /// + /// Get/set the maximum value that will match for this filter. + /// + /// Files with a LastWrite time greater than this value are excluded by the filter. + public DateTime MaxDate + { + get + { + return maxDate_; + } + + set + { + if (minDate_ > value) + { + throw new ArgumentOutOfRangeException(nameof(value), "Exceeds MinDate"); + } + + maxDate_ = value; + } + } + + #endregion Properties + + #region Instance Fields + + private long minSize_; + private long maxSize_ = long.MaxValue; + private DateTime minDate_ = DateTime.MinValue; + private DateTime maxDate_ = DateTime.MaxValue; + + #endregion Instance Fields + } + + /// + /// NameAndSizeFilter filters based on name and file size. + /// + /// A sample showing how filters might be extended. 
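// Hypothetical sketch (not part of this patch): ExtendedPathFilter above
// with invented bounds: *.log files between 1 KiB and 10 MiB, last written
// during 2023. IsMatch opens a FileInfo, so the file must exist.
static bool IsRecentLog(string path)
{
    var filter = new ExtendedPathFilter(@"+\.log$",
        1024, 10L * 1024 * 1024,
        new DateTime(2023, 1, 1), new DateTime(2023, 12, 31));
    return filter.IsMatch(path);
}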
+ [Obsolete("Use ExtendedPathFilter instead")] + public class NameAndSizeFilter : PathFilter + { + /// + /// Initialise a new instance of NameAndSizeFilter. + /// + /// The filter to apply. + /// The minimum file size to include. + /// The maximum file size to include. + public NameAndSizeFilter(string filter, long minSize, long maxSize) + : base(filter) + { + MinSize = minSize; + MaxSize = maxSize; + } + + /// + /// Test a filename to see if it matches the filter. + /// + /// The filename to test. + /// True if the filter matches, false otherwise. + public override bool IsMatch(string name) + { + bool result = base.IsMatch(name); + + if (result) + { + var fileInfo = new FileInfo(name); + long length = fileInfo.Length; + result = + (MinSize <= length) && + (MaxSize >= length); + } + return result; + } + + /// + /// Get/set the minimum size for a file that will match this filter. + /// + public long MinSize + { + get { return minSize_; } + set + { + if ((value < 0) || (maxSize_ < value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + + minSize_ = value; + } + } + + /// + /// Get/set the maximum size for a file that will match this filter. + /// + public long MaxSize + { + get { return maxSize_; } + set + { + if ((value < 0) || (minSize_ > value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + + maxSize_ = value; + } + } + + #region Instance Fields + + private long minSize_; + private long maxSize_ = long.MaxValue; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/PathFilter.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/PathFilter.cs.meta new file mode 100644 index 0000000..4298264 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/PathFilter.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: afd26c2c133b447a0ac9755c5c7972b1 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/StreamUtils.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/StreamUtils.cs new file mode 100644 index 0000000..6d0d9b3 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/StreamUtils.cs @@ -0,0 +1,284 @@ +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Core +{ + /// + /// Provides simple " utilities. + /// + public sealed class StreamUtils + { + /// + /// Read from a ensuring all the required data is read. + /// + /// The stream to read. + /// The buffer to fill. + /// + static public void ReadFully(Stream stream, byte[] buffer) + { + ReadFully(stream, buffer, 0, buffer.Length); + } + + /// + /// Read from a " ensuring all the required data is read. + /// + /// The stream to read data from. + /// The buffer to store data in. + /// The offset at which to begin storing data. + /// The number of bytes of data to store. + /// Required parameter is null + /// and or are invalid. + /// End of stream is encountered before all the data has been read. + static public void ReadFully(Stream stream, byte[] buffer, int offset, int count) + { + if (stream == null) + { + throw new ArgumentNullException(nameof(stream)); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + // Offset can equal length when buffer and count are 0. 
+ if ((offset < 0) || (offset > buffer.Length)) + { + throw new ArgumentOutOfRangeException(nameof(offset)); + } + + if ((count < 0) || (offset + count > buffer.Length)) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + while (count > 0) + { + int readCount = stream.Read(buffer, offset, count); + if (readCount <= 0) + { + throw new EndOfStreamException(); + } + offset += readCount; + count -= readCount; + } + } + + /// + /// Read as much data as possible from a ", up to the requested number of bytes + /// + /// The stream to read data from. + /// The buffer to store data in. + /// The offset at which to begin storing data. + /// The number of bytes of data to store. + /// Required parameter is null + /// and or are invalid. + static public int ReadRequestedBytes(Stream stream, byte[] buffer, int offset, int count) + { + if (stream == null) + { + throw new ArgumentNullException(nameof(stream)); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + // Offset can equal length when buffer and count are 0. + if ((offset < 0) || (offset > buffer.Length)) + { + throw new ArgumentOutOfRangeException(nameof(offset)); + } + + if ((count < 0) || (offset + count > buffer.Length)) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + int totalReadCount = 0; + while (count > 0) + { + int readCount = stream.Read(buffer, offset, count); + if (readCount <= 0) + { + break; + } + offset += readCount; + count -= readCount; + totalReadCount += readCount; + } + + return totalReadCount; + } + + /// + /// Copy the contents of one to another. + /// + /// The stream to source data from. + /// The stream to write data to. + /// The buffer to use during copying. + static public void Copy(Stream source, Stream destination, byte[] buffer) + { + if (source == null) + { + throw new ArgumentNullException(nameof(source)); + } + + if (destination == null) + { + throw new ArgumentNullException(nameof(destination)); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + // Ensure a reasonable size of buffer is used without being prohibitive. + if (buffer.Length < 128) + { + throw new ArgumentException("Buffer is too small", nameof(buffer)); + } + + bool copying = true; + + while (copying) + { + int bytesRead = source.Read(buffer, 0, buffer.Length); + if (bytesRead > 0) + { + destination.Write(buffer, 0, bytesRead); + } + else + { + destination.Flush(); + copying = false; + } + } + } + + /// + /// Copy the contents of one to another. + /// + /// The stream to source data from. + /// The stream to write data to. + /// The buffer to use during copying. + /// The progress handler delegate to use. + /// The minimum between progress updates. + /// The source for this event. + /// The name to use with the event. + /// This form is specialised for use within #Zip to support events during archive operations. + static public void Copy(Stream source, Stream destination, + byte[] buffer, ProgressHandler progressHandler, TimeSpan updateInterval, object sender, string name) + { + Copy(source, destination, buffer, progressHandler, updateInterval, sender, name, -1); + } + + /// + /// Copy the contents of one to another. + /// + /// The stream to source data from. + /// The stream to write data to. + /// The buffer to use during copying. + /// The progress handler delegate to use. + /// The minimum between progress updates. + /// The source for this event. + /// The name to use with the event. 
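// Hypothetical sketch (not part of this patch): driving the progress
// overload declared above. 0% is always reported first (see the
// implementation below); later updates arrive at most once per
// updateInterval, and the handler may cancel the copy via ContinueRunning.
// Buffer size and name are invented.
static void CopyWithProgress(System.IO.Stream src, System.IO.Stream dst)
{
    StreamUtils.Copy(src, dst, new byte[4096],
        (sender, e) => System.Console.WriteLine($"{e.Name}: {e.PercentComplete:F1}%"),
        System.TimeSpan.FromMilliseconds(500), null, "payload.bin");
}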
+ /// A predetermined fixed target value to use with progress updates. + /// If the value is negative the target is calculated by looking at the stream. + /// This form is specialised for use within #Zip to support events during archive operations. + static public void Copy(Stream source, Stream destination, + byte[] buffer, + ProgressHandler progressHandler, TimeSpan updateInterval, + object sender, string name, long fixedTarget) + { + if (source == null) + { + throw new ArgumentNullException(nameof(source)); + } + + if (destination == null) + { + throw new ArgumentNullException(nameof(destination)); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + // Ensure a reasonable size of buffer is used without being prohibitive. + if (buffer.Length < 128) + { + throw new ArgumentException("Buffer is too small", nameof(buffer)); + } + + if (progressHandler == null) + { + throw new ArgumentNullException(nameof(progressHandler)); + } + + bool copying = true; + + DateTime marker = DateTime.Now; + long processed = 0; + long target = 0; + + if (fixedTarget >= 0) + { + target = fixedTarget; + } + else if (source.CanSeek) + { + target = source.Length - source.Position; + } + + // Always fire 0% progress.. + var args = new ProgressEventArgs(name, processed, target); + progressHandler(sender, args); + + bool progressFired = true; + + while (copying) + { + int bytesRead = source.Read(buffer, 0, buffer.Length); + if (bytesRead > 0) + { + processed += bytesRead; + progressFired = false; + destination.Write(buffer, 0, bytesRead); + } + else + { + destination.Flush(); + copying = false; + } + + if (DateTime.Now - marker > updateInterval) + { + progressFired = true; + marker = DateTime.Now; + args = new ProgressEventArgs(name, processed, target); + progressHandler(sender, args); + + copying = args.ContinueRunning; + } + } + + if (!progressFired) + { + args = new ProgressEventArgs(name, processed, target); + progressHandler(sender, args); + } + } + + /// + /// Initialise an instance of + /// + private StreamUtils() + { + // Do nothing. + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/StreamUtils.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/StreamUtils.cs.meta new file mode 100644 index 0000000..5e84d04 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/StreamUtils.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 370bf850b805d4195bd6ca7bd122066d +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/WindowsPathUtils.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/WindowsPathUtils.cs new file mode 100644 index 0000000..f02a0af --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/WindowsPathUtils.cs @@ -0,0 +1,67 @@ +namespace ICSharpCode.SharpZipLib.Core +{ + /// + /// WindowsPathUtils provides simple utilities for handling windows paths. + /// + public abstract class WindowsPathUtils + { + /// + /// Initializes a new instance of the class. + /// + internal WindowsPathUtils() + { + } + + /// + /// Remove any path root present in the path + /// + /// A containing path information. + /// The path with the root removed if it was present; path otherwise. + /// Unlike the class the path isnt otherwise checked for validity. 
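// Hypothetical sketch (not part of this patch): DropPathRoot below, on the
// three path shapes it handles (paths invented).
//   DropPathRoot(@"C:\temp\file.txt")        => @"temp\file.txt"
//   DropPathRoot(@"C:file.txt")              => "file.txt"
//   DropPathRoot(@"\\machine\share\a\b.txt") => @"a\b.txt"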
+ public static string DropPathRoot(string path) + { + string result = path; + + if (!string.IsNullOrEmpty(path)) + { + if ((path[0] == '\\') || (path[0] == '/')) + { + // UNC name ? + if ((path.Length > 1) && ((path[1] == '\\') || (path[1] == '/'))) + { + int index = 2; + int elements = 2; + + // Scan for two separate elements \\machine\share\restofpath + while ((index <= path.Length) && + (((path[index] != '\\') && (path[index] != '/')) || (--elements > 0))) + { + index++; + } + + index++; + + if (index < path.Length) + { + result = path.Substring(index); + } + else + { + result = ""; + } + } + } + else if ((path.Length > 1) && (path[1] == ':')) + { + int dropCount = 2; + if ((path.Length > 2) && ((path[2] == '\\') || (path[2] == '/'))) + { + dropCount = 3; + } + result = result.Remove(0, dropCount); + } + } + return result; + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/WindowsPathUtils.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/WindowsPathUtils.cs.meta new file mode 100644 index 0000000..43c4d44 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Core/WindowsPathUtils.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 24d67d242a79f46d78e066b9e868c01b +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption.meta new file mode 100644 index 0000000..4cc5fbf --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 172184c1ed0b84ba7940eb831b66bc36 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/PkzipClassic.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/PkzipClassic.cs new file mode 100644 index 0000000..7a8c55e --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/PkzipClassic.cs @@ -0,0 +1,485 @@ +using ICSharpCode.SharpZipLib.Checksum; +using System; +using System.Security.Cryptography; + +namespace ICSharpCode.SharpZipLib.Encryption +{ + /// + /// PkzipClassic embodies the classic or original encryption facilities used in Pkzip archives. + /// While it has been superceded by more recent and more powerful algorithms, its still in use and + /// is viable for preventing casual snooping + /// + public abstract class PkzipClassic : SymmetricAlgorithm + { + /// + /// Generates new encryption keys based on given seed + /// + /// The seed value to initialise keys with. + /// A new key value. 
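// Hypothetical sketch (not part of this patch): GenerateKeys below folds
// every seed byte through the PKZIP key-update rule (a CRC-32 step plus the
// 134775813 linear-congruential step) and returns the three 32-bit key
// registers serialised little-endian as 12 bytes. The password handling
// here is invented.
static byte[] KeysFromPassword(string password)
{
    return PkzipClassic.GenerateKeys(System.Text.Encoding.ASCII.GetBytes(password));
}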
+ static public byte[] GenerateKeys(byte[] seed) + { + if (seed == null) + { + throw new ArgumentNullException(nameof(seed)); + } + + if (seed.Length == 0) + { + throw new ArgumentException("Length is zero", nameof(seed)); + } + + uint[] newKeys = { + 0x12345678, + 0x23456789, + 0x34567890 + }; + + for (int i = 0; i < seed.Length; ++i) + { + newKeys[0] = Crc32.ComputeCrc32(newKeys[0], seed[i]); + newKeys[1] = newKeys[1] + (byte)newKeys[0]; + newKeys[1] = newKeys[1] * 134775813 + 1; + newKeys[2] = Crc32.ComputeCrc32(newKeys[2], (byte)(newKeys[1] >> 24)); + } + + byte[] result = new byte[12]; + result[0] = (byte)(newKeys[0] & 0xff); + result[1] = (byte)((newKeys[0] >> 8) & 0xff); + result[2] = (byte)((newKeys[0] >> 16) & 0xff); + result[3] = (byte)((newKeys[0] >> 24) & 0xff); + result[4] = (byte)(newKeys[1] & 0xff); + result[5] = (byte)((newKeys[1] >> 8) & 0xff); + result[6] = (byte)((newKeys[1] >> 16) & 0xff); + result[7] = (byte)((newKeys[1] >> 24) & 0xff); + result[8] = (byte)(newKeys[2] & 0xff); + result[9] = (byte)((newKeys[2] >> 8) & 0xff); + result[10] = (byte)((newKeys[2] >> 16) & 0xff); + result[11] = (byte)((newKeys[2] >> 24) & 0xff); + return result; + } + } + + /// + /// PkzipClassicCryptoBase provides the low level facilities for encryption + /// and decryption using the PkzipClassic algorithm. + /// + internal class PkzipClassicCryptoBase + { + /// + /// Transform a single byte + /// + /// + /// The transformed value + /// + protected byte TransformByte() + { + uint temp = ((keys[2] & 0xFFFF) | 2); + return (byte)((temp * (temp ^ 1)) >> 8); + } + + /// + /// Set the key schedule for encryption/decryption. + /// + /// The data use to set the keys from. + protected void SetKeys(byte[] keyData) + { + if (keyData == null) + { + throw new ArgumentNullException(nameof(keyData)); + } + + if (keyData.Length != 12) + { + throw new InvalidOperationException("Key length is not valid"); + } + + keys = new uint[3]; + keys[0] = (uint)((keyData[3] << 24) | (keyData[2] << 16) | (keyData[1] << 8) | keyData[0]); + keys[1] = (uint)((keyData[7] << 24) | (keyData[6] << 16) | (keyData[5] << 8) | keyData[4]); + keys[2] = (uint)((keyData[11] << 24) | (keyData[10] << 16) | (keyData[9] << 8) | keyData[8]); + } + + /// + /// Update encryption keys + /// + protected void UpdateKeys(byte ch) + { + keys[0] = Crc32.ComputeCrc32(keys[0], ch); + keys[1] = keys[1] + (byte)keys[0]; + keys[1] = keys[1] * 134775813 + 1; + keys[2] = Crc32.ComputeCrc32(keys[2], (byte)(keys[1] >> 24)); + } + + /// + /// Reset the internal state. + /// + protected void Reset() + { + keys[0] = 0; + keys[1] = 0; + keys[2] = 0; + } + + #region Instance Fields + + private uint[] keys; + + #endregion Instance Fields + } + + /// + /// PkzipClassic CryptoTransform for encryption. + /// + internal class PkzipClassicEncryptCryptoTransform : PkzipClassicCryptoBase, ICryptoTransform + { + /// + /// Initialise a new instance of + /// + /// The key block to use. + internal PkzipClassicEncryptCryptoTransform(byte[] keyBlock) + { + SetKeys(keyBlock); + } + + #region ICryptoTransform Members + + /// + /// Transforms the specified region of the specified byte array. + /// + /// The input for which to compute the transform. + /// The offset into the byte array from which to begin using data. + /// The number of bytes in the byte array to use as data. + /// The computed transform. 
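For intuition, TransformByte above derives the keystream byte from keys[2] alone; the same expression evaluated standalone with an arbitrary register value (illustrative only):

uint temp = (0x12345678u & 0xFFFF) | 2;            // 0x567A
byte keystream = (byte)((temp * (temp ^ 1)) >> 8); // the classic PKZIP keystream derivation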
+ public byte[] TransformFinalBlock(byte[] inputBuffer, int inputOffset, int inputCount) + { + byte[] result = new byte[inputCount]; + TransformBlock(inputBuffer, inputOffset, inputCount, result, 0); + return result; + } + + /// + /// Transforms the specified region of the input byte array and copies + /// the resulting transform to the specified region of the output byte array. + /// + /// The input for which to compute the transform. + /// The offset into the input byte array from which to begin using data. + /// The number of bytes in the input byte array to use as data. + /// The output to which to write the transform. + /// The offset into the output byte array from which to begin writing data. + /// The number of bytes written. + public int TransformBlock(byte[] inputBuffer, int inputOffset, int inputCount, byte[] outputBuffer, int outputOffset) + { + for (int i = inputOffset; i < inputOffset + inputCount; ++i) + { + byte oldbyte = inputBuffer[i]; + outputBuffer[outputOffset++] = (byte)(inputBuffer[i] ^ TransformByte()); + UpdateKeys(oldbyte); + } + return inputCount; + } + + /// + /// Gets a value indicating whether the current transform can be reused. + /// + public bool CanReuseTransform + { + get + { + return true; + } + } + + /// + /// Gets the size of the input data blocks in bytes. + /// + public int InputBlockSize + { + get + { + return 1; + } + } + + /// + /// Gets the size of the output data blocks in bytes. + /// + public int OutputBlockSize + { + get + { + return 1; + } + } + + /// + /// Gets a value indicating whether multiple blocks can be transformed. + /// + public bool CanTransformMultipleBlocks + { + get + { + return true; + } + } + + #endregion ICryptoTransform Members + + #region IDisposable Members + + /// + /// Cleanup internal state. + /// + public void Dispose() + { + Reset(); + } + + #endregion IDisposable Members + } + + /// + /// PkzipClassic CryptoTransform for decryption. + /// + internal class PkzipClassicDecryptCryptoTransform : PkzipClassicCryptoBase, ICryptoTransform + { + /// + /// Initialise a new instance of . + /// + /// The key block to decrypt with. + internal PkzipClassicDecryptCryptoTransform(byte[] keyBlock) + { + SetKeys(keyBlock); + } + + #region ICryptoTransform Members + + /// + /// Transforms the specified region of the specified byte array. + /// + /// The input for which to compute the transform. + /// The offset into the byte array from which to begin using data. + /// The number of bytes in the byte array to use as data. + /// The computed transform. + public byte[] TransformFinalBlock(byte[] inputBuffer, int inputOffset, int inputCount) + { + byte[] result = new byte[inputCount]; + TransformBlock(inputBuffer, inputOffset, inputCount, result, 0); + return result; + } + + /// + /// Transforms the specified region of the input byte array and copies + /// the resulting transform to the specified region of the output byte array. + /// + /// The input for which to compute the transform. + /// The offset into the input byte array from which to begin using data. + /// The number of bytes in the input byte array to use as data. + /// The output to which to write the transform. + /// The offset into the output byte array from which to begin writing data. + /// The number of bytes written. 
+ public int TransformBlock(byte[] inputBuffer, int inputOffset, int inputCount, byte[] outputBuffer, int outputOffset) + { + for (int i = inputOffset; i < inputOffset + inputCount; ++i) + { + var newByte = (byte)(inputBuffer[i] ^ TransformByte()); + outputBuffer[outputOffset++] = newByte; + UpdateKeys(newByte); + } + return inputCount; + } + + /// + /// Gets a value indicating whether the current transform can be reused. + /// + public bool CanReuseTransform + { + get + { + return true; + } + } + + /// + /// Gets the size of the input data blocks in bytes. + /// + public int InputBlockSize + { + get + { + return 1; + } + } + + /// + /// Gets the size of the output data blocks in bytes. + /// + public int OutputBlockSize + { + get + { + return 1; + } + } + + /// + /// Gets a value indicating whether multiple blocks can be transformed. + /// + public bool CanTransformMultipleBlocks + { + get + { + return true; + } + } + + #endregion ICryptoTransform Members + + #region IDisposable Members + + /// + /// Cleanup internal state. + /// + public void Dispose() + { + Reset(); + } + + #endregion IDisposable Members + } + + /// + /// Defines a wrapper object to access the Pkzip algorithm. + /// This class cannot be inherited. + /// + public sealed class PkzipClassicManaged : PkzipClassic + { + /// + /// Get / set the applicable block size in bits. + /// + /// The only valid block size is 8. + public override int BlockSize + { + get + { + return 8; + } + + set + { + if (value != 8) + { + throw new CryptographicException("Block size is invalid"); + } + } + } + + /// + /// Get an array of legal key sizes. + /// + public override KeySizes[] LegalKeySizes + { + get + { + KeySizes[] keySizes = new KeySizes[1]; + keySizes[0] = new KeySizes(12 * 8, 12 * 8, 0); + return keySizes; + } + } + + /// + /// Generate an initial vector. + /// + public override void GenerateIV() + { + // Do nothing. + } + + /// + /// Get an array of legal block sizes. + /// + public override KeySizes[] LegalBlockSizes + { + get + { + KeySizes[] keySizes = new KeySizes[1]; + keySizes[0] = new KeySizes(1 * 8, 1 * 8, 0); + return keySizes; + } + } + + /// + /// Get / set the key value applicable. + /// + public override byte[] Key + { + get + { + if (key_ == null) + { + GenerateKey(); + } + + return (byte[])key_.Clone(); + } + + set + { + if (value == null) + { + throw new ArgumentNullException(nameof(value)); + } + + if (value.Length != 12) + { + throw new CryptographicException("Key size is illegal"); + } + + key_ = (byte[])value.Clone(); + } + } + + /// + /// Generate a new random key. + /// + public override void GenerateKey() + { + key_ = new byte[12]; + var rnd = new Random(); + rnd.NextBytes(key_); + } + + /// + /// Create an encryptor. + /// + /// The key to use for this encryptor. + /// Initialisation vector for the new encryptor. + /// Returns a new PkzipClassic encryptor + public override ICryptoTransform CreateEncryptor( + byte[] rgbKey, + byte[] rgbIV) + { + key_ = rgbKey; + return new PkzipClassicEncryptCryptoTransform(Key); + } + + /// + /// Create a decryptor. + /// + /// Keys to use for this new decryptor. + /// Initialisation vector for the new decryptor. + /// Returns a new decryptor. 
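A minimal round-trip sketch with PkzipClassicManaged, using CreateEncryptor above and CreateDecryptor just below (System.Text assumed imported; the key must be exactly 12 bytes and the IV argument is unused by this algorithm):

var pkzip = new PkzipClassicManaged();
byte[] key = PkzipClassic.GenerateKeys(Encoding.ASCII.GetBytes("secret"));
byte[] plain = Encoding.ASCII.GetBytes("hello");

byte[] cipher = pkzip.CreateEncryptor(key, null).TransformFinalBlock(plain, 0, plain.Length);
byte[] restored = pkzip.CreateDecryptor(key, null).TransformFinalBlock(cipher, 0, cipher.Length);
// restored now equals plain.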
+ public override ICryptoTransform CreateDecryptor( + byte[] rgbKey, + byte[] rgbIV) + { + key_ = rgbKey; + return new PkzipClassicDecryptCryptoTransform(Key); + } + + #region Instance Fields + + private byte[] key_; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/PkzipClassic.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/PkzipClassic.cs.meta new file mode 100644 index 0000000..3042b90 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/PkzipClassic.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: eb281df25ebea4e62aa225052969402e +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESStream.cs new file mode 100644 index 0000000..4f649e8 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESStream.cs @@ -0,0 +1,220 @@ +using System; +using System.IO; +using System.Security.Cryptography; +using ICSharpCode.SharpZipLib.Core; + +namespace ICSharpCode.SharpZipLib.Encryption +{ + /// + /// Encrypts and decrypts AES ZIP + /// + /// + /// Based on information from http://www.winzip.com/aes_info.htm + /// and http://www.gladman.me.uk/cryptography_technology/fileencrypt/ + /// + internal class ZipAESStream : CryptoStream + { + /// + /// Constructor + /// + /// The stream on which to perform the cryptographic transformation. + /// Instance of ZipAESTransform + /// Read or Write + public ZipAESStream(Stream stream, ZipAESTransform transform, CryptoStreamMode mode) + : base(stream, transform, mode) + { + _stream = stream; + _transform = transform; + _slideBuffer = new byte[1024]; + + // mode: + // CryptoStreamMode.Read means we read from "stream" and pass decrypted to our Read() method. + // Write bypasses this stream and uses the Transform directly. + if (mode != CryptoStreamMode.Read) + { + throw new Exception("ZipAESStream only for read"); + } + } + + // The final n bytes of the AES stream contain the Auth Code. + private const int AUTH_CODE_LENGTH = 10; + + // Blocksize is always 16 here, even for AES-256 which has transform.InputBlockSize of 32. + private const int CRYPTO_BLOCK_SIZE = 16; + + // total length of block + auth code + private const int BLOCK_AND_AUTH = CRYPTO_BLOCK_SIZE + AUTH_CODE_LENGTH; + + private Stream _stream; + private ZipAESTransform _transform; + private byte[] _slideBuffer; + private int _slideBufStartPos; + private int _slideBufFreePos; + + // Buffer block transforms to enable partial reads + private byte[] _transformBuffer = null;// new byte[CRYPTO_BLOCK_SIZE]; + private int _transformBufferFreePos; + private int _transformBufferStartPos; + + // Do we have some buffered data available? + private bool HasBufferedData =>_transformBuffer != null && _transformBufferStartPos < _transformBufferFreePos; + + /// + /// Reads a sequence of bytes from the current CryptoStream into buffer, + /// and advances the position within the stream by the number of bytes read. 
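How this stream is typically wired up, as a sketch (ZipAESTransform appears later in this diff; the password, salt bytes, and source stream are illustrative):

// blockSize 16 selects AES-128; writeMode false because we are decrypting.
var transform = new ZipAESTransform("password", saltBytes, 16, false);
var aes = new ZipAESStream(encryptedSource, transform, CryptoStreamMode.Read);
// aes.Read(...) now yields plaintext and verifies the trailing auth code.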
+ /// + public override int Read(byte[] buffer, int offset, int count) + { + // Nothing to do + if (count == 0) + return 0; + + // If we have buffered data, read that first + int nBytes = 0; + if (HasBufferedData) + { + nBytes = ReadBufferedData(buffer, offset, count); + + // Read all requested data from the buffer + if (nBytes == count) + return nBytes; + + offset += nBytes; + count -= nBytes; + } + + // Read more data from the input, if available + if (_slideBuffer != null) + nBytes += ReadAndTransform(buffer, offset, count); + + return nBytes; + } + + // Read data from the underlying stream and decrypt it + private int ReadAndTransform(byte[] buffer, int offset, int count) + { + int nBytes = 0; + while (nBytes < count) + { + int bytesLeftToRead = count - nBytes; + + // Calculate buffer quantities vs read-ahead size, and check for sufficient free space + int byteCount = _slideBufFreePos - _slideBufStartPos; + + // Need to handle final block and Auth Code specially, but don't know total data length. + // Maintain a read-ahead equal to the length of (crypto block + Auth Code). + // When that runs out we can detect these final sections. + int lengthToRead = BLOCK_AND_AUTH - byteCount; + if (_slideBuffer.Length - _slideBufFreePos < lengthToRead) + { + // Shift the data to the beginning of the buffer + int iTo = 0; + for (int iFrom = _slideBufStartPos; iFrom < _slideBufFreePos; iFrom++, iTo++) + { + _slideBuffer[iTo] = _slideBuffer[iFrom]; + } + _slideBufFreePos -= _slideBufStartPos; // Note the -= + _slideBufStartPos = 0; + } + int obtained = StreamUtils.ReadRequestedBytes(_stream, _slideBuffer, _slideBufFreePos, lengthToRead); + _slideBufFreePos += obtained; + + // Recalculate how much data we now have + byteCount = _slideBufFreePos - _slideBufStartPos; + if (byteCount >= BLOCK_AND_AUTH) + { + var read = TransformAndBufferBlock(buffer, offset, bytesLeftToRead, CRYPTO_BLOCK_SIZE); + nBytes += read; + offset += read; + } + else + { + // Last round. + if (byteCount > AUTH_CODE_LENGTH) + { + // At least one byte of data plus auth code + int finalBlock = byteCount - AUTH_CODE_LENGTH; + nBytes += TransformAndBufferBlock(buffer, offset, bytesLeftToRead, finalBlock); + } + else if (byteCount < AUTH_CODE_LENGTH) + throw new Exception("Internal error missed auth code"); // Coding bug + // Final block done. Check Auth code. + byte[] calcAuthCode = _transform.GetAuthCode(); + for (int i = 0; i < AUTH_CODE_LENGTH; i++) + { + if (calcAuthCode[i] != _slideBuffer[_slideBufStartPos + i]) + { + throw new Exception("AES Authentication Code does not match. This is a super-CRC check on the data in the file after compression and encryption. \r\n" + + "The file may be damaged."); + } + } + + // don't need this any more, so use it as a 'complete' flag + _slideBuffer = null; + + break; // Reached the auth code + } + } + return nBytes; + } + + // read some buffered data + private int ReadBufferedData(byte[] buffer, int offset, int count) + { + int copyCount = Math.Min(count, _transformBufferFreePos - _transformBufferStartPos); + + Array.Copy(_transformBuffer, _transformBufferStartPos, buffer, offset, copyCount); + _transformBufferStartPos += copyCount; + + return copyCount; + } + + // Perform the crypto transform, and buffer the data if less than one block has been requested. 
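A worked trace of the buffering that TransformAndBufferBlock (below) performs when a caller requests less than one crypto block:

// Read(buffer, 0, 5) with an empty transform buffer:
//   TransformAndBufferBlock(count: 5, blockSize: 16)
//     decrypts a full 16-byte block into _transformBuffer,
//     copies bytes [0..5) to the caller, and leaves
//     _transformBufferStartPos = 5, _transformBufferFreePos = 16.
// The next Read first drains bytes [5..16) via ReadBufferedData
// before touching the underlying stream again.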
+ private int TransformAndBufferBlock(byte[] buffer, int offset, int count, int blockSize) + { + // If the requested data is greater than one block, transform it directly into the output + // If it's smaller, do it into a temporary buffer and copy the requested part + bool bufferRequired = (blockSize > count); + + if (bufferRequired && _transformBuffer == null) + _transformBuffer = new byte[CRYPTO_BLOCK_SIZE]; + + var targetBuffer = bufferRequired ? _transformBuffer : buffer; + var targetOffset = bufferRequired ? 0 : offset; + + // Transform the data + _transform.TransformBlock(_slideBuffer, + _slideBufStartPos, + blockSize, + targetBuffer, + targetOffset); + + _slideBufStartPos += blockSize; + + if (!bufferRequired) + { + return blockSize; + } + else + { + Array.Copy(_transformBuffer, 0, buffer, offset, count); + _transformBufferStartPos = count; + _transformBufferFreePos = blockSize; + + return count; + } + } + + /// + /// Writes a sequence of bytes to the current stream and advances the current position within this stream by the number of bytes written. + /// + /// An array of bytes. This method copies count bytes from buffer to the current stream. + /// The byte offset in buffer at which to begin copying bytes to the current stream. + /// The number of bytes to be written to the current stream. + public override void Write(byte[] buffer, int offset, int count) + { + // ZipAESStream is used for reading but not for writing. Writing uses the ZipAESTransform directly. + throw new NotImplementedException(); + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESStream.cs.meta new file mode 100644 index 0000000..b47046e --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: a78cc67e59b394ca791b6240248dbc85 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESTransform.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESTransform.cs new file mode 100644 index 0000000..437e25c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESTransform.cs @@ -0,0 +1,223 @@ +using System; +using System.Security.Cryptography; + +namespace ICSharpCode.SharpZipLib.Encryption +{ + /// + /// Transforms stream using AES in CTR mode + /// + internal class ZipAESTransform : ICryptoTransform + { +#if NET45 + class IncrementalHash : HMACSHA1 + { + bool _finalised; + public IncrementalHash(byte[] key) : base(key) { } + public static IncrementalHash CreateHMAC(string n, byte[] key) => new IncrementalHash(key); + public void AppendData(byte[] buffer, int offset, int count) => TransformBlock(buffer, offset, count, buffer, offset); + public byte[] GetHashAndReset() + { + if (!_finalised) + { + byte[] dummy = new byte[0]; + TransformFinalBlock(dummy, 0, 0); + _finalised = true; + } + return Hash; + } + } + + static class HashAlgorithmName + { + public static string SHA1 = null; + } +#endif + + private const int PWD_VER_LENGTH = 2; + + // WinZip use iteration count of 1000 for PBKDF2 key generation + private const int KEY_ROUNDS = 1000; + + // For 128-bit AES (16 bytes) the encryption is implemented as expected. 
+ // For 256-bit AES (32 bytes) WinZip do full 256 bit AES of the nonce to create the encryption + // block but use only the first 16 bytes of it, and discard the second half. + private const int ENCRYPT_BLOCK = 16; + + private int _blockSize; + private readonly ICryptoTransform _encryptor; + private readonly byte[] _counterNonce; + private byte[] _encryptBuffer; + private int _encrPos; + private byte[] _pwdVerifier; + private IncrementalHash _hmacsha1; + private byte[] _authCode = null; + + private bool _writeMode; + + /// + /// Constructor. + /// + /// Password string + /// Random bytes, length depends on encryption strength. + /// 128 bits = 8 bytes, 192 bits = 12 bytes, 256 bits = 16 bytes. + /// The encryption strength, in bytes eg 16 for 128 bits. + /// True when creating a zip, false when reading. For the AuthCode. + /// + public ZipAESTransform(string key, byte[] saltBytes, int blockSize, bool writeMode) + { + if (blockSize != 16 && blockSize != 32) // 24 valid for AES but not supported by Winzip + throw new Exception("Invalid blocksize " + blockSize + ". Must be 16 or 32."); + if (saltBytes.Length != blockSize / 2) + throw new Exception("Invalid salt len. Must be " + blockSize / 2 + " for blocksize " + blockSize); + // initialise the encryption buffer and buffer pos + _blockSize = blockSize; + _encryptBuffer = new byte[_blockSize]; + _encrPos = ENCRYPT_BLOCK; + + // Performs the equivalent of derive_key in Dr Brian Gladman's pwd2key.c + var pdb = new Rfc2898DeriveBytes(key, saltBytes, KEY_ROUNDS); + var rm = Aes.Create(); + rm.Mode = CipherMode.ECB; // No feedback from cipher for CTR mode + _counterNonce = new byte[_blockSize]; + byte[] key1bytes = pdb.GetBytes(_blockSize); + byte[] key2bytes = pdb.GetBytes(_blockSize); + + // Use empty IV for AES + _encryptor = rm.CreateEncryptor(key1bytes, new byte[16]); + _pwdVerifier = pdb.GetBytes(PWD_VER_LENGTH); + // + _hmacsha1 = IncrementalHash.CreateHMAC(HashAlgorithmName.SHA1, key2bytes); + _writeMode = writeMode; + } + + /// + /// Implement the ICryptoTransform method. + /// + public int TransformBlock(byte[] inputBuffer, int inputOffset, int inputCount, byte[] outputBuffer, int outputOffset) + { + // Pass the data stream to the hash algorithm for generating the Auth Code. + // This does not change the inputBuffer. Do this before decryption for read mode. + if (!_writeMode) + { + _hmacsha1.AppendData(inputBuffer, inputOffset, inputCount); + } + // Encrypt with AES in CTR mode. Regards to Dr Brian Gladman for this. + int ix = 0; + while (ix < inputCount) + { + if (_encrPos == ENCRYPT_BLOCK) + { + /* increment encryption nonce */ + int j = 0; + while (++_counterNonce[j] == 0) + { + ++j; + } + /* encrypt the nonce to form next xor buffer */ + _encryptor.TransformBlock(_counterNonce, 0, _blockSize, _encryptBuffer, 0); + _encrPos = 0; + } + outputBuffer[ix + outputOffset] = (byte)(inputBuffer[ix + inputOffset] ^ _encryptBuffer[_encrPos++]); + // + ix++; + } + if (_writeMode) + { + // This does not change the buffer. + _hmacsha1.AppendData(outputBuffer, outputOffset, inputCount); + } + return inputCount; + } + + /// + /// Returns the 2 byte password verifier + /// + public byte[] PwdVerifier + { + get + { + return _pwdVerifier; + } + } + + /// + /// Returns the 10 byte AUTH CODE to be checked or appended immediately following the AES data stream. 
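The nonce increment inside TransformBlock treats _counterNonce as a little-endian counter. The same carry loop, extracted here for illustration only:

static void IncrementCounter(byte[] counter)
{
    int j = 0;
    while (++counter[j] == 0) // a byte that wraps to zero carries into the next
    {
        ++j;
    }
}
// e.g. { 0xFF, 0xFF, 0x00 } increments to { 0x00, 0x00, 0x01 }.
// Like the original, this assumes the final byte never wraps.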
+ /// + public byte[] GetAuthCode() + { + if (_authCode == null) + { + _authCode = _hmacsha1.GetHashAndReset(); + } + return _authCode; + } + + #region ICryptoTransform Members + + /// + /// Not implemented. + /// + public byte[] TransformFinalBlock(byte[] inputBuffer, int inputOffset, int inputCount) + { + if(inputCount > 0) + { + throw new NotImplementedException("TransformFinalBlock is not implemented and inputCount is greater than 0"); + } + return new byte[0]; + } + + /// + /// Gets the size of the input data blocks in bytes. + /// + public int InputBlockSize + { + get + { + return _blockSize; + } + } + + /// + /// Gets the size of the output data blocks in bytes. + /// + public int OutputBlockSize + { + get + { + return _blockSize; + } + } + + /// + /// Gets a value indicating whether multiple blocks can be transformed. + /// + public bool CanTransformMultipleBlocks + { + get + { + return true; + } + } + + /// + /// Gets a value indicating whether the current transform can be reused. + /// + public bool CanReuseTransform + { + get + { + return true; + } + } + + /// + /// Cleanup internal state. + /// + public void Dispose() + { + _encryptor.Dispose(); + } + + #endregion ICryptoTransform Members + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESTransform.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESTransform.cs.meta new file mode 100644 index 0000000..82379de --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Encryption/ZipAESTransform.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: d3b7b6767be03458eb118d7c0b3a35a5 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip.meta new file mode 100644 index 0000000..f4fcf8e --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: f56f5e4889dda422d9bef58f04dfdae2 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZip.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZip.cs new file mode 100644 index 0000000..e7e4763 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZip.cs @@ -0,0 +1,92 @@ +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.GZip +{ + using static Zip.Compression.Deflater; + + /// + /// An example class to demonstrate compression and decompression of GZip streams. + /// + public static class GZip + { + /// + /// Decompress the input writing + /// uncompressed data to the output stream + /// + /// The readable stream containing data to decompress. + /// The output stream to receive the decompressed data. + /// Both streams are closed on completion if true. 
+ /// Input or output stream is null + public static void Decompress(Stream inStream, Stream outStream, bool isStreamOwner) + { + if (inStream == null) + throw new ArgumentNullException(nameof(inStream), "Input stream is null"); + + if (outStream == null) + throw new ArgumentNullException(nameof(outStream), "Output stream is null"); + + try + { + using (GZipInputStream gzipInput = new GZipInputStream(inStream)) + { + gzipInput.IsStreamOwner = isStreamOwner; + Core.StreamUtils.Copy(gzipInput, outStream, new byte[4096]); + } + } + finally + { + if (isStreamOwner) + { + // inStream is closed by the GZipInputStream if stream owner + outStream.Dispose(); + } + } + } + + /// + /// Compress the input stream sending + /// result data to output stream + /// + /// The readable stream to compress. + /// The output stream to receive the compressed data. + /// Both streams are closed on completion if true. + /// Deflate buffer size, minimum 512 + /// Deflate compression level, 0-9 + /// Input or output stream is null + /// Buffer Size is smaller than 512 + /// Compression level outside 0-9 + public static void Compress(Stream inStream, Stream outStream, bool isStreamOwner, int bufferSize = 512, int level = 6) + { + if (inStream == null) + throw new ArgumentNullException(nameof(inStream), "Input stream is null"); + + if (outStream == null) + throw new ArgumentNullException(nameof(outStream), "Output stream is null"); + + if (bufferSize < 512) + throw new ArgumentOutOfRangeException(nameof(bufferSize), "Deflate buffer size must be >= 512"); + + if (level < NO_COMPRESSION || level > BEST_COMPRESSION) + throw new ArgumentOutOfRangeException(nameof(level), "Compression level must be 0-9"); + + try + { + using (GZipOutputStream gzipOutput = new GZipOutputStream(outStream, bufferSize)) + { + gzipOutput.SetLevel(level); + gzipOutput.IsStreamOwner = isStreamOwner; + Core.StreamUtils.Copy(inStream, gzipOutput, new byte[bufferSize]); + } + } + finally + { + if (isStreamOwner) + { + // outStream is closed by the GZipOutputStream if stream owner + inStream.Dispose(); + } + } + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZip.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZip.cs.meta new file mode 100644 index 0000000..68ab1f8 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZip.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: d3c5c0fbd7a064d36a1c5131debd8700 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipConstants.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipConstants.cs new file mode 100644 index 0000000..422cd97 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipConstants.cs @@ -0,0 +1,58 @@ +namespace ICSharpCode.SharpZipLib.GZip +{ + /// + /// This class contains constants used for gzip. 
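Putting the two helpers above together, a file round trip might look like this (paths illustrative; passing false for isStreamOwner leaves disposal to the using blocks):

using (var inFile = File.OpenRead("data.txt"))
using (var gzFile = File.Create("data.txt.gz"))
{
    GZip.Compress(inFile, gzFile, false, 4096, 6);
}

using (var gzFile = File.OpenRead("data.txt.gz"))
using (var outFile = File.Create("data.restored.txt"))
{
    GZip.Decompress(gzFile, outFile, false);
}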
+ /// + sealed public class GZipConstants + { + /// + /// Magic number found at start of GZIP header + /// + public const int GZIP_MAGIC = 0x1F8B; + + /* The flag byte is divided into individual bits as follows: + + bit 0 FTEXT + bit 1 FHCRC + bit 2 FEXTRA + bit 3 FNAME + bit 4 FCOMMENT + bit 5 reserved + bit 6 reserved + bit 7 reserved + */ + + /// + /// Flag bit mask for text + /// + public const int FTEXT = 0x1; + + /// + /// Flag bitmask for Crc + /// + public const int FHCRC = 0x2; + + /// + /// Flag bit mask for extra + /// + public const int FEXTRA = 0x4; + + /// + /// flag bitmask for name + /// + public const int FNAME = 0x8; + + /// + /// flag bit mask indicating comment is present + /// + public const int FCOMMENT = 0x10; + + /// + /// Initialise default instance. + /// + /// Constructor is private to prevent instances being created. + private GZipConstants() + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipConstants.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipConstants.cs.meta new file mode 100644 index 0000000..81d76d5 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipConstants.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 9e72a999113e346eb8314c5ec74a5139 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipException.cs new file mode 100644 index 0000000..a0ec6bb --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipException.cs @@ -0,0 +1,54 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib.GZip +{ + /// + /// GZipException represents exceptions specific to GZip classes and code. + /// + [Serializable] + public class GZipException : SharpZipBaseException + { + /// + /// Initialise a new instance of . + /// + public GZipException() + { + } + + /// + /// Initialise a new instance of with its message string. + /// + /// A that describes the error. + public GZipException(string message) + : base(message) + { + } + + /// + /// Initialise a new instance of . + /// + /// A that describes the error. + /// The that caused this exception. + public GZipException(string message, Exception innerException) + : base(message, innerException) + { + } + + /// + /// Initializes a new instance of the GZipException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination. 
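Testing a flag byte against the masks above (the value 0x0A is arbitrary):

int flags = 0x0A;                                     // FHCRC | FNAME
bool hasName   = (flags & GZipConstants.FNAME) != 0;  // true
bool hasHdrCrc = (flags & GZipConstants.FHCRC) != 0;  // true
bool hasExtra  = (flags & GZipConstants.FEXTRA) != 0; // false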
+ /// + protected GZipException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipException.cs.meta new file mode 100644 index 0000000..9168e6c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GZipException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 2e6d9c4961a214558b7979b5c2db9b5d +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipInputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipInputStream.cs new file mode 100644 index 0000000..a924a7f --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipInputStream.cs @@ -0,0 +1,390 @@ +using ICSharpCode.SharpZipLib.Checksum; +using ICSharpCode.SharpZipLib.Zip.Compression; +using ICSharpCode.SharpZipLib.Zip.Compression.Streams; +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.GZip +{ + /// + /// This filter stream is used to decompress a "GZIP" format stream. + /// The "GZIP" format is described baseInputStream RFC 1952. + /// + /// author of the original java version : John Leuner + /// + /// This sample shows how to unzip a gzipped file + /// + /// using System; + /// using System.IO; + /// + /// using ICSharpCode.SharpZipLib.Core; + /// using ICSharpCode.SharpZipLib.GZip; + /// + /// class MainClass + /// { + /// public static void Main(string[] args) + /// { + /// using (Stream inStream = new GZipInputStream(File.OpenRead(args[0]))) + /// using (FileStream outStream = File.Create(Path.GetFileNameWithoutExtension(args[0]))) { + /// byte[] buffer = new byte[4096]; + /// StreamUtils.Copy(inStream, outStream, buffer); + /// } + /// } + /// } + /// + /// + public class GZipInputStream : InflaterInputStream + { + #region Instance Fields + + /// + /// CRC-32 value for uncompressed data + /// + protected Crc32 crc; + + /// + /// Flag to indicate if we've read the GZIP header yet for the current member (block of compressed data). + /// This is tracked per-block as the file is parsed. + /// + private bool readGZIPHeader; + + /// + /// Flag to indicate if at least one block in a stream with concatenated blocks was read successfully. + /// This allows us to exit gracefully if downstream data is not in gzip format. 
+ /// + private bool completedLastBlock; + + #endregion Instance Fields + + #region Constructors + + /// + /// Creates a GZipInputStream with the default buffer size + /// + /// + /// The stream to read compressed data from (baseInputStream GZIP format) + /// + public GZipInputStream(Stream baseInputStream) + : this(baseInputStream, 4096) + { + } + + /// + /// Creates a GZIPInputStream with the specified buffer size + /// + /// + /// The stream to read compressed data from (baseInputStream GZIP format) + /// + /// + /// Size of the buffer to use + /// + public GZipInputStream(Stream baseInputStream, int size) + : base(baseInputStream, new Inflater(true), size) + { + } + + #endregion Constructors + + #region Stream overrides + + /// + /// Reads uncompressed data into an array of bytes + /// + /// + /// The buffer to read uncompressed data into + /// + /// + /// The offset indicating where the data should be placed + /// + /// + /// The number of uncompressed bytes to be read + /// + /// Returns the number of bytes actually read. + public override int Read(byte[] buffer, int offset, int count) + { + // A GZIP file can contain multiple blocks of compressed data, although this is quite rare. + // A compressed block could potentially be empty, so we need to loop until we reach EOF or + // we find data. + while (true) + { + // If we haven't read the header for this block, read it + if (!readGZIPHeader) + { + // Try to read header. If there is no header (0 bytes available), this is EOF. If there is + // an incomplete header, this will throw an exception. + try + { + if (!ReadHeader()) + { + return 0; + } + } + catch (Exception ex) when (completedLastBlock && (ex is GZipException || ex is EndOfStreamException)) + { + // if we completed the last block (i.e. we're in a stream that has multiple blocks concatenated + // we want to return gracefully from any header parsing exceptions since sometimes there may + // be trailing garbage on a stream + return 0; + } + } + + // Try to read compressed data + int bytesRead = base.Read(buffer, offset, count); + if (bytesRead > 0) + { + crc.Update(new ArraySegment(buffer, offset, bytesRead)); + } + + // If this is the end of stream, read the footer + if (inf.IsFinished) + { + ReadFooter(); + } + + // Attempting to read 0 bytes will never yield any bytesRead, so we return instead of looping forever + if (bytesRead > 0 || count == 0) + { + return bytesRead; + } + } + } + + #endregion Stream overrides + + #region Support routines + + private bool ReadHeader() + { + // Initialize CRC for this block + crc = new Crc32(); + + // Make sure there is data in file. We can't rely on ReadLeByte() to fill the buffer, as this could be EOF, + // which is fine, but ReadLeByte() throws an exception if it doesn't find data, so we do this part ourselves. + if (inputBuffer.Available <= 0) + { + inputBuffer.Fill(); + if (inputBuffer.Available <= 0) + { + // No header, EOF. + return false; + } + } + + // 1. 
Check the two magic bytes + var headCRC = new Crc32(); + int magic = inputBuffer.ReadLeByte(); + + if (magic < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + + headCRC.Update(magic); + if (magic != (GZipConstants.GZIP_MAGIC >> 8)) + { + throw new GZipException("Error GZIP header, first magic byte doesn't match"); + } + + //magic = baseInputStream.ReadByte(); + magic = inputBuffer.ReadLeByte(); + + if (magic < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + + if (magic != (GZipConstants.GZIP_MAGIC & 0xFF)) + { + throw new GZipException("Error GZIP header, second magic byte doesn't match"); + } + + headCRC.Update(magic); + + // 2. Check the compression type (must be 8) + int compressionType = inputBuffer.ReadLeByte(); + + if (compressionType < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + + if (compressionType != 8) + { + throw new GZipException("Error GZIP header, data not in deflate format"); + } + headCRC.Update(compressionType); + + // 3. Check the flags + int flags = inputBuffer.ReadLeByte(); + if (flags < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + headCRC.Update(flags); + + /* This flag byte is divided into individual bits as follows: + + bit 0 FTEXT + bit 1 FHCRC + bit 2 FEXTRA + bit 3 FNAME + bit 4 FCOMMENT + bit 5 reserved + bit 6 reserved + bit 7 reserved + */ + + // 3.1 Check the reserved bits are zero + + if ((flags & 0xE0) != 0) + { + throw new GZipException("Reserved flag bits in GZIP header != 0"); + } + + // 4.-6. Skip the modification time, extra flags, and OS type + for (int i = 0; i < 6; i++) + { + int readByte = inputBuffer.ReadLeByte(); + if (readByte < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + headCRC.Update(readByte); + } + + // 7. Read extra field + if ((flags & GZipConstants.FEXTRA) != 0) + { + // XLEN is total length of extra subfields, we will skip them all + int len1, len2; + len1 = inputBuffer.ReadLeByte(); + len2 = inputBuffer.ReadLeByte(); + if ((len1 < 0) || (len2 < 0)) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + headCRC.Update(len1); + headCRC.Update(len2); + + int extraLen = (len2 << 8) | len1; // gzip is LSB first + for (int i = 0; i < extraLen; i++) + { + int readByte = inputBuffer.ReadLeByte(); + if (readByte < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + headCRC.Update(readByte); + } + } + + // 8. Read file name + if ((flags & GZipConstants.FNAME) != 0) + { + int readByte; + while ((readByte = inputBuffer.ReadLeByte()) > 0) + { + headCRC.Update(readByte); + } + + if (readByte < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + headCRC.Update(readByte); + } + + // 9. Read comment + if ((flags & GZipConstants.FCOMMENT) != 0) + { + int readByte; + while ((readByte = inputBuffer.ReadLeByte()) > 0) + { + headCRC.Update(readByte); + } + + if (readByte < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + + headCRC.Update(readByte); + } + + // 10. 
Read header CRC + if ((flags & GZipConstants.FHCRC) != 0) + { + int tempByte; + int crcval = inputBuffer.ReadLeByte(); + if (crcval < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + + tempByte = inputBuffer.ReadLeByte(); + if (tempByte < 0) + { + throw new EndOfStreamException("EOS reading GZIP header"); + } + + crcval = (crcval << 8) | tempByte; + if (crcval != ((int)headCRC.Value & 0xffff)) + { + throw new GZipException("Header CRC value mismatch"); + } + } + + readGZIPHeader = true; + return true; + } + + private void ReadFooter() + { + byte[] footer = new byte[8]; + + // End of stream; reclaim all bytes from inf, read the final byte count, and reset the inflator + long bytesRead = inf.TotalOut & 0xffffffff; + inputBuffer.Available += inf.RemainingInput; + inf.Reset(); + + // Read footer from inputBuffer + int needed = 8; + while (needed > 0) + { + int count = inputBuffer.ReadClearTextBuffer(footer, 8 - needed, needed); + if (count <= 0) + { + throw new EndOfStreamException("EOS reading GZIP footer"); + } + needed -= count; // Jewel Jan 16 + } + + // Calculate CRC + int crcval = (footer[0] & 0xff) | ((footer[1] & 0xff) << 8) | ((footer[2] & 0xff) << 16) | (footer[3] << 24); + if (crcval != (int)crc.Value) + { + throw new GZipException("GZIP crc sum mismatch, theirs \"" + crcval + "\" and ours \"" + (int)crc.Value); + } + + // NOTE The total here is the original total modulo 2 ^ 32. + uint total = + (uint)((uint)footer[4] & 0xff) | + (uint)(((uint)footer[5] & 0xff) << 8) | + (uint)(((uint)footer[6] & 0xff) << 16) | + (uint)((uint)footer[7] << 24); + + if (bytesRead != total) + { + throw new GZipException("Number of bytes mismatch in footer"); + } + + // Mark header read as false so if another header exists, we'll continue reading through the file + readGZIPHeader = false; + + // Indicate that we succeeded on at least one block so we can exit gracefully if there is trailing garbage downstream + completedLastBlock = true; + } + + #endregion Support routines + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipInputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipInputStream.cs.meta new file mode 100644 index 0000000..d128c0d --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipInputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 3c20f611c3d6641e48e5b9b04bf4b4e2 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipOutputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipOutputStream.cs new file mode 100644 index 0000000..afa43d7 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipOutputStream.cs @@ -0,0 +1,254 @@ +using ICSharpCode.SharpZipLib.Checksum; +using ICSharpCode.SharpZipLib.Zip.Compression; +using ICSharpCode.SharpZipLib.Zip.Compression.Streams; +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.GZip +{ + /// + /// This filter stream is used to compress a stream into a "GZIP" stream. + /// The "GZIP" format is described in RFC 1952. 
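For reference, the 8-byte trailer that ReadFooter parses above is little-endian: four bytes of CRC-32 over the uncompressed data, then ISIZE, the uncompressed length mod 2^32. The same unpacking as a standalone sketch:

uint crc32 = footer[0] | ((uint)footer[1] << 8) | ((uint)footer[2] << 16) | ((uint)footer[3] << 24);
uint isize = footer[4] | ((uint)footer[5] << 8) | ((uint)footer[6] << 16) | ((uint)footer[7] << 24);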
+ ///
+ /// author of the original java version : John Leuner
+ ///
+ /// This sample shows how to gzip a file
+ ///
+ /// using System;
+ /// using System.IO;
+ ///
+ /// using ICSharpCode.SharpZipLib.GZip;
+ /// using ICSharpCode.SharpZipLib.Core;
+ ///
+ /// class MainClass
+ /// {
+ /// public static void Main(string[] args)
+ /// {
+ /// using (Stream s = new GZipOutputStream(File.Create(args[0] + ".gz")))
+ /// using (FileStream fs = File.OpenRead(args[0])) {
+ /// byte[] writeData = new byte[4096];
+ /// StreamUtils.Copy(fs, s, writeData);
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ public class GZipOutputStream : DeflaterOutputStream
+ {
+ private enum OutputState
+ {
+ Header,
+ Footer,
+ Finished,
+ Closed,
+ };
+
+ #region Instance Fields
+
+ ///
+ /// CRC-32 value for uncompressed data
+ ///
+ protected Crc32 crc = new Crc32();
+
+ private OutputState state_ = OutputState.Header;
+
+ #endregion Instance Fields
+
+ #region Constructors
+
+ ///
+ /// Creates a GZipOutputStream with the default buffer size
+ ///
+ ///
+ /// The stream to write compressed data to
+ ///
+ public GZipOutputStream(Stream baseOutputStream)
+ : this(baseOutputStream, 4096)
+ {
+ }
+
+ ///
+ /// Creates a GZipOutputStream with the specified buffer size
+ ///
+ ///
+ /// The stream to write compressed data to
+ ///
+ ///
+ /// Size of the buffer to use
+ ///
+ public GZipOutputStream(Stream baseOutputStream, int size) : base(baseOutputStream, new Deflater(Deflater.DEFAULT_COMPRESSION, true), size)
+ {
+ }
+
+ #endregion Constructors
+
+ #region Public API
+
+ ///
+ /// Sets the active compression level (0-9). The new level will be activated
+ /// immediately.
+ ///
+ /// The compression level to set.
+ ///
+ /// Level specified is not supported.
+ ///
+ ///
+ public void SetLevel(int level)
+ {
+ if (level < Deflater.NO_COMPRESSION || level > Deflater.BEST_COMPRESSION)
+ throw new ArgumentOutOfRangeException(nameof(level), "Compression level must be 0-9");
+
+ deflater_.SetLevel(level);
+ }
+
+ ///
+ /// Get the current compression level.
+ ///
+ /// The current compression level.
+ public int GetLevel()
+ {
+ return deflater_.GetLevel();
+ }
+
+ #endregion Public API
+
+ #region Stream overrides
+
+ ///
+ /// Write the given buffer to the output, updating the CRC
+ ///
+ /// Buffer to write
+ /// Offset of first byte in buf to write
+ /// Number of bytes to write
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ if (state_ == OutputState.Header)
+ {
+ WriteHeader();
+ }
+
+ if (state_ != OutputState.Footer)
+ {
+ throw new InvalidOperationException("Write not permitted in current state");
+ }
+
+ crc.Update(new ArraySegment<byte>(buffer, offset, count));
+ base.Write(buffer, offset, count);
+ }
+
+ ///
+ /// Writes remaining compressed output data to the output stream
+ /// and closes it.
+ ///
+ protected override void Dispose(bool disposing)
+ {
+ try
+ {
+ Finish();
+ }
+ finally
+ {
+ if (state_ != OutputState.Closed)
+ {
+ state_ = OutputState.Closed;
+ if (IsStreamOwner)
+ {
+ baseOutputStream_.Dispose();
+ }
+ }
+ }
+ }
+
+ ///
+ /// Flushes the stream by ensuring the header is written, and then calling Flush
+ /// on the deflater.
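A minimal write-side sketch (file name illustrative; System.Text assumed imported). Disposing the stream drives Finish, which emits the CRC-32 and length trailer shown below:

using (var gz = new GZipOutputStream(File.Create("log.gz")))
{
    gz.SetLevel(9); // strongest compression
    byte[] payload = Encoding.UTF8.GetBytes("hello gzip");
    gz.Write(payload, 0, payload.Length);
}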
+ /// + public override void Flush() + { + if (state_ == OutputState.Header) + { + WriteHeader(); + } + + base.Flush(); + } + + #endregion Stream overrides + + #region DeflaterOutputStream overrides + + /// + /// Finish compression and write any footer information required to stream + /// + public override void Finish() + { + // If no data has been written a header should be added. + if (state_ == OutputState.Header) + { + WriteHeader(); + } + + if (state_ == OutputState.Footer) + { + state_ = OutputState.Finished; + base.Finish(); + + var totalin = (uint)(deflater_.TotalIn & 0xffffffff); + var crcval = (uint)(crc.Value & 0xffffffff); + + byte[] gzipFooter; + + unchecked + { + gzipFooter = new byte[] { + (byte) crcval, (byte) (crcval >> 8), + (byte) (crcval >> 16), (byte) (crcval >> 24), + + (byte) totalin, (byte) (totalin >> 8), + (byte) (totalin >> 16), (byte) (totalin >> 24) + }; + } + + baseOutputStream_.Write(gzipFooter, 0, gzipFooter.Length); + } + } + + #endregion DeflaterOutputStream overrides + + #region Support Routines + + private void WriteHeader() + { + if (state_ == OutputState.Header) + { + state_ = OutputState.Footer; + + var mod_time = (int)((DateTime.Now.Ticks - new DateTime(1970, 1, 1).Ticks) / 10000000L); // Ticks give back 100ns intervals + byte[] gzipHeader = { + // The two magic bytes + (byte) (GZipConstants.GZIP_MAGIC >> 8), (byte) (GZipConstants.GZIP_MAGIC & 0xff), + + // The compression type + (byte) Deflater.DEFLATED, + + // The flags (not set) + 0, + + // The modification time + (byte) mod_time, (byte) (mod_time >> 8), + (byte) (mod_time >> 16), (byte) (mod_time >> 24), + + // The extra flags + 0, + + // The OS type (unknown) + (byte) 255 + }; + baseOutputStream_.Write(gzipHeader, 0, gzipHeader.Length); + } + } + + #endregion Support Routines + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipOutputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipOutputStream.cs.meta new file mode 100644 index 0000000..f3f0171 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/GZip/GzipOutputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 9aa7276442bb84833b6a8984e9afa15f +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.csproj b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.csproj new file mode 100644 index 0000000..ea7bdf7 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.csproj @@ -0,0 +1,37 @@ + + + + netstandard2;net45 + True + ICSharpCode.SharpZipLib.snk + true + true + $(AllowedOutputExtensionsInPackageBuildOutputFolder);.pdb + + + + + 1.2.0.7 + 1.2.0.7 + 1.2.0 + SharpZipLib + ICSharpCode + ICSharpCode + SharpZipLib (#ziplib, formerly NZipLib) is a compression library for Zip, GZip, BZip2, and Tar written entirely in C# for .NET. 
It is implemented as an assembly (installable in the GAC), and thus can easily be incorporated into other projects (in any .NET language) + MIT + http://icsharpcode.github.io/SharpZipLib/ + http://icsharpcode.github.io/SharpZipLib/assets/sharpziplib-nuget-256x256.png + https://github.com/icsharpcode/SharpZipLib + Copyright © 2000-2019 SharpZipLib Contributors + Compression Library Zip GZip BZip2 LZW Tar + en-US + +Please see https://github.com/icsharpcode/SharpZipLib/wiki/Release-1.2 for more information. + https://github.com/icsharpcode/SharpZipLib + + + + + + + diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.csproj.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.csproj.meta new file mode 100644 index 0000000..40dd4b3 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.csproj.meta @@ -0,0 +1,7 @@ +fileFormatVersion: 2 +guid: 190a813700a724676bc36f7718fe79b4 +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.snk b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.snk new file mode 100644 index 0000000..58cf194 Binary files /dev/null and b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.snk differ diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.snk.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.snk.meta new file mode 100644 index 0000000..8393e03 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/ICSharpCode.SharpZipLib.snk.meta @@ -0,0 +1,7 @@ +fileFormatVersion: 2 +guid: c1f74acd797f747a9890e0013aa6eeac +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw.meta new file mode 100644 index 0000000..5ce2e64 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 6ca7c9265926345529876bd132d2246a +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwConstants.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwConstants.cs new file mode 100644 index 0000000..b7dc60f --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwConstants.cs @@ -0,0 +1,62 @@ +namespace ICSharpCode.SharpZipLib.Lzw +{ + /// + /// This class contains constants used for LZW + /// + sealed public class LzwConstants + { + /// + /// Magic number found at start of LZW header: 0x1f 0x9d + /// + public const int MAGIC = 0x1f9d; + + /// + /// Maximum number of bits per code + /// + public const int MAX_BITS = 16; + + /* 3rd header byte: + * bit 0..4 Number of compression bits + * bit 5 Extended header + * bit 6 Free + * bit 7 Block mode + */ + + /// + /// Mask for 'number of compression bits' + /// + public const int BIT_MASK = 0x1f; + + /// + /// Indicates the presence of a fourth header byte + /// + public const int EXTENDED_MASK = 0x20; 
+ + //public const int FREE_MASK = 0x40; + + /// + /// Reserved bits + /// + public const int RESERVED_MASK = 0x60; + + /// + /// Block compression: if table is full and compression rate is dropping, + /// clear the dictionary. + /// + public const int BLOCK_MODE_MASK = 0x80; + + /// + /// LZW file header size (in bytes) + /// + public const int HDR_SIZE = 3; + + /// + /// Initial number of bits per code + /// + public const int INIT_BITS = 9; + + private LzwConstants() + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwConstants.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwConstants.cs.meta new file mode 100644 index 0000000..31f4266 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwConstants.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 09848a95c5cea4dc09fe51aa46c5396d +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwException.cs new file mode 100644 index 0000000..1d5c44c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwException.cs @@ -0,0 +1,54 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib.Lzw +{ + /// + /// LzwException represents exceptions specific to LZW classes and code. + /// + [Serializable] + public class LzwException : SharpZipBaseException + { + /// + /// Initialise a new instance of . + /// + public LzwException() + { + } + + /// + /// Initialise a new instance of with its message string. + /// + /// A that describes the error. + public LzwException(string message) + : base(message) + { + } + + /// + /// Initialise a new instance of . + /// + /// A that describes the error. + /// The that caused this exception. + public LzwException(string message, Exception innerException) + : base(message, innerException) + { + } + + /// + /// Initializes a new instance of the LzwException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination. 
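Decoding the third LZW header byte with the masks above (the value is illustrative):

int hdrFlags = 0x90; // binary 1001 0000
int maxBits = hdrFlags & LzwConstants.BIT_MASK;                  // 16 code bits
bool blockMode = (hdrFlags & LzwConstants.BLOCK_MODE_MASK) != 0; // true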
+ /// + protected LzwException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwException.cs.meta new file mode 100644 index 0000000..c5beb0c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 4b4691cc787d54e4b994b076ae866245 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwInputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwInputStream.cs new file mode 100644 index 0000000..1045ef7 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwInputStream.cs @@ -0,0 +1,572 @@ +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Lzw +{ + /// + /// This filter stream is used to decompress a LZW format stream. + /// Specifically, a stream that uses the LZC compression method. + /// This file format is usually associated with the .Z file extension. + /// + /// See http://en.wikipedia.org/wiki/Compress + /// See http://wiki.wxwidgets.org/Development:_Z_File_Format + /// + /// The file header consists of 3 (or optionally 4) bytes. The first two bytes + /// contain the magic marker "0x1f 0x9d", followed by a byte of flags. + /// + /// Based on Java code by Ronald Tschalar, which in turn was based on the unlzw.c + /// code in the gzip package. + /// + /// This sample shows how to unzip a compressed file + /// + /// using System; + /// using System.IO; + /// + /// using ICSharpCode.SharpZipLib.Core; + /// using ICSharpCode.SharpZipLib.LZW; + /// + /// class MainClass + /// { + /// public static void Main(string[] args) + /// { + /// using (Stream inStream = new LzwInputStream(File.OpenRead(args[0]))) + /// using (FileStream outStream = File.Create(Path.GetFileNameWithoutExtension(args[0]))) { + /// byte[] buffer = new byte[4096]; + /// StreamUtils.Copy(inStream, outStream, buffer); + /// // OR + /// inStream.Read(buffer, 0, buffer.Length); + /// // now do something with the buffer + /// } + /// } + /// } + /// + /// + public class LzwInputStream : Stream + { + /// + /// Gets or sets a flag indicating ownership of underlying stream. + /// When the flag is true will close the underlying stream also. + /// + /// The default value is true. + public bool IsStreamOwner { get; set; } = true; + + /// + /// Creates a LzwInputStream + /// + /// + /// The stream to read compressed data from (baseInputStream LZW format) + /// + public LzwInputStream(Stream baseInputStream) + { + this.baseInputStream = baseInputStream; + } + + /// + /// See + /// + /// + public override int ReadByte() + { + int b = Read(one, 0, 1); + if (b == 1) + return (one[0] & 0xff); + return -1; + } + + /// + /// Reads decompressed data into the provided buffer byte array + /// + /// + /// The array to read and decompress data into + /// + /// + /// The offset indicating where the data should be placed + /// + /// + /// The number of bytes to decompress + /// + /// The number of bytes read. 
Zero signals the end of stream + public override int Read(byte[] buffer, int offset, int count) + { + if (!headerParsed) + ParseHeader(); + + if (eof) + return 0; + + int start = offset; + + /* Using local copies of various variables speeds things up by as + * much as 30% in Java! Performance not tested in C#. + */ + int[] lTabPrefix = tabPrefix; + byte[] lTabSuffix = tabSuffix; + byte[] lStack = stack; + int lNBits = nBits; + int lMaxCode = maxCode; + int lMaxMaxCode = maxMaxCode; + int lBitMask = bitMask; + int lOldCode = oldCode; + byte lFinChar = finChar; + int lStackP = stackP; + int lFreeEnt = freeEnt; + byte[] lData = data; + int lBitPos = bitPos; + + // empty stack if stuff still left + int sSize = lStack.Length - lStackP; + if (sSize > 0) + { + int num = (sSize >= count) ? count : sSize; + Array.Copy(lStack, lStackP, buffer, offset, num); + offset += num; + count -= num; + lStackP += num; + } + + if (count == 0) + { + stackP = lStackP; + return offset - start; + } + + // loop, filling local buffer until enough data has been decompressed + MainLoop: + do + { + if (end < EXTRA) + { + Fill(); + } + + int bitIn = (got > 0) ? (end - end % lNBits) << 3 : + (end << 3) - (lNBits - 1); + + while (lBitPos < bitIn) + { + #region A + + // handle 1-byte reads correctly + if (count == 0) + { + nBits = lNBits; + maxCode = lMaxCode; + maxMaxCode = lMaxMaxCode; + bitMask = lBitMask; + oldCode = lOldCode; + finChar = lFinChar; + stackP = lStackP; + freeEnt = lFreeEnt; + bitPos = lBitPos; + + return offset - start; + } + + // check for code-width expansion + if (lFreeEnt > lMaxCode) + { + int nBytes = lNBits << 3; + lBitPos = (lBitPos - 1) + + nBytes - (lBitPos - 1 + nBytes) % nBytes; + + lNBits++; + lMaxCode = (lNBits == maxBits) ? lMaxMaxCode : + (1 << lNBits) - 1; + + lBitMask = (1 << lNBits) - 1; + lBitPos = ResetBuf(lBitPos); + goto MainLoop; + } + + #endregion A + + #region B + + // read next code + int pos = lBitPos >> 3; + int code = (((lData[pos] & 0xFF) | + ((lData[pos + 1] & 0xFF) << 8) | + ((lData[pos + 2] & 0xFF) << 16)) >> + (lBitPos & 0x7)) & lBitMask; + + lBitPos += lNBits; + + // handle first iteration + if (lOldCode == -1) + { + if (code >= 256) + throw new LzwException("corrupt input: " + code + " > 255"); + + lFinChar = (byte)(lOldCode = code); + buffer[offset++] = lFinChar; + count--; + continue; + } + + // handle CLEAR code + if (code == TBL_CLEAR && blockMode) + { + Array.Copy(zeros, 0, lTabPrefix, 0, zeros.Length); + lFreeEnt = TBL_FIRST - 1; + + int nBytes = lNBits << 3; + lBitPos = (lBitPos - 1) + nBytes - (lBitPos - 1 + nBytes) % nBytes; + lNBits = LzwConstants.INIT_BITS; + lMaxCode = (1 << lNBits) - 1; + lBitMask = lMaxCode; + + // Code tables reset + + lBitPos = ResetBuf(lBitPos); + goto MainLoop; + } + + #endregion B + + #region C + + // setup + int inCode = code; + lStackP = lStack.Length; + + // Handle KwK case + if (code >= lFreeEnt) + { + if (code > lFreeEnt) + { + throw new LzwException("corrupt input: code=" + code + + ", freeEnt=" + lFreeEnt); + } + + lStack[--lStackP] = lFinChar; + code = lOldCode; + } + + // Generate output characters in reverse order + while (code >= 256) + { + lStack[--lStackP] = lTabSuffix[code]; + code = lTabPrefix[code]; + } + + lFinChar = lTabSuffix[code]; + buffer[offset++] = lFinChar; + count--; + + // And put them out in forward order + sSize = lStack.Length - lStackP; + int num = (sSize >= count) ? 
count : sSize; + Array.Copy(lStack, lStackP, buffer, offset, num); + offset += num; + count -= num; + lStackP += num; + + #endregion C + + #region D + + // generate new entry in table + if (lFreeEnt < lMaxMaxCode) + { + lTabPrefix[lFreeEnt] = lOldCode; + lTabSuffix[lFreeEnt] = lFinChar; + lFreeEnt++; + } + + // Remember previous code + lOldCode = inCode; + + // if output buffer full, then return + if (count == 0) + { + nBits = lNBits; + maxCode = lMaxCode; + bitMask = lBitMask; + oldCode = lOldCode; + finChar = lFinChar; + stackP = lStackP; + freeEnt = lFreeEnt; + bitPos = lBitPos; + + return offset - start; + } + + #endregion D + } // while + + lBitPos = ResetBuf(lBitPos); + } while (got > 0); // do..while + + nBits = lNBits; + maxCode = lMaxCode; + bitMask = lBitMask; + oldCode = lOldCode; + finChar = lFinChar; + stackP = lStackP; + freeEnt = lFreeEnt; + bitPos = lBitPos; + + eof = true; + return offset - start; + } + + /// + /// Moves the unread data in the buffer to the beginning and resets + /// the pointers. + /// + /// + /// + private int ResetBuf(int bitPosition) + { + int pos = bitPosition >> 3; + Array.Copy(data, pos, data, 0, end - pos); + end -= pos; + return 0; + } + + private void Fill() + { + got = baseInputStream.Read(data, end, data.Length - 1 - end); + if (got > 0) + { + end += got; + } + } + + private void ParseHeader() + { + headerParsed = true; + + byte[] hdr = new byte[LzwConstants.HDR_SIZE]; + + int result = baseInputStream.Read(hdr, 0, hdr.Length); + + // Check the magic marker + if (result < 0) + throw new LzwException("Failed to read LZW header"); + + if (hdr[0] != (LzwConstants.MAGIC >> 8) || hdr[1] != (LzwConstants.MAGIC & 0xff)) + { + throw new LzwException(String.Format( + "Wrong LZW header. Magic bytes don't match. 0x{0:x2} 0x{1:x2}", + hdr[0], hdr[1])); + } + + // Check the 3rd header byte + blockMode = (hdr[2] & LzwConstants.BLOCK_MODE_MASK) > 0; + maxBits = hdr[2] & LzwConstants.BIT_MASK; + + if (maxBits > LzwConstants.MAX_BITS) + { + throw new LzwException("Stream compressed with " + maxBits + + " bits, but decompression can only handle " + + LzwConstants.MAX_BITS + " bits."); + } + + if ((hdr[2] & LzwConstants.RESERVED_MASK) > 0) + { + throw new LzwException("Unsupported bits set in the header."); + } + + // Initialize variables + maxMaxCode = 1 << maxBits; + nBits = LzwConstants.INIT_BITS; + maxCode = (1 << nBits) - 1; + bitMask = maxCode; + oldCode = -1; + finChar = 0; + freeEnt = blockMode ? TBL_FIRST : 256; + + tabPrefix = new int[1 << maxBits]; + tabSuffix = new byte[1 << maxBits]; + stack = new byte[1 << maxBits]; + stackP = stack.Length; + + for (int idx = 255; idx >= 0; idx--) + tabSuffix[idx] = (byte)idx; + } + + #region Stream Overrides + + /// + /// Gets a value indicating whether the current stream supports reading + /// + public override bool CanRead + { + get + { + return baseInputStream.CanRead; + } + } + + /// + /// Gets a value of false indicating seeking is not supported for this stream. + /// + public override bool CanSeek + { + get + { + return false; + } + } + + /// + /// Gets a value of false indicating that this stream is not writeable. + /// + public override bool CanWrite + { + get + { + return false; + } + } + + /// + /// A value representing the length of the stream in bytes. + /// + public override long Length + { + get + { + return got; + } + } + + /// + /// The current position within the stream. 
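ParseHeader above seeds the decoder at INIT_BITS (9-bit codes), and Read widens the code size whenever the next free table entry outgrows maxCode, up to the maxBits announced in the header. A simplified sketch of that progression; it ignores the final-width special case where Read pins maxCode to maxMaxCode:

    using System;

    static class CodeWidthDemo
    {
        static void Main()
        {
            const int initBits = 9;  // LzwConstants.INIT_BITS
            const int maxBits = 16;  // as read from the header's BIT_MASK field

            for (int nBits = initBits; nBits <= maxBits; nBits++)
            {
                // Same formula ParseHeader and Read use for the widest code value.
                int maxCode = (1 << nBits) - 1;
                Console.WriteLine($"nBits={nBits} maxCode={maxCode}");
            }
            // tabPrefix, tabSuffix and stack are each sized 1 << maxBits.
        }
    }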
+ /// Throws a NotSupportedException when attempting to set the position
+ ///
+ /// Attempting to set the position
+ public override long Position
+ {
+     get
+     {
+         return baseInputStream.Position;
+     }
+     set
+     {
+         throw new NotSupportedException("InflaterInputStream Position not supported");
+     }
+ }
+
+ ///
+ /// Flushes the baseInputStream
+ ///
+ public override void Flush()
+ {
+     baseInputStream.Flush();
+ }
+
+ ///
+ /// Sets the position within the current stream
+ /// Always throws a NotSupportedException
+ ///
+ /// The relative offset to seek to.
+ /// The defining where to seek from.
+ /// The new position in the stream.
+ /// Any access
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+     throw new NotSupportedException("Seek not supported");
+ }
+
+ ///
+ /// Set the length of the current stream
+ /// Always throws a NotSupportedException
+ ///
+ /// The new length value for the stream.
+ /// Any access
+ public override void SetLength(long value)
+ {
+     throw new NotSupportedException("InflaterInputStream SetLength not supported");
+ }
+
+ ///
+ /// Writes a sequence of bytes to stream and advances the current position
+ /// This method always throws a NotSupportedException
+ ///
+ /// The buffer containing data to write.
+ /// The offset of the first byte to write.
+ /// The number of bytes to write.
+ /// Any access
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+     throw new NotSupportedException("InflaterInputStream Write not supported");
+ }
+
+ ///
+ /// Writes one byte to the current stream and advances the current position
+ /// Always throws a NotSupportedException
+ ///
+ /// The byte to write.
+ /// Any access
+ public override void WriteByte(byte value)
+ {
+     throw new NotSupportedException("InflaterInputStream WriteByte not supported");
+ }
+
+ ///
+ /// Closes the input stream. When
+ /// is true the underlying stream is also closed.
+ ///
+ protected override void Dispose(bool disposing)
+ {
+     if (!isClosed)
+     {
+         isClosed = true;
+         if (IsStreamOwner)
+         {
+             baseInputStream.Dispose();
+         }
+     }
+ }
+
+ #endregion Stream Overrides
+
+ #region Instance Fields
+
+ private Stream baseInputStream;
+
+ ///
+ /// Flag indicating whether this instance has been closed or not.
+ /// + private bool isClosed; + + private readonly byte[] one = new byte[1]; + private bool headerParsed; + + // string table stuff + private const int TBL_CLEAR = 0x100; + + private const int TBL_FIRST = TBL_CLEAR + 1; + + private int[] tabPrefix; + private byte[] tabSuffix; + private readonly int[] zeros = new int[256]; + private byte[] stack; + + // various state + private bool blockMode; + + private int nBits; + private int maxBits; + private int maxMaxCode; + private int maxCode; + private int bitMask; + private int oldCode; + private byte finChar; + private int stackP; + private int freeEnt; + + // input buffer + private readonly byte[] data = new byte[1024 * 8]; + + private int bitPos; + private int end; + private int got; + private bool eof; + private const int EXTRA = 64; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwInputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwInputStream.cs.meta new file mode 100644 index 0000000..0fb8824 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Lzw/LzwInputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: afdbe7bd422e24220bb3126a0e5753d6 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar.meta new file mode 100644 index 0000000..4803cd4 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: a66aab87c6971486cb116235c9eb1882 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/InvalidHeaderException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/InvalidHeaderException.cs new file mode 100644 index 0000000..9f385e4 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/InvalidHeaderException.cs @@ -0,0 +1,55 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib.Tar +{ + /// + /// This exception is used to indicate that there is a problem + /// with a TAR archive header. + /// + [Serializable] + public class InvalidHeaderException : TarException + { + /// + /// Initialise a new instance of the InvalidHeaderException class. + /// + public InvalidHeaderException() + { + } + + /// + /// Initialises a new instance of the InvalidHeaderException class with a specified message. + /// + /// Message describing the exception cause. + public InvalidHeaderException(string message) + : base(message) + { + } + + /// + /// Initialise a new instance of InvalidHeaderException + /// + /// Message describing the problem. + /// The exception that is the cause of the current exception. + public InvalidHeaderException(string message, Exception exception) + : base(message, exception) + { + } + + /// + /// Initializes a new instance of the InvalidHeaderException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination. 
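A minimal usage sketch for the LzwInputStream added above, assuming a hypothetical "data.z" file; a plain read loop is used so the sample depends only on this class, and Read returning zero signals end of stream:

    using System.IO;
    using ICSharpCode.SharpZipLib.Lzw;

    static class UnpackZ
    {
        static void Main()
        {
            using (Stream inStream = new LzwInputStream(File.OpenRead("data.z")))
            using (FileStream outStream = File.Create("data"))
            {
                byte[] buffer = new byte[4096];
                int read;
                while ((read = inStream.Read(buffer, 0, buffer.Length)) > 0)
                {
                    outStream.Write(buffer, 0, read);
                }
            }
        }
    }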
+ /// + protected InvalidHeaderException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/InvalidHeaderException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/InvalidHeaderException.cs.meta new file mode 100644 index 0000000..8b6ee56 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/InvalidHeaderException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 3d6e8e1ea62094d63bb5529adf69b007 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarArchive.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarArchive.cs new file mode 100644 index 0000000..562480a --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarArchive.cs @@ -0,0 +1,1007 @@ +using System; +using System.IO; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Tar +{ + /// + /// Used to advise clients of 'events' while processing archives + /// + public delegate void ProgressMessageHandler(TarArchive archive, TarEntry entry, string message); + + /// + /// The TarArchive class implements the concept of a + /// 'Tape Archive'. A tar archive is a series of entries, each of + /// which represents a file system object. Each entry in + /// the archive consists of a header block followed by 0 or more data blocks. + /// Directory entries consist only of the header block, and are followed by entries + /// for the directory's contents. File entries consist of a + /// header followed by the number of blocks needed to + /// contain the file's contents. All entries are written on + /// block boundaries. Blocks are 512 bytes long. + /// + /// TarArchives are instantiated in either read or write mode, + /// based upon whether they are instantiated with an InputStream + /// or an OutputStream. Once instantiated TarArchives read/write + /// mode can not be changed. + /// + /// There is currently no support for random access to tar archives. + /// However, it seems that subclassing TarArchive, and using the + /// TarBuffer.CurrentRecord and TarBuffer.CurrentBlock + /// properties, this would be rather trivial. + /// + public class TarArchive : IDisposable + { + /// + /// Client hook allowing detailed information to be reported during processing + /// + public event ProgressMessageHandler ProgressMessageEvent; + + /// + /// Raises the ProgressMessage event + /// + /// The TarEntry for this event + /// message for this event. Null is no message + protected virtual void OnProgressMessageEvent(TarEntry entry, string message) + { + ProgressMessageHandler handler = ProgressMessageEvent; + if (handler != null) + { + handler(this, entry, message); + } + } + + #region Constructors + + /// + /// Constructor for a default . + /// + protected TarArchive() + { + } + + /// + /// Initialise a TarArchive for input. + /// + /// The to use for input. + protected TarArchive(TarInputStream stream) + { + if (stream == null) + { + throw new ArgumentNullException(nameof(stream)); + } + + tarIn = stream; + } + + /// + /// Initialise a TarArchive for output. + /// + /// The to use for output. 
+ protected TarArchive(TarOutputStream stream) + { + if (stream == null) + { + throw new ArgumentNullException(nameof(stream)); + } + + tarOut = stream; + } + + #endregion Constructors + + #region Static factory methods + + /// + /// The InputStream based constructors create a TarArchive for the + /// purposes of extracting or listing a tar archive. Thus, use + /// these constructors when you wish to extract files from or list + /// the contents of an existing tar archive. + /// + /// The stream to retrieve archive data from. + /// Returns a new suitable for reading from. + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public static TarArchive CreateInputTarArchive(Stream inputStream) + { + return CreateInputTarArchive(inputStream, null); + } + + /// + /// The InputStream based constructors create a TarArchive for the + /// purposes of extracting or listing a tar archive. Thus, use + /// these constructors when you wish to extract files from or list + /// the contents of an existing tar archive. + /// + /// The stream to retrieve archive data from. + /// The used for the Name fields, or null for ASCII only + /// Returns a new suitable for reading from. + public static TarArchive CreateInputTarArchive(Stream inputStream, Encoding nameEncoding) + { + if (inputStream == null) + { + throw new ArgumentNullException(nameof(inputStream)); + } + + var tarStream = inputStream as TarInputStream; + + TarArchive result; + if (tarStream != null) + { + result = new TarArchive(tarStream); + } + else + { + result = CreateInputTarArchive(inputStream, TarBuffer.DefaultBlockFactor, nameEncoding); + } + return result; + } + + /// + /// Create TarArchive for reading setting block factor + /// + /// A stream containing the tar archive contents + /// The blocking factor to apply + /// Returns a suitable for reading. + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public static TarArchive CreateInputTarArchive(Stream inputStream, int blockFactor) + { + return CreateInputTarArchive(inputStream, blockFactor, null); + } + + /// + /// Create TarArchive for reading setting block factor + /// + /// A stream containing the tar archive contents + /// The blocking factor to apply + /// The used for the Name fields, or null for ASCII only + /// Returns a suitable for reading. + public static TarArchive CreateInputTarArchive(Stream inputStream, int blockFactor, Encoding nameEncoding) + { + if (inputStream == null) + { + throw new ArgumentNullException(nameof(inputStream)); + } + + if (inputStream is TarInputStream) + { + throw new ArgumentException("TarInputStream not valid"); + } + + return new TarArchive(new TarInputStream(inputStream, blockFactor, nameEncoding)); + } + /// + /// Create a TarArchive for writing to, using the default blocking factor + /// + /// The to write to + /// The used for the Name fields, or null for ASCII only + /// Returns a suitable for writing. 
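A sketch of the read-side factories above in use, listing a tar's contents. Note that ListContents (defined further down in this class) reports entries only through ProgressMessageEvent, so a handler must be attached for anything to appear; the archive path is hypothetical:

    using System;
    using System.IO;
    using System.Text;
    using ICSharpCode.SharpZipLib.Tar;

    static class TarList
    {
        static void Main()
        {
            using (Stream fs = File.OpenRead("backup.tar"))
            using (TarArchive archive = TarArchive.CreateInputTarArchive(fs, Encoding.UTF8))
            {
                // Print each entry name as ListContents walks the archive.
                archive.ProgressMessageEvent += (a, entry, message) =>
                    Console.WriteLine(entry.Name);
                archive.ListContents();
            }
        }
    }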
+ public static TarArchive CreateOutputTarArchive(Stream outputStream, Encoding nameEncoding) + { + if (outputStream == null) + { + throw new ArgumentNullException(nameof(outputStream)); + } + + var tarStream = outputStream as TarOutputStream; + + TarArchive result; + if (tarStream != null) + { + result = new TarArchive(tarStream); + } + else + { + result = CreateOutputTarArchive(outputStream, TarBuffer.DefaultBlockFactor, nameEncoding); + } + return result; + } + /// + /// Create a TarArchive for writing to, using the default blocking factor + /// + /// The to write to + /// Returns a suitable for writing. + public static TarArchive CreateOutputTarArchive(Stream outputStream) + { + return CreateOutputTarArchive(outputStream, null); + } + + /// + /// Create a tar archive for writing. + /// + /// The stream to write to + /// The blocking factor to use for buffering. + /// Returns a suitable for writing. + public static TarArchive CreateOutputTarArchive(Stream outputStream, int blockFactor) + { + return CreateOutputTarArchive(outputStream, blockFactor, null); + } + /// + /// Create a tar archive for writing. + /// + /// The stream to write to + /// The blocking factor to use for buffering. + /// The used for the Name fields, or null for ASCII only + /// Returns a suitable for writing. + public static TarArchive CreateOutputTarArchive(Stream outputStream, int blockFactor, Encoding nameEncoding) + { + if (outputStream == null) + { + throw new ArgumentNullException(nameof(outputStream)); + } + + if (outputStream is TarOutputStream) + { + throw new ArgumentException("TarOutputStream is not valid"); + } + + return new TarArchive(new TarOutputStream(outputStream, blockFactor, nameEncoding)); + } + + #endregion Static factory methods + + /// + /// Set the flag that determines whether existing files are + /// kept, or overwritten during extraction. + /// + /// + /// If true, do not overwrite existing files. + /// + public void SetKeepOldFiles(bool keepExistingFiles) + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + keepOldFiles = keepExistingFiles; + } + + /// + /// Get/set the ascii file translation flag. If ascii file translation + /// is true, then the file is checked to see if it a binary file or not. + /// If the flag is true and the test indicates it is ascii text + /// file, it will be translated. The translation converts the local + /// operating system's concept of line ends into the UNIX line end, + /// '\n', which is the defacto standard for a TAR archive. This makes + /// text files compatible with UNIX. + /// + public bool AsciiTranslate + { + get + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + return asciiTranslate; + } + + set + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + asciiTranslate = value; + } + } + + /// + /// Set the ascii file translation flag. + /// + /// + /// If true, translate ascii text files. + /// + [Obsolete("Use the AsciiTranslate property")] + public void SetAsciiTranslation(bool translateAsciiFiles) + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + asciiTranslate = translateAsciiFiles; + } + + /// + /// PathPrefix is added to entry names as they are written if the value is not null. 
+ /// A slash character is appended after PathPrefix + /// + public string PathPrefix + { + get + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + return pathPrefix; + } + + set + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + pathPrefix = value; + } + } + + /// + /// RootPath is removed from entry names if it is found at the + /// beginning of the name. + /// + public string RootPath + { + get + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + return rootPath; + } + + set + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + // Convert to forward slashes for matching. Trim trailing / for correct final path + rootPath = value.Replace('\\', '/').TrimEnd('/'); + } + } + + /// + /// Set user and group information that will be used to fill in the + /// tar archive's entry headers. This information is based on that available + /// for the linux operating system, which is not always available on other + /// operating systems. TarArchive allows the programmer to specify values + /// to be used in their place. + /// is set to true by this call. + /// + /// + /// The user id to use in the headers. + /// + /// + /// The user name to use in the headers. + /// + /// + /// The group id to use in the headers. + /// + /// + /// The group name to use in the headers. + /// + public void SetUserInfo(int userId, string userName, int groupId, string groupName) + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + this.userId = userId; + this.userName = userName; + this.groupId = groupId; + this.groupName = groupName; + applyUserInfoOverrides = true; + } + + /// + /// Get or set a value indicating if overrides defined by SetUserInfo should be applied. + /// + /// If overrides are not applied then the values as set in each header will be used. + public bool ApplyUserInfoOverrides + { + get + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + return applyUserInfoOverrides; + } + + set + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + applyUserInfoOverrides = value; + } + } + + /// + /// Get the archive user id. + /// See ApplyUserInfoOverrides for detail + /// on how to allow setting values on a per entry basis. + /// + /// + /// The current user id. + /// + public int UserId + { + get + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + return userId; + } + } + + /// + /// Get the archive user name. + /// See ApplyUserInfoOverrides for detail + /// on how to allow setting values on a per entry basis. + /// + /// + /// The current user name. + /// + public string UserName + { + get + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + return userName; + } + } + + /// + /// Get the archive group id. + /// See ApplyUserInfoOverrides for detail + /// on how to allow setting values on a per entry basis. + /// + /// + /// The current group id. + /// + public int GroupId + { + get + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + return groupId; + } + } + + /// + /// Get the archive group name. + /// See ApplyUserInfoOverrides for detail + /// on how to allow setting values on a per entry basis. + /// + /// + /// The current group name. 
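The naming and ownership knobs above combine when writing: RootPath trims a leading directory from entry names, PathPrefix prepends one, and SetUserInfo pins ownership for every header (it also flips ApplyUserInfoOverrides to true). A small sketch, assuming archive is a TarArchive opened for output and the paths are hypothetical:

    using ICSharpCode.SharpZipLib.Tar;

    static class ArchiveSetup
    {
        static void ConfigureForWriting(TarArchive archive)
        {
            archive.RootPath = @"C:\work\project"; // stored as "C:/work/project", stripped from entry names
            archive.PathPrefix = "project-1.0";    // entries are written as "project-1.0/..."
            archive.SetUserInfo(0, "root", 0, "root");
        }
    }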
+ /// + public string GroupName + { + get + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + return groupName; + } + } + + /// + /// Get the archive's record size. Tar archives are composed of + /// a series of RECORDS each containing a number of BLOCKS. + /// This allowed tar archives to match the IO characteristics of + /// the physical device being used. Archives are expected + /// to be properly "blocked". + /// + /// + /// The record size this archive is using. + /// + public int RecordSize + { + get + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + if (tarIn != null) + { + return tarIn.RecordSize; + } + else if (tarOut != null) + { + return tarOut.RecordSize; + } + return TarBuffer.DefaultRecordSize; + } + } + + /// + /// Sets the IsStreamOwner property on the underlying stream. + /// Set this to false to prevent the Close of the TarArchive from closing the stream. + /// + public bool IsStreamOwner + { + set + { + if (tarIn != null) + { + tarIn.IsStreamOwner = value; + } + else + { + tarOut.IsStreamOwner = value; + } + } + } + + /// + /// Close the archive. + /// + [Obsolete("Use Close instead")] + public void CloseArchive() + { + Close(); + } + + /// + /// Perform the "list" command for the archive contents. + /// + /// NOTE That this method uses the progress event to actually list + /// the contents. If the progress display event is not set, nothing will be listed! + /// + public void ListContents() + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + while (true) + { + TarEntry entry = tarIn.GetNextEntry(); + + if (entry == null) + { + break; + } + OnProgressMessageEvent(entry, null); + } + } + + /// + /// Perform the "extract" command and extract the contents of the archive. + /// + /// + /// The destination directory into which to extract. + /// + public void ExtractContents(string destinationDirectory) + { + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + while (true) + { + TarEntry entry = tarIn.GetNextEntry(); + + if (entry == null) + { + break; + } + + if (entry.TarHeader.TypeFlag == TarHeader.LF_LINK || entry.TarHeader.TypeFlag == TarHeader.LF_SYMLINK) + continue; + + ExtractEntry(destinationDirectory, entry); + } + } + + /// + /// Extract an entry from the archive. This method assumes that the + /// tarIn stream has been properly set with a call to GetNextEntry(). + /// + /// + /// The destination directory into which to extract. + /// + /// + /// The TarEntry returned by tarIn.GetNextEntry(). + /// + private void ExtractEntry(string destDir, TarEntry entry) + { + OnProgressMessageEvent(entry, null); + + string name = entry.Name; + + if (Path.IsPathRooted(name)) + { + // NOTE: + // for UNC names... 
\\machine\share\zoom\beet.txt gives \zoom\beet.txt + name = name.Substring(Path.GetPathRoot(name).Length); + } + + name = name.Replace('/', Path.DirectorySeparatorChar); + + string destFile = Path.Combine(destDir, name); + + if (entry.IsDirectory) + { + EnsureDirectoryExists(destFile); + } + else + { + string parentDirectory = Path.GetDirectoryName(destFile); + EnsureDirectoryExists(parentDirectory); + + bool process = true; + var fileInfo = new FileInfo(destFile); + if (fileInfo.Exists) + { + if (keepOldFiles) + { + OnProgressMessageEvent(entry, "Destination file already exists"); + process = false; + } + else if ((fileInfo.Attributes & FileAttributes.ReadOnly) != 0) + { + OnProgressMessageEvent(entry, "Destination file already exists, and is read-only"); + process = false; + } + } + + if (process) + { + using (var outputStream = File.Create(destFile)) + { + if (this.asciiTranslate) + { + // May need to translate the file. + ExtractAndTranslateEntry(destFile, outputStream); + } + else + { + // If translation is disabled, just copy the entry across directly. + tarIn.CopyEntryContents(outputStream); + } + } + } + } + } + + // Extract a TAR entry, and perform an ASCII translation if required. + private void ExtractAndTranslateEntry(string destFile, Stream outputStream) + { + bool asciiTrans = !IsBinary(destFile); + + if (asciiTrans) + { + using (var outw = new StreamWriter(outputStream, new UTF8Encoding(false), 1024, true)) + { + byte[] rdbuf = new byte[32 * 1024]; + + while (true) + { + int numRead = tarIn.Read(rdbuf, 0, rdbuf.Length); + + if (numRead <= 0) + { + break; + } + + for (int off = 0, b = 0; b < numRead; ++b) + { + if (rdbuf[b] == 10) + { + string s = Encoding.ASCII.GetString(rdbuf, off, (b - off)); + outw.WriteLine(s); + off = b + 1; + } + } + } + } + } + else + { + // No translation required. + tarIn.CopyEntryContents(outputStream); + } + } + + /// + /// Write an entry to the archive. This method will call the putNextEntry + /// and then write the contents of the entry, and finally call closeEntry() + /// for entries that are files. For directories, it will call putNextEntry(), + /// and then, if the recurse flag is true, process each entry that is a + /// child of the directory. + /// + /// + /// The TarEntry representing the entry to write to the archive. + /// + /// + /// If true, process the children of directory entries. + /// + public void WriteEntry(TarEntry sourceEntry, bool recurse) + { + if (sourceEntry == null) + { + throw new ArgumentNullException(nameof(sourceEntry)); + } + + if (isDisposed) + { + throw new ObjectDisposedException("TarArchive"); + } + + try + { + if (recurse) + { + TarHeader.SetValueDefaults(sourceEntry.UserId, sourceEntry.UserName, + sourceEntry.GroupId, sourceEntry.GroupName); + } + WriteEntryCore(sourceEntry, recurse); + } + finally + { + if (recurse) + { + TarHeader.RestoreSetValues(); + } + } + } + + /// + /// Write an entry to the archive. This method will call the putNextEntry + /// and then write the contents of the entry, and finally call closeEntry() + /// for entries that are files. For directories, it will call putNextEntry(), + /// and then, if the recurse flag is true, process each entry that is a + /// child of the directory. + /// + /// + /// The TarEntry representing the entry to write to the archive. + /// + /// + /// If true, process the children of directory entries. 
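Extraction ties the pieces above together: GetNextEntry drives the loop, SetKeepOldFiles protects files already on disk, and AsciiTranslate (left off here) would rewrite line endings for text entries. A usage sketch with hypothetical paths:

    using System.IO;
    using System.Text;
    using ICSharpCode.SharpZipLib.Tar;

    static class TarExtract
    {
        static void Main()
        {
            using (Stream fs = File.OpenRead("backup.tar"))
            using (TarArchive archive = TarArchive.CreateInputTarArchive(fs, Encoding.UTF8))
            {
                archive.SetKeepOldFiles(true); // never overwrite existing files
                archive.ExtractContents("restore");
            }
        }
    }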
+ /// + private void WriteEntryCore(TarEntry sourceEntry, bool recurse) + { + string tempFileName = null; + string entryFilename = sourceEntry.File; + + var entry = (TarEntry)sourceEntry.Clone(); + + if (applyUserInfoOverrides) + { + entry.GroupId = groupId; + entry.GroupName = groupName; + entry.UserId = userId; + entry.UserName = userName; + } + + OnProgressMessageEvent(entry, null); + + if (asciiTranslate && !entry.IsDirectory) + { + if (!IsBinary(entryFilename)) + { + tempFileName = Path.GetTempFileName(); + + using (StreamReader inStream = File.OpenText(entryFilename)) + { + using (Stream outStream = File.Create(tempFileName)) + { + while (true) + { + string line = inStream.ReadLine(); + if (line == null) + { + break; + } + byte[] data = Encoding.ASCII.GetBytes(line); + outStream.Write(data, 0, data.Length); + outStream.WriteByte((byte)'\n'); + } + + outStream.Flush(); + } + } + + entry.Size = new FileInfo(tempFileName).Length; + entryFilename = tempFileName; + } + } + + string newName = null; + + if (!String.IsNullOrEmpty(rootPath)) + { + if (entry.Name.StartsWith(rootPath, StringComparison.OrdinalIgnoreCase)) + { + newName = entry.Name.Substring(rootPath.Length + 1); + } + } + + if (pathPrefix != null) + { + newName = (newName == null) ? pathPrefix + "/" + entry.Name : pathPrefix + "/" + newName; + } + + if (newName != null) + { + entry.Name = newName; + } + + tarOut.PutNextEntry(entry); + + if (entry.IsDirectory) + { + if (recurse) + { + TarEntry[] list = entry.GetDirectoryEntries(); + for (int i = 0; i < list.Length; ++i) + { + WriteEntryCore(list[i], recurse); + } + } + } + else + { + using (Stream inputStream = File.OpenRead(entryFilename)) + { + byte[] localBuffer = new byte[32 * 1024]; + while (true) + { + int numRead = inputStream.Read(localBuffer, 0, localBuffer.Length); + + if (numRead <= 0) + { + break; + } + + tarOut.Write(localBuffer, 0, numRead); + } + } + + if (!string.IsNullOrEmpty(tempFileName)) + { + File.Delete(tempFileName); + } + + tarOut.CloseEntry(); + } + } + + /// + /// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + /// + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + + /// + /// Releases the unmanaged resources used by the FileStream and optionally releases the managed resources. + /// + /// true to release both managed and unmanaged resources; + /// false to release only unmanaged resources. + protected virtual void Dispose(bool disposing) + { + if (!isDisposed) + { + isDisposed = true; + if (disposing) + { + if (tarOut != null) + { + tarOut.Flush(); + tarOut.Dispose(); + } + + if (tarIn != null) + { + tarIn.Dispose(); + } + } + } + } + + /// + /// Closes the archive and releases any associated resources. + /// + public virtual void Close() + { + Dispose(true); + } + + /// + /// Ensures that resources are freed and other cleanup operations are performed + /// when the garbage collector reclaims the . + /// + ~TarArchive() + { + Dispose(false); + } + + private static void EnsureDirectoryExists(string directoryName) + { + if (!Directory.Exists(directoryName)) + { + try + { + Directory.CreateDirectory(directoryName); + } + catch (Exception e) + { + throw new TarException("Exception creating directory '" + directoryName + "', " + e.Message, e); + } + } + } + + // TODO: TarArchive - Is there a better way to test for a text file? + // It no longer reads entire files into memory but is still a weak test! 
+ // This assumes that byte values 0-7, 14-31 or 255 are binary + // and that all non text files contain one of these values + private static bool IsBinary(string filename) + { + using (FileStream fs = File.OpenRead(filename)) + { + int sampleSize = Math.Min(4096, (int)fs.Length); + byte[] content = new byte[sampleSize]; + + int bytesRead = fs.Read(content, 0, sampleSize); + + for (int i = 0; i < bytesRead; ++i) + { + byte b = content[i]; + if ((b < 8) || ((b > 13) && (b < 32)) || (b == 255)) + { + return true; + } + } + } + return false; + } + + #region Instance Fields + + private bool keepOldFiles; + private bool asciiTranslate; + + private int userId; + private string userName = string.Empty; + private int groupId; + private string groupName = string.Empty; + + private string rootPath; + private string pathPrefix; + + private bool applyUserInfoOverrides; + + private TarInputStream tarIn; + private TarOutputStream tarOut; + private bool isDisposed; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarArchive.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarArchive.cs.meta new file mode 100644 index 0000000..7d3ab03 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarArchive.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 364a17e2584a649e996e2f95510993e2 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarBuffer.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarBuffer.cs new file mode 100644 index 0000000..744c131 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarBuffer.cs @@ -0,0 +1,599 @@ +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Tar +{ + /// + /// The TarBuffer class implements the tar archive concept + /// of a buffered input stream. This concept goes back to the + /// days of blocked tape drives and special io devices. In the + /// C# universe, the only real function that this class + /// performs is to ensure that files have the correct "record" + /// size, or other tars will complain. + ///
+ /// You should never have a need to access this class directly.
+ /// TarBuffers are created by Tar IO Streams.
+ ///
+ public class TarBuffer + { + /* A quote from GNU tar man file on blocking and records + A `tar' archive file contains a series of blocks. Each block + contains `BLOCKSIZE' bytes. Although this format may be thought of as + being on magnetic tape, other media are often used. + + Each file archived is represented by a header block which describes + the file, followed by zero or more blocks which give the contents of + the file. At the end of the archive file there may be a block filled + with binary zeros as an end-of-file marker. A reasonable system should + write a block of zeros at the end, but must not assume that such a + block exists when reading an archive. + + The blocks may be "blocked" for physical I/O operations. Each + record of N blocks is written with a single 'write ()' + operation. On magnetic tapes, the result of such a write is a single + record. When writing an archive, the last record of blocks should be + written at the full size, with blocks after the zero block containing + all zeros. When reading an archive, a reasonable system should + properly handle an archive whose last record is shorter than the rest, + or which contains garbage records after a zero block. + */ + + #region Constants + + /// + /// The size of a block in a tar archive in bytes. + /// + /// This is 512 bytes. + public const int BlockSize = 512; + + /// + /// The number of blocks in a default record. + /// + /// + /// The default value is 20 blocks per record. + /// + public const int DefaultBlockFactor = 20; + + /// + /// The size in bytes of a default record. + /// + /// + /// The default size is 10KB. + /// + public const int DefaultRecordSize = BlockSize * DefaultBlockFactor; + + #endregion Constants + + /// + /// Get the record size for this buffer + /// + /// The record size in bytes. + /// This is equal to the multiplied by the + public int RecordSize + { + get + { + return recordSize; + } + } + + /// + /// Get the TAR Buffer's record size. + /// + /// The record size in bytes. + /// This is equal to the multiplied by the + [Obsolete("Use RecordSize property instead")] + public int GetRecordSize() + { + return recordSize; + } + + /// + /// Get the Blocking factor for the buffer + /// + /// This is the number of blocks in each record. + public int BlockFactor + { + get + { + return blockFactor; + } + } + + /// + /// Get the TAR Buffer's block factor + /// + /// The block factor; the number of blocks per record. + [Obsolete("Use BlockFactor property instead")] + public int GetBlockFactor() + { + return blockFactor; + } + + /// + /// Construct a default TarBuffer + /// + protected TarBuffer() + { + } + + /// + /// Create TarBuffer for reading with default BlockFactor + /// + /// Stream to buffer + /// A new suitable for input. + public static TarBuffer CreateInputTarBuffer(Stream inputStream) + { + if (inputStream == null) + { + throw new ArgumentNullException(nameof(inputStream)); + } + + return CreateInputTarBuffer(inputStream, DefaultBlockFactor); + } + + /// + /// Construct TarBuffer for reading inputStream setting BlockFactor + /// + /// Stream to buffer + /// Blocking factor to apply + /// A new suitable for input. 
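The arithmetic behind the constants above is worth making concrete: a record is BlockFactor blocks of BlockSize bytes, so the defaults give 20 * 512 = 10240 bytes (the 10KB noted above) per physical write:

    using System;
    using ICSharpCode.SharpZipLib.Tar;

    static class RecordMath
    {
        static void Main()
        {
            Console.WriteLine(TarBuffer.BlockSize);          // 512
            Console.WriteLine(TarBuffer.DefaultBlockFactor); // 20
            Console.WriteLine(TarBuffer.DefaultRecordSize);  // 10240
        }
    }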
+ public static TarBuffer CreateInputTarBuffer(Stream inputStream, int blockFactor) + { + if (inputStream == null) + { + throw new ArgumentNullException(nameof(inputStream)); + } + + if (blockFactor <= 0) + { + throw new ArgumentOutOfRangeException(nameof(blockFactor), "Factor cannot be negative"); + } + + var tarBuffer = new TarBuffer(); + tarBuffer.inputStream = inputStream; + tarBuffer.outputStream = null; + tarBuffer.Initialize(blockFactor); + + return tarBuffer; + } + + /// + /// Construct TarBuffer for writing with default BlockFactor + /// + /// output stream for buffer + /// A new suitable for output. + public static TarBuffer CreateOutputTarBuffer(Stream outputStream) + { + if (outputStream == null) + { + throw new ArgumentNullException(nameof(outputStream)); + } + + return CreateOutputTarBuffer(outputStream, DefaultBlockFactor); + } + + /// + /// Construct TarBuffer for writing Tar output to streams. + /// + /// Output stream to write to. + /// Blocking factor to apply + /// A new suitable for output. + public static TarBuffer CreateOutputTarBuffer(Stream outputStream, int blockFactor) + { + if (outputStream == null) + { + throw new ArgumentNullException(nameof(outputStream)); + } + + if (blockFactor <= 0) + { + throw new ArgumentOutOfRangeException(nameof(blockFactor), "Factor cannot be negative"); + } + + var tarBuffer = new TarBuffer(); + tarBuffer.inputStream = null; + tarBuffer.outputStream = outputStream; + tarBuffer.Initialize(blockFactor); + + return tarBuffer; + } + + /// + /// Initialization common to all constructors. + /// + private void Initialize(int archiveBlockFactor) + { + blockFactor = archiveBlockFactor; + recordSize = archiveBlockFactor * BlockSize; + recordBuffer = new byte[RecordSize]; + + if (inputStream != null) + { + currentRecordIndex = -1; + currentBlockIndex = BlockFactor; + } + else + { + currentRecordIndex = 0; + currentBlockIndex = 0; + } + } + + /// + /// Determine if an archive block indicates End of Archive. End of + /// archive is indicated by a block that consists entirely of null bytes. + /// All remaining blocks for the record should also be null's + /// However some older tars only do a couple of null blocks (Old GNU tar for one) + /// and also partial records + /// + /// The data block to check. + /// Returns true if the block is an EOF block; false otherwise. + [Obsolete("Use IsEndOfArchiveBlock instead")] + public bool IsEOFBlock(byte[] block) + { + if (block == null) + { + throw new ArgumentNullException(nameof(block)); + } + + if (block.Length != BlockSize) + { + throw new ArgumentException("block length is invalid"); + } + + for (int i = 0; i < BlockSize; ++i) + { + if (block[i] != 0) + { + return false; + } + } + + return true; + } + + /// + /// Determine if an archive block indicates the End of an Archive has been reached. + /// End of archive is indicated by a block that consists entirely of null bytes. + /// All remaining blocks for the record should also be null's + /// However some older tars only do a couple of null blocks (Old GNU tar for one) + /// and also partial records + /// + /// The data block to check. + /// Returns true if the block is an EOF block; false otherwise. 
+ public static bool IsEndOfArchiveBlock(byte[] block) + { + if (block == null) + { + throw new ArgumentNullException(nameof(block)); + } + + if (block.Length != BlockSize) + { + throw new ArgumentException("block length is invalid"); + } + + for (int i = 0; i < BlockSize; ++i) + { + if (block[i] != 0) + { + return false; + } + } + + return true; + } + + /// + /// Skip over a block on the input stream. + /// + public void SkipBlock() + { + if (inputStream == null) + { + throw new TarException("no input stream defined"); + } + + if (currentBlockIndex >= BlockFactor) + { + if (!ReadRecord()) + { + throw new TarException("Failed to read a record"); + } + } + + currentBlockIndex++; + } + + /// + /// Read a block from the input stream. + /// + /// + /// The block of data read. + /// + public byte[] ReadBlock() + { + if (inputStream == null) + { + throw new TarException("TarBuffer.ReadBlock - no input stream defined"); + } + + if (currentBlockIndex >= BlockFactor) + { + if (!ReadRecord()) + { + throw new TarException("Failed to read a record"); + } + } + + byte[] result = new byte[BlockSize]; + + Array.Copy(recordBuffer, (currentBlockIndex * BlockSize), result, 0, BlockSize); + currentBlockIndex++; + return result; + } + + /// + /// Read a record from data stream. + /// + /// + /// false if End-Of-File, else true. + /// + private bool ReadRecord() + { + if (inputStream == null) + { + throw new TarException("no input stream defined"); + } + + currentBlockIndex = 0; + + int offset = 0; + int bytesNeeded = RecordSize; + + while (bytesNeeded > 0) + { + long numBytes = inputStream.Read(recordBuffer, offset, bytesNeeded); + + // + // NOTE + // We have found EOF, and the record is not full! + // + // This is a broken archive. It does not follow the standard + // blocking algorithm. However, because we are generous, and + // it requires little effort, we will simply ignore the error + // and continue as if the entire record were read. This does + // not appear to break anything upstream. We used to return + // false in this case. + // + // Thanks to 'Yohann.Roussel@alcatel.fr' for this fix. + // + if (numBytes <= 0) + { + break; + } + + offset += (int)numBytes; + bytesNeeded -= (int)numBytes; + } + + currentRecordIndex++; + return true; + } + + /// + /// Get the current block number, within the current record, zero based. + /// + /// Block numbers are zero based values + /// + public int CurrentBlock + { + get { return currentBlockIndex; } + } + + /// + /// Gets or sets a flag indicating ownership of underlying stream. + /// When the flag is true will close the underlying stream also. + /// + /// The default value is true. + public bool IsStreamOwner { get; set; } = true; + + /// + /// Get the current block number, within the current record, zero based. + /// + /// + /// The current zero based block number. + /// + /// + /// The absolute block number = (record number * block factor) + block number. + /// + [Obsolete("Use CurrentBlock property instead")] + public int GetCurrentBlockNum() + { + return currentBlockIndex; + } + + /// + /// Get the current record number. + /// + /// + /// The current zero based record number. + /// + public int CurrentRecord + { + get { return currentRecordIndex; } + } + + /// + /// Get the current record number. + /// + /// + /// The current zero based record number. + /// + [Obsolete("Use CurrentRecord property instead")] + public int GetCurrentRecordNum() + { + return currentRecordIndex; + } + + /// + /// Write a block of data to the archive. 
+ /// + /// + /// The data to write to the archive. + /// + public void WriteBlock(byte[] block) + { + if (block == null) + { + throw new ArgumentNullException(nameof(block)); + } + + if (outputStream == null) + { + throw new TarException("TarBuffer.WriteBlock - no output stream defined"); + } + + if (block.Length != BlockSize) + { + string errorText = string.Format("TarBuffer.WriteBlock - block to write has length '{0}' which is not the block size of '{1}'", + block.Length, BlockSize); + throw new TarException(errorText); + } + + if (currentBlockIndex >= BlockFactor) + { + WriteRecord(); + } + + Array.Copy(block, 0, recordBuffer, (currentBlockIndex * BlockSize), BlockSize); + currentBlockIndex++; + } + + /// + /// Write an archive record to the archive, where the record may be + /// inside of a larger array buffer. The buffer must be "offset plus + /// record size" long. + /// + /// + /// The buffer containing the record data to write. + /// + /// + /// The offset of the record data within buffer. + /// + public void WriteBlock(byte[] buffer, int offset) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (outputStream == null) + { + throw new TarException("TarBuffer.WriteBlock - no output stream defined"); + } + + if ((offset < 0) || (offset >= buffer.Length)) + { + throw new ArgumentOutOfRangeException(nameof(offset)); + } + + if ((offset + BlockSize) > buffer.Length) + { + string errorText = string.Format("TarBuffer.WriteBlock - record has length '{0}' with offset '{1}' which is less than the record size of '{2}'", + buffer.Length, offset, recordSize); + throw new TarException(errorText); + } + + if (currentBlockIndex >= BlockFactor) + { + WriteRecord(); + } + + Array.Copy(buffer, offset, recordBuffer, (currentBlockIndex * BlockSize), BlockSize); + + currentBlockIndex++; + } + + /// + /// Write a TarBuffer record to the archive. + /// + private void WriteRecord() + { + if (outputStream == null) + { + throw new TarException("TarBuffer.WriteRecord no output stream defined"); + } + + outputStream.Write(recordBuffer, 0, RecordSize); + outputStream.Flush(); + + currentBlockIndex = 0; + currentRecordIndex++; + } + + /// + /// WriteFinalRecord writes the current record buffer to output any unwritten data is present. + /// + /// Any trailing bytes are set to zero which is by definition correct behaviour + /// for the end of a tar stream. + private void WriteFinalRecord() + { + if (outputStream == null) + { + throw new TarException("TarBuffer.WriteFinalRecord no output stream defined"); + } + + if (currentBlockIndex > 0) + { + int dataBytes = currentBlockIndex * BlockSize; + Array.Clear(recordBuffer, dataBytes, RecordSize - dataBytes); + WriteRecord(); + } + + outputStream.Flush(); + } + + /// + /// Close the TarBuffer. If this is an output buffer, also flush the + /// current block before closing. 
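For illustration only, since the class summary above says the Tar IO streams normally own these buffers: blocks handed to WriteBlock must be exactly BlockSize bytes, and Close pads the final record with zeros (via WriteFinalRecord) before flushing:

    using System.IO;
    using ICSharpCode.SharpZipLib.Tar;

    static class BufferDemo
    {
        static void Main()
        {
            using (var ms = new MemoryStream())
            {
                TarBuffer buffer = TarBuffer.CreateOutputTarBuffer(ms);
                buffer.IsStreamOwner = false; // keep ms alive after Close
                buffer.WriteBlock(new byte[TarBuffer.BlockSize]);
                buffer.Close(); // ms now holds one zero-padded 10240-byte record
            }
        }
    }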
+ /// + public void Close() + { + if (outputStream != null) + { + WriteFinalRecord(); + + if (IsStreamOwner) + { + outputStream.Dispose(); + } + outputStream = null; + } + else if (inputStream != null) + { + if (IsStreamOwner) + { + inputStream.Dispose(); + } + inputStream = null; + } + } + + #region Instance Fields + + private Stream inputStream; + private Stream outputStream; + + private byte[] recordBuffer; + private int currentBlockIndex; + private int currentRecordIndex; + + private int recordSize = DefaultRecordSize; + private int blockFactor = DefaultBlockFactor; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarBuffer.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarBuffer.cs.meta new file mode 100644 index 0000000..6d8e9ab --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarBuffer.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: a9019ddd4cf4047cb98fe8c405d875ee +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarEntry.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarEntry.cs new file mode 100644 index 0000000..64a1e5e --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarEntry.cs @@ -0,0 +1,597 @@ +using System; +using System.IO; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Tar +{ + /// + /// This class represents an entry in a Tar archive. It consists + /// of the entry's header, as well as the entry's File. Entries + /// can be instantiated in one of three ways, depending on how + /// they are to be used. + ///
+ /// TarEntries that are created from the header bytes read from
+ /// an archive are instantiated with the TarEntry( byte[] )
+ /// constructor. These entries will be used when extracting from
+ /// or listing the contents of an archive. These entries have their
+ /// header filled in using the header bytes. They also set the File
+ /// to null, since they reference an archive entry not a file.
+ ///
+ /// TarEntries that are created from files that are to be written
+ /// into an archive are instantiated with the CreateEntryFromFile(string)
+ /// pseudo constructor. These entries have their header filled in using
+ /// the File's information. They also keep a reference to the File
+ /// for convenience when writing entries.
+ ///
+ /// Finally, TarEntries can be constructed from nothing but a name.
+ /// This allows the programmer to construct the entry by hand, for
+ /// instance when only an InputStream is available for writing to
+ /// the archive, and the header information is constructed from
+ /// other information. In this case the header fields are set to
+ /// defaults and the File is set to null.
+ ///
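A sketch of the three construction routes the summary above describes; the file name and header bytes are hypothetical (a zeroed block parses to an empty, default header):

    using System.Text;
    using ICSharpCode.SharpZipLib.Tar;

    static class EntryDemo
    {
        static void Main()
        {
            // 1. By hand, from nothing but a name; header fields get defaults.
            TarEntry byName = TarEntry.CreateTarEntry("docs/readme.txt");

            // 2. From a file on disk; header is filled from the file's information.
            TarEntry fromFile = TarEntry.CreateEntryFromFile("readme.txt");

            // 3. From raw header bytes read out of an archive; File stays null.
            byte[] headerBlock = new byte[512]; // one tar header block
            TarEntry fromHeader = new TarEntry(headerBlock, Encoding.UTF8);
        }
    }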
+ public class TarEntry + { + #region Constructors + + /// + /// Initialise a default instance of . + /// + private TarEntry() + { + header = new TarHeader(); + } + + /// + /// Construct an entry from an archive's header bytes. File is set + /// to null. + /// + /// + /// The header bytes from a tar archive entry. + /// + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public TarEntry(byte[] headerBuffer) : this(headerBuffer, null) + { + } + + /// + /// Construct an entry from an archive's header bytes. File is set + /// to null. + /// + /// + /// The header bytes from a tar archive entry. + /// + /// + /// The used for the Name fields, or null for ASCII only + /// + public TarEntry(byte[] headerBuffer, Encoding nameEncoding) + { + header = new TarHeader(); + header.ParseBuffer(headerBuffer, nameEncoding); + } + + /// + /// Construct a TarEntry using the header provided + /// + /// Header details for entry + public TarEntry(TarHeader header) + { + if (header == null) + { + throw new ArgumentNullException(nameof(header)); + } + + this.header = (TarHeader)header.Clone(); + } + + #endregion Constructors + + #region ICloneable Members + + /// + /// Clone this tar entry. + /// + /// Returns a clone of this entry. + public object Clone() + { + var entry = new TarEntry(); + entry.file = file; + entry.header = (TarHeader)header.Clone(); + entry.Name = Name; + return entry; + } + + #endregion ICloneable Members + + /// + /// Construct an entry with only a name. + /// This allows the programmer to construct the entry's header "by hand". + /// + /// The name to use for the entry + /// Returns the newly created + public static TarEntry CreateTarEntry(string name) + { + var entry = new TarEntry(); + TarEntry.NameTarHeader(entry.header, name); + return entry; + } + + /// + /// Construct an entry for a file. File is set to file, and the + /// header is constructed from information from the file. + /// + /// The file name that the entry represents. + /// Returns the newly created + public static TarEntry CreateEntryFromFile(string fileName) + { + var entry = new TarEntry(); + entry.GetFileTarHeader(entry.header, fileName); + return entry; + } + + /// + /// Determine if the two entries are equal. Equality is determined + /// by the header names being equal. + /// + /// The to compare with the current Object. + /// + /// True if the entries are equal; false if not. + /// + public override bool Equals(object obj) + { + var localEntry = obj as TarEntry; + + if (localEntry != null) + { + return Name.Equals(localEntry.Name); + } + return false; + } + + /// + /// Derive a Hash value for the current + /// + /// A Hash code for the current + public override int GetHashCode() + { + return Name.GetHashCode(); + } + + /// + /// Determine if the given entry is a descendant of this entry. + /// Descendancy is determined by the name of the descendant + /// starting with this entry's name. + /// + /// + /// Entry to be checked as a descendent of this. + /// + /// + /// True if entry is a descendant of this. + /// + public bool IsDescendent(TarEntry toTest) + { + if (toTest == null) + { + throw new ArgumentNullException(nameof(toTest)); + } + + return toTest.Name.StartsWith(Name, StringComparison.Ordinal); + } + + /// + /// Get this entry's header. + /// + /// + /// This entry's TarHeader. + /// + public TarHeader TarHeader + { + get + { + return header; + } + } + + /// + /// Get/Set this entry's name. 
+ /// + public string Name + { + get + { + return header.Name; + } + set + { + header.Name = value; + } + } + + /// + /// Get/set this entry's user id. + /// + public int UserId + { + get + { + return header.UserId; + } + set + { + header.UserId = value; + } + } + + /// + /// Get/set this entry's group id. + /// + public int GroupId + { + get + { + return header.GroupId; + } + set + { + header.GroupId = value; + } + } + + /// + /// Get/set this entry's user name. + /// + public string UserName + { + get + { + return header.UserName; + } + set + { + header.UserName = value; + } + } + + /// + /// Get/set this entry's group name. + /// + public string GroupName + { + get + { + return header.GroupName; + } + set + { + header.GroupName = value; + } + } + + /// + /// Convenience method to set this entry's group and user ids. + /// + /// + /// This entry's new user id. + /// + /// + /// This entry's new group id. + /// + public void SetIds(int userId, int groupId) + { + UserId = userId; + GroupId = groupId; + } + + /// + /// Convenience method to set this entry's group and user names. + /// + /// + /// This entry's new user name. + /// + /// + /// This entry's new group name. + /// + public void SetNames(string userName, string groupName) + { + UserName = userName; + GroupName = groupName; + } + + /// + /// Get/Set the modification time for this entry + /// + public DateTime ModTime + { + get + { + return header.ModTime; + } + set + { + header.ModTime = value; + } + } + + /// + /// Get this entry's file. + /// + /// + /// This entry's file. + /// + public string File + { + get + { + return file; + } + } + + /// + /// Get/set this entry's recorded file size. + /// + public long Size + { + get + { + return header.Size; + } + set + { + header.Size = value; + } + } + + /// + /// Return true if this entry represents a directory, false otherwise + /// + /// + /// True if this entry is a directory. + /// + public bool IsDirectory + { + get + { + if (file != null) + { + return Directory.Exists(file); + } + + if (header != null) + { + if ((header.TypeFlag == TarHeader.LF_DIR) || Name.EndsWith("/", StringComparison.Ordinal)) + { + return true; + } + } + return false; + } + } + + /// + /// Fill in a TarHeader with information from a File. + /// + /// + /// The TarHeader to fill in. + /// + /// + /// The file from which to get the header information. + /// + public void GetFileTarHeader(TarHeader header, string file) + { + if (header == null) + { + throw new ArgumentNullException(nameof(header)); + } + + if (file == null) + { + throw new ArgumentNullException(nameof(file)); + } + + this.file = file; + + // bugfix from torhovl from #D forum: + string name = file; + + // 23-Jan-2004 GnuTar allows device names in path where the name is not local to the current directory + if (name.IndexOf(Directory.GetCurrentDirectory(), StringComparison.Ordinal) == 0) + { + name = name.Substring(Directory.GetCurrentDirectory().Length); + } + + /* + if (Path.DirectorySeparatorChar == '\\') + { + // check if the OS is Windows + // Strip off drive letters! + if (name.Length > 2) + { + char ch1 = name[0]; + char ch2 = name[1]; + + if (ch2 == ':' && Char.IsLetter(ch1)) + { + name = name.Substring(2); + } + } + } + */ + + name = name.Replace(Path.DirectorySeparatorChar, '/'); + + // No absolute pathnames + // Windows (and Posix?) paths can start with UNC style "\\NetworkDrive\", + // so we loop on starting /'s. 
+ while (name.StartsWith("/", StringComparison.Ordinal)) + { + name = name.Substring(1); + } + + header.LinkName = String.Empty; + header.Name = name; + + if (Directory.Exists(file)) + { + header.Mode = 1003; // Magic number for security access for a UNIX filesystem + header.TypeFlag = TarHeader.LF_DIR; + if ((header.Name.Length == 0) || header.Name[header.Name.Length - 1] != '/') + { + header.Name = header.Name + "/"; + } + + header.Size = 0; + } + else + { + header.Mode = 33216; // Magic number for security access for a UNIX filesystem + header.TypeFlag = TarHeader.LF_NORMAL; + header.Size = new FileInfo(file.Replace('/', Path.DirectorySeparatorChar)).Length; + } + + header.ModTime = System.IO.File.GetLastWriteTime(file.Replace('/', Path.DirectorySeparatorChar)).ToUniversalTime(); + header.DevMajor = 0; + header.DevMinor = 0; + } + + /// + /// Get entries for all files present in this entries directory. + /// If this entry doesnt represent a directory zero entries are returned. + /// + /// + /// An array of TarEntry's for this entry's children. + /// + public TarEntry[] GetDirectoryEntries() + { + if ((file == null) || !Directory.Exists(file)) + { + return new TarEntry[0]; + } + + string[] list = Directory.GetFileSystemEntries(file); + TarEntry[] result = new TarEntry[list.Length]; + + for (int i = 0; i < list.Length; ++i) + { + result[i] = TarEntry.CreateEntryFromFile(list[i]); + } + + return result; + } + + /// + /// Write an entry's header information to a header buffer. + /// + /// + /// The tar entry header buffer to fill in. + /// + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public void WriteEntryHeader(byte[] outBuffer) + { + WriteEntryHeader(outBuffer, null); + } + + /// + /// Write an entry's header information to a header buffer. + /// + /// + /// The tar entry header buffer to fill in. + /// + /// + /// The used for the Name fields, or null for ASCII only + /// + public void WriteEntryHeader(byte[] outBuffer, Encoding nameEncoding) + { + header.WriteHeader(outBuffer, nameEncoding); + } + + /// + /// Convenience method that will modify an entry's name directly + /// in place in an entry header buffer byte array. + /// + /// + /// The buffer containing the entry header to modify. + /// + /// + /// The new name to place into the header buffer. + /// + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + static public void AdjustEntryName(byte[] buffer, string newName) + { + AdjustEntryName(buffer, newName, null); + } + + /// + /// Convenience method that will modify an entry's name directly + /// in place in an entry header buffer byte array. + /// + /// + /// The buffer containing the entry header to modify. + /// + /// + /// The new name to place into the header buffer. + /// + /// + /// The used for the Name fields, or null for ASCII only + /// + static public void AdjustEntryName(byte[] buffer, string newName, Encoding nameEncoding) + { + TarHeader.GetNameBytes(newName, buffer, 0, TarHeader.NAMELEN, nameEncoding); + } + + /// + /// Fill in a TarHeader given only the entry's name. + /// + /// + /// The TarHeader to fill in. + /// + /// + /// The tar entry name. 
+ /// + static public void NameTarHeader(TarHeader header, string name) + { + if (header == null) + { + throw new ArgumentNullException(nameof(header)); + } + + if (name == null) + { + throw new ArgumentNullException(nameof(name)); + } + + bool isDir = name.EndsWith("/", StringComparison.Ordinal); + + header.Name = name; + header.Mode = isDir ? 1003 : 33216; + header.UserId = 0; + header.GroupId = 0; + header.Size = 0; + + header.ModTime = DateTime.UtcNow; + + header.TypeFlag = isDir ? TarHeader.LF_DIR : TarHeader.LF_NORMAL; + + header.LinkName = String.Empty; + header.UserName = String.Empty; + header.GroupName = String.Empty; + + header.DevMajor = 0; + header.DevMinor = 0; + } + + #region Instance Fields + + /// + /// The name of the file this entry represents or null if the entry is not based on a file. + /// + private string file; + + /// + /// The entry's header information. + /// + private TarHeader header; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarEntry.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarEntry.cs.meta new file mode 100644 index 0000000..0a224f3 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarEntry.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 125faa6cc09304642a1a6867f9cd63aa +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarException.cs new file mode 100644 index 0000000..9d448ca --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarException.cs @@ -0,0 +1,54 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib.Tar +{ + /// + /// TarException represents exceptions specific to Tar classes and code. + /// + [Serializable] + public class TarException : SharpZipBaseException + { + /// + /// Initialise a new instance of . + /// + public TarException() + { + } + + /// + /// Initialise a new instance of with its message string. + /// + /// A that describes the error. + public TarException(string message) + : base(message) + { + } + + /// + /// Initialise a new instance of . + /// + /// A that describes the error. + /// The that caused this exception. + public TarException(string message, Exception innerException) + : base(message, innerException) + { + } + + /// + /// Initializes a new instance of the TarException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination. 
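NameTarHeader above keys everything off a trailing slash; a quick sketch of the defaults it produces (values taken from the method body):

using ICSharpCode.SharpZipLib.Tar;

class NameHeaderDefaults
{
    static void Demo()
    {
        // Trailing '/' marks a directory: TypeFlag LF_DIR, mode 1003, size 0.
        TarEntry dir = TarEntry.CreateTarEntry("assets/");

        // No trailing slash: a normal file entry, TypeFlag LF_NORMAL, mode 33216.
        TarEntry file = TarEntry.CreateTarEntry("assets/logo.png");
    }
}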
+ /// + protected TarException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarException.cs.meta new file mode 100644 index 0000000..e2063ba --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 7fcc64c9221a84bf2b25f640577fd970 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarExtendedHeaderReader.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarExtendedHeaderReader.cs new file mode 100644 index 0000000..d1d438a --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarExtendedHeaderReader.cs @@ -0,0 +1,99 @@ +using System.Collections.Generic; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Tar +{ + /// + /// Reads the extended header of a Tar stream + /// + public class TarExtendedHeaderReader + { + private const byte LENGTH = 0; + private const byte KEY = 1; + private const byte VALUE = 2; + private const byte END = 3; + + private readonly Dictionary headers = new Dictionary(); + + private string[] headerParts = new string[3]; + + private int bbIndex; + private byte[] byteBuffer; + private char[] charBuffer; + + private readonly StringBuilder sb = new StringBuilder(); + private readonly Decoder decoder = Encoding.UTF8.GetDecoder(); + + private int state = LENGTH; + + private static readonly byte[] StateNext = new[] { (byte)' ', (byte)'=', (byte)'\n' }; + + /// + /// Creates a new . + /// + public TarExtendedHeaderReader() + { + ResetBuffers(); + } + + /// + /// Read bytes from + /// + /// + /// + public void Read(byte[] buffer, int length) + { + for (int i = 0; i < length; i++) + { + byte next = buffer[i]; + + if (next == StateNext[state]) + { + Flush(); + headerParts[state] = sb.ToString(); + sb.Clear(); + + if (++state == END) + { + headers.Add(headerParts[KEY], headerParts[VALUE]); + headerParts = new string[3]; + state = LENGTH; + } + } + else + { + byteBuffer[bbIndex++] = next; + if (bbIndex == 4) + Flush(); + } + } + } + + private void Flush() + { + decoder.Convert(byteBuffer, 0, bbIndex, charBuffer, 0, 4, false, out int bytesUsed, out int charsUsed, out bool completed); + + sb.Append(charBuffer, 0, charsUsed); + ResetBuffers(); + } + + private void ResetBuffers() + { + charBuffer = new char[4]; + byteBuffer = new byte[4]; + bbIndex = 0; + } + + /// + /// Returns the parsed headers as key-value strings + /// + public Dictionary Headers + { + get + { + // TODO: Check for invalid state? 
-NM 2018-07-01 + return headers; + } + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarExtendedHeaderReader.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarExtendedHeaderReader.cs.meta new file mode 100644 index 0000000..f0dc278 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarExtendedHeaderReader.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 78f7ff027f4694c0d9db2dd7d13ea261 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarHeader.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarHeader.cs new file mode 100644 index 0000000..3bd1bdf --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarHeader.cs @@ -0,0 +1,1310 @@ +using System; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Tar +{ + /// + /// This class encapsulates the Tar Entry Header used in Tar Archives. + /// The class also holds a number of tar constants, used mostly in headers. + /// + /// + /// The tar format and its POSIX successor PAX have a long history which makes for compatability + /// issues when creating and reading files. + /// + /// This is further complicated by a large number of programs with variations on formats + /// One common issue is the handling of names longer than 100 characters. + /// GNU style long names are currently supported. + /// + /// This is the ustar (Posix 1003.1) header. + /// + /// struct header + /// { + /// char t_name[100]; // 0 Filename + /// char t_mode[8]; // 100 Permissions + /// char t_uid[8]; // 108 Numerical User ID + /// char t_gid[8]; // 116 Numerical Group ID + /// char t_size[12]; // 124 Filesize + /// char t_mtime[12]; // 136 st_mtime + /// char t_chksum[8]; // 148 Checksum + /// char t_typeflag; // 156 Type of File + /// char t_linkname[100]; // 157 Target of Links + /// char t_magic[6]; // 257 "ustar" or other... + /// char t_version[2]; // 263 Version fixed to 00 + /// char t_uname[32]; // 265 User Name + /// char t_gname[32]; // 297 Group Name + /// char t_devmajor[8]; // 329 Major for devices + /// char t_devminor[8]; // 337 Minor for devices + /// char t_prefix[155]; // 345 Prefix for t_name + /// char t_mfill[12]; // 500 Filler up to 512 + /// }; + /// + public class TarHeader + { + #region Constants + + /// + /// The length of the name field in a header buffer. + /// + public const int NAMELEN = 100; + + /// + /// The length of the mode field in a header buffer. + /// + public const int MODELEN = 8; + + /// + /// The length of the user id field in a header buffer. + /// + public const int UIDLEN = 8; + + /// + /// The length of the group id field in a header buffer. + /// + public const int GIDLEN = 8; + + /// + /// The length of the checksum field in a header buffer. + /// + public const int CHKSUMLEN = 8; + + /// + /// Offset of checksum in a header buffer. + /// + public const int CHKSUMOFS = 148; + + /// + /// The length of the size field in a header buffer. + /// + public const int SIZELEN = 12; + + /// + /// The length of the magic field in a header buffer. + /// + public const int MAGICLEN = 6; + + /// + /// The length of the version field in a header buffer. + /// + public const int VERSIONLEN = 2; + + /// + /// The length of the modification time field in a header buffer. 
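The length constants above line up with the offsets in the struct comment; a small sanity-check sketch (MODTIMELEN is the constant defined just after this doc comment):

using ICSharpCode.SharpZipLib.Tar;

class HeaderLayout
{
    static void Demo()
    {
        // name(100) + mode(8) + uid(8) + gid(8) + size(12) + mtime(12) = 148,
        // which is exactly where the checksum field starts (CHKSUMOFS).
        int checksumOffset = TarHeader.NAMELEN + TarHeader.MODELEN + TarHeader.UIDLEN
            + TarHeader.GIDLEN + TarHeader.SIZELEN + TarHeader.MODTIMELEN;
        System.Diagnostics.Debug.Assert(checksumOffset == TarHeader.CHKSUMOFS);
    }
}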
+ /// + public const int MODTIMELEN = 12; + + /// + /// The length of the user name field in a header buffer. + /// + public const int UNAMELEN = 32; + + /// + /// The length of the group name field in a header buffer. + /// + public const int GNAMELEN = 32; + + /// + /// The length of the devices field in a header buffer. + /// + public const int DEVLEN = 8; + + /// + /// The length of the name prefix field in a header buffer. + /// + public const int PREFIXLEN = 155; + + // + // LF_ constants represent the "type" of an entry + // + + /// + /// The "old way" of indicating a normal file. + /// + public const byte LF_OLDNORM = 0; + + /// + /// Normal file type. + /// + public const byte LF_NORMAL = (byte)'0'; + + /// + /// Link file type. + /// + public const byte LF_LINK = (byte)'1'; + + /// + /// Symbolic link file type. + /// + public const byte LF_SYMLINK = (byte)'2'; + + /// + /// Character device file type. + /// + public const byte LF_CHR = (byte)'3'; + + /// + /// Block device file type. + /// + public const byte LF_BLK = (byte)'4'; + + /// + /// Directory file type. + /// + public const byte LF_DIR = (byte)'5'; + + /// + /// FIFO (pipe) file type. + /// + public const byte LF_FIFO = (byte)'6'; + + /// + /// Contiguous file type. + /// + public const byte LF_CONTIG = (byte)'7'; + + /// + /// Posix.1 2001 global extended header + /// + public const byte LF_GHDR = (byte)'g'; + + /// + /// Posix.1 2001 extended header + /// + public const byte LF_XHDR = (byte)'x'; + + // POSIX allows for upper case ascii type as extensions + + /// + /// Solaris access control list file type + /// + public const byte LF_ACL = (byte)'A'; + + /// + /// GNU dir dump file type + /// This is a dir entry that contains the names of files that were in the + /// dir at the time the dump was made + /// + public const byte LF_GNU_DUMPDIR = (byte)'D'; + + /// + /// Solaris Extended Attribute File + /// + public const byte LF_EXTATTR = (byte)'E'; + + /// + /// Inode (metadata only) no file content + /// + public const byte LF_META = (byte)'I'; + + /// + /// Identifies the next file on the tape as having a long link name + /// + public const byte LF_GNU_LONGLINK = (byte)'K'; + + /// + /// Identifies the next file on the tape as having a long name + /// + public const byte LF_GNU_LONGNAME = (byte)'L'; + + /// + /// Continuation of a file that began on another volume + /// + public const byte LF_GNU_MULTIVOL = (byte)'M'; + + /// + /// For storing filenames that dont fit in the main header (old GNU) + /// + public const byte LF_GNU_NAMES = (byte)'N'; + + /// + /// GNU Sparse file + /// + public const byte LF_GNU_SPARSE = (byte)'S'; + + /// + /// GNU Tape/volume header ignore on extraction + /// + public const byte LF_GNU_VOLHDR = (byte)'V'; + + /// + /// The magic tag representing a POSIX tar archive. 
(would be written with a trailing NULL) + /// + public const string TMAGIC = "ustar"; + + /// + /// The magic tag representing an old GNU tar archive where version is included in magic and overwrites it + /// + public const string GNU_TMAGIC = "ustar "; + + private const long timeConversionFactor = 10000000L; // 1 tick == 100 nanoseconds + private static readonly DateTime dateTime1970 = new DateTime(1970, 1, 1, 0, 0, 0, 0); + + #endregion Constants + + #region Constructors + + /// + /// Initialise a default TarHeader instance + /// + public TarHeader() + { + Magic = TMAGIC; + Version = " "; + + Name = ""; + LinkName = ""; + + UserId = defaultUserId; + GroupId = defaultGroupId; + UserName = defaultUser; + GroupName = defaultGroupName; + Size = 0; + } + + #endregion Constructors + + #region Properties + + /// + /// Get/set the name for this tar entry. + /// + /// Thrown when attempting to set the property to null. + public string Name + { + get { return name; } + set + { + if (value == null) + { + throw new ArgumentNullException(nameof(value)); + } + name = value; + } + } + + /// + /// Get the name of this entry. + /// + /// The entry's name. + [Obsolete("Use the Name property instead", true)] + public string GetName() + { + return name; + } + + /// + /// Get/set the entry's Unix style permission mode. + /// + public int Mode + { + get { return mode; } + set { mode = value; } + } + + /// + /// The entry's user id. + /// + /// + /// This is only directly relevant to unix systems. + /// The default is zero. + /// + public int UserId + { + get { return userId; } + set { userId = value; } + } + + /// + /// Get/set the entry's group id. + /// + /// + /// This is only directly relevant to linux/unix systems. + /// The default value is zero. + /// + public int GroupId + { + get { return groupId; } + set { groupId = value; } + } + + /// + /// Get/set the entry's size. + /// + /// Thrown when setting the size to less than zero. + public long Size + { + get { return size; } + set + { + if (value < 0) + { + throw new ArgumentOutOfRangeException(nameof(value), "Cannot be less than zero"); + } + size = value; + } + } + + /// + /// Get/set the entry's modification time. + /// + /// + /// The modification time is only accurate to within a second. + /// + /// Thrown when setting the date time to less than 1/1/1970. + public DateTime ModTime + { + get { return modTime; } + set + { + if (value < dateTime1970) + { + throw new ArgumentOutOfRangeException(nameof(value), "ModTime cannot be before Jan 1st 1970"); + } + modTime = new DateTime(value.Year, value.Month, value.Day, value.Hour, value.Minute, value.Second); + } + } + + /// + /// Get the entry's checksum. This is only valid/updated after writing or reading an entry. + /// + public int Checksum + { + get { return checksum; } + } + + /// + /// Get value of true if the header checksum is valid, false otherwise. + /// + public bool IsChecksumValid + { + get { return isChecksumValid; } + } + + /// + /// Get/set the entry's type flag. + /// + public byte TypeFlag + { + get { return typeFlag; } + set { typeFlag = value; } + } + + /// + /// The entry's link name. + /// + /// Thrown when attempting to set LinkName to null. + public string LinkName + { + get { return linkName; } + set + { + if (value == null) + { + throw new ArgumentNullException(nameof(value)); + } + linkName = value; + } + } + + /// + /// Get/set the entry's magic tag. + /// + /// Thrown when attempting to set Magic to null. 
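As the ModTime setter above shows, sub-second precision is deliberately dropped and pre-epoch dates are rejected; a short sketch of both behaviours:

using System;
using ICSharpCode.SharpZipLib.Tar;

class ModTimeBehaviour
{
    static void Demo()
    {
        var header = new TarHeader();

        // Milliseconds are truncated: the stored time is accurate to one second.
        header.ModTime = new DateTime(2020, 5, 17, 10, 30, 45, 999);
        // header.ModTime == 2020-05-17 10:30:45

        try
        {
            header.ModTime = new DateTime(1969, 12, 31); // before 1/1/1970
        }
        catch (ArgumentOutOfRangeException)
        {
            // Rejected: tar stores seconds since the Unix epoch.
        }
    }
}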
+ public string Magic + { + get { return magic; } + set + { + if (value == null) + { + throw new ArgumentNullException(nameof(value)); + } + magic = value; + } + } + + /// + /// The entry's version. + /// + /// Thrown when attempting to set Version to null. + public string Version + { + get + { + return version; + } + + set + { + if (value == null) + { + throw new ArgumentNullException(nameof(value)); + } + version = value; + } + } + + /// + /// The entry's user name. + /// + public string UserName + { + get { return userName; } + set + { + if (value != null) + { + userName = value.Substring(0, Math.Min(UNAMELEN, value.Length)); + } + else + { + string currentUser = "user"; + if (currentUser.Length > UNAMELEN) + { + currentUser = currentUser.Substring(0, UNAMELEN); + } + userName = currentUser; + } + } + } + + /// + /// Get/set the entry's group name. + /// + /// + /// This is only directly relevant to unix systems. + /// + public string GroupName + { + get { return groupName; } + set + { + if (value == null) + { + groupName = "None"; + } + else + { + groupName = value; + } + } + } + + /// + /// Get/set the entry's major device number. + /// + public int DevMajor + { + get { return devMajor; } + set { devMajor = value; } + } + + /// + /// Get/set the entry's minor device number. + /// + public int DevMinor + { + get { return devMinor; } + set { devMinor = value; } + } + + #endregion Properties + + #region ICloneable Members + + /// + /// Create a new that is a copy of the current instance. + /// + /// A new that is a copy of the current instance. + public object Clone() + { + return this.MemberwiseClone(); + } + + #endregion ICloneable Members + + /// + /// Parse TarHeader information from a header buffer. + /// + /// + /// The tar entry header buffer to get information from. 
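The UserName and GroupName setters above never store null: over-long user names are truncated to UNAMELEN (32) and nulls fall back to "user" / "None". A sketch:

using ICSharpCode.SharpZipLib.Tar;

class OwnerNames
{
    static void Demo()
    {
        var header = new TarHeader();

        header.UserName = new string('x', 40);
        // header.UserName.Length == TarHeader.UNAMELEN (32): silently truncated.

        header.GroupName = null;
        // header.GroupName == "None": null is replaced, never stored.
    }
}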
+ /// + /// + /// The used for the Name field, or null for ASCII only + /// + public void ParseBuffer(byte[] header, Encoding nameEncoding) + { + if (header == null) + { + throw new ArgumentNullException(nameof(header)); + } + + int offset = 0; + + name = ParseName(header, offset, NAMELEN, nameEncoding).ToString(); + offset += NAMELEN; + + mode = (int)ParseOctal(header, offset, MODELEN); + offset += MODELEN; + + UserId = (int)ParseOctal(header, offset, UIDLEN); + offset += UIDLEN; + + GroupId = (int)ParseOctal(header, offset, GIDLEN); + offset += GIDLEN; + + Size = ParseBinaryOrOctal(header, offset, SIZELEN); + offset += SIZELEN; + + ModTime = GetDateTimeFromCTime(ParseOctal(header, offset, MODTIMELEN)); + offset += MODTIMELEN; + + checksum = (int)ParseOctal(header, offset, CHKSUMLEN); + offset += CHKSUMLEN; + + TypeFlag = header[offset++]; + + LinkName = ParseName(header, offset, NAMELEN, nameEncoding).ToString(); + offset += NAMELEN; + + Magic = ParseName(header, offset, MAGICLEN, nameEncoding).ToString(); + offset += MAGICLEN; + + if (Magic == "ustar") + { + Version = ParseName(header, offset, VERSIONLEN, nameEncoding).ToString(); + offset += VERSIONLEN; + + UserName = ParseName(header, offset, UNAMELEN, nameEncoding).ToString(); + offset += UNAMELEN; + + GroupName = ParseName(header, offset, GNAMELEN, nameEncoding).ToString(); + offset += GNAMELEN; + + DevMajor = (int)ParseOctal(header, offset, DEVLEN); + offset += DEVLEN; + + DevMinor = (int)ParseOctal(header, offset, DEVLEN); + offset += DEVLEN; + + string prefix = ParseName(header, offset, PREFIXLEN, nameEncoding).ToString(); + if (!string.IsNullOrEmpty(prefix)) Name = prefix + '/' + Name; + } + + isChecksumValid = Checksum == TarHeader.MakeCheckSum(header); + } + + /// + /// Parse TarHeader information from a header buffer. + /// + /// + /// The tar entry header buffer to get information from. + /// + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public void ParseBuffer(byte[] header) + { + ParseBuffer(header, null); + } + + /// + /// 'Write' header information to buffer provided, updating the check sum. + /// + /// output buffer for header information + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public void WriteHeader(byte[] outBuffer) + { + WriteHeader(outBuffer, null); + } + + /// + /// 'Write' header information to buffer provided, updating the check sum. 
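ParseBuffer and WriteHeader (below) are symmetric over a single header block; a round-trip sketch, assuming the standard 512-byte tar block size:

using System.Text;
using ICSharpCode.SharpZipLib.Tar;

class HeaderRoundTrip
{
    static void Demo()
    {
        var original = new TarHeader { Name = "data/sample.bin", Size = 1024 };

        byte[] block = new byte[512];               // one tar header block
        original.WriteHeader(block, Encoding.UTF8); // fills all fields + checksum

        var parsed = new TarHeader();
        parsed.ParseBuffer(block, Encoding.UTF8);
        // parsed.Name == "data/sample.bin", parsed.IsChecksumValid == true
    }
}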
+ /// + /// output buffer for header information + /// The used for the Name field, or null for ASCII only + public void WriteHeader(byte[] outBuffer, Encoding nameEncoding) + { + if (outBuffer == null) + { + throw new ArgumentNullException(nameof(outBuffer)); + } + + int offset = 0; + + offset = GetNameBytes(Name, outBuffer, offset, NAMELEN, nameEncoding); + offset = GetOctalBytes(mode, outBuffer, offset, MODELEN); + offset = GetOctalBytes(UserId, outBuffer, offset, UIDLEN); + offset = GetOctalBytes(GroupId, outBuffer, offset, GIDLEN); + + offset = GetBinaryOrOctalBytes(Size, outBuffer, offset, SIZELEN); + offset = GetOctalBytes(GetCTime(ModTime), outBuffer, offset, MODTIMELEN); + + int csOffset = offset; + for (int c = 0; c < CHKSUMLEN; ++c) + { + outBuffer[offset++] = (byte)' '; + } + + outBuffer[offset++] = TypeFlag; + + offset = GetNameBytes(LinkName, outBuffer, offset, NAMELEN, nameEncoding); + offset = GetAsciiBytes(Magic, 0, outBuffer, offset, MAGICLEN, nameEncoding); + offset = GetNameBytes(Version, outBuffer, offset, VERSIONLEN, nameEncoding); + offset = GetNameBytes(UserName, outBuffer, offset, UNAMELEN, nameEncoding); + offset = GetNameBytes(GroupName, outBuffer, offset, GNAMELEN, nameEncoding); + + if ((TypeFlag == LF_CHR) || (TypeFlag == LF_BLK)) + { + offset = GetOctalBytes(DevMajor, outBuffer, offset, DEVLEN); + offset = GetOctalBytes(DevMinor, outBuffer, offset, DEVLEN); + } + + for (; offset < outBuffer.Length;) + { + outBuffer[offset++] = 0; + } + + checksum = ComputeCheckSum(outBuffer); + + GetCheckSumOctalBytes(checksum, outBuffer, csOffset, CHKSUMLEN); + isChecksumValid = true; + } + + /// + /// Get a hash code for the current object. + /// + /// A hash code for the current object. + public override int GetHashCode() + { + return Name.GetHashCode(); + } + + /// + /// Determines if this instance is equal to the specified object. + /// + /// The object to compare with. + /// true if the objects are equal, false otherwise. + public override bool Equals(object obj) + { + var localHeader = obj as TarHeader; + + bool result; + if (localHeader != null) + { + result = (name == localHeader.name) + && (mode == localHeader.mode) + && (UserId == localHeader.UserId) + && (GroupId == localHeader.GroupId) + && (Size == localHeader.Size) + && (ModTime == localHeader.ModTime) + && (Checksum == localHeader.Checksum) + && (TypeFlag == localHeader.TypeFlag) + && (LinkName == localHeader.LinkName) + && (Magic == localHeader.Magic) + && (Version == localHeader.Version) + && (UserName == localHeader.UserName) + && (GroupName == localHeader.GroupName) + && (DevMajor == localHeader.DevMajor) + && (DevMinor == localHeader.DevMinor); + } + else + { + result = false; + } + return result; + } + + /// + /// Set defaults for values used when constructing a TarHeader instance. + /// + /// Value to apply as a default for userId. + /// Value to apply as a default for userName. + /// Value to apply as a default for groupId. + /// Value to apply as a default for groupName. + static internal void SetValueDefaults(int userId, string userName, int groupId, string groupName) + { + defaultUserId = userIdAsSet = userId; + defaultUser = userNameAsSet = userName; + defaultGroupId = groupIdAsSet = groupId; + defaultGroupName = groupNameAsSet = groupName; + } + + static internal void RestoreSetValues() + { + defaultUserId = userIdAsSet; + defaultUser = userNameAsSet; + defaultGroupId = groupIdAsSet; + defaultGroupName = groupNameAsSet; + } + + // Return value that may be stored in octal or binary. 
Length must exceed 8. + // + static private long ParseBinaryOrOctal(byte[] header, int offset, int length) + { + if (header[offset] >= 0x80) + { + // File sizes over 8GB are stored in 8 right-justified bytes of binary indicated by setting the high-order bit of the leftmost byte of a numeric field. + long result = 0; + for (int pos = length - 8; pos < length; pos++) + { + result = result << 8 | header[offset + pos]; + } + return result; + } + return ParseOctal(header, offset, length); + } + + /// + /// Parse an octal string from a header buffer. + /// + /// The header buffer from which to parse. + /// The offset into the buffer from which to parse. + /// The number of header bytes to parse. + /// The long equivalent of the octal string. + static public long ParseOctal(byte[] header, int offset, int length) + { + if (header == null) + { + throw new ArgumentNullException(nameof(header)); + } + + long result = 0; + bool stillPadding = true; + + int end = offset + length; + for (int i = offset; i < end; ++i) + { + if (header[i] == 0) + { + break; + } + + if (header[i] == (byte)' ' || header[i] == '0') + { + if (stillPadding) + { + continue; + } + + if (header[i] == (byte)' ') + { + break; + } + } + + stillPadding = false; + + result = (result << 3) + (header[i] - '0'); + } + + return result; + } + + /// + /// Parse a name from a header buffer. + /// + /// + /// The header buffer from which to parse. + /// + /// + /// The offset into the buffer from which to parse. + /// + /// + /// The number of header bytes to parse. + /// + /// + /// The name parsed. + /// + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + static public StringBuilder ParseName(byte[] header, int offset, int length) + { + return ParseName(header, offset, length, null); + } + + /// + /// Parse a name from a header buffer. + /// + /// + /// The header buffer from which to parse. + /// + /// + /// The offset into the buffer from which to parse. + /// + /// + /// The number of header bytes to parse. + /// + /// + /// name encoding, or null for ASCII only + /// + /// + /// The name parsed. 
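ParseOctal above skips leading spaces and zeros, then stops at a NUL or a trailing space; a small sketch with a typical mode field:

using System.Text;
using ICSharpCode.SharpZipLib.Tar;

class OctalParsing
{
    static void Demo()
    {
        // A mode field as it appears on disk: "0000644" followed by a NUL.
        byte[] field = Encoding.ASCII.GetBytes("0000644\0");

        long mode = TarHeader.ParseOctal(field, 0, field.Length);
        // mode == 420 decimal (octal 644, i.e. rw-r--r--)
    }
}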
+ /// + static public StringBuilder ParseName(byte[] header, int offset, int length, Encoding encoding) + { + if (header == null) + { + throw new ArgumentNullException(nameof(header)); + } + + if (offset < 0) + { + throw new ArgumentOutOfRangeException(nameof(offset), "Cannot be less than zero"); + } + + if (length < 0) + { + throw new ArgumentOutOfRangeException(nameof(length), "Cannot be less than zero"); + } + + if (offset + length > header.Length) + { + throw new ArgumentException("Exceeds header size", nameof(length)); + } + + var result = new StringBuilder(length); + + int count = 0; + if(encoding == null) + { + for (int i = offset; i < offset + length; ++i) + { + if (header[i] == 0) + { + break; + } + result.Append((char)header[i]); + } + } + else + { + for(int i = offset; i < offset + length; ++i, ++count) + { + if(header[i] == 0) + { + break; + } + } + result.Append(encoding.GetString(header, offset, count)); + } + + return result; + } + + /// + /// Add name to the buffer as a collection of bytes + /// + /// The name to add + /// The offset of the first character + /// The buffer to add to + /// The index of the first byte to add + /// The number of characters/bytes to add + /// The next free index in the + public static int GetNameBytes(StringBuilder name, int nameOffset, byte[] buffer, int bufferOffset, int length) + { + return GetNameBytes(name.ToString(), nameOffset, buffer, bufferOffset, length, null); + } + + /// + /// Add name to the buffer as a collection of bytes + /// + /// The name to add + /// The offset of the first character + /// The buffer to add to + /// The index of the first byte to add + /// The number of characters/bytes to add + /// The next free index in the + public static int GetNameBytes(string name, int nameOffset, byte[] buffer, int bufferOffset, int length) + { + return GetNameBytes(name, nameOffset, buffer, bufferOffset, length, null); + } + + /// + /// Add name to the buffer as a collection of bytes + /// + /// The name to add + /// The offset of the first character + /// The buffer to add to + /// The index of the first byte to add + /// The number of characters/bytes to add + /// name encoding, or null for ASCII only + /// The next free index in the + public static int GetNameBytes(string name, int nameOffset, byte[] buffer, int bufferOffset, int length, Encoding encoding) + { + if (name == null) + { + throw new ArgumentNullException(nameof(name)); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + int i; + if(encoding != null) + { + // it can be more sufficient if using Span or unsafe + var nameArray = name.ToCharArray(nameOffset, Math.Min(name.Length - nameOffset, length)); + // it can be more sufficient if using Span(or unsafe?) 
and ArrayPool for temporary buffer + var bytes = encoding.GetBytes(nameArray, 0, nameArray.Length); + i = Math.Min(bytes.Length, length); + Array.Copy(bytes, 0, buffer, bufferOffset, i); + } + else + { + for (i = 0; i < length && nameOffset + i < name.Length; ++i) + { + buffer[bufferOffset + i] = (byte)name[nameOffset + i]; + } + } + + for (; i < length; ++i) + { + buffer[bufferOffset + i] = 0; + } + return bufferOffset + length; + } + /// + /// Add an entry name to the buffer + /// + /// + /// The name to add + /// + /// + /// The buffer to add to + /// + /// + /// The offset into the buffer from which to start adding + /// + /// + /// The number of header bytes to add + /// + /// + /// The index of the next free byte in the buffer + /// + /// TODO: what should be default behavior?(omit upper byte or UTF8?) + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public static int GetNameBytes(StringBuilder name, byte[] buffer, int offset, int length) + { + return GetNameBytes(name, buffer, offset, length, null); + } + + /// + /// Add an entry name to the buffer + /// + /// + /// The name to add + /// + /// + /// The buffer to add to + /// + /// + /// The offset into the buffer from which to start adding + /// + /// + /// The number of header bytes to add + /// + /// + /// + /// + /// The index of the next free byte in the buffer + /// + public static int GetNameBytes(StringBuilder name, byte[] buffer, int offset, int length, Encoding encoding) + { + if (name == null) + { + throw new ArgumentNullException(nameof(name)); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + return GetNameBytes(name.ToString(), 0, buffer, offset, length, encoding); + } + + /// + /// Add an entry name to the buffer + /// + /// The name to add + /// The buffer to add to + /// The offset into the buffer from which to start adding + /// The number of header bytes to add + /// The index of the next free byte in the buffer + /// TODO: what should be default behavior?(omit upper byte or UTF8?) + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public static int GetNameBytes(string name, byte[] buffer, int offset, int length) + { + return GetNameBytes(name, buffer, offset, length, null); + } + + /// + /// Add an entry name to the buffer + /// + /// The name to add + /// The buffer to add to + /// The offset into the buffer from which to start adding + /// The number of header bytes to add + /// + /// The index of the next free byte in the buffer + public static int GetNameBytes(string name, byte[] buffer, int offset, int length, Encoding encoding) + { + if (name == null) + { + throw new ArgumentNullException(nameof(name)); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + return GetNameBytes(name, 0, buffer, offset, length, encoding); + } + /// + /// Add a string to a buffer as a collection of ascii bytes. + /// + /// The string to add + /// The offset of the first character to add. + /// The buffer to add to. + /// The offset to start adding at. + /// The number of ascii characters to add. + /// The next free index in the buffer. 
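GetNameBytes always returns bufferOffset + length and NUL-pads whatever part of the field the name does not fill; a sketch writing a UTF-8 name into the 100-byte name field:

using System.Text;
using ICSharpCode.SharpZipLib.Tar;

class NameFieldWrite
{
    static void Demo()
    {
        byte[] block = new byte[512];

        // Encodes the name into bytes 0..99; the unused tail is NUL-filled.
        int next = TarHeader.GetNameBytes("héllo.txt", block, 0, TarHeader.NAMELEN, Encoding.UTF8);
        // next == TarHeader.NAMELEN; with a null encoding only the low byte
        // of each char would survive (hence the Obsolete overloads).
    }
}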
+ [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public static int GetAsciiBytes(string toAdd, int nameOffset, byte[] buffer, int bufferOffset, int length) + { + return GetAsciiBytes(toAdd, nameOffset, buffer, bufferOffset, length, null); + } + + /// + /// Add a string to a buffer as a collection of ascii bytes. + /// + /// The string to add + /// The offset of the first character to add. + /// The buffer to add to. + /// The offset to start adding at. + /// The number of ascii characters to add. + /// String encoding, or null for ASCII only + /// The next free index in the buffer. + public static int GetAsciiBytes(string toAdd, int nameOffset, byte[] buffer, int bufferOffset, int length, Encoding encoding) + { + if (toAdd == null) + { + throw new ArgumentNullException(nameof(toAdd)); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + int i; + if(encoding == null) + { + for (i = 0; i < length && nameOffset + i < toAdd.Length; ++i) + { + buffer[bufferOffset + i] = (byte)toAdd[nameOffset + i]; + } + } + else + { + // It can be more sufficient if using unsafe code or Span(ToCharArray can be omitted) + var chars = toAdd.ToCharArray(); + // It can be more sufficient if using Span(or unsafe?) and ArrayPool for temporary buffer + var bytes = encoding.GetBytes(chars, nameOffset, Math.Min(toAdd.Length - nameOffset, length)); + i = Math.Min(bytes.Length, length); + Array.Copy(bytes, 0, buffer, bufferOffset, i); + } + // If length is beyond the toAdd string length (which is OK by the prev loop condition), eg if a field has fixed length and the string is shorter, make sure all of the extra chars are written as NULLs, so that the reader func would ignore them and get back the original string + for (; i < length; ++i) + buffer[bufferOffset + i] = 0; + return bufferOffset + length; + } + + /// + /// Put an octal representation of a value into a buffer + /// + /// + /// the value to be converted to octal + /// + /// + /// buffer to store the octal string + /// + /// + /// The offset into the buffer where the value starts + /// + /// + /// The length of the octal string to create + /// + /// + /// The offset of the character next byte after the octal string + /// + public static int GetOctalBytes(long value, byte[] buffer, int offset, int length) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + int localIndex = length - 1; + + // Either a space or null is valid here. We use NULL as per GNUTar + buffer[offset + localIndex] = 0; + --localIndex; + + if (value > 0) + { + for (long v = value; (localIndex >= 0) && (v > 0); --localIndex) + { + buffer[offset + localIndex] = (byte)((byte)'0' + (byte)(v & 7)); + v >>= 3; + } + } + + for (; localIndex >= 0; --localIndex) + { + buffer[offset + localIndex] = (byte)'0'; + } + + return offset + length; + } + + /// + /// Put an octal or binary representation of a value into a buffer + /// + /// Value to be convert to octal + /// The buffer to update + /// The offset into the buffer to store the value + /// The length of the octal string. Must be 12. + /// Index of next byte + private static int GetBinaryOrOctalBytes(long value, byte[] buffer, int offset, int length) + { + if (value > 0x1FFFFFFFF) + { // Octal 77777777777 (11 digits) + // Put value as binary, right-justified into the buffer. Set high order bit of left-most byte. 
+ for (int pos = length - 1; pos > 0; pos--) + { + buffer[offset + pos] = (byte)value; + value = value >> 8; + } + buffer[offset] = 0x80; + return offset + length; + } + return GetOctalBytes(value, buffer, offset, length); + } + + /// + /// Add the checksum integer to header buffer. + /// + /// + /// The header buffer to set the checksum for + /// The offset into the buffer for the checksum + /// The number of header bytes to update. + /// It's formatted differently from the other fields: it has 6 digits, a + /// null, then a space -- rather than digits, a space, then a null. + /// The final space is already there, from checksumming + /// + /// The modified buffer offset + private static void GetCheckSumOctalBytes(long value, byte[] buffer, int offset, int length) + { + GetOctalBytes(value, buffer, offset, length - 1); + } + + /// + /// Compute the checksum for a tar entry header. + /// The checksum field must be all spaces prior to this happening + /// + /// The tar entry's header buffer. + /// The computed checksum. + private static int ComputeCheckSum(byte[] buffer) + { + int sum = 0; + for (int i = 0; i < buffer.Length; ++i) + { + sum += buffer[i]; + } + return sum; + } + + /// + /// Make a checksum for a tar entry ignoring the checksum contents. + /// + /// The tar entry's header buffer. + /// The checksum for the buffer + private static int MakeCheckSum(byte[] buffer) + { + int sum = 0; + for (int i = 0; i < CHKSUMOFS; ++i) + { + sum += buffer[i]; + } + + for (int i = 0; i < CHKSUMLEN; ++i) + { + sum += (byte)' '; + } + + for (int i = CHKSUMOFS + CHKSUMLEN; i < buffer.Length; ++i) + { + sum += buffer[i]; + } + return sum; + } + + private static int GetCTime(DateTime dateTime) + { + return unchecked((int)((dateTime.Ticks - dateTime1970.Ticks) / timeConversionFactor)); + } + + private static DateTime GetDateTimeFromCTime(long ticks) + { + DateTime result; + + try + { + result = new DateTime(dateTime1970.Ticks + ticks * timeConversionFactor); + } + catch (ArgumentOutOfRangeException) + { + result = dateTime1970; + } + return result; + } + + #region Instance Fields + + private string name; + private int mode; + private int userId; + private int groupId; + private long size; + private DateTime modTime; + private int checksum; + private bool isChecksumValid; + private byte typeFlag; + private string linkName; + private string magic; + private string version; + private string userName; + private string groupName; + private int devMajor; + private int devMinor; + + #endregion Instance Fields + + #region Class Fields + + // Values used during recursive operations. 
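ComputeCheckSum and MakeCheckSum above are private, but the rule they implement is simple: sum every header byte, counting the eight checksum bytes as ASCII spaces. A standalone re-implementation, for illustration only:

using ICSharpCode.SharpZipLib.Tar;

class ChecksumRule
{
    // Same arithmetic as the private MakeCheckSum: bytes before the checksum
    // field, eight spaces in its place, then the bytes after it.
    static int TarChecksum(byte[] block)
    {
        int sum = 0;
        for (int i = 0; i < block.Length; i++)
        {
            bool inChecksumField = i >= TarHeader.CHKSUMOFS &&
                                   i < TarHeader.CHKSUMOFS + TarHeader.CHKSUMLEN;
            sum += inChecksumField ? (byte)' ' : block[i];
        }
        return sum;
    }
}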
+ static internal int userIdAsSet; + + static internal int groupIdAsSet; + static internal string userNameAsSet; + static internal string groupNameAsSet = "None"; + + static internal int defaultUserId; + static internal int defaultGroupId; + static internal string defaultGroupName = "None"; + static internal string defaultUser; + + #endregion Class Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarHeader.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarHeader.cs.meta new file mode 100644 index 0000000..8e0f9cd --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarHeader.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 6d3287167e2584c75a2c9cca394944ad +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarInputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarInputStream.cs new file mode 100644 index 0000000..f1a3622 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarInputStream.cs @@ -0,0 +1,771 @@ +using System; +using System.IO; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Tar +{ + /// + /// The TarInputStream reads a UNIX tar archive as an InputStream. + /// methods are provided to position at each successive entry in + /// the archive, and the read each entry as a normal input stream + /// using read(). + /// + public class TarInputStream : Stream + { + #region Constructors + + /// + /// Construct a TarInputStream with default block factor + /// + /// stream to source data from + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public TarInputStream(Stream inputStream) + : this(inputStream, TarBuffer.DefaultBlockFactor, null) + { + } + /// + /// Construct a TarInputStream with default block factor + /// + /// stream to source data from + /// The used for the Name fields, or null for ASCII only + public TarInputStream(Stream inputStream, Encoding nameEncoding) + : this(inputStream, TarBuffer.DefaultBlockFactor, nameEncoding) + { + } + + /// + /// Construct a TarInputStream with user specified block factor + /// + /// stream to source data from + /// block factor to apply to archive + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public TarInputStream(Stream inputStream, int blockFactor) + { + this.inputStream = inputStream; + tarBuffer = TarBuffer.CreateInputTarBuffer(inputStream, blockFactor); + encoding = null; + } + + /// + /// Construct a TarInputStream with user specified block factor + /// + /// stream to source data from + /// block factor to apply to archive + /// The used for the Name fields, or null for ASCII only + public TarInputStream(Stream inputStream, int blockFactor, Encoding nameEncoding) + { + this.inputStream = inputStream; + tarBuffer = TarBuffer.CreateInputTarBuffer(inputStream, blockFactor); + encoding = nameEncoding; + } + + #endregion Constructors + + /// + /// Gets or sets a flag indicating ownership of underlying stream. + /// When the flag is true will close the underlying stream also. + /// + /// The default value is true. 
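The TarInputStream constructors above take an optional block factor and name encoding, and IsStreamOwner decides whether disposing the tar stream also closes the wrapped stream. A sketch with a hypothetical archive path:

using System.IO;
using System.Text;
using ICSharpCode.SharpZipLib.Tar;

class OpeningATar
{
    static void Demo()
    {
        using (FileStream fs = File.OpenRead("archive.tar"))
        using (var tarIn = new TarInputStream(fs, Encoding.UTF8))
        {
            // Leave fs open after tarIn is disposed (the default, true, closes it).
            tarIn.IsStreamOwner = false;
        }
    }
}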
+ public bool IsStreamOwner + { + get { return tarBuffer.IsStreamOwner; } + set { tarBuffer.IsStreamOwner = value; } + } + + #region Stream Overrides + + /// + /// Gets a value indicating whether the current stream supports reading + /// + public override bool CanRead + { + get + { + return inputStream.CanRead; + } + } + + /// + /// Gets a value indicating whether the current stream supports seeking + /// This property always returns false. + /// + public override bool CanSeek + { + get + { + return false; + } + } + + /// + /// Gets a value indicating if the stream supports writing. + /// This property always returns false. + /// + public override bool CanWrite + { + get + { + return false; + } + } + + /// + /// The length in bytes of the stream + /// + public override long Length + { + get + { + return inputStream.Length; + } + } + + /// + /// Gets or sets the position within the stream. + /// Setting the Position is not supported and throws a NotSupportedExceptionNotSupportedException + /// + /// Any attempt to set position + public override long Position + { + get + { + return inputStream.Position; + } + set + { + throw new NotSupportedException("TarInputStream Seek not supported"); + } + } + + /// + /// Flushes the baseInputStream + /// + public override void Flush() + { + inputStream.Flush(); + } + + /// + /// Set the streams position. This operation is not supported and will throw a NotSupportedException + /// + /// The offset relative to the origin to seek to. + /// The to start seeking from. + /// The new position in the stream. + /// Any access + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException("TarInputStream Seek not supported"); + } + + /// + /// Sets the length of the stream + /// This operation is not supported and will throw a NotSupportedException + /// + /// The new stream length. + /// Any access + public override void SetLength(long value) + { + throw new NotSupportedException("TarInputStream SetLength not supported"); + } + + /// + /// Writes a block of bytes to this stream using data from a buffer. + /// This operation is not supported and will throw a NotSupportedException + /// + /// The buffer containing bytes to write. + /// The offset in the buffer of the frist byte to write. + /// The number of bytes to write. + /// Any access + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException("TarInputStream Write not supported"); + } + + /// + /// Writes a byte to the current position in the file stream. + /// This operation is not supported and will throw a NotSupportedException + /// + /// The byte value to write. + /// Any access + public override void WriteByte(byte value) + { + throw new NotSupportedException("TarInputStream WriteByte not supported"); + } + + /// + /// Reads a byte from the current tar archive entry. + /// + /// A byte cast to an int; -1 if the at the end of the stream. + public override int ReadByte() + { + byte[] oneByteBuffer = new byte[1]; + int num = Read(oneByteBuffer, 0, 1); + if (num <= 0) + { + // return -1 to indicate that no byte was read. + return -1; + } + return oneByteBuffer[0]; + } + + /// + /// Reads bytes from the current tar archive entry. + /// + /// This method is aware of the boundaries of the current + /// entry in the archive and will deal with them appropriately + /// + /// + /// The buffer into which to place bytes read. + /// + /// + /// The offset at which to place bytes read. + /// + /// + /// The number of bytes to read. 
+ /// + /// + /// The number of bytes read, or 0 at end of stream/EOF. + /// + public override int Read(byte[] buffer, int offset, int count) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + int totalRead = 0; + + if (entryOffset >= entrySize) + { + return 0; + } + + long numToRead = count; + + if ((numToRead + entryOffset) > entrySize) + { + numToRead = entrySize - entryOffset; + } + + if (readBuffer != null) + { + int sz = (numToRead > readBuffer.Length) ? readBuffer.Length : (int)numToRead; + + Array.Copy(readBuffer, 0, buffer, offset, sz); + + if (sz >= readBuffer.Length) + { + readBuffer = null; + } + else + { + int newLen = readBuffer.Length - sz; + byte[] newBuf = new byte[newLen]; + Array.Copy(readBuffer, sz, newBuf, 0, newLen); + readBuffer = newBuf; + } + + totalRead += sz; + numToRead -= sz; + offset += sz; + } + + while (numToRead > 0) + { + byte[] rec = tarBuffer.ReadBlock(); + if (rec == null) + { + // Unexpected EOF! + throw new TarException("unexpected EOF with " + numToRead + " bytes unread"); + } + + var sz = (int)numToRead; + int recLen = rec.Length; + + if (recLen > sz) + { + Array.Copy(rec, 0, buffer, offset, sz); + readBuffer = new byte[recLen - sz]; + Array.Copy(rec, sz, readBuffer, 0, recLen - sz); + } + else + { + sz = recLen; + Array.Copy(rec, 0, buffer, offset, recLen); + } + + totalRead += sz; + numToRead -= sz; + offset += sz; + } + + entryOffset += totalRead; + + return totalRead; + } + + /// + /// Closes this stream. Calls the TarBuffer's close() method. + /// The underlying stream is closed by the TarBuffer. + /// + protected override void Dispose(bool disposing) + { + if (disposing) + { + tarBuffer.Close(); + } + } + + #endregion Stream Overrides + + /// + /// Set the entry factory for this instance. + /// + /// The factory for creating new entries + public void SetEntryFactory(IEntryFactory factory) + { + entryFactory = factory; + } + + /// + /// Get the record size being used by this stream's TarBuffer. + /// + public int RecordSize + { + get { return tarBuffer.RecordSize; } + } + + /// + /// Get the record size being used by this stream's TarBuffer. + /// + /// + /// TarBuffer record size. + /// + [Obsolete("Use RecordSize property instead")] + public int GetRecordSize() + { + return tarBuffer.RecordSize; + } + + /// + /// Get the available data that can be read from the current + /// entry in the archive. This does not indicate how much data + /// is left in the entire archive, only in the current entry. + /// This value is determined from the entry's size header field + /// and the amount of data already read from the current entry. + /// + /// + /// The number of available bytes for the current entry. + /// + public long Available + { + get + { + return entrySize - entryOffset; + } + } + + /// + /// Skip bytes in the input buffer. This skips bytes in the + /// current entry's data, not the entire archive, and will + /// stop at the end of the current entry's data if the number + /// to skip extends beyond that point. + /// + /// + /// The number of bytes to skip. + /// + public void Skip(long skipCount) + { + // TODO: REVIEW efficiency of TarInputStream.Skip + // This is horribly inefficient, but it ensures that we + // properly skip over bytes via the TarBuffer... + // + byte[] skipBuf = new byte[8 * 1024]; + + for (long num = skipCount; num > 0;) + { + int toRead = num > skipBuf.Length ? 
skipBuf.Length : (int)num; + int numRead = Read(skipBuf, 0, toRead); + + if (numRead == -1) + { + break; + } + + num -= numRead; + } + } + + /// + /// Return a value of true if marking is supported; false otherwise. + /// + /// Currently marking is not supported, the return value is always false. + public bool IsMarkSupported + { + get + { + return false; + } + } + + /// + /// Since we do not support marking just yet, we do nothing. + /// + /// + /// The limit to mark. + /// + public void Mark(int markLimit) + { + } + + /// + /// Since we do not support marking just yet, we do nothing. + /// + public void Reset() + { + } + + /// + /// Get the next entry in this tar archive. This will skip + /// over any remaining data in the current entry, if there + /// is one, and place the input stream at the header of the + /// next entry, and read the header and instantiate a new + /// TarEntry from the header bytes and return that entry. + /// If there are no more entries in the archive, null will + /// be returned to indicate that the end of the archive has + /// been reached. + /// + /// + /// The next TarEntry in the archive, or null. + /// + public TarEntry GetNextEntry() + { + if (hasHitEOF) + { + return null; + } + + if (currentEntry != null) + { + SkipToNextEntry(); + } + + byte[] headerBuf = tarBuffer.ReadBlock(); + + if (headerBuf == null) + { + hasHitEOF = true; + } + else if (TarBuffer.IsEndOfArchiveBlock(headerBuf)) + { + hasHitEOF = true; + + // Read the second zero-filled block + tarBuffer.ReadBlock(); + } + else + { + hasHitEOF = false; + } + + if (hasHitEOF) + { + currentEntry = null; + } + else + { + try + { + var header = new TarHeader(); + header.ParseBuffer(headerBuf, encoding); + if (!header.IsChecksumValid) + { + throw new TarException("Header checksum is invalid"); + } + this.entryOffset = 0; + this.entrySize = header.Size; + + StringBuilder longName = null; + + if (header.TypeFlag == TarHeader.LF_GNU_LONGNAME) + { + byte[] nameBuffer = new byte[TarBuffer.BlockSize]; + long numToRead = this.entrySize; + + longName = new StringBuilder(); + + while (numToRead > 0) + { + int numRead = this.Read(nameBuffer, 0, (numToRead > nameBuffer.Length ? nameBuffer.Length : (int)numToRead)); + + if (numRead == -1) + { + throw new InvalidHeaderException("Failed to read long name entry"); + } + + longName.Append(TarHeader.ParseName(nameBuffer, 0, numRead, encoding).ToString()); + numToRead -= numRead; + } + + SkipToNextEntry(); + headerBuf = this.tarBuffer.ReadBlock(); + } + else if (header.TypeFlag == TarHeader.LF_GHDR) + { // POSIX global extended header + // Ignore things we dont understand completely for now + SkipToNextEntry(); + headerBuf = this.tarBuffer.ReadBlock(); + } + else if (header.TypeFlag == TarHeader.LF_XHDR) + { // POSIX extended header + byte[] nameBuffer = new byte[TarBuffer.BlockSize]; + long numToRead = this.entrySize; + + var xhr = new TarExtendedHeaderReader(); + + while (numToRead > 0) + { + int numRead = this.Read(nameBuffer, 0, (numToRead > nameBuffer.Length ? 
nameBuffer.Length : (int)numToRead));
+
+ if (numRead == -1)
+ {
+ throw new InvalidHeaderException("Failed to read long name entry");
+ }
+
+ xhr.Read(nameBuffer, numRead);
+ numToRead -= numRead;
+ }
+
+ if (xhr.Headers.TryGetValue("path", out string name))
+ {
+ longName = new StringBuilder(name);
+ }
+
+ SkipToNextEntry();
+ headerBuf = this.tarBuffer.ReadBlock();
+ }
+ else if (header.TypeFlag == TarHeader.LF_GNU_VOLHDR)
+ {
+ // TODO: could show volume name when verbose
+ SkipToNextEntry();
+ headerBuf = this.tarBuffer.ReadBlock();
+ }
+ else if (header.TypeFlag != TarHeader.LF_NORMAL &&
+ header.TypeFlag != TarHeader.LF_OLDNORM &&
+ header.TypeFlag != TarHeader.LF_LINK &&
+ header.TypeFlag != TarHeader.LF_SYMLINK &&
+ header.TypeFlag != TarHeader.LF_DIR)
+ {
+ // Ignore things we don't understand completely for now
+ SkipToNextEntry();
+ headerBuf = tarBuffer.ReadBlock();
+ }
+
+ if (entryFactory == null)
+ {
+ currentEntry = new TarEntry(headerBuf, encoding);
+ if (longName != null)
+ {
+ currentEntry.Name = longName.ToString();
+ }
+ }
+ else
+ {
+ currentEntry = entryFactory.CreateEntry(headerBuf);
+ }
+
+ // Magic was checked here for 'ustar' but there are multiple valid possibilities
+ // so this is not done anymore.
+
+ entryOffset = 0;
+
+ // TODO: Review - how do we resolve this discrepancy?
+ entrySize = this.currentEntry.Size;
+ }
+ catch (InvalidHeaderException ex)
+ {
+ entrySize = 0;
+ entryOffset = 0;
+ currentEntry = null;
+ string errorText = string.Format("Bad header in record {0} block {1} {2}",
+ tarBuffer.CurrentRecord, tarBuffer.CurrentBlock, ex.Message);
+ throw new InvalidHeaderException(errorText);
+ }
+ }
+ return currentEntry;
+ }
+
+ ///
+ /// Copies the contents of the current tar archive entry directly into
+ /// an output stream.
+ ///
+ ///
+ /// The OutputStream into which to write the entry's data.
+ ///
+ public void CopyEntryContents(Stream outputStream)
+ {
+ byte[] tempBuffer = new byte[32 * 1024];
+
+ while (true)
+ {
+ int numRead = Read(tempBuffer, 0, tempBuffer.Length);
+ if (numRead <= 0)
+ {
+ break;
+ }
+ outputStream.Write(tempBuffer, 0, numRead);
+ }
+ }
+
+ private void SkipToNextEntry()
+ {
+ long numToSkip = entrySize - entryOffset;
+
+ if (numToSkip > 0)
+ {
+ Skip(numToSkip);
+ }
+
+ readBuffer = null;
+ }
+
+ ///
+ /// This interface is provided, along with the SetEntryFactory method, to allow
+ /// the programmer to have their own TarEntry subclass instantiated for the
+ /// entries returned from GetNextEntry.
+ ///
+ public interface IEntryFactory
+ {
+ // This interface does not consider name encoding yet.
+ // How should this interface handle it?
+ ///
+ /// Create an entry based on name alone
+ ///
+ ///
+ /// Name of the new tar entry to create
+ ///
+ /// created TarEntry or descendant class
+ TarEntry CreateEntry(string name);
+
+ ///
+ /// Create an instance based on an actual file
+ ///
+ ///
+ /// Name of file to represent in the entry
+ ///
+ ///
+ /// Created TarEntry or descendant class
+ ///
+ TarEntry CreateEntryFromFile(string fileName);
+
+ ///
+ /// Create a tar entry based on the header information passed
+ ///
+ ///
+ /// Buffer containing header information to create an entry from.
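// --------------------------------------------------------------------------
// Illustrative sketch (not part of this change): draining an archive with the
// members above. It assumes a (Stream, Encoding) constructor, consistent with
// the encoding field this class stores; tarPath and destDir are placeholders.
// Requires: using System.IO; using System.Text;
public static void ExtractAll(string tarPath, string destDir)
{
    using (var tarIn = new TarInputStream(File.OpenRead(tarPath), Encoding.UTF8))
    {
        TarEntry entry;
        while ((entry = tarIn.GetNextEntry()) != null)  // null marks end of archive
        {
            if (entry.IsDirectory) continue;
            string outPath = Path.Combine(destDir, entry.Name);
            Directory.CreateDirectory(Path.GetDirectoryName(outPath));
            using (var outStream = File.Create(outPath))
            {
                tarIn.CopyEntryContents(outStream);     // copies the Available bytes
            }
        }
    }
}
// --------------------------------------------------------------------------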
+ /// + /// + /// Created TarEntry or descendant class + /// + TarEntry CreateEntry(byte[] headerBuffer); + } + + /// + /// Standard entry factory class creating instances of the class TarEntry + /// + public class EntryFactoryAdapter : IEntryFactory + { + Encoding nameEncoding; + /// + /// Construct standard entry factory class with ASCII name encoding + /// + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public EntryFactoryAdapter() + { + } + /// + /// Construct standard entry factory with name encoding + /// + /// The used for the Name fields, or null for ASCII only + public EntryFactoryAdapter(Encoding nameEncoding) + { + this.nameEncoding = nameEncoding; + } + /// + /// Create a based on named + /// + /// The name to use for the entry + /// A new + public TarEntry CreateEntry(string name) + { + return TarEntry.CreateTarEntry(name); + } + + /// + /// Create a tar entry with details obtained from file + /// + /// The name of the file to retrieve details from. + /// A new + public TarEntry CreateEntryFromFile(string fileName) + { + return TarEntry.CreateEntryFromFile(fileName); + } + + /// + /// Create an entry based on details in header + /// + /// The buffer containing entry details. + /// A new + public TarEntry CreateEntry(byte[] headerBuffer) + { + return new TarEntry(headerBuffer, nameEncoding); + } + } + + #region Instance Fields + + /// + /// Flag set when last block has been read + /// + protected bool hasHitEOF; + + /// + /// Size of this entry as recorded in header + /// + protected long entrySize; + + /// + /// Number of bytes read for this entry so far + /// + protected long entryOffset; + + /// + /// Buffer used with calls to Read() + /// + protected byte[] readBuffer; + + /// + /// Working buffer + /// + protected TarBuffer tarBuffer; + + /// + /// Current entry being read + /// + private TarEntry currentEntry; + + /// + /// Factory used to create TarEntry or descendant class instance + /// + protected IEntryFactory entryFactory; + + /// + /// Stream used as the source of input data. + /// + private readonly Stream inputStream; + + private readonly Encoding encoding; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarInputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarInputStream.cs.meta new file mode 100644 index 0000000..c4e00d7 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarInputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 9be97b99d6e1040eabd2c7a6361bdfbf +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarOutputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarOutputStream.cs new file mode 100644 index 0000000..7c52e6c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarOutputStream.cs @@ -0,0 +1,522 @@ +using System; +using System.IO; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Tar +{ + /// + /// The TarOutputStream writes a UNIX tar archive as an OutputStream. + /// Methods are provided to put entries, and then write their contents + /// by writing to this stream using write(). 
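// --------------------------------------------------------------------------
// Illustrative sketch (not part of this change): the put/write/close cycle
// this class expects, using the (Stream, Encoding) constructor below and
// TarEntry.CreateEntryFromFile from TarEntry.cs; paths are placeholders.
// Requires: using System.IO; using System.Text;
public static void CreateTar(string tarPath, string[] files)
{
    using (var tarOut = new TarOutputStream(File.Create(tarPath), Encoding.UTF8))
    {
        foreach (string file in files)
        {
            TarEntry entry = TarEntry.CreateEntryFromFile(file);
            tarOut.PutNextEntry(entry);    // emits the header block
            using (var src = File.OpenRead(file))
            {
                src.CopyTo(tarOut);        // must supply exactly entry.Size bytes
            }
            tarOut.CloseEntry();           // pads out the final partial block
        }
    }  // Dispose calls Finish(), which writes the end-of-archive blocks
}
// --------------------------------------------------------------------------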
+ /// + /// public + public class TarOutputStream : Stream + { + #region Constructors + + /// + /// Construct TarOutputStream using default block factor + /// + /// stream to write to + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public TarOutputStream(Stream outputStream) + : this(outputStream, TarBuffer.DefaultBlockFactor) + { + } + + /// + /// Construct TarOutputStream using default block factor + /// + /// stream to write to + /// The used for the Name fields, or null for ASCII only + public TarOutputStream(Stream outputStream, Encoding nameEncoding) + : this(outputStream, TarBuffer.DefaultBlockFactor, nameEncoding) + { + } + + /// + /// Construct TarOutputStream with user specified block factor + /// + /// stream to write to + /// blocking factor + [Obsolete("No Encoding for Name field is specified, any non-ASCII bytes will be discarded")] + public TarOutputStream(Stream outputStream, int blockFactor) + { + if (outputStream == null) + { + throw new ArgumentNullException(nameof(outputStream)); + } + + this.outputStream = outputStream; + buffer = TarBuffer.CreateOutputTarBuffer(outputStream, blockFactor); + + assemblyBuffer = new byte[TarBuffer.BlockSize]; + blockBuffer = new byte[TarBuffer.BlockSize]; + } + + /// + /// Construct TarOutputStream with user specified block factor + /// + /// stream to write to + /// blocking factor + /// The used for the Name fields, or null for ASCII only + public TarOutputStream(Stream outputStream, int blockFactor, Encoding nameEncoding) + { + if (outputStream == null) + { + throw new ArgumentNullException(nameof(outputStream)); + } + + this.outputStream = outputStream; + buffer = TarBuffer.CreateOutputTarBuffer(outputStream, blockFactor); + + assemblyBuffer = new byte[TarBuffer.BlockSize]; + blockBuffer = new byte[TarBuffer.BlockSize]; + + this.nameEncoding = nameEncoding; + } + + #endregion Constructors + + /// + /// Gets or sets a flag indicating ownership of underlying stream. + /// When the flag is true will close the underlying stream also. + /// + /// The default value is true. + public bool IsStreamOwner + { + get { return buffer.IsStreamOwner; } + set { buffer.IsStreamOwner = value; } + } + + /// + /// true if the stream supports reading; otherwise, false. + /// + public override bool CanRead + { + get + { + return outputStream.CanRead; + } + } + + /// + /// true if the stream supports seeking; otherwise, false. + /// + public override bool CanSeek + { + get + { + return outputStream.CanSeek; + } + } + + /// + /// true if stream supports writing; otherwise, false. + /// + public override bool CanWrite + { + get + { + return outputStream.CanWrite; + } + } + + /// + /// length of stream in bytes + /// + public override long Length + { + get + { + return outputStream.Length; + } + } + + /// + /// gets or sets the position within the current stream. + /// + public override long Position + { + get + { + return outputStream.Position; + } + set + { + outputStream.Position = value; + } + } + + /// + /// set the position within the current stream + /// + /// The offset relative to the to seek to + /// The to seek from. + /// The new position in the stream. + public override long Seek(long offset, SeekOrigin origin) + { + return outputStream.Seek(offset, origin); + } + + /// + /// Set the length of the current stream + /// + /// The new stream length. 
+ public override void SetLength(long value) + { + outputStream.SetLength(value); + } + + /// + /// Read a byte from the stream and advance the position within the stream + /// by one byte or returns -1 if at the end of the stream. + /// + /// The byte value or -1 if at end of stream + public override int ReadByte() + { + return outputStream.ReadByte(); + } + + /// + /// read bytes from the current stream and advance the position within the + /// stream by the number of bytes read. + /// + /// The buffer to store read bytes in. + /// The index into the buffer to being storing bytes at. + /// The desired number of bytes to read. + /// The total number of bytes read, or zero if at the end of the stream. + /// The number of bytes may be less than the count + /// requested if data is not available. + public override int Read(byte[] buffer, int offset, int count) + { + return outputStream.Read(buffer, offset, count); + } + + /// + /// All buffered data is written to destination + /// + public override void Flush() + { + outputStream.Flush(); + } + + /// + /// Ends the TAR archive without closing the underlying OutputStream. + /// The result is that the EOF block of nulls is written. + /// + public void Finish() + { + if (IsEntryOpen) + { + CloseEntry(); + } + WriteEofBlock(); + } + + /// + /// Ends the TAR archive and closes the underlying OutputStream. + /// + /// This means that Finish() is called followed by calling the + /// TarBuffer's Close(). + protected override void Dispose(bool disposing) + { + if (!isClosed) + { + isClosed = true; + Finish(); + buffer.Close(); + } + } + + /// + /// Get the record size being used by this stream's TarBuffer. + /// + public int RecordSize + { + get { return buffer.RecordSize; } + } + + /// + /// Get the record size being used by this stream's TarBuffer. + /// + /// + /// The TarBuffer record size. + /// + [Obsolete("Use RecordSize property instead")] + public int GetRecordSize() + { + return buffer.RecordSize; + } + + /// + /// Get a value indicating whether an entry is open, requiring more data to be written. + /// + private bool IsEntryOpen + { + get { return (currBytes < currSize); } + } + + /// + /// Put an entry on the output stream. This writes the entry's + /// header and positions the output stream for writing + /// the contents of the entry. Once this method is called, the + /// stream is ready for calls to write() to write the entry's + /// contents. Once the contents are written, closeEntry() + /// MUST be called to ensure that all buffered data + /// is completely written to the output stream. + /// + /// + /// The TarEntry to be written to the archive. + /// + public void PutNextEntry(TarEntry entry) + { + if (entry == null) + { + throw new ArgumentNullException(nameof(entry)); + } + + var namelen = nameEncoding != null ? 
nameEncoding.GetByteCount(entry.TarHeader.Name) : entry.TarHeader.Name.Length; + + if (namelen > TarHeader.NAMELEN) + { + var longHeader = new TarHeader(); + longHeader.TypeFlag = TarHeader.LF_GNU_LONGNAME; + longHeader.Name = longHeader.Name + "././@LongLink"; + longHeader.Mode = 420;//644 by default + longHeader.UserId = entry.UserId; + longHeader.GroupId = entry.GroupId; + longHeader.GroupName = entry.GroupName; + longHeader.UserName = entry.UserName; + longHeader.LinkName = ""; + longHeader.Size = namelen + 1; // Plus one to avoid dropping last char + + longHeader.WriteHeader(blockBuffer, nameEncoding); + buffer.WriteBlock(blockBuffer); // Add special long filename header block + + int nameCharIndex = 0; + + while (nameCharIndex < namelen + 1 /* we've allocated one for the null char, now we must make sure it gets written out */) + { + Array.Clear(blockBuffer, 0, blockBuffer.Length); + TarHeader.GetAsciiBytes(entry.TarHeader.Name, nameCharIndex, this.blockBuffer, 0, TarBuffer.BlockSize, nameEncoding); // This func handles OK the extra char out of string length + nameCharIndex += TarBuffer.BlockSize; + buffer.WriteBlock(blockBuffer); + } + } + + entry.WriteEntryHeader(blockBuffer, nameEncoding); + buffer.WriteBlock(blockBuffer); + + currBytes = 0; + + currSize = entry.IsDirectory ? 0 : entry.Size; + } + + /// + /// Close an entry. This method MUST be called for all file + /// entries that contain data. The reason is that we must + /// buffer data written to the stream in order to satisfy + /// the buffer's block based writes. Thus, there may be + /// data fragments still being assembled that must be written + /// to the output stream before this entry is closed and the + /// next entry written. + /// + public void CloseEntry() + { + if (assemblyBufferLength > 0) + { + Array.Clear(assemblyBuffer, assemblyBufferLength, assemblyBuffer.Length - assemblyBufferLength); + + buffer.WriteBlock(assemblyBuffer); + + currBytes += assemblyBufferLength; + assemblyBufferLength = 0; + } + + if (currBytes < currSize) + { + string errorText = string.Format( + "Entry closed at '{0}' before the '{1}' bytes specified in the header were written", + currBytes, currSize); + throw new TarException(errorText); + } + } + + /// + /// Writes a byte to the current tar archive entry. + /// This method simply calls Write(byte[], int, int). + /// + /// + /// The byte to be written. + /// + public override void WriteByte(byte value) + { + Write(new byte[] { value }, 0, 1); + } + + /// + /// Writes bytes to the current tar archive entry. This method + /// is aware of the current entry and will throw an exception if + /// you attempt to write bytes past the length specified for the + /// current entry. The method is also (painfully) aware of the + /// record buffering required by TarBuffer, and manages buffers + /// that are not a multiple of recordsize in length, including + /// assembling records from small buffers. + /// + /// + /// The buffer to write to the archive. + /// + /// + /// The offset in the buffer from which to get bytes. + /// + /// + /// The number of bytes to write. 
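// --------------------------------------------------------------------------
// The blocking behaviour described above in one formula: entry data always
// occupies whole 512-byte blocks on disk (512 here is the usual value of
// TarBuffer.BlockSize; an assumption, since that constant is defined in
// TarBuffer.cs, not in this file).
public static long PaddedSize(long entrySize)
{
    const int blockSize = 512;
    return (entrySize + blockSize - 1) / blockSize * blockSize;
}
// PaddedSize(0) == 0, PaddedSize(1) == 512, PaddedSize(513) == 1024
// --------------------------------------------------------------------------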
+ /// + public override void Write(byte[] buffer, int offset, int count) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (offset < 0) + { + throw new ArgumentOutOfRangeException(nameof(offset), "Cannot be negative"); + } + + if (buffer.Length - offset < count) + { + throw new ArgumentException("offset and count combination is invalid"); + } + + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count), "Cannot be negative"); + } + + if ((currBytes + count) > currSize) + { + string errorText = string.Format("request to write '{0}' bytes exceeds size in header of '{1}' bytes", + count, this.currSize); + throw new ArgumentOutOfRangeException(nameof(count), errorText); + } + + // + // We have to deal with assembly!!! + // The programmer can be writing little 32 byte chunks for all + // we know, and we must assemble complete blocks for writing. + // TODO REVIEW Maybe this should be in TarBuffer? Could that help to + // eliminate some of the buffer copying. + // + if (assemblyBufferLength > 0) + { + if ((assemblyBufferLength + count) >= blockBuffer.Length) + { + int aLen = blockBuffer.Length - assemblyBufferLength; + + Array.Copy(assemblyBuffer, 0, blockBuffer, 0, assemblyBufferLength); + Array.Copy(buffer, offset, blockBuffer, assemblyBufferLength, aLen); + + this.buffer.WriteBlock(blockBuffer); + + currBytes += blockBuffer.Length; + + offset += aLen; + count -= aLen; + + assemblyBufferLength = 0; + } + else + { + Array.Copy(buffer, offset, assemblyBuffer, assemblyBufferLength, count); + offset += count; + assemblyBufferLength += count; + count -= count; + } + } + + // + // When we get here we have EITHER: + // o An empty "assembly" buffer. + // o No bytes to write (count == 0) + // + while (count > 0) + { + if (count < blockBuffer.Length) + { + Array.Copy(buffer, offset, assemblyBuffer, assemblyBufferLength, count); + assemblyBufferLength += count; + break; + } + + this.buffer.WriteBlock(buffer, offset); + + int bufferLength = blockBuffer.Length; + currBytes += bufferLength; + count -= bufferLength; + offset += bufferLength; + } + } + + /// + /// Write an EOF (end of archive) block to the tar archive. + /// The end of the archive is indicated by two blocks consisting entirely of zero bytes. + /// + private void WriteEofBlock() + { + Array.Clear(blockBuffer, 0, blockBuffer.Length); + buffer.WriteBlock(blockBuffer); + buffer.WriteBlock(blockBuffer); + } + + #region Instance Fields + + /// + /// bytes written for this entry so far + /// + private long currBytes; + + /// + /// current 'Assembly' buffer length + /// + private int assemblyBufferLength; + + /// + /// Flag indicating whether this instance has been closed or not. 
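// --------------------------------------------------------------------------
// Illustrative sketch (not part of this change): embedding a tar in a stream
// that must stay open. Finish() writes the two zero trailer blocks shown in
// WriteEofBlock above; clearing IsStreamOwner is assumed to keep the
// underlying stream open, as the close behaviour lives in TarBuffer.
// Requires: using System.IO; using System.Text;
var payload = new MemoryStream();
using (var tarOut = new TarOutputStream(payload, Encoding.UTF8) { IsStreamOwner = false })
{
    // ... PutNextEntry / Write / CloseEntry as usual ...
    tarOut.Finish();      // end-of-archive marker; payload stays open
}
payload.Position = 0;     // payload now holds a complete archive
// --------------------------------------------------------------------------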
+ /// + private bool isClosed; + + /// + /// Size for the current entry + /// + protected long currSize; + + /// + /// single block working buffer + /// + protected byte[] blockBuffer; + + /// + /// 'Assembly' buffer used to assemble data before writing + /// + protected byte[] assemblyBuffer; + + /// + /// TarBuffer used to provide correct blocking factor + /// + protected TarBuffer buffer; + + /// + /// the destination stream for the archive contents + /// + protected Stream outputStream; + + /// + /// name encoding + /// + protected Encoding nameEncoding; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarOutputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarOutputStream.cs.meta new file mode 100644 index 0000000..a832cdc --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Tar/TarOutputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 98c4e5ebd7bad47738bbe05e751562e8 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip.meta new file mode 100644 index 0000000..e33baf6 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 1c9bbae3c49b546dd9618c31d0879747 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression.meta new file mode 100644 index 0000000..c6940a4 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: eed83bece5b264d068c76aa50226ac37 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Deflater.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Deflater.cs new file mode 100644 index 0000000..3dbe98c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Deflater.cs @@ -0,0 +1,604 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Zip.Compression +{ + /// + /// This is the Deflater class. The deflater class compresses input + /// with the deflate algorithm described in RFC 1951. It has several + /// compression levels and three different strategies described below. + /// + /// This class is not thread safe. This is inherent in the API, due + /// to the split of deflate and setInput. + /// + /// author of the original java version : Jochen Hoenicke + /// + public class Deflater + { + #region Deflater Documentation + + /* + * The Deflater can do the following state transitions: + * + * (1) -> INIT_STATE ----> INIT_FINISHING_STATE ---. + * / | (2) (5) | + * / v (5) | + * (3)| SETDICT_STATE ---> SETDICT_FINISHING_STATE |(3) + * \ | (3) | ,--------' + * | | | (3) / + * v v (5) v v + * (1) -> BUSY_STATE ----> FINISHING_STATE + * | (6) + * v + * FINISHED_STATE + * \_____________________________________/ + * | (7) + * v + * CLOSED_STATE + * + * (1) If we should produce a header we start in INIT_STATE, otherwise + * we start in BUSY_STATE. 
+ * (2) A dictionary may be set only when we are in INIT_STATE, then + * we change the state as indicated. + * (3) Whether a dictionary is set or not, on the first call of deflate + * we change to BUSY_STATE. + * (4) -- intentionally left blank -- :) + * (5) FINISHING_STATE is entered, when flush() is called to indicate that + * there is no more INPUT. There are also states indicating, that + * the header wasn't written yet. + * (6) FINISHED_STATE is entered, when everything has been flushed to the + * internal pending output buffer. + * (7) At any time (7) + * + */ + + #endregion Deflater Documentation + + #region Public Constants + + /// + /// The best and slowest compression level. This tries to find very + /// long and distant string repetitions. + /// + public const int BEST_COMPRESSION = 9; + + /// + /// The worst but fastest compression level. + /// + public const int BEST_SPEED = 1; + + /// + /// The default compression level. + /// + public const int DEFAULT_COMPRESSION = -1; + + /// + /// This level won't compress at all but output uncompressed blocks. + /// + public const int NO_COMPRESSION = 0; + + /// + /// The compression method. This is the only method supported so far. + /// There is no need to use this constant at all. + /// + public const int DEFLATED = 8; + + #endregion Public Constants + + #region Public Enum + + /// + /// Compression Level as an enum for safer use + /// + public enum CompressionLevel + { + /// + /// The best and slowest compression level. This tries to find very + /// long and distant string repetitions. + /// + BEST_COMPRESSION = Deflater.BEST_COMPRESSION, + + /// + /// The worst but fastest compression level. + /// + BEST_SPEED = Deflater.BEST_SPEED, + + /// + /// The default compression level. + /// + DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION, + + /// + /// This level won't compress at all but output uncompressed blocks. + /// + NO_COMPRESSION = Deflater.NO_COMPRESSION, + + /// + /// The compression method. This is the only method supported so far. + /// There is no need to use this constant at all. + /// + DEFLATED = Deflater.DEFLATED + } + + #endregion Public Enum + + #region Local Constants + + private const int IS_SETDICT = 0x01; + private const int IS_FLUSHING = 0x04; + private const int IS_FINISHING = 0x08; + + private const int INIT_STATE = 0x00; + private const int SETDICT_STATE = 0x01; + + // private static int INIT_FINISHING_STATE = 0x08; + // private static int SETDICT_FINISHING_STATE = 0x09; + private const int BUSY_STATE = 0x10; + + private const int FLUSHING_STATE = 0x14; + private const int FINISHING_STATE = 0x1c; + private const int FINISHED_STATE = 0x1e; + private const int CLOSED_STATE = 0x7f; + + #endregion Local Constants + + #region Constructors + + /// + /// Creates a new deflater with default compression level. + /// + public Deflater() : this(DEFAULT_COMPRESSION, false) + { + } + + /// + /// Creates a new deflater with given compression level. + /// + /// + /// the compression level, a value between NO_COMPRESSION + /// and BEST_COMPRESSION, or DEFAULT_COMPRESSION. + /// + /// if lvl is out of range. + public Deflater(int level) : this(level, false) + { + } + + /// + /// Creates a new deflater with given compression level. + /// + /// + /// the compression level, a value between NO_COMPRESSION + /// and BEST_COMPRESSION. + /// + /// + /// true, if we should suppress the Zlib/RFC1950 header at the + /// beginning and the adler checksum at the end of the output. This is + /// useful for the GZIP/PKZIP formats. 
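// --------------------------------------------------------------------------
// Illustrative sketch (not part of this change): the canonical compress loop
// for this class. Passing true for noZlibHeaderOrFooter yields raw deflate
// data; pass false to keep the RFC 1950 header and Adler32 footer.
// Requires: using System.IO;
public static byte[] Compress(byte[] data)
{
    var deflater = new Deflater(Deflater.BEST_COMPRESSION, true);
    deflater.SetInput(data);
    deflater.Finish();                       // no more input will follow

    using (var ms = new MemoryStream())
    {
        byte[] outBuf = new byte[4096];
        while (!deflater.IsFinished)
        {
            int produced = deflater.Deflate(outBuf);
            ms.Write(outBuf, 0, produced);
        }
        return ms.ToArray();
    }
}
// --------------------------------------------------------------------------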
+ /// + /// if lvl is out of range. + public Deflater(int level, bool noZlibHeaderOrFooter) + { + if (level == DEFAULT_COMPRESSION) + { + level = 6; + } + else if (level < NO_COMPRESSION || level > BEST_COMPRESSION) + { + throw new ArgumentOutOfRangeException(nameof(level)); + } + + pending = new DeflaterPending(); + engine = new DeflaterEngine(pending, noZlibHeaderOrFooter); + this.noZlibHeaderOrFooter = noZlibHeaderOrFooter; + SetStrategy(DeflateStrategy.Default); + SetLevel(level); + Reset(); + } + + #endregion Constructors + + /// + /// Resets the deflater. The deflater acts afterwards as if it was + /// just created with the same compression level and strategy as it + /// had before. + /// + public void Reset() + { + state = (noZlibHeaderOrFooter ? BUSY_STATE : INIT_STATE); + totalOut = 0; + pending.Reset(); + engine.Reset(); + } + + /// + /// Gets the current adler checksum of the data that was processed so far. + /// + public int Adler + { + get + { + return engine.Adler; + } + } + + /// + /// Gets the number of input bytes processed so far. + /// + public long TotalIn + { + get + { + return engine.TotalIn; + } + } + + /// + /// Gets the number of output bytes so far. + /// + public long TotalOut + { + get + { + return totalOut; + } + } + + /// + /// Flushes the current input block. Further calls to deflate() will + /// produce enough output to inflate everything in the current input + /// block. This is not part of Sun's JDK so I have made it package + /// private. It is used by DeflaterOutputStream to implement + /// flush(). + /// + public void Flush() + { + state |= IS_FLUSHING; + } + + /// + /// Finishes the deflater with the current input block. It is an error + /// to give more input after this method was called. This method must + /// be called to force all bytes to be flushed. + /// + public void Finish() + { + state |= (IS_FLUSHING | IS_FINISHING); + } + + /// + /// Returns true if the stream was finished and no more output bytes + /// are available. + /// + public bool IsFinished + { + get + { + return (state == FINISHED_STATE) && pending.IsFlushed; + } + } + + /// + /// Returns true, if the input buffer is empty. + /// You should then call setInput(). + /// NOTE: This method can also return true when the stream + /// was finished. + /// + public bool IsNeedingInput + { + get + { + return engine.NeedsInput(); + } + } + + /// + /// Sets the data which should be compressed next. This should be only + /// called when needsInput indicates that more input is needed. + /// If you call setInput when needsInput() returns false, the + /// previous input that is still pending will be thrown away. + /// The given byte array should not be changed, before needsInput() returns + /// true again. + /// This call is equivalent to setInput(input, 0, input.length). + /// + /// + /// the buffer containing the input data. + /// + /// + /// if the buffer was finished() or ended(). + /// + public void SetInput(byte[] input) + { + SetInput(input, 0, input.Length); + } + + /// + /// Sets the data which should be compressed next. This should be + /// only called when needsInput indicates that more input is needed. + /// The given byte array should not be changed, before needsInput() returns + /// true again. + /// + /// + /// the buffer containing the input data. + /// + /// + /// the start of the data. + /// + /// + /// the number of data bytes of input. + /// + /// + /// if the buffer was Finish()ed or if previous input is still pending. 
+ /// + public void SetInput(byte[] input, int offset, int count) + { + if ((state & IS_FINISHING) != 0) + { + throw new InvalidOperationException("Finish() already called"); + } + engine.SetInput(input, offset, count); + } + + /// + /// Sets the compression level. There is no guarantee of the exact + /// position of the change, but if you call this when needsInput is + /// true the change of compression level will occur somewhere near + /// before the end of the so far given input. + /// + /// + /// the new compression level. + /// + public void SetLevel(int level) + { + if (level == DEFAULT_COMPRESSION) + { + level = 6; + } + else if (level < NO_COMPRESSION || level > BEST_COMPRESSION) + { + throw new ArgumentOutOfRangeException(nameof(level)); + } + + if (this.level != level) + { + this.level = level; + engine.SetLevel(level); + } + } + + /// + /// Get current compression level + /// + /// Returns the current compression level + public int GetLevel() + { + return level; + } + + /// + /// Sets the compression strategy. Strategy is one of + /// DEFAULT_STRATEGY, HUFFMAN_ONLY and FILTERED. For the exact + /// position where the strategy is changed, the same as for + /// SetLevel() applies. + /// + /// + /// The new compression strategy. + /// + public void SetStrategy(DeflateStrategy strategy) + { + engine.Strategy = strategy; + } + + /// + /// Deflates the current input block with to the given array. + /// + /// + /// The buffer where compressed data is stored + /// + /// + /// The number of compressed bytes added to the output, or 0 if either + /// IsNeedingInput() or IsFinished returns true or length is zero. + /// + public int Deflate(byte[] output) + { + return Deflate(output, 0, output.Length); + } + + /// + /// Deflates the current input block to the given array. + /// + /// + /// Buffer to store the compressed data. + /// + /// + /// Offset into the output array. + /// + /// + /// The maximum number of bytes that may be stored. + /// + /// + /// The number of compressed bytes added to the output, or 0 if either + /// needsInput() or finished() returns true or length is zero. + /// + /// + /// If Finish() was previously called. + /// + /// + /// If offset or length don't match the array length. 
+ /// + public int Deflate(byte[] output, int offset, int length) + { + int origLength = length; + + if (state == CLOSED_STATE) + { + throw new InvalidOperationException("Deflater closed"); + } + + if (state < BUSY_STATE) + { + // output header + int header = (DEFLATED + + ((DeflaterConstants.MAX_WBITS - 8) << 4)) << 8; + int level_flags = (level - 1) >> 1; + if (level_flags < 0 || level_flags > 3) + { + level_flags = 3; + } + header |= level_flags << 6; + if ((state & IS_SETDICT) != 0) + { + // Dictionary was set + header |= DeflaterConstants.PRESET_DICT; + } + header += 31 - (header % 31); + + pending.WriteShortMSB(header); + if ((state & IS_SETDICT) != 0) + { + int chksum = engine.Adler; + engine.ResetAdler(); + pending.WriteShortMSB(chksum >> 16); + pending.WriteShortMSB(chksum & 0xffff); + } + + state = BUSY_STATE | (state & (IS_FLUSHING | IS_FINISHING)); + } + + for (; ; ) + { + int count = pending.Flush(output, offset, length); + offset += count; + totalOut += count; + length -= count; + + if (length == 0 || state == FINISHED_STATE) + { + break; + } + + if (!engine.Deflate((state & IS_FLUSHING) != 0, (state & IS_FINISHING) != 0)) + { + switch (state) + { + case BUSY_STATE: + // We need more input now + return origLength - length; + + case FLUSHING_STATE: + if (level != NO_COMPRESSION) + { + /* We have to supply some lookahead. 8 bit lookahead + * is needed by the zlib inflater, and we must fill + * the next byte, so that all bits are flushed. + */ + int neededbits = 8 + ((-pending.BitCount) & 7); + while (neededbits > 0) + { + /* write a static tree block consisting solely of + * an EOF: + */ + pending.WriteBits(2, 10); + neededbits -= 10; + } + } + state = BUSY_STATE; + break; + + case FINISHING_STATE: + pending.AlignToByte(); + + // Compressed data is complete. Write footer information if required. + if (!noZlibHeaderOrFooter) + { + int adler = engine.Adler; + pending.WriteShortMSB(adler >> 16); + pending.WriteShortMSB(adler & 0xffff); + } + state = FINISHED_STATE; + break; + } + } + } + return origLength - length; + } + + /// + /// Sets the dictionary which should be used in the deflate process. + /// This call is equivalent to setDictionary(dict, 0, dict.Length). + /// + /// + /// the dictionary. + /// + /// + /// if SetInput () or Deflate () were already called or another dictionary was already set. + /// + public void SetDictionary(byte[] dictionary) + { + SetDictionary(dictionary, 0, dictionary.Length); + } + + /// + /// Sets the dictionary which should be used in the deflate process. + /// The dictionary is a byte array containing strings that are + /// likely to occur in the data which should be compressed. The + /// dictionary is not stored in the compressed output, only a + /// checksum. To decompress the output you need to supply the same + /// dictionary again. + /// + /// + /// The dictionary data + /// + /// + /// The index where dictionary information commences. + /// + /// + /// The number of bytes in the dictionary. + /// + /// + /// If SetInput () or Deflate() were already called or another dictionary was already set. + /// + public void SetDictionary(byte[] dictionary, int index, int count) + { + if (state != INIT_STATE) + { + throw new InvalidOperationException(); + } + + state = SETDICT_STATE; + engine.SetDictionary(dictionary, index, count); + } + + #region Instance Fields + + /// + /// Compression level. 
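// --------------------------------------------------------------------------
// Worked instance of the RFC 1950 header arithmetic in Deflate() above,
// restated as a standalone helper with the same constants (DEFLATED = 8,
// MAX_WBITS = 15). For level 6 this yields the familiar 0x78 0x9C pair.
public static int ZlibHeader(int level)
{
    int header = (8 + ((15 - 8) << 4)) << 8;   // CMF: deflate, 32K window -> 0x7800
    int levelFlags = (level - 1) >> 1;
    if (levelFlags < 0 || levelFlags > 3) levelFlags = 3;
    header |= levelFlags << 6;                 // level 6 -> 0x7880
    header += 31 - (header % 31);              // FCHECK: make it divisible by 31
    return header;                             // 1 -> 0x7801, 6 -> 0x789C, 9 -> 0x78DA
}
// --------------------------------------------------------------------------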
+ /// + private int level; + + /// + /// If true no Zlib/RFC1950 headers or footers are generated + /// + private bool noZlibHeaderOrFooter; + + /// + /// The current state. + /// + private int state; + + /// + /// The total bytes of output written. + /// + private long totalOut; + + /// + /// The pending output. + /// + private DeflaterPending pending; + + /// + /// The deflater engine. + /// + private DeflaterEngine engine; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Deflater.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Deflater.cs.meta new file mode 100644 index 0000000..1bc83ac --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Deflater.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: cc3e5841e8ca64e1dadd4d80bed1f272 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterConstants.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterConstants.cs new file mode 100644 index 0000000..b6d7f29 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterConstants.cs @@ -0,0 +1,145 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Zip.Compression +{ + /// + /// This class contains constants used for deflation. + /// + public static class DeflaterConstants + { + /// + /// Set to true to enable debugging + /// + public const bool DEBUGGING = false; + + /// + /// Written to Zip file to identify a stored block + /// + public const int STORED_BLOCK = 0; + + /// + /// Identifies static tree in Zip file + /// + public const int STATIC_TREES = 1; + + /// + /// Identifies dynamic tree in Zip file + /// + public const int DYN_TREES = 2; + + /// + /// Header flag indicating a preset dictionary for deflation + /// + public const int PRESET_DICT = 0x20; + + /// + /// Sets internal buffer sizes for Huffman encoding + /// + public const int DEFAULT_MEM_LEVEL = 8; + + /// + /// Internal compression engine constant + /// + public const int MAX_MATCH = 258; + + /// + /// Internal compression engine constant + /// + public const int MIN_MATCH = 3; + + /// + /// Internal compression engine constant + /// + public const int MAX_WBITS = 15; + + /// + /// Internal compression engine constant + /// + public const int WSIZE = 1 << MAX_WBITS; + + /// + /// Internal compression engine constant + /// + public const int WMASK = WSIZE - 1; + + /// + /// Internal compression engine constant + /// + public const int HASH_BITS = DEFAULT_MEM_LEVEL + 7; + + /// + /// Internal compression engine constant + /// + public const int HASH_SIZE = 1 << HASH_BITS; + + /// + /// Internal compression engine constant + /// + public const int HASH_MASK = HASH_SIZE - 1; + + /// + /// Internal compression engine constant + /// + public const int HASH_SHIFT = (HASH_BITS + MIN_MATCH - 1) / MIN_MATCH; + + /// + /// Internal compression engine constant + /// + public const int MIN_LOOKAHEAD = MAX_MATCH + MIN_MATCH + 1; + + /// + /// Internal compression engine constant + /// + public const int MAX_DIST = WSIZE - MIN_LOOKAHEAD; + + /// + /// Internal compression engine constant + /// + public const int PENDING_BUF_SIZE = 1 << (DEFAULT_MEM_LEVEL + 8); + + /// + /// Internal compression engine constant + /// + public static int 
MAX_BLOCK_SIZE = Math.Min(65535, PENDING_BUF_SIZE - 5);
+
+ ///
+ /// Internal compression engine constant
+ ///
+ public const int DEFLATE_STORED = 0;
+
+ ///
+ /// Internal compression engine constant
+ ///
+ public const int DEFLATE_FAST = 1;
+
+ ///
+ /// Internal compression engine constant
+ ///
+ public const int DEFLATE_SLOW = 2;
+
+ ///
+ /// Internal compression engine constant
+ ///
+ public static int[] GOOD_LENGTH = { 0, 4, 4, 4, 4, 8, 8, 8, 32, 32 };
+
+ ///
+ /// Internal compression engine constant
+ ///
+ public static int[] MAX_LAZY = { 0, 4, 5, 6, 4, 16, 16, 32, 128, 258 };
+
+ ///
+ /// Internal compression engine constant
+ ///
+ public static int[] NICE_LENGTH = { 0, 8, 16, 32, 16, 32, 128, 128, 258, 258 };
+
+ ///
+ /// Internal compression engine constant
+ ///
+ public static int[] MAX_CHAIN = { 0, 4, 8, 32, 16, 32, 128, 256, 1024, 4096 };
+
+ ///
+ /// Internal compression engine constant
+ ///
+ public static int[] COMPR_FUNC = { 0, 1, 1, 1, 1, 2, 2, 2, 2, 2 };
+ }
+}
diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterConstants.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterConstants.cs.meta
new file mode 100644
index 0000000..d493853
--- /dev/null
+++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterConstants.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 575fc516b15214dc6ac89affaa1cc783
+MonoImporter:
+  externalObjects: {}
+  serializedVersion: 2
+  defaultReferences: []
+  executionOrder: 0
+  icon: {instanceID: 0}
+  userData:
+  assetBundleName:
+  assetBundleVariant:
diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterEngine.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterEngine.cs
new file mode 100644
index 0000000..556911c
--- /dev/null
+++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterEngine.cs
@@ -0,0 +1,946 @@
+using ICSharpCode.SharpZipLib.Checksum;
+using System;
+
+namespace ICSharpCode.SharpZipLib.Zip.Compression
+{
+ ///
+ /// Strategies for deflater
+ ///
+ public enum DeflateStrategy
+ {
+ ///
+ /// The default strategy
+ ///
+ Default = 0,
+
+ ///
+ /// This strategy will only allow longer string repetitions. It is
+ /// useful for random data with a small character set.
+ ///
+ Filtered = 1,
+
+ ///
+ /// This strategy will not look for string repetitions at all. It
+ /// only encodes with Huffman trees (which means that more common
+ /// characters get a smaller encoding).
+ ///
+ HuffmanOnly = 2
+ }
+
+ // DEFLATE ALGORITHM:
+ //
+ // The uncompressed stream is inserted into the window array. When
+ // the window array is full the first half is thrown away and the
+ // second half is copied to the beginning.
+ //
+ // The head array is a hash table. Three characters build a hash value,
+ // and that value points to the index in the window of the last string
+ // with this hash. The prev array implements a
+ // linked list of matches with the same hash: prev[index & WMASK] points
+ // to the previous index with the same hash.
+ //
+
+ ///
+ /// Low level compression engine for deflate algorithm which uses a 32K sliding window
+ /// with secondary compression from Huffman/Shannon-Fano codes.
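// --------------------------------------------------------------------------
// The three-byte hash sketched numerically: with HASH_BITS = 15 and
// MIN_MATCH = 3 (see DeflaterConstants above), HASH_SHIFT = (15 + 3 - 1) / 3
// = 5, so the hash of bytes b0,b1,b2 used by this engine reduces to
// ((b0 << 10) ^ (b1 << 5) ^ b2) & HASH_MASK.
public static int Hash3(byte b0, byte b1, byte b2)
{
    const int HASH_SHIFT = 5;
    const int HASH_MASK = (1 << 15) - 1;              // HASH_SIZE - 1
    int insH = (b0 << HASH_SHIFT) ^ b1;               // what UpdateHash() keeps
    return ((insH << HASH_SHIFT) ^ b2) & HASH_MASK;   // what InsertString() finishes
}
// --------------------------------------------------------------------------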
+ /// + public class DeflaterEngine + { + #region Constants + + private const int TooFar = 4096; + + #endregion Constants + + #region Constructors + + /// + /// Construct instance with pending buffer + /// Adler calculation will be performed + /// + /// + /// Pending buffer to use + /// + public DeflaterEngine(DeflaterPending pending) + : this (pending, false) + { + } + + + + /// + /// Construct instance with pending buffer + /// + /// + /// Pending buffer to use + /// + /// + /// If no adler calculation should be performed + /// + public DeflaterEngine(DeflaterPending pending, bool noAdlerCalculation) + { + this.pending = pending; + huffman = new DeflaterHuffman(pending); + if (!noAdlerCalculation) + adler = new Adler32(); + + window = new byte[2 * DeflaterConstants.WSIZE]; + head = new short[DeflaterConstants.HASH_SIZE]; + prev = new short[DeflaterConstants.WSIZE]; + + // We start at index 1, to avoid an implementation deficiency, that + // we cannot build a repeat pattern at index 0. + blockStart = strstart = 1; + } + + #endregion Constructors + + /// + /// Deflate drives actual compression of data + /// + /// True to flush input buffers + /// Finish deflation with the current input. + /// Returns true if progress has been made. + public bool Deflate(bool flush, bool finish) + { + bool progress; + do + { + FillWindow(); + bool canFlush = flush && (inputOff == inputEnd); + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + Console.WriteLine("window: [" + blockStart + "," + strstart + "," + + lookahead + "], " + compressionFunction + "," + canFlush); + } +#endif + switch (compressionFunction) + { + case DeflaterConstants.DEFLATE_STORED: + progress = DeflateStored(canFlush, finish); + break; + + case DeflaterConstants.DEFLATE_FAST: + progress = DeflateFast(canFlush, finish); + break; + + case DeflaterConstants.DEFLATE_SLOW: + progress = DeflateSlow(canFlush, finish); + break; + + default: + throw new InvalidOperationException("unknown compressionFunction"); + } + } while (pending.IsFlushed && progress); // repeat while we have no pending output and progress was made + return progress; + } + + /// + /// Sets input data to be deflated. Should only be called when NeedsInput() + /// returns true + /// + /// The buffer containing input data. + /// The offset of the first byte of data. + /// The number of bytes of data to use as input. + public void SetInput(byte[] buffer, int offset, int count) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (offset < 0) + { + throw new ArgumentOutOfRangeException(nameof(offset)); + } + + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + if (inputOff < inputEnd) + { + throw new InvalidOperationException("Old input was not completely processed"); + } + + int end = offset + count; + + /* We want to throw an ArrayIndexOutOfBoundsException early. The + * check is very tricky: it also handles integer wrap around. + */ + if ((offset > end) || (end > buffer.Length)) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + inputBuf = buffer; + inputOff = offset; + inputEnd = end; + } + + /// + /// Determines if more input is needed. + /// + /// Return true if input is needed via SetInput + public bool NeedsInput() + { + return (inputEnd == inputOff); + } + + /// + /// Set compression dictionary + /// + /// The buffer containing the dictionary data + /// The offset in the buffer for the first byte of data + /// The length of the dictionary data. 
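// --------------------------------------------------------------------------
// Illustrative sketch (not part of this change): preset dictionaries go in
// before any input, and only their Adler32 checksum travels in the stream,
// so the inflating side must supply identical bytes.
public static void CompressWithDictionary(Deflater deflater, byte[] dictionary, byte[] data)
{
    deflater.SetDictionary(dictionary);  // throws once SetInput/Deflate have run
    deflater.SetInput(data);
    deflater.Finish();
    // ... drain with Deflate(...) as in the earlier Compress() sketch ...
}
// --------------------------------------------------------------------------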
+ public void SetDictionary(byte[] buffer, int offset, int length) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (strstart != 1) ) + { + throw new InvalidOperationException("strstart not 1"); + } +#endif + adler?.Update(new ArraySegment(buffer, offset, length)); + if (length < DeflaterConstants.MIN_MATCH) + { + return; + } + + if (length > DeflaterConstants.MAX_DIST) + { + offset += length - DeflaterConstants.MAX_DIST; + length = DeflaterConstants.MAX_DIST; + } + + System.Array.Copy(buffer, offset, window, strstart, length); + + UpdateHash(); + --length; + while (--length > 0) + { + InsertString(); + strstart++; + } + strstart += 2; + blockStart = strstart; + } + + /// + /// Reset internal state + /// + public void Reset() + { + huffman.Reset(); + adler?.Reset(); + blockStart = strstart = 1; + lookahead = 0; + totalIn = 0; + prevAvailable = false; + matchLen = DeflaterConstants.MIN_MATCH - 1; + + for (int i = 0; i < DeflaterConstants.HASH_SIZE; i++) + { + head[i] = 0; + } + + for (int i = 0; i < DeflaterConstants.WSIZE; i++) + { + prev[i] = 0; + } + } + + /// + /// Reset Adler checksum + /// + public void ResetAdler() + { + adler?.Reset(); + } + + /// + /// Get current value of Adler checksum + /// + public int Adler + { + get + { + return (adler != null) ? unchecked((int)adler.Value) : 0; + } + } + + /// + /// Total data processed + /// + public long TotalIn + { + get + { + return totalIn; + } + } + + /// + /// Get/set the deflate strategy + /// + public DeflateStrategy Strategy + { + get + { + return strategy; + } + set + { + strategy = value; + } + } + + /// + /// Set the deflate level (0-9) + /// + /// The value to set the level to. + public void SetLevel(int level) + { + if ((level < 0) || (level > 9)) + { + throw new ArgumentOutOfRangeException(nameof(level)); + } + + goodLength = DeflaterConstants.GOOD_LENGTH[level]; + max_lazy = DeflaterConstants.MAX_LAZY[level]; + niceLength = DeflaterConstants.NICE_LENGTH[level]; + max_chain = DeflaterConstants.MAX_CHAIN[level]; + + if (DeflaterConstants.COMPR_FUNC[level] != compressionFunction) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + Console.WriteLine("Change from " + compressionFunction + " to " + + DeflaterConstants.COMPR_FUNC[level]); + } +#endif + switch (compressionFunction) + { + case DeflaterConstants.DEFLATE_STORED: + if (strstart > blockStart) + { + huffman.FlushStoredBlock(window, blockStart, + strstart - blockStart, false); + blockStart = strstart; + } + UpdateHash(); + break; + + case DeflaterConstants.DEFLATE_FAST: + if (strstart > blockStart) + { + huffman.FlushBlock(window, blockStart, strstart - blockStart, + false); + blockStart = strstart; + } + break; + + case DeflaterConstants.DEFLATE_SLOW: + if (prevAvailable) + { + huffman.TallyLit(window[strstart - 1] & 0xff); + } + if (strstart > blockStart) + { + huffman.FlushBlock(window, blockStart, strstart - blockStart, false); + blockStart = strstart; + } + prevAvailable = false; + matchLen = DeflaterConstants.MIN_MATCH - 1; + break; + } + compressionFunction = DeflaterConstants.COMPR_FUNC[level]; + } + } + + /// + /// Fill the window + /// + public void FillWindow() + { + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. 
+ */ + if (strstart >= DeflaterConstants.WSIZE + DeflaterConstants.MAX_DIST) + { + SlideWindow(); + } + + /* If there is not enough lookahead, but still some input left, + * read in the input + */ + if (lookahead < DeflaterConstants.MIN_LOOKAHEAD && inputOff < inputEnd) + { + int more = 2 * DeflaterConstants.WSIZE - lookahead - strstart; + + if (more > inputEnd - inputOff) + { + more = inputEnd - inputOff; + } + + System.Array.Copy(inputBuf, inputOff, window, strstart + lookahead, more); + adler?.Update(new ArraySegment(inputBuf, inputOff, more)); + + inputOff += more; + totalIn += more; + lookahead += more; + } + + if (lookahead >= DeflaterConstants.MIN_MATCH) + { + UpdateHash(); + } + } + + private void UpdateHash() + { + /* + if (DEBUGGING) { + Console.WriteLine("updateHash: "+strstart); + } + */ + ins_h = (window[strstart] << DeflaterConstants.HASH_SHIFT) ^ window[strstart + 1]; + } + + /// + /// Inserts the current string in the head hash and returns the previous + /// value for this hash. + /// + /// The previous hash value + private int InsertString() + { + short match; + int hash = ((ins_h << DeflaterConstants.HASH_SHIFT) ^ window[strstart + (DeflaterConstants.MIN_MATCH - 1)]) & DeflaterConstants.HASH_MASK; + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) + { + if (hash != (((window[strstart] << (2*HASH_SHIFT)) ^ + (window[strstart + 1] << HASH_SHIFT) ^ + (window[strstart + 2])) & HASH_MASK)) { + throw new SharpZipBaseException("hash inconsistent: " + hash + "/" + +window[strstart] + "," + +window[strstart + 1] + "," + +window[strstart + 2] + "," + HASH_SHIFT); + } + } +#endif + prev[strstart & DeflaterConstants.WMASK] = match = head[hash]; + head[hash] = unchecked((short)strstart); + ins_h = hash; + return match & 0xffff; + } + + private void SlideWindow() + { + Array.Copy(window, DeflaterConstants.WSIZE, window, 0, DeflaterConstants.WSIZE); + matchStart -= DeflaterConstants.WSIZE; + strstart -= DeflaterConstants.WSIZE; + blockStart -= DeflaterConstants.WSIZE; + + // Slide the hash table (could be avoided with 32 bit values + // at the expense of memory usage). + for (int i = 0; i < DeflaterConstants.HASH_SIZE; ++i) + { + int m = head[i] & 0xffff; + head[i] = (short)(m >= DeflaterConstants.WSIZE ? (m - DeflaterConstants.WSIZE) : 0); + } + + // Slide the prev table. + for (int i = 0; i < DeflaterConstants.WSIZE; i++) + { + int m = prev[i] & 0xffff; + prev[i] = (short)(m >= DeflaterConstants.WSIZE ? (m - DeflaterConstants.WSIZE) : 0); + } + } + + /// + /// Find the best (longest) string in the window matching the + /// string starting at strstart. + /// + /// Preconditions: + /// + /// strstart + DeflaterConstants.MAX_MATCH <= window.length. 
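// --------------------------------------------------------------------------
// The sliding-window arithmetic above, spelled out with the constants from
// DeflaterConstants: the window holds 2 * WSIZE = 65536 bytes, a slide is
// triggered once strstart reaches WSIZE + MAX_DIST, and MIN_LOOKAHEAD bytes
// of headroom guarantee a full-length match can still be scanned.
const int WSIZE = 1 << 15;                   // 32768
const int MIN_LOOKAHEAD = 258 + 3 + 1;       // MAX_MATCH + MIN_MATCH + 1 = 262
const int MAX_DIST = WSIZE - MIN_LOOKAHEAD;  // 32506
// Slide threshold: WSIZE + MAX_DIST = 65274, i.e. 262 bytes short of 65536.
// --------------------------------------------------------------------------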
+ /// + /// + /// True if a match greater than the minimum length is found + private bool FindLongestMatch(int curMatch) + { + int match; + int scan = strstart; + // scanMax is the highest position that we can look at + int scanMax = scan + Math.Min(DeflaterConstants.MAX_MATCH, lookahead) - 1; + int limit = Math.Max(scan - DeflaterConstants.MAX_DIST, 0); + + byte[] window = this.window; + short[] prev = this.prev; + int chainLength = this.max_chain; + int niceLength = Math.Min(this.niceLength, lookahead); + + matchLen = Math.Max(matchLen, DeflaterConstants.MIN_MATCH - 1); + + if (scan + matchLen > scanMax) return false; + + byte scan_end1 = window[scan + matchLen - 1]; + byte scan_end = window[scan + matchLen]; + + // Do not waste too much time if we already have a good match: + if (matchLen >= this.goodLength) chainLength >>= 2; + + do + { + match = curMatch; + scan = strstart; + + if (window[match + matchLen] != scan_end + || window[match + matchLen - 1] != scan_end1 + || window[match] != window[scan] + || window[++match] != window[++scan]) + { + continue; + } + + // scan is set to strstart+1 and the comparison passed, so + // scanMax - scan is the maximum number of bytes we can compare. + // below we compare 8 bytes at a time, so first we compare + // (scanMax - scan) % 8 bytes, so the remainder is a multiple of 8 + + switch ((scanMax - scan) % 8) + { + case 1: + if (window[++scan] == window[++match]) break; + break; + + case 2: + if (window[++scan] == window[++match] + && window[++scan] == window[++match]) break; + break; + + case 3: + if (window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match]) break; + break; + + case 4: + if (window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match]) break; + break; + + case 5: + if (window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match]) break; + break; + + case 6: + if (window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match]) break; + break; + + case 7: + if (window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match]) break; + break; + } + + if (window[scan] == window[match]) + { + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart + 258 unless lookahead is + * exhausted first. 
+ */ + do + { + if (scan == scanMax) + { + ++scan; // advance to first position not matched + ++match; + + break; + } + } + while (window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match] + && window[++scan] == window[++match]); + } + + if (scan - strstart > matchLen) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (ins_h == 0) ) + Console.Error.WriteLine("Found match: " + curMatch + "-" + (scan - strstart)); +#endif + + matchStart = curMatch; + matchLen = scan - strstart; + + if (matchLen >= niceLength) + break; + + scan_end1 = window[scan - 1]; + scan_end = window[scan]; + } + } while ((curMatch = (prev[curMatch & DeflaterConstants.WMASK] & 0xffff)) > limit && 0 != --chainLength); + + return matchLen >= DeflaterConstants.MIN_MATCH; + } + + private bool DeflateStored(bool flush, bool finish) + { + if (!flush && (lookahead == 0)) + { + return false; + } + + strstart += lookahead; + lookahead = 0; + + int storedLength = strstart - blockStart; + + if ((storedLength >= DeflaterConstants.MAX_BLOCK_SIZE) || // Block is full + (blockStart < DeflaterConstants.WSIZE && storedLength >= DeflaterConstants.MAX_DIST) || // Block may move out of window + flush) + { + bool lastBlock = finish; + if (storedLength > DeflaterConstants.MAX_BLOCK_SIZE) + { + storedLength = DeflaterConstants.MAX_BLOCK_SIZE; + lastBlock = false; + } + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) + { + Console.WriteLine("storedBlock[" + storedLength + "," + lastBlock + "]"); + } +#endif + + huffman.FlushStoredBlock(window, blockStart, storedLength, lastBlock); + blockStart += storedLength; + return !(lastBlock || storedLength == 0); + } + return true; + } + + private bool DeflateFast(bool flush, bool finish) + { + if (lookahead < DeflaterConstants.MIN_LOOKAHEAD && !flush) + { + return false; + } + + while (lookahead >= DeflaterConstants.MIN_LOOKAHEAD || flush) + { + if (lookahead == 0) + { + // We are flushing everything + huffman.FlushBlock(window, blockStart, strstart - blockStart, finish); + blockStart = strstart; + return false; + } + + if (strstart > 2 * DeflaterConstants.WSIZE - DeflaterConstants.MIN_LOOKAHEAD) + { + /* slide window, as FindLongestMatch needs this. + * This should only happen when flushing and the window + * is almost full. 
+ */ + SlideWindow(); + } + + int hashHead; + if (lookahead >= DeflaterConstants.MIN_MATCH && + (hashHead = InsertString()) != 0 && + strategy != DeflateStrategy.HuffmanOnly && + strstart - hashHead <= DeflaterConstants.MAX_DIST && + FindLongestMatch(hashHead)) + { + // longestMatch sets matchStart and matchLen +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) + { + for (int i = 0 ; i < matchLen; i++) { + if (window[strstart + i] != window[matchStart + i]) { + throw new SharpZipBaseException("Match failure"); + } + } + } +#endif + + bool full = huffman.TallyDist(strstart - matchStart, matchLen); + + lookahead -= matchLen; + if (matchLen <= max_lazy && lookahead >= DeflaterConstants.MIN_MATCH) + { + while (--matchLen > 0) + { + ++strstart; + InsertString(); + } + ++strstart; + } + else + { + strstart += matchLen; + if (lookahead >= DeflaterConstants.MIN_MATCH - 1) + { + UpdateHash(); + } + } + matchLen = DeflaterConstants.MIN_MATCH - 1; + if (!full) + { + continue; + } + } + else + { + // No match found + huffman.TallyLit(window[strstart] & 0xff); + ++strstart; + --lookahead; + } + + if (huffman.IsFull()) + { + bool lastBlock = finish && (lookahead == 0); + huffman.FlushBlock(window, blockStart, strstart - blockStart, lastBlock); + blockStart = strstart; + return !lastBlock; + } + } + return true; + } + + private bool DeflateSlow(bool flush, bool finish) + { + if (lookahead < DeflaterConstants.MIN_LOOKAHEAD && !flush) + { + return false; + } + + while (lookahead >= DeflaterConstants.MIN_LOOKAHEAD || flush) + { + if (lookahead == 0) + { + if (prevAvailable) + { + huffman.TallyLit(window[strstart - 1] & 0xff); + } + prevAvailable = false; + + // We are flushing everything +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && !flush) + { + throw new SharpZipBaseException("Not flushing, but no lookahead"); + } +#endif + huffman.FlushBlock(window, blockStart, strstart - blockStart, + finish); + blockStart = strstart; + return false; + } + + if (strstart >= 2 * DeflaterConstants.WSIZE - DeflaterConstants.MIN_LOOKAHEAD) + { + /* slide window, as FindLongestMatch needs this. + * This should only happen when flushing and the window + * is almost full. 
+ */ + SlideWindow(); + } + + int prevMatch = matchStart; + int prevLen = matchLen; + if (lookahead >= DeflaterConstants.MIN_MATCH) + { + int hashHead = InsertString(); + + if (strategy != DeflateStrategy.HuffmanOnly && + hashHead != 0 && + strstart - hashHead <= DeflaterConstants.MAX_DIST && + FindLongestMatch(hashHead)) + { + // longestMatch sets matchStart and matchLen + + // Discard match if too small and too far away + if (matchLen <= 5 && (strategy == DeflateStrategy.Filtered || (matchLen == DeflaterConstants.MIN_MATCH && strstart - matchStart > TooFar))) + { + matchLen = DeflaterConstants.MIN_MATCH - 1; + } + } + } + + // previous match was better + if ((prevLen >= DeflaterConstants.MIN_MATCH) && (matchLen <= prevLen)) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) + { + for (int i = 0 ; i < matchLen; i++) { + if (window[strstart-1+i] != window[prevMatch + i]) + throw new SharpZipBaseException(); + } + } +#endif + huffman.TallyDist(strstart - 1 - prevMatch, prevLen); + prevLen -= 2; + do + { + strstart++; + lookahead--; + if (lookahead >= DeflaterConstants.MIN_MATCH) + { + InsertString(); + } + } while (--prevLen > 0); + + strstart++; + lookahead--; + prevAvailable = false; + matchLen = DeflaterConstants.MIN_MATCH - 1; + } + else + { + if (prevAvailable) + { + huffman.TallyLit(window[strstart - 1] & 0xff); + } + prevAvailable = true; + strstart++; + lookahead--; + } + + if (huffman.IsFull()) + { + int len = strstart - blockStart; + if (prevAvailable) + { + len--; + } + bool lastBlock = (finish && (lookahead == 0) && !prevAvailable); + huffman.FlushBlock(window, blockStart, len, lastBlock); + blockStart += len; + return !lastBlock; + } + } + return true; + } + + #region Instance Fields + + // Hash index of string to be inserted + private int ins_h; + + /// + /// Hashtable, hashing three characters to an index for window, so + /// that window[index]..window[index+2] have this hash code. + /// Note that the array should really be unsigned short, so you need + /// to and the values with 0xffff. + /// + private short[] head; + + /// + /// prev[index & WMASK] points to the previous index that has the + /// same hash code as the string starting at index. This way + /// entries with the same hash code are in a linked list. + /// Note that the array should really be unsigned short, so you need + /// to and the values with 0xffff. + /// + private short[] prev; + + private int matchStart; + + // Length of best match + private int matchLen; + + // Set if previous match exists + private bool prevAvailable; + + private int blockStart; + + /// + /// Points to the current character in the window. + /// + private int strstart; + + /// + /// lookahead is the number of characters starting at strstart in + /// window that are valid. + /// So window[strstart] until window[strstart+lookahead-1] are valid + /// characters. + /// + private int lookahead; + + /// + /// This array contains the part of the uncompressed stream that + /// is of relevance. The current character is indexed by strstart. + /// + private byte[] window; + + private DeflateStrategy strategy; + private int max_chain, max_lazy, niceLength, goodLength; + + /// + /// The current compression function. + /// + private int compressionFunction; + + /// + /// The input data for compression. + /// + private byte[] inputBuf; + + /// + /// The total bytes of input read. + /// + private long totalIn; + + /// + /// The offset into inputBuf, where input data starts. 
+ /// + private int inputOff; + + /// + /// The end offset of the input data. + /// + private int inputEnd; + + private DeflaterPending pending; + private DeflaterHuffman huffman; + + /// + /// The adler checksum + /// + private Adler32 adler; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterEngine.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterEngine.cs.meta new file mode 100644 index 0000000..731661a --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterEngine.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 66d95b06937154c5e9acab1a061fc0cb +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterHuffman.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterHuffman.cs new file mode 100644 index 0000000..2f71366 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterHuffman.cs @@ -0,0 +1,959 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Zip.Compression +{ + /// + /// This is the DeflaterHuffman class. + /// + /// This class is not thread safe. This is inherent in the API, due + /// to the split of Deflate and SetInput. + /// + /// author of the original java version : Jochen Hoenicke + /// + public class DeflaterHuffman + { + private const int BUFSIZE = 1 << (DeflaterConstants.DEFAULT_MEM_LEVEL + 6); + private const int LITERAL_NUM = 286; + + // Number of distance codes + private const int DIST_NUM = 30; + + // Number of codes used to transfer bit lengths + private const int BITLEN_NUM = 19; + + // repeat previous bit length 3-6 times (2 bits of repeat count) + private const int REP_3_6 = 16; + + // repeat a zero length 3-10 times (3 bits of repeat count) + private const int REP_3_10 = 17; + + // repeat a zero length 11-138 times (7 bits of repeat count) + private const int REP_11_138 = 18; + + private const int EOF_SYMBOL = 256; + + // The lengths of the bit length codes are sent in order of decreasing + // probability, to avoid transmitting the lengths for unused bit length codes. 
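+ // (Only the first blTreeCodes entries of this order are actually
+ // written out by SendAllTrees; the decoder treats the remaining
+ // lengths as zero.)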
+ private static readonly int[] BL_ORDER = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; + + private static readonly byte[] bit4Reverse = { + 0, + 8, + 4, + 12, + 2, + 10, + 6, + 14, + 1, + 9, + 5, + 13, + 3, + 11, + 7, + 15 + }; + + private static short[] staticLCodes; + private static byte[] staticLLength; + private static short[] staticDCodes; + private static byte[] staticDLength; + + private class Tree + { + #region Instance Fields + + public short[] freqs; + + public byte[] length; + + public int minNumCodes; + + public int numCodes; + + private short[] codes; + private readonly int[] bl_counts; + private readonly int maxLength; + private DeflaterHuffman dh; + + #endregion Instance Fields + + #region Constructors + + public Tree(DeflaterHuffman dh, int elems, int minCodes, int maxLength) + { + this.dh = dh; + this.minNumCodes = minCodes; + this.maxLength = maxLength; + freqs = new short[elems]; + bl_counts = new int[maxLength]; + } + + #endregion Constructors + + /// + /// Resets the internal state of the tree + /// + public void Reset() + { + for (int i = 0; i < freqs.Length; i++) + { + freqs[i] = 0; + } + codes = null; + length = null; + } + + public void WriteSymbol(int code) + { + // if (DeflaterConstants.DEBUGGING) { + // freqs[code]--; + // // Console.Write("writeSymbol("+freqs.length+","+code+"): "); + // } + dh.pending.WriteBits(codes[code] & 0xffff, length[code]); + } + + /// + /// Check that all frequencies are zero + /// + /// + /// At least one frequency is non-zero + /// + public void CheckEmpty() + { + bool empty = true; + for (int i = 0; i < freqs.Length; i++) + { + empty &= freqs[i] == 0; + } + + if (!empty) + { + throw new SharpZipBaseException("!Empty"); + } + } + + /// + /// Set static codes and length + /// + /// new codes + /// length for new codes + public void SetStaticCodes(short[] staticCodes, byte[] staticLengths) + { + codes = staticCodes; + length = staticLengths; + } + + /// + /// Build dynamic codes and lengths + /// + public void BuildCodes() + { + int numSymbols = freqs.Length; + int[] nextCode = new int[maxLength]; + int code = 0; + + codes = new short[freqs.Length]; + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("buildCodes: "+freqs.Length); + // } + + for (int bits = 0; bits < maxLength; bits++) + { + nextCode[bits] = code; + code += bl_counts[bits] << (15 - bits); + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("bits: " + ( bits + 1) + " count: " + bl_counts[bits] + // +" nextCode: "+code); + // } + } + +#if DebugDeflation + if ( DeflaterConstants.DEBUGGING && (code != 65536) ) + { + throw new SharpZipBaseException("Inconsistent bl_counts!"); + } +#endif + for (int i = 0; i < numCodes; i++) + { + int bits = length[i]; + if (bits > 0) + { + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("codes["+i+"] = rev(" + nextCode[bits-1]+"), + // +bits); + // } + + codes[i] = BitReverse(nextCode[bits - 1]); + nextCode[bits - 1] += 1 << (16 - bits); + } + } + } + + public void BuildTree() + { + int numSymbols = freqs.Length; + + /* heap is a priority queue, sorted by frequency, least frequent + * nodes first. The heap is a binary tree, with the property, that + * the parent node is smaller than both child nodes. This assures + * that the smallest node is the first parent. + * + * The binary tree is encoded in an array: 0 is root node and + * the nodes 2*n+1, 2*n+2 are the child nodes of node n. 
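+ * In other words this is a binary min-heap in array form: the key
+ * of heap[n] is never larger than the keys of heap[2*n+1] and
+ * heap[2*n+2].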
+ */ + int[] heap = new int[numSymbols]; + int heapLen = 0; + int maxCode = 0; + for (int n = 0; n < numSymbols; n++) + { + int freq = freqs[n]; + if (freq != 0) + { + // Insert n into heap + int pos = heapLen++; + int ppos; + while (pos > 0 && freqs[heap[ppos = (pos - 1) / 2]] > freq) + { + heap[pos] = heap[ppos]; + pos = ppos; + } + heap[pos] = n; + + maxCode = n; + } + } + + /* We could encode a single literal with 0 bits but then we + * don't see the literals. Therefore we force at least two + * literals to avoid this case. We don't care about order in + * this case, both literals get a 1 bit code. + */ + while (heapLen < 2) + { + int node = maxCode < 2 ? ++maxCode : 0; + heap[heapLen++] = node; + } + + numCodes = Math.Max(maxCode + 1, minNumCodes); + + int numLeafs = heapLen; + int[] childs = new int[4 * heapLen - 2]; + int[] values = new int[2 * heapLen - 1]; + int numNodes = numLeafs; + for (int i = 0; i < heapLen; i++) + { + int node = heap[i]; + childs[2 * i] = node; + childs[2 * i + 1] = -1; + values[i] = freqs[node] << 8; + heap[i] = i; + } + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. + */ + do + { + int first = heap[0]; + int last = heap[--heapLen]; + + // Propagate the hole to the leafs of the heap + int ppos = 0; + int path = 1; + + while (path < heapLen) + { + if (path + 1 < heapLen && values[heap[path]] > values[heap[path + 1]]) + { + path++; + } + + heap[ppos] = heap[path]; + ppos = path; + path = path * 2 + 1; + } + + /* Now propagate the last element down along path. Normally + * it shouldn't go too deep. + */ + int lastVal = values[last]; + while ((path = ppos) > 0 && values[heap[ppos = (path - 1) / 2]] > lastVal) + { + heap[path] = heap[ppos]; + } + heap[path] = last; + + int second = heap[0]; + + // Create a new node father of first and second + last = numNodes++; + childs[2 * last] = first; + childs[2 * last + 1] = second; + int mindepth = Math.Min(values[first] & 0xff, values[second] & 0xff); + values[last] = lastVal = values[first] + values[second] - mindepth + 1; + + // Again, propagate the hole to the leafs + ppos = 0; + path = 1; + + while (path < heapLen) + { + if (path + 1 < heapLen && values[heap[path]] > values[heap[path + 1]]) + { + path++; + } + + heap[ppos] = heap[path]; + ppos = path; + path = ppos * 2 + 1; + } + + // Now propagate the new element down along path + while ((path = ppos) > 0 && values[heap[ppos = (path - 1) / 2]] > lastVal) + { + heap[path] = heap[ppos]; + } + heap[path] = last; + } while (heapLen > 1); + + if (heap[0] != childs.Length / 2 - 1) + { + throw new SharpZipBaseException("Heap invariant violated"); + } + + BuildLength(childs); + } + + /// + /// Get encoded length + /// + /// Encoded length, the sum of frequencies * lengths + public int GetEncodedLength() + { + int len = 0; + for (int i = 0; i < freqs.Length; i++) + { + len += freqs[i] * length[i]; + } + return len; + } + + /// + /// Scan a literal or distance tree to determine the frequencies of the codes + /// in the bit length tree. 
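+ /// Runs of equal lengths are counted as the run-length symbols that
+ /// WriteTree will later emit: REP_3_6 (16) for repeats of a non-zero
+ /// length, REP_3_10 (17) and REP_11_138 (18) for runs of zeros, so
+ /// the bit length tree's frequencies match what is actually written.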
+ /// + public void CalcBLFreq(Tree blTree) + { + int max_count; /* max repeat count */ + int min_count; /* min repeat count */ + int count; /* repeat count of the current code */ + int curlen = -1; /* length of current code */ + + int i = 0; + while (i < numCodes) + { + count = 1; + int nextlen = length[i]; + if (nextlen == 0) + { + max_count = 138; + min_count = 3; + } + else + { + max_count = 6; + min_count = 3; + if (curlen != nextlen) + { + blTree.freqs[nextlen]++; + count = 0; + } + } + curlen = nextlen; + i++; + + while (i < numCodes && curlen == length[i]) + { + i++; + if (++count >= max_count) + { + break; + } + } + + if (count < min_count) + { + blTree.freqs[curlen] += (short)count; + } + else if (curlen != 0) + { + blTree.freqs[REP_3_6]++; + } + else if (count <= 10) + { + blTree.freqs[REP_3_10]++; + } + else + { + blTree.freqs[REP_11_138]++; + } + } + } + + /// + /// Write tree values + /// + /// Tree to write + public void WriteTree(Tree blTree) + { + int max_count; // max repeat count + int min_count; // min repeat count + int count; // repeat count of the current code + int curlen = -1; // length of current code + + int i = 0; + while (i < numCodes) + { + count = 1; + int nextlen = length[i]; + if (nextlen == 0) + { + max_count = 138; + min_count = 3; + } + else + { + max_count = 6; + min_count = 3; + if (curlen != nextlen) + { + blTree.WriteSymbol(nextlen); + count = 0; + } + } + curlen = nextlen; + i++; + + while (i < numCodes && curlen == length[i]) + { + i++; + if (++count >= max_count) + { + break; + } + } + + if (count < min_count) + { + while (count-- > 0) + { + blTree.WriteSymbol(curlen); + } + } + else if (curlen != 0) + { + blTree.WriteSymbol(REP_3_6); + dh.pending.WriteBits(count - 3, 2); + } + else if (count <= 10) + { + blTree.WriteSymbol(REP_3_10); + dh.pending.WriteBits(count - 3, 3); + } + else + { + blTree.WriteSymbol(REP_11_138); + dh.pending.WriteBits(count - 11, 7); + } + } + } + + private void BuildLength(int[] childs) + { + this.length = new byte[freqs.Length]; + int numNodes = childs.Length / 2; + int numLeafs = (numNodes + 1) / 2; + int overflow = 0; + + for (int i = 0; i < maxLength; i++) + { + bl_counts[i] = 0; + } + + // First calculate optimal bit lengths + int[] lengths = new int[numNodes]; + lengths[numNodes - 1] = 0; + + for (int i = numNodes - 1; i >= 0; i--) + { + if (childs[2 * i + 1] != -1) + { + int bitLength = lengths[i] + 1; + if (bitLength > maxLength) + { + bitLength = maxLength; + overflow++; + } + lengths[childs[2 * i]] = lengths[childs[2 * i + 1]] = bitLength; + } + else + { + // A leaf node + int bitLength = lengths[i]; + bl_counts[bitLength - 1]++; + this.length[childs[2 * i]] = (byte)lengths[i]; + } + } + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("Tree "+freqs.Length+" lengths:"); + // for (int i=0; i < numLeafs; i++) { + // //Console.WriteLine("Node "+childs[2*i]+" freq: "+freqs[childs[2*i]] + // + " len: "+length[childs[2*i]]); + // } + // } + + if (overflow == 0) + { + return; + } + + int incrBitLen = maxLength - 1; + do + { + // Find the first bit length which could increase: + while (bl_counts[--incrBitLen] == 0) + { + } + + // Move this node one down and remove a corresponding + // number of overflow nodes. + do + { + bl_counts[incrBitLen]--; + bl_counts[++incrBitLen]++; + overflow -= 1 << (maxLength - 1 - incrBitLen); + } while (overflow > 0 && incrBitLen < maxLength - 1); + } while (overflow > 0); + + /* We may have overshot above. Move some nodes from maxLength to + * maxLength-1 in that case. 
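+ * (bl_counts is indexed by bitLength - 1 and overflow is <= 0 at
+ * this point, so the two lines below move -overflow codes from bit
+ * length maxLength to bit length maxLength - 1.)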
+ */ + bl_counts[maxLength - 1] += overflow; + bl_counts[maxLength - 2] -= overflow; + + /* Now recompute all bit lengths, scanning in increasing + * frequency. It is simpler to reconstruct all lengths instead of + * fixing only the wrong ones. This idea is taken from 'ar' + * written by Haruhiko Okumura. + * + * The nodes were inserted with decreasing frequency into the childs + * array. + */ + int nodePtr = 2 * numLeafs; + for (int bits = maxLength; bits != 0; bits--) + { + int n = bl_counts[bits - 1]; + while (n > 0) + { + int childPtr = 2 * childs[nodePtr++]; + if (childs[childPtr + 1] == -1) + { + // We found another leaf + length[childs[childPtr]] = (byte)bits; + n--; + } + } + } + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("*** After overflow elimination. ***"); + // for (int i=0; i < numLeafs; i++) { + // //Console.WriteLine("Node "+childs[2*i]+" freq: "+freqs[childs[2*i]] + // + " len: "+length[childs[2*i]]); + // } + // } + } + } + + #region Instance Fields + + /// + /// Pending buffer to use + /// + public DeflaterPending pending; + + private Tree literalTree; + private Tree distTree; + private Tree blTree; + + // Buffer for distances + private short[] d_buf; + + private byte[] l_buf; + private int last_lit; + private int extra_bits; + + #endregion Instance Fields + + static DeflaterHuffman() + { + // See RFC 1951 3.2.6 + // Literal codes + staticLCodes = new short[LITERAL_NUM]; + staticLLength = new byte[LITERAL_NUM]; + + int i = 0; + while (i < 144) + { + staticLCodes[i] = BitReverse((0x030 + i) << 8); + staticLLength[i++] = 8; + } + + while (i < 256) + { + staticLCodes[i] = BitReverse((0x190 - 144 + i) << 7); + staticLLength[i++] = 9; + } + + while (i < 280) + { + staticLCodes[i] = BitReverse((0x000 - 256 + i) << 9); + staticLLength[i++] = 7; + } + + while (i < LITERAL_NUM) + { + staticLCodes[i] = BitReverse((0x0c0 - 280 + i) << 8); + staticLLength[i++] = 8; + } + + // Distance codes + staticDCodes = new short[DIST_NUM]; + staticDLength = new byte[DIST_NUM]; + for (i = 0; i < DIST_NUM; i++) + { + staticDCodes[i] = BitReverse(i << 11); + staticDLength[i] = 5; + } + } + + /// + /// Construct instance with pending buffer + /// + /// Pending buffer to use + public DeflaterHuffman(DeflaterPending pending) + { + this.pending = pending; + + literalTree = new Tree(this, LITERAL_NUM, 257, 15); + distTree = new Tree(this, DIST_NUM, 1, 15); + blTree = new Tree(this, BITLEN_NUM, 4, 7); + + d_buf = new short[BUFSIZE]; + l_buf = new byte[BUFSIZE]; + } + + /// + /// Reset internal state + /// + public void Reset() + { + last_lit = 0; + extra_bits = 0; + literalTree.Reset(); + distTree.Reset(); + blTree.Reset(); + } + + /// + /// Write all trees to pending buffer + /// + /// The number/rank of treecodes to send. 
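+ /// The first three fields written are HLIT (5 bits), HDIST (5 bits)
+ /// and HCLEN (4 bits) as defined in RFC 1951 section 3.2.7, followed
+ /// by the 3-bit bit-length code lengths in BL_ORDER.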
+ public void SendAllTrees(int blTreeCodes) + { + blTree.BuildCodes(); + literalTree.BuildCodes(); + distTree.BuildCodes(); + pending.WriteBits(literalTree.numCodes - 257, 5); + pending.WriteBits(distTree.numCodes - 1, 5); + pending.WriteBits(blTreeCodes - 4, 4); + for (int rank = 0; rank < blTreeCodes; rank++) + { + pending.WriteBits(blTree.length[BL_ORDER[rank]], 3); + } + literalTree.WriteTree(blTree); + distTree.WriteTree(blTree); + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + blTree.CheckEmpty(); + } +#endif + } + + /// + /// Compress current buffer writing data to pending buffer + /// + public void CompressBlock() + { + for (int i = 0; i < last_lit; i++) + { + int litlen = l_buf[i] & 0xff; + int dist = d_buf[i]; + if (dist-- != 0) + { + // if (DeflaterConstants.DEBUGGING) { + // Console.Write("["+(dist+1)+","+(litlen+3)+"]: "); + // } + + int lc = Lcode(litlen); + literalTree.WriteSymbol(lc); + + int bits = (lc - 261) / 4; + if (bits > 0 && bits <= 5) + { + pending.WriteBits(litlen & ((1 << bits) - 1), bits); + } + + int dc = Dcode(dist); + distTree.WriteSymbol(dc); + + bits = dc / 2 - 1; + if (bits > 0) + { + pending.WriteBits(dist & ((1 << bits) - 1), bits); + } + } + else + { + // if (DeflaterConstants.DEBUGGING) { + // if (litlen > 32 && litlen < 127) { + // Console.Write("("+(char)litlen+"): "); + // } else { + // Console.Write("{"+litlen+"}: "); + // } + // } + literalTree.WriteSymbol(litlen); + } + } + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + Console.Write("EOF: "); + } +#endif + literalTree.WriteSymbol(EOF_SYMBOL); + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + literalTree.CheckEmpty(); + distTree.CheckEmpty(); + } +#endif + } + + /// + /// Flush block to output with no compression + /// + /// Data to write + /// Index of first byte to write + /// Count of bytes to write + /// True if this is the last block + public void FlushStoredBlock(byte[] stored, int storedOffset, int storedLength, bool lastBlock) + { +#if DebugDeflation + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("Flushing stored block "+ storedLength); + // } +#endif + pending.WriteBits((DeflaterConstants.STORED_BLOCK << 1) + (lastBlock ? 
1 : 0), 3); + pending.AlignToByte(); + pending.WriteShort(storedLength); + pending.WriteShort(~storedLength); + pending.WriteBlock(stored, storedOffset, storedLength); + Reset(); + } + + /// + /// Flush block to output with compression + /// + /// Data to flush + /// Index of first byte to flush + /// Count of bytes to flush + /// True if this is the last block + public void FlushBlock(byte[] stored, int storedOffset, int storedLength, bool lastBlock) + { + literalTree.freqs[EOF_SYMBOL]++; + + // Build trees + literalTree.BuildTree(); + distTree.BuildTree(); + + // Calculate bitlen frequency + literalTree.CalcBLFreq(blTree); + distTree.CalcBLFreq(blTree); + + // Build bitlen tree + blTree.BuildTree(); + + int blTreeCodes = 4; + for (int i = 18; i > blTreeCodes; i--) + { + if (blTree.length[BL_ORDER[i]] > 0) + { + blTreeCodes = i + 1; + } + } + int opt_len = 14 + blTreeCodes * 3 + blTree.GetEncodedLength() + + literalTree.GetEncodedLength() + distTree.GetEncodedLength() + + extra_bits; + + int static_len = extra_bits; + for (int i = 0; i < LITERAL_NUM; i++) + { + static_len += literalTree.freqs[i] * staticLLength[i]; + } + for (int i = 0; i < DIST_NUM; i++) + { + static_len += distTree.freqs[i] * staticDLength[i]; + } + if (opt_len >= static_len) + { + // Force static trees + opt_len = static_len; + } + + if (storedOffset >= 0 && storedLength + 4 < opt_len >> 3) + { + // Store Block + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("Storing, since " + storedLength + " < " + opt_len + // + " <= " + static_len); + // } + FlushStoredBlock(stored, storedOffset, storedLength, lastBlock); + } + else if (opt_len == static_len) + { + // Encode with static tree + pending.WriteBits((DeflaterConstants.STATIC_TREES << 1) + (lastBlock ? 1 : 0), 3); + literalTree.SetStaticCodes(staticLCodes, staticLLength); + distTree.SetStaticCodes(staticDCodes, staticDLength); + CompressBlock(); + Reset(); + } + else + { + // Encode with dynamic tree + pending.WriteBits((DeflaterConstants.DYN_TREES << 1) + (lastBlock ? 1 : 0), 3); + SendAllTrees(blTreeCodes); + CompressBlock(); + Reset(); + } + } + + /// + /// Get value indicating if internal buffer is full + /// + /// true if buffer is full + public bool IsFull() + { + return last_lit >= BUFSIZE; + } + + /// + /// Add literal to buffer + /// + /// Literal value to add to buffer. + /// Value indicating internal buffer is full + public bool TallyLit(int literal) + { + // if (DeflaterConstants.DEBUGGING) { + // if (lit > 32 && lit < 127) { + // //Console.WriteLine("("+(char)lit+")"); + // } else { + // //Console.WriteLine("{"+lit+"}"); + // } + // } + d_buf[last_lit] = 0; + l_buf[last_lit++] = (byte)literal; + literalTree.freqs[literal]++; + return IsFull(); + } + + /// + /// Add distance code and length to literal and distance trees + /// + /// Distance code + /// Length + /// Value indicating if internal buffer is full + public bool TallyDist(int distance, int length) + { + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("[" + distance + "," + length + "]"); + // } + + d_buf[last_lit] = (short)distance; + l_buf[last_lit++] = (byte)(length - 3); + + int lc = Lcode(length - 3); + literalTree.freqs[lc]++; + if (lc >= 265 && lc < 285) + { + extra_bits += (lc - 261) / 4; + } + + int dc = Dcode(distance - 1); + distTree.freqs[dc]++; + if (dc >= 4) + { + extra_bits += dc / 2 - 1; + } + return IsFull(); + } + + /// + /// Reverse the bits of a 16 bit value. 
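+ /// For example, BitReverse(0x0001) produces the bit pattern 0x8000,
+ /// since bit 0 maps to bit 15; the result is assembled nibble by
+ /// nibble from the 4-bit reverse table above.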
+ /// + /// Value to reverse bits + /// Value with bits reversed + public static short BitReverse(int toReverse) + { + return (short)(bit4Reverse[toReverse & 0xF] << 12 | + bit4Reverse[(toReverse >> 4) & 0xF] << 8 | + bit4Reverse[(toReverse >> 8) & 0xF] << 4 | + bit4Reverse[toReverse >> 12]); + } + + private static int Lcode(int length) + { + if (length == 255) + { + return 285; + } + + int code = 257; + while (length >= 8) + { + code += 4; + length >>= 1; + } + return code + length; + } + + private static int Dcode(int distance) + { + int code = 0; + while (distance >= 4) + { + code += 2; + distance >>= 1; + } + return code + distance; + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterHuffman.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterHuffman.cs.meta new file mode 100644 index 0000000..9688ae3 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterHuffman.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: ff8a41dc2774943e4a9e8ddac81bf587 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterPending.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterPending.cs new file mode 100644 index 0000000..80d3e21 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterPending.cs @@ -0,0 +1,17 @@ +namespace ICSharpCode.SharpZipLib.Zip.Compression +{ + /// + /// This class stores the pending output of the Deflater. + /// + /// author of the original java version : Jochen Hoenicke + /// + public class DeflaterPending : PendingBuffer + { + /// + /// Construct instance with default buffer size + /// + public DeflaterPending() : base(DeflaterConstants.PENDING_BUF_SIZE) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterPending.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterPending.cs.meta new file mode 100644 index 0000000..afb61ce --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/DeflaterPending.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 10f0bf04b06f84a009b71431737b4017 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Inflater.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Inflater.cs new file mode 100644 index 0000000..439b4c6 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Inflater.cs @@ -0,0 +1,887 @@ +using ICSharpCode.SharpZipLib.Checksum; +using ICSharpCode.SharpZipLib.Zip.Compression.Streams; +using System; + +namespace ICSharpCode.SharpZipLib.Zip.Compression +{ + /// + /// Inflater is used to decompress data that has been compressed according + /// to the "deflate" standard described in rfc1951. + /// + /// By default Zlib (rfc1950) headers and footers are expected in the input. + /// You can use constructor public Inflater(bool noHeader) passing true + /// if there is no Zlib header information + /// + /// The usage is as following. 
First you have to set some input with + /// SetInput(), then Inflate() it. If inflate doesn't + /// inflate any bytes there may be three reasons: + ///
    + ///
+ /// - IsNeedingInput() returns true because the input buffer is empty.
+ /// You have to provide more input with SetInput().
+ /// NOTE: IsNeedingInput() also returns true when the stream is finished.
+ /// - IsNeedingDictionary() returns true; you have to provide a preset
+ /// dictionary with SetDictionary().
+ /// - IsFinished returns true; the inflater has finished.
+ ///
+ /// Once the first output byte is produced, a dictionary will not be + /// needed at a later stage. + /// + /// author of the original java version : John Leuner, Jochen Hoenicke + ///
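+ /// A minimal usage sketch (compressedData, moreCompressedData and
+ /// presetDictionary are caller-supplied placeholders, not part of
+ /// this API):
+ /// Inflater inflater = new Inflater();
+ /// inflater.SetInput(compressedData);
+ /// byte[] buffer = new byte[4096];
+ /// while (!inflater.IsFinished)
+ /// {
+ ///     int n = inflater.Inflate(buffer);
+ ///     if (n > 0)
+ ///     {
+ ///         // consume buffer[0..n)
+ ///     }
+ ///     else if (inflater.IsNeedingInput)
+ ///     {
+ ///         inflater.SetInput(moreCompressedData);
+ ///     }
+ ///     else if (inflater.IsNeedingDictionary)
+ ///     {
+ ///         inflater.SetDictionary(presetDictionary);
+ ///     }
+ /// }
+ ///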
+ public class Inflater + { + #region Constants/Readonly + + /// + /// Copy lengths for literal codes 257..285 + /// + private static readonly int[] CPLENS = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258 + }; + + /// + /// Extra bits for literal codes 257..285 + /// + private static readonly int[] CPLEXT = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 + }; + + /// + /// Copy offsets for distance codes 0..29 + /// + private static readonly int[] CPDIST = { + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577 + }; + + /// + /// Extra bits for distance codes + /// + private static readonly int[] CPDEXT = { + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 13, 13 + }; + + /// + /// These are the possible states for an inflater + /// + private const int DECODE_HEADER = 0; + + private const int DECODE_DICT = 1; + private const int DECODE_BLOCKS = 2; + private const int DECODE_STORED_LEN1 = 3; + private const int DECODE_STORED_LEN2 = 4; + private const int DECODE_STORED = 5; + private const int DECODE_DYN_HEADER = 6; + private const int DECODE_HUFFMAN = 7; + private const int DECODE_HUFFMAN_LENBITS = 8; + private const int DECODE_HUFFMAN_DIST = 9; + private const int DECODE_HUFFMAN_DISTBITS = 10; + private const int DECODE_CHKSUM = 11; + private const int FINISHED = 12; + + #endregion Constants/Readonly + + #region Instance Fields + + /// + /// This variable contains the current state. + /// + private int mode; + + /// + /// The adler checksum of the dictionary or of the decompressed + /// stream, as it is written in the header resp. footer of the + /// compressed stream. + /// Only valid if mode is DECODE_DICT or DECODE_CHKSUM. + /// + private int readAdler; + + /// + /// The number of bits needed to complete the current state. This + /// is valid, if mode is DECODE_DICT, DECODE_CHKSUM, + /// DECODE_HUFFMAN_LENBITS or DECODE_HUFFMAN_DISTBITS. + /// + private int neededBits; + + private int repLength; + private int repDist; + private int uncomprLen; + + /// + /// True, if the last block flag was set in the last block of the + /// inflated stream. This means that the stream ends after the + /// current block. + /// + private bool isLastBlock; + + /// + /// The total number of inflated bytes. + /// + private long totalOut; + + /// + /// The total number of bytes set with setInput(). This is not the + /// value returned by the TotalIn property, since this also includes the + /// unprocessed input. + /// + private long totalIn; + + /// + /// This variable stores the noHeader flag that was given to the constructor. + /// True means, that the inflated stream doesn't contain a Zlib header or + /// footer. + /// + private bool noHeader; + + private readonly StreamManipulator input; + private OutputWindow outputWindow; + private InflaterDynHeader dynHeader; + private InflaterHuffmanTree litlenTree, distTree; + private Adler32 adler; + + #endregion Instance Fields + + #region Constructors + + /// + /// Creates a new inflater or RFC1951 decompressor + /// RFC1950/Zlib headers and footers will be expected in the input data + /// + public Inflater() : this(false) + { + } + + /// + /// Creates a new inflater. + /// + /// + /// True if no RFC1950/Zlib header and footer fields are expected in the input data + /// + /// This is used for GZIPed/Zipped input. 
+ /// + /// For compatibility with + /// Sun JDK you should provide one byte of input more than needed in + /// this case. + /// + public Inflater(bool noHeader) + { + this.noHeader = noHeader; + if (!noHeader) + this.adler = new Adler32(); + input = new StreamManipulator(); + outputWindow = new OutputWindow(); + mode = noHeader ? DECODE_BLOCKS : DECODE_HEADER; + } + + #endregion Constructors + + /// + /// Resets the inflater so that a new stream can be decompressed. All + /// pending input and output will be discarded. + /// + public void Reset() + { + mode = noHeader ? DECODE_BLOCKS : DECODE_HEADER; + totalIn = 0; + totalOut = 0; + input.Reset(); + outputWindow.Reset(); + dynHeader = null; + litlenTree = null; + distTree = null; + isLastBlock = false; + adler?.Reset(); + } + + /// + /// Decodes a zlib/RFC1950 header. + /// + /// + /// False if more input is needed. + /// + /// + /// The header is invalid. + /// + private bool DecodeHeader() + { + int header = input.PeekBits(16); + if (header < 0) + { + return false; + } + input.DropBits(16); + + // The header is written in "wrong" byte order + header = ((header << 8) | (header >> 8)) & 0xffff; + if (header % 31 != 0) + { + throw new SharpZipBaseException("Header checksum illegal"); + } + + if ((header & 0x0f00) != (Deflater.DEFLATED << 8)) + { + throw new SharpZipBaseException("Compression Method unknown"); + } + + /* Maximum size of the backwards window in bits. + * We currently ignore this, but we could use it to make the + * inflater window more space efficient. On the other hand the + * full window (15 bits) is needed most times, anyway. + int max_wbits = ((header & 0x7000) >> 12) + 8; + */ + + if ((header & 0x0020) == 0) + { // Dictionary flag? + mode = DECODE_BLOCKS; + } + else + { + mode = DECODE_DICT; + neededBits = 32; + } + return true; + } + + /// + /// Decodes the dictionary checksum after the deflate header. + /// + /// + /// False if more input is needed. + /// + private bool DecodeDict() + { + while (neededBits > 0) + { + int dictByte = input.PeekBits(8); + if (dictByte < 0) + { + return false; + } + input.DropBits(8); + readAdler = (readAdler << 8) | dictByte; + neededBits -= 8; + } + return false; + } + + /// + /// Decodes the huffman encoded symbols in the input stream. + /// + /// + /// false if more input is needed, true if output window is + /// full or the current block ends. + /// + /// + /// if deflated stream is invalid. 
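+ /// The decode loop only runs while at least 258 bytes are free in
+ /// the output window, so a match of the maximum length (258, the
+ /// largest CPLENS entry) can always be copied without further
+ /// bounds checks.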
+ /// + private bool DecodeHuffman() + { + int free = outputWindow.GetFreeSpace(); + while (free >= 258) + { + int symbol; + switch (mode) + { + case DECODE_HUFFMAN: + // This is the inner loop so it is optimized a bit + while (((symbol = litlenTree.GetSymbol(input)) & ~0xff) == 0) + { + outputWindow.Write(symbol); + if (--free < 258) + { + return true; + } + } + + if (symbol < 257) + { + if (symbol < 0) + { + return false; + } + else + { + // symbol == 256: end of block + distTree = null; + litlenTree = null; + mode = DECODE_BLOCKS; + return true; + } + } + + try + { + repLength = CPLENS[symbol - 257]; + neededBits = CPLEXT[symbol - 257]; + } + catch (Exception) + { + throw new SharpZipBaseException("Illegal rep length code"); + } + goto case DECODE_HUFFMAN_LENBITS; // fall through + + case DECODE_HUFFMAN_LENBITS: + if (neededBits > 0) + { + mode = DECODE_HUFFMAN_LENBITS; + int i = input.PeekBits(neededBits); + if (i < 0) + { + return false; + } + input.DropBits(neededBits); + repLength += i; + } + mode = DECODE_HUFFMAN_DIST; + goto case DECODE_HUFFMAN_DIST; // fall through + + case DECODE_HUFFMAN_DIST: + symbol = distTree.GetSymbol(input); + if (symbol < 0) + { + return false; + } + + try + { + repDist = CPDIST[symbol]; + neededBits = CPDEXT[symbol]; + } + catch (Exception) + { + throw new SharpZipBaseException("Illegal rep dist code"); + } + + goto case DECODE_HUFFMAN_DISTBITS; // fall through + + case DECODE_HUFFMAN_DISTBITS: + if (neededBits > 0) + { + mode = DECODE_HUFFMAN_DISTBITS; + int i = input.PeekBits(neededBits); + if (i < 0) + { + return false; + } + input.DropBits(neededBits); + repDist += i; + } + + outputWindow.Repeat(repLength, repDist); + free -= repLength; + mode = DECODE_HUFFMAN; + break; + + default: + throw new SharpZipBaseException("Inflater unknown mode"); + } + } + return true; + } + + /// + /// Decodes the adler checksum after the deflate stream. + /// + /// + /// false if more input is needed. + /// + /// + /// If checksum doesn't match. + /// + private bool DecodeChksum() + { + while (neededBits > 0) + { + int chkByte = input.PeekBits(8); + if (chkByte < 0) + { + return false; + } + input.DropBits(8); + readAdler = (readAdler << 8) | chkByte; + neededBits -= 8; + } + + if ((int)adler?.Value != readAdler) + { + throw new SharpZipBaseException("Adler chksum doesn't match: " + (int)adler?.Value + " vs. " + readAdler); + } + + mode = FINISHED; + return false; + } + + /// + /// Decodes the deflated stream. + /// + /// + /// false if more input is needed, or if finished. + /// + /// + /// if deflated stream is invalid. 
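+ /// In DECODE_BLOCKS the 3-bit block header is read as described in
+ /// RFC 1951 section 3.2.3: the low bit is BFINAL and the upper two
+ /// bits select stored, static-tree or dynamic-tree decoding.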
+ /// + private bool Decode() + { + switch (mode) + { + case DECODE_HEADER: + return DecodeHeader(); + + case DECODE_DICT: + return DecodeDict(); + + case DECODE_CHKSUM: + return DecodeChksum(); + + case DECODE_BLOCKS: + if (isLastBlock) + { + if (noHeader) + { + mode = FINISHED; + return false; + } + else + { + input.SkipToByteBoundary(); + neededBits = 32; + mode = DECODE_CHKSUM; + return true; + } + } + + int type = input.PeekBits(3); + if (type < 0) + { + return false; + } + input.DropBits(3); + + isLastBlock |= (type & 1) != 0; + switch (type >> 1) + { + case DeflaterConstants.STORED_BLOCK: + input.SkipToByteBoundary(); + mode = DECODE_STORED_LEN1; + break; + + case DeflaterConstants.STATIC_TREES: + litlenTree = InflaterHuffmanTree.defLitLenTree; + distTree = InflaterHuffmanTree.defDistTree; + mode = DECODE_HUFFMAN; + break; + + case DeflaterConstants.DYN_TREES: + dynHeader = new InflaterDynHeader(input); + mode = DECODE_DYN_HEADER; + break; + + default: + throw new SharpZipBaseException("Unknown block type " + type); + } + return true; + + case DECODE_STORED_LEN1: + { + if ((uncomprLen = input.PeekBits(16)) < 0) + { + return false; + } + input.DropBits(16); + mode = DECODE_STORED_LEN2; + } + goto case DECODE_STORED_LEN2; // fall through + + case DECODE_STORED_LEN2: + { + int nlen = input.PeekBits(16); + if (nlen < 0) + { + return false; + } + input.DropBits(16); + if (nlen != (uncomprLen ^ 0xffff)) + { + throw new SharpZipBaseException("broken uncompressed block"); + } + mode = DECODE_STORED; + } + goto case DECODE_STORED; // fall through + + case DECODE_STORED: + { + int more = outputWindow.CopyStored(input, uncomprLen); + uncomprLen -= more; + if (uncomprLen == 0) + { + mode = DECODE_BLOCKS; + return true; + } + return !input.IsNeedingInput; + } + + case DECODE_DYN_HEADER: + if (!dynHeader.AttemptRead()) + { + return false; + } + + litlenTree = dynHeader.LiteralLengthTree; + distTree = dynHeader.DistanceTree; + mode = DECODE_HUFFMAN; + goto case DECODE_HUFFMAN; // fall through + + case DECODE_HUFFMAN: + case DECODE_HUFFMAN_LENBITS: + case DECODE_HUFFMAN_DIST: + case DECODE_HUFFMAN_DISTBITS: + return DecodeHuffman(); + + case FINISHED: + return false; + + default: + throw new SharpZipBaseException("Inflater.Decode unknown mode"); + } + } + + /// + /// Sets the preset dictionary. This should only be called, if + /// needsDictionary() returns true and it should set the same + /// dictionary, that was used for deflating. The getAdler() + /// function returns the checksum of the dictionary needed. + /// + /// + /// The dictionary. + /// + public void SetDictionary(byte[] buffer) + { + SetDictionary(buffer, 0, buffer.Length); + } + + /// + /// Sets the preset dictionary. This should only be called, if + /// needsDictionary() returns true and it should set the same + /// dictionary, that was used for deflating. The getAdler() + /// function returns the checksum of the dictionary needed. + /// + /// + /// The dictionary. + /// + /// + /// The index into buffer where the dictionary starts. + /// + /// + /// The number of bytes in the dictionary. + /// + /// + /// No dictionary is needed. 
+ /// + /// + /// The adler checksum for the buffer is invalid + /// + public void SetDictionary(byte[] buffer, int index, int count) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (index < 0) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + if (!IsNeedingDictionary) + { + throw new InvalidOperationException("Dictionary is not needed"); + } + + adler?.Update(new ArraySegment(buffer, index, count)); + + if (adler != null && (int)adler.Value != readAdler) + { + throw new SharpZipBaseException("Wrong adler checksum"); + } + adler?.Reset(); + outputWindow.CopyDict(buffer, index, count); + mode = DECODE_BLOCKS; + } + + /// + /// Sets the input. This should only be called, if needsInput() + /// returns true. + /// + /// + /// the input. + /// + public void SetInput(byte[] buffer) + { + SetInput(buffer, 0, buffer.Length); + } + + /// + /// Sets the input. This should only be called, if needsInput() + /// returns true. + /// + /// + /// The source of input data + /// + /// + /// The index into buffer where the input starts. + /// + /// + /// The number of bytes of input to use. + /// + /// + /// No input is needed. + /// + /// + /// The index and/or count are wrong. + /// + public void SetInput(byte[] buffer, int index, int count) + { + input.SetInput(buffer, index, count); + totalIn += (long)count; + } + + /// + /// Inflates the compressed stream to the output buffer. If this + /// returns 0, you should check, whether IsNeedingDictionary(), + /// IsNeedingInput() or IsFinished() returns true, to determine why no + /// further output is produced. + /// + /// + /// the output buffer. + /// + /// + /// The number of bytes written to the buffer, 0 if no further + /// output can be produced. + /// + /// + /// if buffer has length 0. + /// + /// + /// if deflated stream is invalid. + /// + public int Inflate(byte[] buffer) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + return Inflate(buffer, 0, buffer.Length); + } + + /// + /// Inflates the compressed stream to the output buffer. If this + /// returns 0, you should check, whether needsDictionary(), + /// needsInput() or finished() returns true, to determine why no + /// further output is produced. + /// + /// + /// the output buffer. + /// + /// + /// the offset in buffer where storing starts. + /// + /// + /// the maximum number of bytes to output. + /// + /// + /// the number of bytes written to the buffer, 0 if no further output can be produced. + /// + /// + /// if count is less than 0. + /// + /// + /// if the index and / or count are wrong. + /// + /// + /// if deflated stream is invalid. + /// + public int Inflate(byte[] buffer, int offset, int count) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count), "count cannot be negative"); + } + + if (offset < 0) + { + throw new ArgumentOutOfRangeException(nameof(offset), "offset cannot be negative"); + } + + if (offset + count > buffer.Length) + { + throw new ArgumentException("count exceeds buffer bounds"); + } + + // Special case: count may be zero + if (count == 0) + { + if (!IsFinished) + { // -jr- 08-Nov-2003 INFLATE_BUG fix.. 
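+ // Stepping the state machine once even though no output was
+ // requested lets the header, dictionary and checksum states
+ // make progress on the available input.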
+ Decode(); + } + return 0; + } + + int bytesCopied = 0; + + do + { + if (mode != DECODE_CHKSUM) + { + /* Don't give away any output, if we are waiting for the + * checksum in the input stream. + * + * With this trick we have always: + * IsNeedingInput() and not IsFinished() + * implies more output can be produced. + */ + int more = outputWindow.CopyOutput(buffer, offset, count); + if (more > 0) + { + adler?.Update(new ArraySegment(buffer, offset, more)); + offset += more; + bytesCopied += more; + totalOut += (long)more; + count -= more; + if (count == 0) + { + return bytesCopied; + } + } + } + } while (Decode() || ((outputWindow.GetAvailable() > 0) && (mode != DECODE_CHKSUM))); + return bytesCopied; + } + + /// + /// Returns true, if the input buffer is empty. + /// You should then call setInput(). + /// NOTE: This method also returns true when the stream is finished. + /// + public bool IsNeedingInput + { + get + { + return input.IsNeedingInput; + } + } + + /// + /// Returns true, if a preset dictionary is needed to inflate the input. + /// + public bool IsNeedingDictionary + { + get + { + return mode == DECODE_DICT && neededBits == 0; + } + } + + /// + /// Returns true, if the inflater has finished. This means, that no + /// input is needed and no output can be produced. + /// + public bool IsFinished + { + get + { + return mode == FINISHED && outputWindow.GetAvailable() == 0; + } + } + + /// + /// Gets the adler checksum. This is either the checksum of all + /// uncompressed bytes returned by inflate(), or if needsDictionary() + /// returns true (and thus no output was yet produced) this is the + /// adler checksum of the expected dictionary. + /// + /// + /// the adler checksum. + /// + public int Adler + { + get + { + if (IsNeedingDictionary) + { + return readAdler; + } + else if (adler != null) + { + return (int)adler.Value; + } + else + { + return 0; + } + } + } + + /// + /// Gets the total number of output bytes returned by Inflate(). + /// + /// + /// the total number of output bytes. + /// + public long TotalOut + { + get + { + return totalOut; + } + } + + /// + /// Gets the total number of processed compressed input bytes. + /// + /// + /// The total number of bytes of processed input bytes. + /// + public long TotalIn + { + get + { + return totalIn - (long)RemainingInput; + } + } + + /// + /// Gets the number of unprocessed input bytes. Useful, if the end of the + /// stream is reached and you want to further process the bytes after + /// the deflate stream. + /// + /// + /// The number of bytes of the input which have not been processed. + /// + public int RemainingInput + { + // TODO: This should be a long? 
+ get + { + return input.AvailableBytes; + } + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Inflater.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Inflater.cs.meta new file mode 100644 index 0000000..985de0b --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Inflater.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 50084530e2a0a477c83e686728cb9f0f +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterDynHeader.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterDynHeader.cs new file mode 100644 index 0000000..8e0196b --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterDynHeader.cs @@ -0,0 +1,151 @@ +using ICSharpCode.SharpZipLib.Zip.Compression.Streams; +using System; +using System.Collections.Generic; + +namespace ICSharpCode.SharpZipLib.Zip.Compression +{ + internal class InflaterDynHeader + { + #region Constants + + // maximum number of literal/length codes + private const int LITLEN_MAX = 286; + + // maximum number of distance codes + private const int DIST_MAX = 30; + + // maximum data code lengths to read + private const int CODELEN_MAX = LITLEN_MAX + DIST_MAX; + + // maximum meta code length codes to read + private const int META_MAX = 19; + + private static readonly int[] MetaCodeLengthIndex = + { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; + + #endregion Constants + + /// + /// Continue decoding header from until more bits are needed or decoding has been completed + /// + /// Returns whether decoding could be completed + public bool AttemptRead() + => !state.MoveNext() || state.Current; + + public InflaterDynHeader(StreamManipulator input) + { + this.input = input; + stateMachine = CreateStateMachine(); + state = stateMachine.GetEnumerator(); + } + + private IEnumerable CreateStateMachine() + { + // Read initial code length counts from header + while (!input.TryGetBits(5, ref litLenCodeCount, 257)) yield return false; + while (!input.TryGetBits(5, ref distanceCodeCount, 1)) yield return false; + while (!input.TryGetBits(4, ref metaCodeCount, 4)) yield return false; + var dataCodeCount = litLenCodeCount + distanceCodeCount; + + if (litLenCodeCount > LITLEN_MAX) throw new ValueOutOfRangeException(nameof(litLenCodeCount)); + if (distanceCodeCount > DIST_MAX) throw new ValueOutOfRangeException(nameof(distanceCodeCount)); + if (metaCodeCount > META_MAX) throw new ValueOutOfRangeException(nameof(metaCodeCount)); + + // Load code lengths for the meta tree from the header bits + for (int i = 0; i < metaCodeCount; i++) + { + while (!input.TryGetBits(3, ref codeLengths, MetaCodeLengthIndex[i])) yield return false; + } + + var metaCodeTree = new InflaterHuffmanTree(codeLengths); + + // Decompress the meta tree symbols into the data table code lengths + int index = 0; + while (index < dataCodeCount) + { + byte codeLength; + int symbol; + + while ((symbol = metaCodeTree.GetSymbol(input)) < 0) yield return false; + + if (symbol < 16) + { + // append literal code length + codeLengths[index++] = (byte)symbol; + } + else + { + int repeatCount = 0; + + if (symbol == 16) // Repeat last code length 3..6 times + { + if (index == 0) + throw new StreamDecodingException("Cannot repeat 
previous code length when no other code length has been read");
+
+ codeLength = codeLengths[index - 1];
+
+ // 2 bits + 3, [3..6]
+ while (!input.TryGetBits(2, ref repeatCount, 3)) yield return false;
+ }
+ else if (symbol == 17) // Repeat zero 3..10 times
+ {
+ codeLength = 0;
+
+ // 3 bits + 3, [3..10]
+ while (!input.TryGetBits(3, ref repeatCount, 3)) yield return false;
+ }
+ else // (symbol == 18), Repeat zero 11..138 times
+ {
+ codeLength = 0;
+
+ // 7 bits + 11, [11..138]
+ while (!input.TryGetBits(7, ref repeatCount, 11)) yield return false;
+ }
+
+ if (index + repeatCount > dataCodeCount)
+ throw new StreamDecodingException("Cannot repeat code lengths past total number of data code lengths");
+
+ while (repeatCount-- > 0)
+ codeLengths[index++] = codeLength;
+ }
+ }
+
+ if (codeLengths[256] == 0)
+ throw new StreamDecodingException("Inflater dynamic header end-of-block code missing");
+
+ litLenTree = new InflaterHuffmanTree(new ArraySegment<byte>(codeLengths, 0, litLenCodeCount));
+ distTree = new InflaterHuffmanTree(new ArraySegment<byte>(codeLengths, litLenCodeCount, distanceCodeCount));
+
+ yield return true;
+ }
+
+ ///
+ /// Get literal/length huffman tree, must not be used before AttemptRead has returned true
+ ///
+ /// If header has not been successfully read by the state machine
+ public InflaterHuffmanTree LiteralLengthTree
+ => litLenTree ?? throw new StreamDecodingException("Header properties were accessed before header had been successfully read");
+
+ ///
+ /// Get distance huffman tree, must not be used before AttemptRead has returned true
+ ///
+ /// If header has not been successfully read by the state machine
+ public InflaterHuffmanTree DistanceTree
+ => distTree ?? throw new StreamDecodingException("Header properties were accessed before header had been successfully read");
+
+ #region Instance Fields
+
+ private readonly StreamManipulator input;
+ private readonly IEnumerator<bool> state;
+ private readonly IEnumerable<bool> stateMachine;
+
+ private byte[] codeLengths = new byte[CODELEN_MAX];
+
+ private InflaterHuffmanTree litLenTree;
+ private InflaterHuffmanTree distTree;
+
+ private int litLenCodeCount, distanceCodeCount, metaCodeCount;
+
+ #endregion Instance Fields
+ }
+}
diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterDynHeader.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterDynHeader.cs.meta
new file mode 100644
index 0000000..01beb89
--- /dev/null
+++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterDynHeader.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 18933e8df86c34bd49e18bf016e82a29
+MonoImporter:
+  externalObjects: {}
+  serializedVersion: 2
+  defaultReferences: []
+  executionOrder: 0
+  icon: {instanceID: 0}
+  userData:
+  assetBundleName:
+  assetBundleVariant:
diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterHuffmanTree.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterHuffmanTree.cs
new file mode 100644
index 0000000..ed31882
--- /dev/null
+++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterHuffmanTree.cs
@@ -0,0 +1,237 @@
+using ICSharpCode.SharpZipLib.Zip.Compression.Streams;
+using System;
+using System.Collections.Generic;
+
+namespace ICSharpCode.SharpZipLib.Zip.Compression
+{
+ ///
+ /// Huffman tree used for inflation
+ ///
+ public class InflaterHuffmanTree
+ {
+ #region Constants
+
+ private const int MAX_BITLEN = 15;
+
+ #endregion Constants
+
+ #region Instance Fields
+
private short[] tree; + + #endregion Instance Fields + + /// + /// Literal length tree + /// + public static InflaterHuffmanTree defLitLenTree; + + /// + /// Distance tree + /// + public static InflaterHuffmanTree defDistTree; + + static InflaterHuffmanTree() + { + try + { + byte[] codeLengths = new byte[288]; + int i = 0; + while (i < 144) + { + codeLengths[i++] = 8; + } + while (i < 256) + { + codeLengths[i++] = 9; + } + while (i < 280) + { + codeLengths[i++] = 7; + } + while (i < 288) + { + codeLengths[i++] = 8; + } + defLitLenTree = new InflaterHuffmanTree(codeLengths); + + codeLengths = new byte[32]; + i = 0; + while (i < 32) + { + codeLengths[i++] = 5; + } + defDistTree = new InflaterHuffmanTree(codeLengths); + } + catch (Exception) + { + throw new SharpZipBaseException("InflaterHuffmanTree: static tree length illegal"); + } + } + + #region Constructors + + /// + /// Constructs a Huffman tree from the array of code lengths. + /// + /// + /// the array of code lengths + /// + public InflaterHuffmanTree(IList codeLengths) + { + BuildTree(codeLengths); + } + + #endregion Constructors + + private void BuildTree(IList codeLengths) + { + int[] blCount = new int[MAX_BITLEN + 1]; + int[] nextCode = new int[MAX_BITLEN + 1]; + + for (int i = 0; i < codeLengths.Count; i++) + { + int bits = codeLengths[i]; + if (bits > 0) + { + blCount[bits]++; + } + } + + int code = 0; + int treeSize = 512; + for (int bits = 1; bits <= MAX_BITLEN; bits++) + { + nextCode[bits] = code; + code += blCount[bits] << (16 - bits); + if (bits >= 10) + { + /* We need an extra table for bit lengths >= 10. */ + int start = nextCode[bits] & 0x1ff80; + int end = code & 0x1ff80; + treeSize += (end - start) >> (16 - bits); + } + } + + /* -jr comment this out! doesnt work for dynamic trees and pkzip 2.04g + if (code != 65536) + { + throw new SharpZipBaseException("Code lengths don't add up properly."); + } + */ + /* Now create and fill the extra tables from longest to shortest + * bit len. This way the sub trees will be aligned. + */ + tree = new short[treeSize]; + int treePtr = 512; + for (int bits = MAX_BITLEN; bits >= 10; bits--) + { + int end = code & 0x1ff80; + code -= blCount[bits] << (16 - bits); + int start = code & 0x1ff80; + for (int i = start; i < end; i += 1 << 7) + { + tree[DeflaterHuffman.BitReverse(i)] = (short)((-treePtr << 4) | bits); + treePtr += 1 << (bits - 9); + } + } + + for (int i = 0; i < codeLengths.Count; i++) + { + int bits = codeLengths[i]; + if (bits == 0) + { + continue; + } + code = nextCode[bits]; + int revcode = DeflaterHuffman.BitReverse(code); + if (bits <= 9) + { + do + { + tree[revcode] = (short)((i << 4) | bits); + revcode += 1 << bits; + } while (revcode < 512); + } + else + { + int subTree = tree[revcode & 511]; + int treeLen = 1 << (subTree & 15); + subTree = -(subTree >> 4); + do + { + tree[subTree | (revcode >> 9)] = (short)((i << 4) | bits); + revcode += 1 << bits; + } while (revcode < treeLen); + } + nextCode[bits] = code + (1 << (16 - bits)); + } + } + + /// + /// Reads the next symbol from input. The symbol is encoded using the + /// huffman tree. + /// + /// + /// input the input source. + /// + /// + /// the next symbol, or -1 if not enough input is available. 
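+ /// Lookup is two-level: a 9-bit peek indexes the root table, whose
+ /// non-negative entries pack the symbol in the upper bits and the
+ /// code length in the low four bits, while negative entries point
+ /// into a sub-table used for codes longer than nine bits.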
+ /// + public int GetSymbol(StreamManipulator input) + { + int lookahead, symbol; + if ((lookahead = input.PeekBits(9)) >= 0) + { + symbol = tree[lookahead]; + int bitlen = symbol & 15; + + if (symbol >= 0) + { + if(bitlen == 0){ + throw new SharpZipBaseException("Encountered invalid codelength 0"); + } + input.DropBits(bitlen); + return symbol >> 4; + } + int subtree = -(symbol >> 4); + if ((lookahead = input.PeekBits(bitlen)) >= 0) + { + symbol = tree[subtree | (lookahead >> 9)]; + input.DropBits(symbol & 15); + return symbol >> 4; + } + else + { + int bits = input.AvailableBits; + lookahead = input.PeekBits(bits); + symbol = tree[subtree | (lookahead >> 9)]; + if ((symbol & 15) <= bits) + { + input.DropBits(symbol & 15); + return symbol >> 4; + } + else + { + return -1; + } + } + } + else // Less than 9 bits + { + int bits = input.AvailableBits; + lookahead = input.PeekBits(bits); + symbol = tree[lookahead]; + if (symbol >= 0 && (symbol & 15) <= bits) + { + input.DropBits(symbol & 15); + return symbol >> 4; + } + else + { + return -1; + } + } + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterHuffmanTree.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterHuffmanTree.cs.meta new file mode 100644 index 0000000..1752049 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/InflaterHuffmanTree.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 61c7a4a313e9b4db584b591305797905 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/PendingBuffer.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/PendingBuffer.cs new file mode 100644 index 0000000..6ed7e4a --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/PendingBuffer.cs @@ -0,0 +1,268 @@ +namespace ICSharpCode.SharpZipLib.Zip.Compression +{ + /// + /// This class is general purpose class for writing data to a buffer. + /// + /// It allows you to write bits as well as bytes + /// Based on DeflaterPending.java + /// + /// author of the original java version : Jochen Hoenicke + /// + public class PendingBuffer + { + #region Instance Fields + + /// + /// Internal work buffer + /// + private readonly byte[] buffer; + + private int start; + private int end; + + private uint bits; + private int bitCount; + + #endregion Instance Fields + + #region Constructors + + /// + /// construct instance using default buffer size of 4096 + /// + public PendingBuffer() : this(4096) + { + } + + /// + /// construct instance using specified buffer size + /// + /// + /// size to use for internal buffer + /// + public PendingBuffer(int bufferSize) + { + buffer = new byte[bufferSize]; + } + + #endregion Constructors + + /// + /// Clear internal state/buffers + /// + public void Reset() + { + start = end = bitCount = 0; + } + + /// + /// Write a byte to buffer + /// + /// + /// The value to write + /// + public void WriteByte(int value) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + buffer[end++] = unchecked((byte)value); + } + + /// + /// Write a short value to buffer LSB first + /// + /// + /// The value to write. 
+ /// + public void WriteShort(int value) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + buffer[end++] = unchecked((byte)value); + buffer[end++] = unchecked((byte)(value >> 8)); + } + + /// + /// write an integer LSB first + /// + /// The value to write. + public void WriteInt(int value) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + buffer[end++] = unchecked((byte)value); + buffer[end++] = unchecked((byte)(value >> 8)); + buffer[end++] = unchecked((byte)(value >> 16)); + buffer[end++] = unchecked((byte)(value >> 24)); + } + + /// + /// Write a block of data to buffer + /// + /// data to write + /// offset of first byte to write + /// number of bytes to write + public void WriteBlock(byte[] block, int offset, int length) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + System.Array.Copy(block, offset, buffer, end, length); + end += length; + } + + /// + /// The number of bits written to the buffer + /// + public int BitCount + { + get + { + return bitCount; + } + } + + /// + /// Align internal buffer on a byte boundary + /// + public void AlignToByte() + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + if (bitCount > 0) + { + buffer[end++] = unchecked((byte)bits); + if (bitCount > 8) + { + buffer[end++] = unchecked((byte)(bits >> 8)); + } + } + bits = 0; + bitCount = 0; + } + + /// + /// Write bits to internal buffer + /// + /// source of bits + /// number of bits to write + public void WriteBits(int b, int count) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("writeBits("+b+","+count+")"); + // } +#endif + bits |= (uint)(b << bitCount); + bitCount += count; + if (bitCount >= 16) + { + buffer[end++] = unchecked((byte)bits); + buffer[end++] = unchecked((byte)(bits >> 8)); + bits >>= 16; + bitCount -= 16; + } + } + + /// + /// Write a short value to internal buffer most significant byte first + /// + /// value to write + public void WriteShortMSB(int s) + { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + buffer[end++] = unchecked((byte)(s >> 8)); + buffer[end++] = unchecked((byte)s); + } + + /// + /// Indicates if buffer has been flushed + /// + public bool IsFlushed + { + get + { + return end == 0; + } + } + + /// + /// Flushes the pending buffer into the given output array. If the + /// output array is to small, only a partial flush is done. + /// + /// The output array. + /// The offset into output array. + /// The maximum number of bytes to store. + /// The number of bytes flushed. 
+ public int Flush(byte[] output, int offset, int length) + { + if (bitCount >= 8) + { + buffer[end++] = unchecked((byte)bits); + bits >>= 8; + bitCount -= 8; + } + + if (length > end - start) + { + length = end - start; + System.Array.Copy(buffer, start, output, offset, length); + start = 0; + end = 0; + } + else + { + System.Array.Copy(buffer, start, output, offset, length); + start += length; + } + return length; + } + + /// + /// Convert internal buffer to byte array. + /// Buffer is empty on completion + /// + /// + /// The internal buffer contents converted to a byte array. + /// + public byte[] ToByteArray() + { + AlignToByte(); + + byte[] result = new byte[end - start]; + System.Array.Copy(buffer, start, result, 0, result.Length); + start = 0; + end = 0; + return result; + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/PendingBuffer.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/PendingBuffer.cs.meta new file mode 100644 index 0000000..f203a4c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/PendingBuffer.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: a7733b687dddb439388bb5e1c3b5e226 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams.meta new file mode 100644 index 0000000..76539fa --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: f02fc8859f2f447ae9530e37a7b3c770 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/DeflaterOutputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/DeflaterOutputStream.cs new file mode 100644 index 0000000..03cac73 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/DeflaterOutputStream.cs @@ -0,0 +1,495 @@ +using ICSharpCode.SharpZipLib.Encryption; +using System; +using System.IO; +using System.Security.Cryptography; + +namespace ICSharpCode.SharpZipLib.Zip.Compression.Streams +{ + /// + /// A special stream deflating or compressing the bytes that are + /// written to it. It uses a Deflater to perform actual deflating.
+ /// Authors of the original Java version: Tom Tromey, Jochen Hoenicke + ///
+ public class DeflaterOutputStream : Stream + { + #region Constructors + + /// + /// Creates a new DeflaterOutputStream with a default Deflater and default buffer size. + /// + /// + /// the output stream where deflated output should be written. + /// + public DeflaterOutputStream(Stream baseOutputStream) + : this(baseOutputStream, new Deflater(), 512) + { + } + + /// + /// Creates a new DeflaterOutputStream with the given Deflater and + /// default buffer size. + /// + /// + /// the output stream where deflated output should be written. + /// + /// + /// the underlying deflater. + /// + public DeflaterOutputStream(Stream baseOutputStream, Deflater deflater) + : this(baseOutputStream, deflater, 512) + { + } + + /// + /// Creates a new DeflaterOutputStream with the given Deflater and + /// buffer size. + /// + /// + /// The output stream where deflated output is written. + /// + /// + /// The underlying deflater to use + /// + /// + /// The buffer size in bytes to use when deflating (minimum value 512) + /// + /// + /// bufsize is less than or equal to zero. + /// + /// + /// baseOutputStream does not support writing + /// + /// + /// deflater instance is null + /// + public DeflaterOutputStream(Stream baseOutputStream, Deflater deflater, int bufferSize) + { + if (baseOutputStream == null) + { + throw new ArgumentNullException(nameof(baseOutputStream)); + } + + if (baseOutputStream.CanWrite == false) + { + throw new ArgumentException("Must support writing", nameof(baseOutputStream)); + } + + if (bufferSize < 512) + { + throw new ArgumentOutOfRangeException(nameof(bufferSize)); + } + + baseOutputStream_ = baseOutputStream; + buffer_ = new byte[bufferSize]; + deflater_ = deflater ?? throw new ArgumentNullException(nameof(deflater)); + } + + #endregion Constructors + + #region Public API + + /// + /// Finishes the stream by calling finish() on the deflater. + /// + /// + /// Not all input is deflated + /// + public virtual void Finish() + { + deflater_.Finish(); + while (!deflater_.IsFinished) + { + int len = deflater_.Deflate(buffer_, 0, buffer_.Length); + if (len <= 0) + { + break; + } + + if (cryptoTransform_ != null) + { + EncryptBlock(buffer_, 0, len); + } + + baseOutputStream_.Write(buffer_, 0, len); + } + + if (!deflater_.IsFinished) + { + throw new SharpZipBaseException("Can't deflate all input?"); + } + + baseOutputStream_.Flush(); + + if (cryptoTransform_ != null) + { + if (cryptoTransform_ is ZipAESTransform) + { + AESAuthCode = ((ZipAESTransform)cryptoTransform_).GetAuthCode(); + } + cryptoTransform_.Dispose(); + cryptoTransform_ = null; + } + } + + /// + /// Gets or sets a flag indicating ownership of underlying stream. + /// When the flag is true will close the underlying stream also. + /// + /// The default value is true. + public bool IsStreamOwner { get; set; } = true; + + /// + /// Allows client to determine if an entry can be patched after its added + /// + public bool CanPatchEntries + { + get + { + return baseOutputStream_.CanSeek; + } + } + + #endregion Public API + + #region Encryption + + private string password; + + private ICryptoTransform cryptoTransform_; + + /// + /// Returns the 10 byte AUTH CODE to be appended immediately following the AES data stream. + /// + protected byte[] AESAuthCode; + + /// + /// Get/set the password used for encryption. 
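// [Editor's sketch, hypothetical usage, not part of this patch; assumes the
// InflaterInputStream added later in this changeset] A full round trip;
// disposing the output stream implies Finish():
static byte[] RoundTrip(byte[] data)
{
    var ms = new MemoryStream();
    using (var dos = new DeflaterOutputStream(ms) { IsStreamOwner = false })
    {
        dos.Write(data, 0, data.Length);   // Dispose -> Finish -> flush all
    }
    ms.Position = 0;
    var restored = new byte[data.Length];
    using (var iis = new InflaterInputStream(ms))
    {
        iis.Read(restored, 0, restored.Length); // Read loops until count or EOF
    }
    return restored;
}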
+ /// + /// When set to null or if the password is empty no encryption is performed + public string Password + { + get + { + return password; + } + set + { + if ((value != null) && (value.Length == 0)) + { + password = null; + } + else + { + password = value; + } + } + } + + /// + /// Encrypt a block of data + /// + /// + /// Data to encrypt. NOTE the original contents of the buffer are lost + /// + /// + /// Offset of first byte in buffer to encrypt + /// + /// + /// Number of bytes in buffer to encrypt + /// + protected void EncryptBlock(byte[] buffer, int offset, int length) + { + cryptoTransform_.TransformBlock(buffer, 0, length, buffer, 0); + } + + /// + /// Initializes encryption keys based on given . + /// + /// The password. + protected void InitializePassword(string password) + { + var pkManaged = new PkzipClassicManaged(); + byte[] key = PkzipClassic.GenerateKeys(ZipStrings.ConvertToArray(password)); + cryptoTransform_ = pkManaged.CreateEncryptor(key, null); + } + + /// + /// Initializes encryption keys based on given password. + /// + protected void InitializeAESPassword(ZipEntry entry, string rawPassword, + out byte[] salt, out byte[] pwdVerifier) + { + salt = new byte[entry.AESSaltLen]; + // Salt needs to be cryptographically random, and unique per file + if (_aesRnd == null) + _aesRnd = RandomNumberGenerator.Create(); + _aesRnd.GetBytes(salt); + int blockSize = entry.AESKeySize / 8; // bits to bytes + + cryptoTransform_ = new ZipAESTransform(rawPassword, salt, blockSize, true); + pwdVerifier = ((ZipAESTransform)cryptoTransform_).PwdVerifier; + } + + #endregion Encryption + + #region Deflation Support + + /// + /// Deflates everything in the input buffers. This will call + /// def.deflate() until all bytes from the input buffers + /// are processed. + /// + protected void Deflate() + { + Deflate(false); + } + + private void Deflate(bool flushing) + { + while (flushing || !deflater_.IsNeedingInput) + { + int deflateCount = deflater_.Deflate(buffer_, 0, buffer_.Length); + + if (deflateCount <= 0) + { + break; + } + if (cryptoTransform_ != null) + { + EncryptBlock(buffer_, 0, deflateCount); + } + + baseOutputStream_.Write(buffer_, 0, deflateCount); + } + + if (!deflater_.IsNeedingInput) + { + throw new SharpZipBaseException("DeflaterOutputStream can't deflate all input?"); + } + } + + #endregion Deflation Support + + #region Stream Overrides + + /// + /// Gets value indicating stream can be read from + /// + public override bool CanRead + { + get + { + return false; + } + } + + /// + /// Gets a value indicating if seeking is supported for this stream + /// This property always returns false + /// + public override bool CanSeek + { + get + { + return false; + } + } + + /// + /// Get value indicating if this stream supports writing + /// + public override bool CanWrite + { + get + { + return baseOutputStream_.CanWrite; + } + } + + /// + /// Get current length of stream + /// + public override long Length + { + get + { + return baseOutputStream_.Length; + } + } + + /// + /// Gets the current position within the stream. + /// + /// Any attempt to set position + public override long Position + { + get + { + return baseOutputStream_.Position; + } + set + { + throw new NotSupportedException("Position property not supported"); + } + } + + /// + /// Sets the current position of this stream to the given value. Not supported by this class! + /// + /// The offset relative to the to seek. + /// The to seek from. + /// The new position in the stream. 
+ /// Any access + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException("DeflaterOutputStream Seek not supported"); + } + + /// + /// Sets the length of this stream to the given value. Not supported by this class! + /// + /// The new stream length. + /// Any access + public override void SetLength(long value) + { + throw new NotSupportedException("DeflaterOutputStream SetLength not supported"); + } + + /// + /// Read a byte from stream advancing position by one + /// + /// The byte read cast to an int. THe value is -1 if at the end of the stream. + /// Any access + public override int ReadByte() + { + throw new NotSupportedException("DeflaterOutputStream ReadByte not supported"); + } + + /// + /// Read a block of bytes from stream + /// + /// The buffer to store read data in. + /// The offset to start storing at. + /// The maximum number of bytes to read. + /// The actual number of bytes read. Zero if end of stream is detected. + /// Any access + public override int Read(byte[] buffer, int offset, int count) + { + throw new NotSupportedException("DeflaterOutputStream Read not supported"); + } + + /// + /// Flushes the stream by calling Flush on the deflater and then + /// on the underlying stream. This ensures that all bytes are flushed. + /// + public override void Flush() + { + deflater_.Flush(); + Deflate(true); + baseOutputStream_.Flush(); + } + + /// + /// Calls and closes the underlying + /// stream when is true. + /// + protected override void Dispose(bool disposing) + { + if (!isClosed_) + { + isClosed_ = true; + + try + { + Finish(); + if (cryptoTransform_ != null) + { + GetAuthCodeIfAES(); + cryptoTransform_.Dispose(); + cryptoTransform_ = null; + } + } + finally + { + if (IsStreamOwner) + { + baseOutputStream_.Dispose(); + } + } + } + } + + /// + /// Get the Auth code for AES encrypted entries + /// + protected void GetAuthCodeIfAES() + { + if (cryptoTransform_ is ZipAESTransform) + { + AESAuthCode = ((ZipAESTransform)cryptoTransform_).GetAuthCode(); + } + } + + /// + /// Writes a single byte to the compressed output stream. + /// + /// + /// The byte value. + /// + public override void WriteByte(byte value) + { + byte[] b = new byte[1]; + b[0] = value; + Write(b, 0, 1); + } + + /// + /// Writes bytes from an array to the compressed stream. + /// + /// + /// The byte array + /// + /// + /// The offset into the byte array where to start. + /// + /// + /// The number of bytes to write. + /// + public override void Write(byte[] buffer, int offset, int count) + { + deflater_.SetInput(buffer, offset, count); + Deflate(); + } + + #endregion Stream Overrides + + #region Instance Fields + + /// + /// This buffer is used temporarily to retrieve the bytes from the + /// deflater and write them to the underlying output stream. + /// + private byte[] buffer_; + + /// + /// The deflater which is used to deflate the stream. + /// + protected Deflater deflater_; + + /// + /// Base stream the deflater depends on. 
+ /// + protected Stream baseOutputStream_; + + private bool isClosed_; + + #endregion Instance Fields + + #region Static Fields + + // Static to help ensure that multiple files within a zip will get different random salt + private static RandomNumberGenerator _aesRnd = RandomNumberGenerator.Create(); + + #endregion Static Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/DeflaterOutputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/DeflaterOutputStream.cs.meta new file mode 100644 index 0000000..c3750c7 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/DeflaterOutputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 9ae4d6257b3f6424599057b5167debf3 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/InflaterInputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/InflaterInputStream.cs new file mode 100644 index 0000000..3fb2579 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/InflaterInputStream.cs @@ -0,0 +1,713 @@ +using System; +using System.IO; +using System.Security.Cryptography; + +namespace ICSharpCode.SharpZipLib.Zip.Compression.Streams +{ + /// + /// An input buffer customised for use by + /// + /// + /// The buffer supports decryption of incoming data. + /// + public class InflaterInputBuffer + { + #region Constructors + + /// + /// Initialise a new instance of with a default buffer size + /// + /// The stream to buffer. + public InflaterInputBuffer(Stream stream) : this(stream, 4096) + { + } + + /// + /// Initialise a new instance of + /// + /// The stream to buffer. + /// The size to use for the buffer + /// A minimum buffer size of 1KB is permitted. Lower sizes are treated as 1KB. + public InflaterInputBuffer(Stream stream, int bufferSize) + { + inputStream = stream; + if (bufferSize < 1024) + { + bufferSize = 1024; + } + rawData = new byte[bufferSize]; + clearText = rawData; + } + + #endregion Constructors + + /// + /// Get the length of bytes in the + /// + public int RawLength + { + get + { + return rawLength; + } + } + + /// + /// Get the contents of the raw data buffer. + /// + /// This may contain encrypted data. + public byte[] RawData + { + get + { + return rawData; + } + } + + /// + /// Get the number of useable bytes in + /// + public int ClearTextLength + { + get + { + return clearTextLength; + } + } + + /// + /// Get the contents of the clear text buffer. + /// + public byte[] ClearText + { + get + { + return clearText; + } + } + + /// + /// Get/set the number of bytes available + /// + public int Available + { + get { return available; } + set { available = value; } + } + + /// + /// Call passing the current clear text buffer contents. + /// + /// The inflater to set input for. + public void SetInflaterInput(Inflater inflater) + { + if (available > 0) + { + inflater.SetInput(clearText, clearTextLength - available, available); + available = 0; + } + } + + /// + /// Fill the buffer from the underlying input stream. 
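// [Editor's note] Stream.Read may legally return fewer bytes than requested,
// so the Fill implementation below loops until the raw buffer is full or the
// source reports end of stream, then runs any configured decryption
// transform over whatever arrived.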
+ /// + public void Fill() + { + rawLength = 0; + int toRead = rawData.Length; + + while (toRead > 0 && inputStream.CanRead) + { + int count = inputStream.Read(rawData, rawLength, toRead); + if (count <= 0) + { + break; + } + rawLength += count; + toRead -= count; + } + + if (cryptoTransform != null) + { + clearTextLength = cryptoTransform.TransformBlock(rawData, 0, rawLength, clearText, 0); + } + else + { + clearTextLength = rawLength; + } + + available = clearTextLength; + } + + /// + /// Read a buffer directly from the input stream + /// + /// The buffer to fill + /// Returns the number of bytes read. + public int ReadRawBuffer(byte[] buffer) + { + return ReadRawBuffer(buffer, 0, buffer.Length); + } + + /// + /// Read a buffer directly from the input stream + /// + /// The buffer to read into + /// The offset to start reading data into. + /// The number of bytes to read. + /// Returns the number of bytes read. + public int ReadRawBuffer(byte[] outBuffer, int offset, int length) + { + if (length < 0) + { + throw new ArgumentOutOfRangeException(nameof(length)); + } + + int currentOffset = offset; + int currentLength = length; + + while (currentLength > 0) + { + if (available <= 0) + { + Fill(); + if (available <= 0) + { + return 0; + } + } + int toCopy = Math.Min(currentLength, available); + System.Array.Copy(rawData, rawLength - (int)available, outBuffer, currentOffset, toCopy); + currentOffset += toCopy; + currentLength -= toCopy; + available -= toCopy; + } + return length; + } + + /// + /// Read clear text data from the input stream. + /// + /// The buffer to add data to. + /// The offset to start adding data at. + /// The number of bytes to read. + /// Returns the number of bytes actually read. + public int ReadClearTextBuffer(byte[] outBuffer, int offset, int length) + { + if (length < 0) + { + throw new ArgumentOutOfRangeException(nameof(length)); + } + + int currentOffset = offset; + int currentLength = length; + + while (currentLength > 0) + { + if (available <= 0) + { + Fill(); + if (available <= 0) + { + return 0; + } + } + + int toCopy = Math.Min(currentLength, available); + Array.Copy(clearText, clearTextLength - (int)available, outBuffer, currentOffset, toCopy); + currentOffset += toCopy; + currentLength -= toCopy; + available -= toCopy; + } + return length; + } + + /// + /// Read a from the input stream. + /// + /// Returns the byte read. + public int ReadLeByte() + { + if (available <= 0) + { + Fill(); + if (available <= 0) + { + throw new ZipException("EOF in header"); + } + } + byte result = rawData[rawLength - available]; + available -= 1; + return result; + } + + /// + /// Read an in little endian byte order. + /// + /// The short value read case to an int. + public int ReadLeShort() + { + return ReadLeByte() | (ReadLeByte() << 8); + } + + /// + /// Read an in little endian byte order. + /// + /// The int value read. + public int ReadLeInt() + { + return ReadLeShort() | (ReadLeShort() << 16); + } + + /// + /// Read a in little endian byte order. + /// + /// The long value read. + public long ReadLeLong() + { + return (uint)ReadLeInt() | ((long)ReadLeInt() << 32); + } + + /// + /// Get/set the to apply to any data. + /// + /// Set this value to null to have no transform applied. 
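// [Editor's sketch, illustrative; "buf" is ours] The ReadLe* helpers above
// compose wider little-endian values from single bytes, which is how zip
// headers are parsed, e.g. for a local file header:
//
//   int signature = buf.ReadLeInt();   // 0x04034b50, i.e. "PK\3\4"
//   int version   = buf.ReadLeShort();
//
// Note that ReadLeLong casts the low half to uint first, so the sign bit of
// the low 32 bits cannot bleed into the high 32 bits on widening.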
+ public ICryptoTransform CryptoTransform + { + set + { + cryptoTransform = value; + if (cryptoTransform != null) + { + if (rawData == clearText) + { + if (internalClearText == null) + { + internalClearText = new byte[rawData.Length]; + } + clearText = internalClearText; + } + clearTextLength = rawLength; + if (available > 0) + { + cryptoTransform.TransformBlock(rawData, rawLength - available, available, clearText, rawLength - available); + } + } + else + { + clearText = rawData; + clearTextLength = rawLength; + } + } + } + + #region Instance Fields + + private int rawLength; + private byte[] rawData; + + private int clearTextLength; + private byte[] clearText; + private byte[] internalClearText; + + private int available; + + private ICryptoTransform cryptoTransform; + private Stream inputStream; + + #endregion Instance Fields + } + + /// + /// This filter stream is used to decompress data compressed using the "deflate" + /// format. The "deflate" format is described in RFC 1951. + /// + /// This stream may form the basis for other decompression filters, such + /// as the GZipInputStream. + /// + /// Author of the original java version : John Leuner. + /// + public class InflaterInputStream : Stream + { + #region Constructors + + /// + /// Create an InflaterInputStream with the default decompressor + /// and a default buffer size of 4KB. + /// + /// + /// The InputStream to read bytes from + /// + public InflaterInputStream(Stream baseInputStream) + : this(baseInputStream, new Inflater(), 4096) + { + } + + /// + /// Create an InflaterInputStream with the specified decompressor + /// and a default buffer size of 4KB. + /// + /// + /// The source of input data + /// + /// + /// The decompressor used to decompress data read from baseInputStream + /// + public InflaterInputStream(Stream baseInputStream, Inflater inf) + : this(baseInputStream, inf, 4096) + { + } + + /// + /// Create an InflaterInputStream with the specified decompressor + /// and the specified buffer size. + /// + /// + /// The InputStream to read bytes from + /// + /// + /// The decompressor to use + /// + /// + /// Size of the buffer to use + /// + public InflaterInputStream(Stream baseInputStream, Inflater inflater, int bufferSize) + { + if (baseInputStream == null) + { + throw new ArgumentNullException(nameof(baseInputStream)); + } + + if (inflater == null) + { + throw new ArgumentNullException(nameof(inflater)); + } + + if (bufferSize <= 0) + { + throw new ArgumentOutOfRangeException(nameof(bufferSize)); + } + + this.baseInputStream = baseInputStream; + this.inf = inflater; + + inputBuffer = new InflaterInputBuffer(baseInputStream, bufferSize); + } + + #endregion Constructors + + /// + /// Gets or sets a flag indicating ownership of underlying stream. + /// When the flag is true will close the underlying stream also. + /// + /// The default value is true. + public bool IsStreamOwner { get; set; } = true; + + /// + /// Skip specified number of bytes of uncompressed data + /// + /// + /// Number of bytes to skip + /// + /// + /// The number of bytes skipped, zero if the end of + /// stream has been reached + /// + /// + /// The number of bytes to skip is less than or equal to zero. + /// + public long Skip(long count) + { + if (count <= 0) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + // v0.80 Skip by seeking if underlying stream supports it... 
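// [Editor's note] When the base stream cannot seek, the fallback below
// skips by reading into a scratch buffer of at most 2048 bytes at a time
// and reports however many bytes were actually consumed.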
+ if (baseInputStream.CanSeek) + { + baseInputStream.Seek(count, SeekOrigin.Current); + return count; + } + else + { + int length = 2048; + if (count < length) + { + length = (int)count; + } + + byte[] tmp = new byte[length]; + int readCount = 1; + long toSkip = count; + + while ((toSkip > 0) && (readCount > 0)) + { + if (toSkip < length) + { + length = (int)toSkip; + } + + readCount = baseInputStream.Read(tmp, 0, length); + toSkip -= readCount; + } + + return count - toSkip; + } + } + + /// + /// Clear any cryptographic state. + /// + protected void StopDecrypting() + { + inputBuffer.CryptoTransform = null; + } + + /// + /// Returns 0 once the end of the stream (EOF) has been reached. + /// Otherwise returns 1. + /// + public virtual int Available + { + get + { + return inf.IsFinished ? 0 : 1; + } + } + + /// + /// Fills the buffer with more data to decompress. + /// + /// + /// Stream ends early + /// + protected void Fill() + { + // Protect against redundant calls + if (inputBuffer.Available <= 0) + { + inputBuffer.Fill(); + if (inputBuffer.Available <= 0) + { + throw new SharpZipBaseException("Unexpected EOF"); + } + } + inputBuffer.SetInflaterInput(inf); + } + + #region Stream Overrides + + /// + /// Gets a value indicating whether the current stream supports reading + /// + public override bool CanRead + { + get + { + return baseInputStream.CanRead; + } + } + + /// + /// Gets a value of false indicating seeking is not supported for this stream. + /// + public override bool CanSeek + { + get + { + return false; + } + } + + /// + /// Gets a value of false indicating that this stream is not writeable. + /// + public override bool CanWrite + { + get + { + return false; + } + } + + /// + /// A value representing the length of the stream in bytes. + /// + public override long Length + { + get + { + //return inputBuffer.RawLength; + throw new NotSupportedException("InflaterInputStream Length is not supported"); + } + } + + /// + /// The current position within the stream. + /// Throws a NotSupportedException when attempting to set the position + /// + /// Attempting to set the position + public override long Position + { + get + { + return baseInputStream.Position; + } + set + { + throw new NotSupportedException("InflaterInputStream Position not supported"); + } + } + + /// + /// Flushes the baseInputStream + /// + public override void Flush() + { + baseInputStream.Flush(); + } + + /// + /// Sets the position within the current stream + /// Always throws a NotSupportedException + /// + /// The relative offset to seek to. + /// The defining where to seek from. + /// The new position in the stream. + /// Any access + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException("Seek not supported"); + } + + /// + /// Set the length of the current stream + /// Always throws a NotSupportedException + /// + /// The new length value for the stream. + /// Any access + public override void SetLength(long value) + { + throw new NotSupportedException("InflaterInputStream SetLength not supported"); + } + + /// + /// Writes a sequence of bytes to stream and advances the current position + /// This method always throws a NotSupportedException + /// + /// The buffer containing data to write. + /// The offset of the first byte to write. + /// The number of bytes to write. 
+ /// Any access + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException("InflaterInputStream Write not supported"); + } + + /// + /// Writes one byte to the current stream and advances the current position + /// Always throws a NotSupportedException + /// + /// The byte to write. + /// Any access + public override void WriteByte(byte value) + { + throw new NotSupportedException("InflaterInputStream WriteByte not supported"); + } + + /// + /// Closes the input stream. When + /// is true the underlying stream is also closed. + /// + protected override void Dispose(bool disposing) + { + if (!isClosed) + { + isClosed = true; + if (IsStreamOwner) + { + baseInputStream.Dispose(); + } + } + } + + /// + /// Reads decompressed data into the provided buffer byte array + /// + /// + /// The array to read and decompress data into + /// + /// + /// The offset indicating where the data should be placed + /// + /// + /// The number of bytes to decompress + /// + /// The number of bytes read. Zero signals the end of stream + /// + /// Inflater needs a dictionary + /// + public override int Read(byte[] buffer, int offset, int count) + { + if (inf.IsNeedingDictionary) + { + throw new SharpZipBaseException("Need a dictionary"); + } + + int remainingBytes = count; + while (true) + { + int bytesRead = inf.Inflate(buffer, offset, remainingBytes); + offset += bytesRead; + remainingBytes -= bytesRead; + + if (remainingBytes == 0 || inf.IsFinished) + { + break; + } + + if (inf.IsNeedingInput) + { + Fill(); + } + else if (bytesRead == 0) + { + throw new ZipException("Invalid input data"); + } + } + return count - remainingBytes; + } + + #endregion Stream Overrides + + #region Instance Fields + + /// + /// Decompressor for this stream + /// + protected Inflater inf; + + /// + /// Input buffer for this stream. + /// + protected InflaterInputBuffer inputBuffer; + + /// + /// Base stream the inflater reads from. + /// + private Stream baseInputStream; + + /// + /// The compressed size + /// + protected long csize; + + /// + /// Flag indicating whether this instance has been closed or not. + /// + private bool isClosed; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/InflaterInputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/InflaterInputStream.cs.meta new file mode 100644 index 0000000..c82d162 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/InflaterInputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 011cf9bf651424006a919cb8c58e0e6c +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/OutputWindow.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/OutputWindow.cs new file mode 100644 index 0000000..d8241c1 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/OutputWindow.cs @@ -0,0 +1,220 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Zip.Compression.Streams +{ + /// + /// Contains the output from the Inflation process. + /// We need to have a window so that we can refer backwards into the output stream + /// to repeat stuff.
+ /// Author of the original Java version: John Leuner + ///
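// [Editor's sketch, standalone model, not part of this patch] The window
// below is a 32 KiB ring buffer: writes wrap via windowEnd &= WindowMask,
// and an LZ77 back-reference (length, distance) re-reads bytes distance
// behind the write head. The wrap-around copy in miniature:
static void RepeatInto(byte[] window, ref int end, int length, int distance)
{
    const int mask = (1 << 15) - 1;        // same value as WindowMask
    int src = (end - distance) & mask;
    while (length-- > 0)
    {
        window[end] = window[src];
        end = (end + 1) & mask;
        src = (src + 1) & mask;
    }
    // Overlapping copies (length > distance) come out right: each byte is
    // written before it is needed again as a source.
}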
+ public class OutputWindow + { + #region Constants + + private const int WindowSize = 1 << 15; + private const int WindowMask = WindowSize - 1; + + #endregion Constants + + #region Instance Fields + + private byte[] window = new byte[WindowSize]; //The window is 2^15 bytes + private int windowEnd; + private int windowFilled; + + #endregion Instance Fields + + /// + /// Write a byte to this output window + /// + /// value to write + /// + /// if window is full + /// + public void Write(int value) + { + if (windowFilled++ == WindowSize) + { + throw new InvalidOperationException("Window full"); + } + window[windowEnd++] = (byte)value; + windowEnd &= WindowMask; + } + + private void SlowRepeat(int repStart, int length, int distance) + { + while (length-- > 0) + { + window[windowEnd++] = window[repStart++]; + windowEnd &= WindowMask; + repStart &= WindowMask; + } + } + + /// + /// Append a byte pattern already in the window itself + /// + /// length of pattern to copy + /// distance from end of window pattern occurs + /// + /// If the repeated data overflows the window + /// + public void Repeat(int length, int distance) + { + if ((windowFilled += length) > WindowSize) + { + throw new InvalidOperationException("Window full"); + } + + int repStart = (windowEnd - distance) & WindowMask; + int border = WindowSize - length; + if ((repStart <= border) && (windowEnd < border)) + { + if (length <= distance) + { + System.Array.Copy(window, repStart, window, windowEnd, length); + windowEnd += length; + } + else + { + // We have to copy manually, since the repeat pattern overlaps. + while (length-- > 0) + { + window[windowEnd++] = window[repStart++]; + } + } + } + else + { + SlowRepeat(repStart, length, distance); + } + } + + /// + /// Copy from input manipulator to internal window + /// + /// source of data + /// length of data to copy + /// the number of bytes copied + public int CopyStored(StreamManipulator input, int length) + { + length = Math.Min(Math.Min(length, WindowSize - windowFilled), input.AvailableBytes); + int copied; + + int tailLen = WindowSize - windowEnd; + if (length > tailLen) + { + copied = input.CopyBytes(window, windowEnd, tailLen); + if (copied == tailLen) + { + copied += input.CopyBytes(window, 0, length - tailLen); + } + } + else + { + copied = input.CopyBytes(window, windowEnd, length); + } + + windowEnd = (windowEnd + copied) & WindowMask; + windowFilled += copied; + return copied; + } + + /// + /// Copy dictionary to window + /// + /// source dictionary + /// offset of start in source dictionary + /// length of dictionary + /// + /// If window isnt empty + /// + public void CopyDict(byte[] dictionary, int offset, int length) + { + if (dictionary == null) + { + throw new ArgumentNullException(nameof(dictionary)); + } + + if (windowFilled > 0) + { + throw new InvalidOperationException(); + } + + if (length > WindowSize) + { + offset += length - WindowSize; + length = WindowSize; + } + System.Array.Copy(dictionary, offset, window, 0, length); + windowEnd = length & WindowMask; + } + + /// + /// Get remaining unfilled space in window + /// + /// Number of bytes left in window + public int GetFreeSpace() + { + return WindowSize - windowFilled; + } + + /// + /// Get bytes available for output in window + /// + /// Number of bytes filled + public int GetAvailable() + { + return windowFilled; + } + + /// + /// Copy contents of window to output + /// + /// buffer to copy to + /// offset to start at + /// number of bytes to count + /// The number of bytes copied + /// + /// If a 
window underflow occurs + /// + public int CopyOutput(byte[] output, int offset, int len) + { + int copyEnd = windowEnd; + if (len > windowFilled) + { + len = windowFilled; + } + else + { + copyEnd = (windowEnd - windowFilled + len) & WindowMask; + } + + int copied = len; + int tailLen = len - copyEnd; + + if (tailLen > 0) + { + System.Array.Copy(window, WindowSize - tailLen, output, offset, tailLen); + offset += tailLen; + len = copyEnd; + } + System.Array.Copy(window, copyEnd - len, output, offset, len); + windowFilled -= copied; + if (windowFilled < 0) + { + throw new InvalidOperationException(); + } + return copied; + } + + /// + /// Reset by clearing window so GetAvailable returns 0 + /// + public void Reset() + { + windowFilled = windowEnd = 0; + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/OutputWindow.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/OutputWindow.cs.meta new file mode 100644 index 0000000..efafff5 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/OutputWindow.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: d2b610fe8a87846de84d0a6e739f008a +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/StreamManipulator.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/StreamManipulator.cs new file mode 100644 index 0000000..aff6a9c --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/StreamManipulator.cs @@ -0,0 +1,298 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Zip.Compression.Streams +{ + /// + /// This class allows us to retrieve a specified number of bits from + /// the input buffer, as well as copy big byte blocks. + /// + /// It uses an int buffer to store up to 31 bits for direct + /// manipulation. This guarantees that we can get at least 16 bits, + /// but we only need at most 15, so this is all safe. + /// + /// There are some optimizations in this class, for example, you must + /// never peek more than 8 bits more than needed, and you must first + /// peek bits before you may drop them. This is not a general purpose + /// class but optimized for the behaviour of the Inflater. + /// + /// authors of the original java version : John Leuner, Jochen Hoenicke + /// + public class StreamManipulator + { + /// + /// Get the next sequence of bits but don't increase input pointer. bitCount must be + /// less or equal 16 and if this call succeeds, you must drop + /// at least n - 8 bits in the next call. + /// + /// The number of bits to peek. + /// + /// the value of the bits, or -1 if not enough bits available. */ + /// + public int PeekBits(int bitCount) + { + if (bitsInBuffer_ < bitCount) + { + if (windowStart_ == windowEnd_) + { + return -1; // ok + } + buffer_ |= (uint)((window_[windowStart_++] & 0xff | + (window_[windowStart_++] & 0xff) << 8) << bitsInBuffer_); + bitsInBuffer_ += 16; + } + return (int)(buffer_ & ((1 << bitCount) - 1)); + } + + /// + /// Tries to grab the next bits from the input and + /// sets to the value, adding . 
+ /// + /// true if enough bits could be read, otherwise false + public bool TryGetBits(int bitCount, ref int output, int outputOffset = 0) + { + var bits = PeekBits(bitCount); + if (bits < 0) + { + return false; + } + output = bits + outputOffset; + DropBits(bitCount); + return true; + } + + /// + /// Tries to grab the next bits from the input and + /// sets of to the value. + /// + /// true if enough bits could be read, otherwise false + public bool TryGetBits(int bitCount, ref byte[] array, int index) + { + var bits = PeekBits(bitCount); + if (bits < 0) + { + return false; + } + array[index] = (byte)bits; + DropBits(bitCount); + return true; + } + + /// + /// Drops the next n bits from the input. You should have called PeekBits + /// with a bigger or equal n before, to make sure that enough bits are in + /// the bit buffer. + /// + /// The number of bits to drop. + public void DropBits(int bitCount) + { + buffer_ >>= bitCount; + bitsInBuffer_ -= bitCount; + } + + /// + /// Gets the next n bits and increases input pointer. This is equivalent + /// to followed by , except for correct error handling. + /// + /// The number of bits to retrieve. + /// + /// the value of the bits, or -1 if not enough bits available. + /// + public int GetBits(int bitCount) + { + int bits = PeekBits(bitCount); + if (bits >= 0) + { + DropBits(bitCount); + } + return bits; + } + + /// + /// Gets the number of bits available in the bit buffer. This must be + /// only called when a previous PeekBits() returned -1. + /// + /// + /// the number of bits available. + /// + public int AvailableBits + { + get + { + return bitsInBuffer_; + } + } + + /// + /// Gets the number of bytes available. + /// + /// + /// The number of bytes available. + /// + public int AvailableBytes + { + get + { + return windowEnd_ - windowStart_ + (bitsInBuffer_ >> 3); + } + } + + /// + /// Skips to the next byte boundary. + /// + public void SkipToByteBoundary() + { + buffer_ >>= (bitsInBuffer_ & 7); + bitsInBuffer_ &= ~7; + } + + /// + /// Returns true when SetInput can be called + /// + public bool IsNeedingInput + { + get + { + return windowStart_ == windowEnd_; + } + } + + /// + /// Copies bytes from input buffer to output buffer starting + /// at output[offset]. You have to make sure, that the buffer is + /// byte aligned. If not enough bytes are available, copies fewer + /// bytes. + /// + /// + /// The buffer to copy bytes to. + /// + /// + /// The offset in the buffer at which copying starts + /// + /// + /// The length to copy, 0 is allowed. + /// + /// + /// The number of bytes copied, 0 if no bytes were available. 
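// [Editor's sketch, hypothetical usage; "compressed" stands for some byte[]
// of deflate data] How the inflater drives this class to read a deflate
// block header, three bits, LSB first:
var input = new StreamManipulator();
input.SetInput(compressed, 0, compressed.Length);
int header = input.GetBits(3);        // -1 would mean: feed more input first
bool lastBlock = (header & 1) != 0;   // BFINAL
int blockType = header >> 1;          // BTYPE: 0 stored, 1 fixed, 2 dynamic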
+ /// + /// + /// Length is less than zero + /// + /// + /// Bit buffer isnt byte aligned + /// + public int CopyBytes(byte[] output, int offset, int length) + { + if (length < 0) + { + throw new ArgumentOutOfRangeException(nameof(length)); + } + + if ((bitsInBuffer_ & 7) != 0) + { + // bits_in_buffer may only be 0 or a multiple of 8 + throw new InvalidOperationException("Bit buffer is not byte aligned!"); + } + + int count = 0; + while ((bitsInBuffer_ > 0) && (length > 0)) + { + output[offset++] = (byte)buffer_; + buffer_ >>= 8; + bitsInBuffer_ -= 8; + length--; + count++; + } + + if (length == 0) + { + return count; + } + + int avail = windowEnd_ - windowStart_; + if (length > avail) + { + length = avail; + } + System.Array.Copy(window_, windowStart_, output, offset, length); + windowStart_ += length; + + if (((windowStart_ - windowEnd_) & 1) != 0) + { + // We always want an even number of bytes in input, see peekBits + buffer_ = (uint)(window_[windowStart_++] & 0xff); + bitsInBuffer_ = 8; + } + return count + length; + } + + /// + /// Resets state and empties internal buffers + /// + public void Reset() + { + buffer_ = 0; + windowStart_ = windowEnd_ = bitsInBuffer_ = 0; + } + + /// + /// Add more input for consumption. + /// Only call when IsNeedingInput returns true + /// + /// data to be input + /// offset of first byte of input + /// number of bytes of input to add. + public void SetInput(byte[] buffer, int offset, int count) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (offset < 0) + { + throw new ArgumentOutOfRangeException(nameof(offset), "Cannot be negative"); + } + + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count), "Cannot be negative"); + } + + if (windowStart_ < windowEnd_) + { + throw new InvalidOperationException("Old input was not completely processed"); + } + + int end = offset + count; + + // We want to throw an ArrayIndexOutOfBoundsException early. + // Note the check also handles integer wrap around. 
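// [Editor's note] The arithmetic here is unchecked by default in C#, so if
// offset + count overflows, end wraps negative and the offset > end test
// below catches it without needing a separate checked block.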
+ if ((offset > end) || (end > buffer.Length)) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + if ((count & 1) != 0) + { + // We always want an even number of bytes in input, see PeekBits + buffer_ |= (uint)((buffer[offset++] & 0xff) << bitsInBuffer_); + bitsInBuffer_ += 8; + } + + window_ = buffer; + windowStart_ = offset; + windowEnd_ = end; + } + + #region Instance Fields + + private byte[] window_; + private int windowStart_; + private int windowEnd_; + + private uint buffer_; + private int bitsInBuffer_; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/StreamManipulator.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/StreamManipulator.cs.meta new file mode 100644 index 0000000..e49b560 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/Compression/Streams/StreamManipulator.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: bff14b5eefb4544b799a8e27d8b956e9 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/FastZip.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/FastZip.cs new file mode 100644 index 0000000..71a7396 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/FastZip.cs @@ -0,0 +1,784 @@ +using ICSharpCode.SharpZipLib.Core; +using ICSharpCode.SharpZipLib.Zip.Compression; +using System; +using System.IO; +using static ICSharpCode.SharpZipLib.Zip.Compression.Deflater; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// FastZipEvents supports all events applicable to FastZip operations. + /// + public class FastZipEvents + { + /// + /// Delegate to invoke when processing directories. + /// + public event EventHandler ProcessDirectory; + + /// + /// Delegate to invoke when processing files. + /// + public ProcessFileHandler ProcessFile; + + /// + /// Delegate to invoke during processing of files. + /// + public ProgressHandler Progress; + + /// + /// Delegate to invoke when processing for a file has been completed. + /// + public CompletedFileHandler CompletedFile; + + /// + /// Delegate to invoke when processing directory failures. + /// + public DirectoryFailureHandler DirectoryFailure; + + /// + /// Delegate to invoke when processing file failures. + /// + public FileFailureHandler FileFailure; + + /// + /// Raise the directory failure event. + /// + /// The directory causing the failure. + /// The exception for this event. + /// A boolean indicating if execution should continue or not. + public bool OnDirectoryFailure(string directory, Exception e) + { + bool result = false; + DirectoryFailureHandler handler = DirectoryFailure; + + if (handler != null) + { + var args = new ScanFailureEventArgs(directory, e); + handler(this, args); + result = args.ContinueRunning; + } + return result; + } + + /// + /// Fires the file failure handler delegate. + /// + /// The file causing the failure. + /// The exception for this failure. + /// A boolean indicating if execution should continue or not. 
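// [Editor's sketch, hypothetical usage; handler names are ours] Wiring the
// event object before handing it to a FastZip instance:
var events = new FastZipEvents();
events.ProcessFile = (sender, args) => Console.WriteLine("adding " + args.Name);
events.FileFailure = (sender, args) => args.ContinueRunning = false; // stop on first failure
var fastZip = new FastZip(events);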
+ public bool OnFileFailure(string file, Exception e) + { + FileFailureHandler handler = FileFailure; + bool result = (handler != null); + + if (result) + { + var args = new ScanFailureEventArgs(file, e); + handler(this, args); + result = args.ContinueRunning; + } + return result; + } + + /// + /// Fires the ProcessFile delegate. + /// + /// The file being processed. + /// A boolean indicating if execution should continue or not. + public bool OnProcessFile(string file) + { + bool result = true; + ProcessFileHandler handler = ProcessFile; + + if (handler != null) + { + var args = new ScanEventArgs(file); + handler(this, args); + result = args.ContinueRunning; + } + return result; + } + + /// + /// Fires the delegate + /// + /// The file whose processing has been completed. + /// A boolean indicating if execution should continue or not. + public bool OnCompletedFile(string file) + { + bool result = true; + CompletedFileHandler handler = CompletedFile; + if (handler != null) + { + var args = new ScanEventArgs(file); + handler(this, args); + result = args.ContinueRunning; + } + return result; + } + + /// + /// Fires the process directory delegate. + /// + /// The directory being processed. + /// Flag indicating if the directory has matching files as determined by the current filter. + /// A of true if the operation should continue; false otherwise. + public bool OnProcessDirectory(string directory, bool hasMatchingFiles) + { + bool result = true; + EventHandler handler = ProcessDirectory; + if (handler != null) + { + var args = new DirectoryEventArgs(directory, hasMatchingFiles); + handler(this, args); + result = args.ContinueRunning; + } + return result; + } + + /// + /// The minimum timespan between events. + /// + /// The minimum period of time between events. + /// + /// The default interval is three seconds. + public TimeSpan ProgressInterval + { + get { return progressInterval_; } + set { progressInterval_ = value; } + } + + #region Instance Fields + + private TimeSpan progressInterval_ = TimeSpan.FromSeconds(3); + + #endregion Instance Fields + } + + /// + /// FastZip provides facilities for creating and extracting zip files. + /// + public class FastZip + { + #region Enumerations + + /// + /// Defines the desired handling when overwriting files during extraction. + /// + public enum Overwrite + { + /// + /// Prompt the user to confirm overwriting + /// + Prompt, + + /// + /// Never overwrite files. + /// + Never, + + /// + /// Always overwrite files. + /// + Always + } + + #endregion Enumerations + + #region Constructors + + /// + /// Initialise a default instance of . + /// + public FastZip() + { + } + + /// + /// Initialise a new instance of + /// + /// The events to use during operations. + public FastZip(FastZipEvents events) + { + events_ = events; + } + + #endregion Constructors + + #region Properties + + /// + /// Get/set a value indicating whether empty directories should be created. + /// + public bool CreateEmptyDirectories + { + get { return createEmptyDirectories_; } + set { createEmptyDirectories_ = value; } + } + + /// + /// Get / set the password value. + /// + public string Password + { + get { return password_; } + set { password_ = value; } + } + + /// + /// Get or set the active when creating Zip files. + /// + /// + public INameTransform NameTransform + { + get { return entryFactory_.NameTransform; } + set + { + entryFactory_.NameTransform = value; + } + } + + /// + /// Get or set the active when creating Zip files. 
+ /// + public IEntryFactory EntryFactory + { + get { return entryFactory_; } + set + { + if (value == null) + { + entryFactory_ = new ZipEntryFactory(); + } + else + { + entryFactory_ = value; + } + } + } + + /// + /// Gets or sets the setting for Zip64 handling when writing. + /// + /// + /// The default value is dynamic which is not backwards compatible with old + /// programs and can cause problems with XP's built in compression which cant + /// read Zip64 archives. However it does avoid the situation were a large file + /// is added and cannot be completed correctly. + /// NOTE: Setting the size for entries before they are added is the best solution! + /// By default the EntryFactory used by FastZip will set the file size. + /// + public UseZip64 UseZip64 + { + get { return useZip64_; } + set { useZip64_ = value; } + } + + /// + /// Get/set a value indicating whether file dates and times should + /// be restored when extracting files from an archive. + /// + /// The default value is false. + public bool RestoreDateTimeOnExtract + { + get + { + return restoreDateTimeOnExtract_; + } + set + { + restoreDateTimeOnExtract_ = value; + } + } + + /// + /// Get/set a value indicating whether file attributes should + /// be restored during extract operations + /// + public bool RestoreAttributesOnExtract + { + get { return restoreAttributesOnExtract_; } + set { restoreAttributesOnExtract_ = value; } + } + + /// + /// Get/set the Compression Level that will be used + /// when creating the zip + /// + public Deflater.CompressionLevel CompressionLevel + { + get { return compressionLevel_; } + set { compressionLevel_ = value; } + } + + #endregion Properties + + #region Delegates + + /// + /// Delegate called when confirming overwriting of files. + /// + public delegate bool ConfirmOverwriteDelegate(string fileName); + + #endregion Delegates + + #region CreateZip + + /// + /// Create a zip file. + /// + /// The name of the zip file to create. + /// The directory to source files from. + /// True to recurse directories, false for no recursion. + /// The file filter to apply. + /// The directory filter to apply. + public void CreateZip(string zipFileName, string sourceDirectory, + bool recurse, string fileFilter, string directoryFilter) + { + CreateZip(File.Create(zipFileName), sourceDirectory, recurse, fileFilter, directoryFilter); + } + + /// + /// Create a zip file/archive. + /// + /// The name of the zip file to create. + /// The directory to obtain files and directories from. + /// True to recurse directories, false for no recursion. + /// The file filter to apply. + public void CreateZip(string zipFileName, string sourceDirectory, bool recurse, string fileFilter) + { + CreateZip(File.Create(zipFileName), sourceDirectory, recurse, fileFilter, null); + } + + /// + /// Create a zip archive sending output to the passed. + /// + /// The stream to write archive data to. + /// The directory to source files from. + /// True to recurse directories, false for no recursion. + /// The file filter to apply. + /// The directory filter to apply. + /// The is closed after creation. 
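// [Editor's sketch, hypothetical usage] The common one-call forms; the
// filters are NameFilter regular expressions, and a null filter is treated
// as match-everything:
var fastZip = new FastZip { CreateEmptyDirectories = true };
fastZip.CreateZip("backup.zip", "C:/data", true, @"\.txt$", null);
fastZip.ExtractZip("backup.zip", "C:/restore", null);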
+ public void CreateZip(Stream outputStream, string sourceDirectory, bool recurse, string fileFilter, string directoryFilter) + { + NameTransform = new ZipNameTransform(sourceDirectory); + sourceDirectory_ = sourceDirectory; + + using (outputStream_ = new ZipOutputStream(outputStream)) + { + outputStream_.SetLevel((int)CompressionLevel); + + if (password_ != null) + { + outputStream_.Password = password_; + } + + outputStream_.UseZip64 = UseZip64; + var scanner = new FileSystemScanner(fileFilter, directoryFilter); + scanner.ProcessFile += ProcessFile; + if (this.CreateEmptyDirectories) + { + scanner.ProcessDirectory += ProcessDirectory; + } + + if (events_ != null) + { + if (events_.FileFailure != null) + { + scanner.FileFailure += events_.FileFailure; + } + + if (events_.DirectoryFailure != null) + { + scanner.DirectoryFailure += events_.DirectoryFailure; + } + } + + scanner.Scan(sourceDirectory, recurse); + } + } + + #endregion CreateZip + + #region ExtractZip + + /// + /// Extract the contents of a zip file. + /// + /// The zip file to extract from. + /// The directory to save extracted information in. + /// A filter to apply to files. + public void ExtractZip(string zipFileName, string targetDirectory, string fileFilter) + { + ExtractZip(zipFileName, targetDirectory, Overwrite.Always, null, fileFilter, null, restoreDateTimeOnExtract_); + } + + /// + /// Extract the contents of a zip file. + /// + /// The zip file to extract from. + /// The directory to save extracted information in. + /// The style of overwriting to apply. + /// A delegate to invoke when confirming overwriting. + /// A filter to apply to files. + /// A filter to apply to directories. + /// Flag indicating whether to restore the date and time for extracted files. + /// Allow parent directory traversal in file paths (e.g. ../file) + public void ExtractZip(string zipFileName, string targetDirectory, + Overwrite overwrite, ConfirmOverwriteDelegate confirmDelegate, + string fileFilter, string directoryFilter, bool restoreDateTime, bool allowParentTraversal = false) + { + Stream inputStream = File.Open(zipFileName, FileMode.Open, FileAccess.Read, FileShare.Read); + ExtractZip(inputStream, targetDirectory, overwrite, confirmDelegate, fileFilter, directoryFilter, restoreDateTime, true, allowParentTraversal); + } + + /// + /// Extract the contents of a zip file held in a stream. + /// + /// The seekable input stream containing the zip to extract from. + /// The directory to save extracted information in. + /// The style of overwriting to apply. + /// A delegate to invoke when confirming overwriting. + /// A filter to apply to files. + /// A filter to apply to directories. + /// Flag indicating whether to restore the date and time for extracted files. + /// Flag indicating whether the inputStream will be closed by this method. + /// Allow parent directory traversal in file paths (e.g. 
../file) + public void ExtractZip(Stream inputStream, string targetDirectory, + Overwrite overwrite, ConfirmOverwriteDelegate confirmDelegate, + string fileFilter, string directoryFilter, bool restoreDateTime, + bool isStreamOwner, bool allowParentTraversal = false) + { + if ((overwrite == Overwrite.Prompt) && (confirmDelegate == null)) + { + throw new ArgumentNullException(nameof(confirmDelegate)); + } + + continueRunning_ = true; + overwrite_ = overwrite; + confirmDelegate_ = confirmDelegate; + extractNameTransform_ = new WindowsNameTransform(targetDirectory, allowParentTraversal); + + fileFilter_ = new NameFilter(fileFilter); + directoryFilter_ = new NameFilter(directoryFilter); + restoreDateTimeOnExtract_ = restoreDateTime; + + using (zipFile_ = new ZipFile(inputStream, !isStreamOwner)) + { + if (password_ != null) + { + zipFile_.Password = password_; + } + + System.Collections.IEnumerator enumerator = zipFile_.GetEnumerator(); + while (continueRunning_ && enumerator.MoveNext()) + { + var entry = (ZipEntry)enumerator.Current; + if (entry.IsFile) + { + // TODO Path.GetDirectory can fail here on invalid characters. + if (directoryFilter_.IsMatch(Path.GetDirectoryName(entry.Name)) && fileFilter_.IsMatch(entry.Name)) + { + ExtractEntry(entry); + } + } + else if (entry.IsDirectory) + { + if (directoryFilter_.IsMatch(entry.Name) && CreateEmptyDirectories) + { + ExtractEntry(entry); + } + } + else + { + // Do nothing for volume labels etc... + } + } + } + } + + #endregion ExtractZip + + #region Internal Processing + + private void ProcessDirectory(object sender, DirectoryEventArgs e) + { + if (!e.HasMatchingFiles && CreateEmptyDirectories) + { + if (events_ != null) + { + events_.OnProcessDirectory(e.Name, e.HasMatchingFiles); + } + + if (e.ContinueRunning) + { + if (e.Name != sourceDirectory_) + { + ZipEntry entry = entryFactory_.MakeDirectoryEntry(e.Name); + outputStream_.PutNextEntry(entry); + } + } + } + } + + private void ProcessFile(object sender, ScanEventArgs e) + { + if ((events_ != null) && (events_.ProcessFile != null)) + { + events_.ProcessFile(sender, e); + } + + if (e.ContinueRunning) + { + try + { + // The open below is equivalent to OpenRead which guarantees that if opened the + // file will not be changed by subsequent openers, but precludes opening in some cases + // were it could succeed. ie the open may fail as its already open for writing and the share mode should reflect that. 
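// [Editor's note] FileShare.Read admits concurrent readers but excludes
// writers for the duration of the copy; if the file is already open for
// writing elsewhere, this File.Open throws and the failure is routed
// through events_.OnFileFailure in the catch block below.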
+ using (FileStream stream = File.Open(e.Name, FileMode.Open, FileAccess.Read, FileShare.Read)) + { + ZipEntry entry = entryFactory_.MakeFileEntry(e.Name); + outputStream_.PutNextEntry(entry); + AddFileContents(e.Name, stream); + } + } + catch (Exception ex) + { + if (events_ != null) + { + continueRunning_ = events_.OnFileFailure(e.Name, ex); + } + else + { + continueRunning_ = false; + throw; + } + } + } + } + + private void AddFileContents(string name, Stream stream) + { + if (stream == null) + { + throw new ArgumentNullException(nameof(stream)); + } + + if (buffer_ == null) + { + buffer_ = new byte[4096]; + } + + if ((events_ != null) && (events_.Progress != null)) + { + StreamUtils.Copy(stream, outputStream_, buffer_, + events_.Progress, events_.ProgressInterval, this, name); + } + else + { + StreamUtils.Copy(stream, outputStream_, buffer_); + } + + if (events_ != null) + { + continueRunning_ = events_.OnCompletedFile(name); + } + } + + private void ExtractFileEntry(ZipEntry entry, string targetName) + { + bool proceed = true; + if (overwrite_ != Overwrite.Always) + { + if (File.Exists(targetName)) + { + if ((overwrite_ == Overwrite.Prompt) && (confirmDelegate_ != null)) + { + proceed = confirmDelegate_(targetName); + } + else + { + proceed = false; + } + } + } + + if (proceed) + { + if (events_ != null) + { + continueRunning_ = events_.OnProcessFile(entry.Name); + } + + if (continueRunning_) + { + try + { + using (FileStream outputStream = File.Create(targetName)) + { + if (buffer_ == null) + { + buffer_ = new byte[4096]; + } + if ((events_ != null) && (events_.Progress != null)) + { + StreamUtils.Copy(zipFile_.GetInputStream(entry), outputStream, buffer_, + events_.Progress, events_.ProgressInterval, this, entry.Name, entry.Size); + } + else + { + StreamUtils.Copy(zipFile_.GetInputStream(entry), outputStream, buffer_); + } + + if (events_ != null) + { + continueRunning_ = events_.OnCompletedFile(entry.Name); + } + } + + if (restoreDateTimeOnExtract_) + { + File.SetLastWriteTime(targetName, entry.DateTime); + } + + if (RestoreAttributesOnExtract && entry.IsDOSEntry && (entry.ExternalFileAttributes != -1)) + { + var fileAttributes = (FileAttributes)entry.ExternalFileAttributes; + // TODO: FastZip - Setting of other file attributes on extraction is a little trickier. + fileAttributes &= (FileAttributes.Archive | FileAttributes.Normal | FileAttributes.ReadOnly | FileAttributes.Hidden); + File.SetAttributes(targetName, fileAttributes); + } + } + catch (Exception ex) + { + if (events_ != null) + { + continueRunning_ = events_.OnFileFailure(targetName, ex); + } + else + { + continueRunning_ = false; + throw; + } + } + } + } + } + + private void ExtractEntry(ZipEntry entry) + { + bool doExtraction = entry.IsCompressionMethodSupported(); + string targetName = entry.Name; + + if (doExtraction) + { + if (entry.IsFile) + { + targetName = extractNameTransform_.TransformFile(targetName); + } + else if (entry.IsDirectory) + { + targetName = extractNameTransform_.TransformDirectory(targetName); + } + + doExtraction = !(string.IsNullOrEmpty(targetName)); + } + + // TODO: Fire delegate/throw exception were compression method not supported, or name is invalid? 
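+ // A sketch of hooking the FastZipEvents callbacks consumed above (member and argument
+ // names as referenced in this class; the exact delegate shapes are assumed):
+ //   var events = new FastZipEvents();
+ //   events.ProcessFile = (sender, e) => System.Console.WriteLine("adding " + e.Name);
+ //   events.FileFailure = (sender, e) => e.ContinueRunning = true; // skip unreadable files
+ //   var fastZip = new FastZip(events);
+ //   fastZip.CreateZip("logs.zip", "logs", true, null);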
+ + string dirName = null; + + if (doExtraction) + { + if (entry.IsDirectory) + { + dirName = targetName; + } + else + { + dirName = Path.GetDirectoryName(Path.GetFullPath(targetName)); + } + } + + if (doExtraction && !Directory.Exists(dirName)) + { + if (!entry.IsDirectory || CreateEmptyDirectories) + { + try + { + Directory.CreateDirectory(dirName); + + if (entry.IsDirectory && restoreDateTimeOnExtract_) + { + Directory.SetLastWriteTime(dirName, entry.DateTime); + } + } + catch (Exception ex) + { + doExtraction = false; + if (events_ != null) + { + if (entry.IsDirectory) + { + continueRunning_ = events_.OnDirectoryFailure(targetName, ex); + } + else + { + continueRunning_ = events_.OnFileFailure(targetName, ex); + } + } + else + { + continueRunning_ = false; + throw; + } + } + } + } + + if (doExtraction && entry.IsFile) + { + ExtractFileEntry(entry, targetName); + } + } + + private static int MakeExternalAttributes(FileInfo info) + { + return (int)info.Attributes; + } + + private static bool NameIsValid(string name) + { + return !string.IsNullOrEmpty(name) && + (name.IndexOfAny(Path.GetInvalidPathChars()) < 0); + } + + #endregion Internal Processing + + #region Instance Fields + + private bool continueRunning_; + private byte[] buffer_; + private ZipOutputStream outputStream_; + private ZipFile zipFile_; + private string sourceDirectory_; + private NameFilter fileFilter_; + private NameFilter directoryFilter_; + private Overwrite overwrite_; + private ConfirmOverwriteDelegate confirmDelegate_; + + private bool restoreDateTimeOnExtract_; + private bool restoreAttributesOnExtract_; + private bool createEmptyDirectories_; + private FastZipEvents events_; + private IEntryFactory entryFactory_ = new ZipEntryFactory(); + private INameTransform extractNameTransform_; + private UseZip64 useZip64_ = UseZip64.Dynamic; + private CompressionLevel compressionLevel_ = CompressionLevel.DEFAULT_COMPRESSION; + + private string password_; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/FastZip.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/FastZip.cs.meta new file mode 100644 index 0000000..275b636 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/FastZip.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 4c86b8d60bd1b4b448cfd477342c9536 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/IEntryFactory.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/IEntryFactory.cs new file mode 100644 index 0000000..bbe40c4 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/IEntryFactory.cs @@ -0,0 +1,54 @@ +using ICSharpCode.SharpZipLib.Core; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// Defines factory methods for creating new values. + /// + public interface IEntryFactory + { + /// + /// Create a for a file given its name + /// + /// The name of the file to create an entry for. + /// Returns a file entry based on the passed. + ZipEntry MakeFileEntry(string fileName); + + /// + /// Create a for a file given its name + /// + /// The name of the file to create an entry for. + /// If true get details from the file system if the file exists. + /// Returns a file entry based on the passed. 
+ ZipEntry MakeFileEntry(string fileName, bool useFileSystem); + + /// + /// Create a for a file given its actual name and optional override name + /// + /// The name of the file to create an entry for. + /// An alternative name to be used for the new entry. Null if not applicable. + /// If true get details from the file system if the file exists. + /// Returns a file entry based on the passed. + ZipEntry MakeFileEntry(string fileName, string entryName, bool useFileSystem); + + /// + /// Create a for a directory given its name + /// + /// The name of the directory to create an entry for. + /// Returns a directory entry based on the passed. + ZipEntry MakeDirectoryEntry(string directoryName); + + /// + /// Create a for a directory given its name + /// + /// The name of the directory to create an entry for. + /// If true get details from the file system for this directory if it exists. + /// Returns a directory entry based on the passed. + ZipEntry MakeDirectoryEntry(string directoryName, bool useFileSystem); + + /// + /// Get/set the applicable. + /// + INameTransform NameTransform { get; set; } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/IEntryFactory.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/IEntryFactory.cs.meta new file mode 100644 index 0000000..6511719 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/IEntryFactory.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: d173c94c3bbd6429ca4f661d311ff412 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/WindowsNameTransform.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/WindowsNameTransform.cs new file mode 100644 index 0000000..c10f5ce --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/WindowsNameTransform.cs @@ -0,0 +1,258 @@ +using ICSharpCode.SharpZipLib.Core; +using System; +using System.IO; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// WindowsNameTransform transforms names to windows compatible ones. + /// + public class WindowsNameTransform : INameTransform + { + /// + /// The maximum windows path name permitted. + /// + /// This may not valid for all windows systems - CE?, etc but I cant find the equivalent in the CLR. + private const int MaxPath = 260; + + private string _baseDirectory; + private bool _trimIncomingPaths; + private char _replacementChar = '_'; + private bool _allowParentTraversal; + + /// + /// In this case we need Windows' invalid path characters. + /// Path.GetInvalidPathChars() only returns a subset invalid on all platforms. + /// + private static readonly char[] InvalidEntryChars = new char[] { + '"', '<', '>', '|', '\0', '\u0001', '\u0002', '\u0003', '\u0004', '\u0005', + '\u0006', '\a', '\b', '\t', '\n', '\v', '\f', '\r', '\u000e', '\u000f', + '\u0010', '\u0011', '\u0012', '\u0013', '\u0014', '\u0015', '\u0016', + '\u0017', '\u0018', '\u0019', '\u001a', '\u001b', '\u001c', '\u001d', + '\u001e', '\u001f', + // extra characters for masks, etc. + '*', '?', ':' + }; + + /// + /// Initialises a new instance of + /// + /// + /// Allow parent directory traversal in file paths (e.g. ../file) + public WindowsNameTransform(string baseDirectory, bool allowParentTraversal = false) + { + BaseDirectory = baseDirectory ?? 
throw new ArgumentNullException(nameof(baseDirectory), "Directory name is invalid"); + AllowParentTraversal = allowParentTraversal; + } + + /// + /// Initialise a default instance of + /// + public WindowsNameTransform() + { + // Do nothing. + } + + /// + /// Gets or sets a value containing the target directory to prefix values with. + /// + public string BaseDirectory + { + get { return _baseDirectory; } + set + { + if (value == null) + { + throw new ArgumentNullException(nameof(value)); + } + + _baseDirectory = Path.GetFullPath(value); + } + } + + /// + /// Allow parent directory traversal in file paths (e.g. ../file) + /// + public bool AllowParentTraversal + { + get => _allowParentTraversal; + set => _allowParentTraversal = value; + } + + /// + /// Gets or sets a value indicating whether paths on incoming values should be removed. + /// + public bool TrimIncomingPaths + { + get { return _trimIncomingPaths; } + set { _trimIncomingPaths = value; } + } + + /// + /// Transform a Zip directory name to a windows directory name. + /// + /// The directory name to transform. + /// The transformed name. + public string TransformDirectory(string name) + { + name = TransformFile(name); + if (name.Length > 0) + { + while (name.EndsWith(Path.DirectorySeparatorChar.ToString(), StringComparison.Ordinal)) + { + name = name.Remove(name.Length - 1, 1); + } + } + else + { + throw new InvalidNameException("Cannot have an empty directory name"); + } + return name; + } + + /// + /// Transform a Zip format file name to a windows style one. + /// + /// The file name to transform. + /// The transformed name. + public string TransformFile(string name) + { + if (name != null) + { + name = MakeValidName(name, _replacementChar); + + if (_trimIncomingPaths) + { + name = Path.GetFileName(name); + } + + // This may exceed windows length restrictions. + // Combine will throw a PathTooLongException in that case. + if (_baseDirectory != null) + { + name = Path.Combine(_baseDirectory, name); + + if (!_allowParentTraversal && !Path.GetFullPath(name).StartsWith(_baseDirectory, StringComparison.InvariantCultureIgnoreCase)) + { + throw new InvalidNameException("Parent traversal in paths is not allowed"); + } + } + } + else + { + name = string.Empty; + } + return name; + } + + /// + /// Test a name to see if it is a valid name for a windows filename as extracted from a Zip archive. + /// + /// The name to test. + /// Returns true if the name is a valid zip name; false otherwise. + /// The filename isnt a true windows path in some fundamental ways like no absolute paths, no rooted paths etc. + public static bool IsValidName(string name) + { + bool result = + (name != null) && + (name.Length <= MaxPath) && + (string.Compare(name, MakeValidName(name, '_'), StringComparison.Ordinal) == 0) + ; + + return result; + } + + /// + /// Force a name to be valid by replacing invalid characters with a fixed value + /// + /// The name to make valid + /// The replacement character to use for any invalid characters. + /// Returns a valid name + public static string MakeValidName(string name, char replacement) + { + if (name == null) + { + throw new ArgumentNullException(nameof(name)); + } + + name = WindowsPathUtils.DropPathRoot(name.Replace("/", Path.DirectorySeparatorChar.ToString())); + + // Drop any leading slashes. + while ((name.Length > 0) && (name[0] == Path.DirectorySeparatorChar)) + { + name = name.Remove(0, 1); + } + + // Drop any trailing slashes. 
+ while ((name.Length > 0) && (name[name.Length - 1] == Path.DirectorySeparatorChar)) + { + name = name.Remove(name.Length - 1, 1); + } + + // Convert consecutive \\ characters to \ + int index = name.IndexOf(string.Format("{0}{0}", Path.DirectorySeparatorChar), StringComparison.Ordinal); + while (index >= 0) + { + name = name.Remove(index, 1); + index = name.IndexOf(string.Format("{0}{0}", Path.DirectorySeparatorChar), StringComparison.Ordinal); + } + + // Convert any invalid characters using the replacement one. + index = name.IndexOfAny(InvalidEntryChars); + if (index >= 0) + { + var builder = new StringBuilder(name); + + while (index >= 0) + { + builder[index] = replacement; + + if (index >= name.Length) + { + index = -1; + } + else + { + index = name.IndexOfAny(InvalidEntryChars, index + 1); + } + } + name = builder.ToString(); + } + + // Check for names greater than MaxPath characters. + // TODO: Were is CLR version of MaxPath defined? Can't find it in Environment. + if (name.Length > MaxPath) + { + throw new PathTooLongException(); + } + + return name; + } + + /// + /// Gets or set the character to replace invalid characters during transformations. + /// + public char Replacement + { + get { return _replacementChar; } + set + { + for (int i = 0; i < InvalidEntryChars.Length; ++i) + { + if (InvalidEntryChars[i] == value) + { + throw new ArgumentException("invalid path character"); + } + } + + if ((value == Path.DirectorySeparatorChar) || (value == Path.AltDirectorySeparatorChar)) + { + throw new ArgumentException("invalid replacement character"); + } + + _replacementChar = value; + } + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/WindowsNameTransform.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/WindowsNameTransform.cs.meta new file mode 100644 index 0000000..5c91ea0 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/WindowsNameTransform.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 1148d87befa7a499c8f231dbbc5122d1 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipConstants.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipConstants.cs new file mode 100644 index 0000000..cc2fd27 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipConstants.cs @@ -0,0 +1,512 @@ +using System; + +namespace ICSharpCode.SharpZipLib.Zip +{ + #region Enumerations + + /// + /// Determines how entries are tested to see if they should use Zip64 extensions or not. + /// + public enum UseZip64 + { + /// + /// Zip64 will not be forced on entries during processing. + /// + /// An entry can have this overridden if required + Off, + + /// + /// Zip64 should always be used. + /// + On, + + /// + /// #ZipLib will determine use based on entry values when added to archive. + /// + Dynamic, + } + + /// + /// The kind of compression used for an entry in an archive + /// + public enum CompressionMethod + { + /// + /// A direct copy of the file contents is held in the archive + /// + Stored = 0, + + /// + /// Common Zip compression method using a sliding dictionary + /// of up to 32KB and secondary compression from Huffman/Shannon-Fano trees + /// + Deflated = 8, + + /// + /// An extension to deflate with a 64KB window. 
Not supported by #Zip currently + /// + Deflate64 = 9, + + /// + /// BZip2 compression. Not supported by #Zip. + /// + BZip2 = 12, + + /// + /// LZMA compression. Not supported by #Zip. + /// + LZMA = 14, + + /// + /// PPMd compression. Not supported by #Zip. + /// + PPMd = 98, + + /// + /// WinZip special for AES encryption, Now supported by #Zip. + /// + WinZipAES = 99, + } + + /// + /// Identifies the encryption algorithm used for an entry + /// + public enum EncryptionAlgorithm + { + /// + /// No encryption has been used. + /// + None = 0, + + /// + /// Encrypted using PKZIP 2.0 or 'classic' encryption. + /// + PkzipClassic = 1, + + /// + /// DES encryption has been used. + /// + Des = 0x6601, + + /// + /// RC2 encryption has been used for encryption. + /// + RC2 = 0x6602, + + /// + /// Triple DES encryption with 168 bit keys has been used for this entry. + /// + TripleDes168 = 0x6603, + + /// + /// Triple DES with 112 bit keys has been used for this entry. + /// + TripleDes112 = 0x6609, + + /// + /// AES 128 has been used for encryption. + /// + Aes128 = 0x660e, + + /// + /// AES 192 has been used for encryption. + /// + Aes192 = 0x660f, + + /// + /// AES 256 has been used for encryption. + /// + Aes256 = 0x6610, + + /// + /// RC2 corrected has been used for encryption. + /// + RC2Corrected = 0x6702, + + /// + /// Blowfish has been used for encryption. + /// + Blowfish = 0x6720, + + /// + /// Twofish has been used for encryption. + /// + Twofish = 0x6721, + + /// + /// RC4 has been used for encryption. + /// + RC4 = 0x6801, + + /// + /// An unknown algorithm has been used for encryption. + /// + Unknown = 0xffff + } + + /// + /// Defines the contents of the general bit flags field for an archive entry. + /// + [Flags] + public enum GeneralBitFlags + { + /// + /// Bit 0 if set indicates that the file is encrypted + /// + Encrypted = 0x0001, + + /// + /// Bits 1 and 2 - Two bits defining the compression method (only for Method 6 Imploding and 8,9 Deflating) + /// + Method = 0x0006, + + /// + /// Bit 3 if set indicates a trailing data descriptor is appended to the entry data + /// + Descriptor = 0x0008, + + /// + /// Bit 4 is reserved for use with method 8 for enhanced deflation + /// + ReservedPKware4 = 0x0010, + + /// + /// Bit 5 if set indicates the file contains Pkzip compressed patched data. + /// Requires version 2.7 or greater. + /// + Patched = 0x0020, + + /// + /// Bit 6 if set indicates strong encryption has been used for this entry. + /// + StrongEncryption = 0x0040, + + /// + /// Bit 7 is currently unused + /// + Unused7 = 0x0080, + + /// + /// Bit 8 is currently unused + /// + Unused8 = 0x0100, + + /// + /// Bit 9 is currently unused + /// + Unused9 = 0x0200, + + /// + /// Bit 10 is currently unused + /// + Unused10 = 0x0400, + + /// + /// Bit 11 if set indicates the filename and + /// comment fields for this file must be encoded using UTF-8. + /// + UnicodeText = 0x0800, + + /// + /// Bit 12 is documented as being reserved by PKware for enhanced compression. + /// + EnhancedCompress = 0x1000, + + /// + /// Bit 13 if set indicates that values in the local header are masked to hide + /// their actual values, and the central directory is encrypted. + /// + /// + /// Used when encrypting the central directory contents. 
+ /// + HeaderMasked = 0x2000, + + /// + /// Bit 14 is documented as being reserved for use by PKware + /// + ReservedPkware14 = 0x4000, + + /// + /// Bit 15 is documented as being reserved for use by PKware + /// + ReservedPkware15 = 0x8000 + } + + #endregion Enumerations + + /// + /// This class contains constants used for Zip format files + /// + public static class ZipConstants + { + #region Versions + + /// + /// The version made by field for entries in the central header when created by this library + /// + /// + /// This is also the Zip version for the library when comparing against the version required to extract + /// for an entry. See . + /// + public const int VersionMadeBy = 51; // was 45 before AES + + /// + /// The version made by field for entries in the central header when created by this library + /// + /// + /// This is also the Zip version for the library when comparing against the version required to extract + /// for an entry. See ZipInputStream.CanDecompressEntry. + /// + [Obsolete("Use VersionMadeBy instead")] + public const int VERSION_MADE_BY = 51; + + /// + /// The minimum version required to support strong encryption + /// + public const int VersionStrongEncryption = 50; + + /// + /// The minimum version required to support strong encryption + /// + [Obsolete("Use VersionStrongEncryption instead")] + public const int VERSION_STRONG_ENCRYPTION = 50; + + /// + /// Version indicating AES encryption + /// + public const int VERSION_AES = 51; + + /// + /// The version required for Zip64 extensions (4.5 or higher) + /// + public const int VersionZip64 = 45; + + #endregion Versions + + #region Header Sizes + + /// + /// Size of local entry header (excluding variable length fields at end) + /// + public const int LocalHeaderBaseSize = 30; + + /// + /// Size of local entry header (excluding variable length fields at end) + /// + [Obsolete("Use LocalHeaderBaseSize instead")] + public const int LOCHDR = 30; + + /// + /// Size of Zip64 data descriptor + /// + public const int Zip64DataDescriptorSize = 24; + + /// + /// Size of data descriptor + /// + public const int DataDescriptorSize = 16; + + /// + /// Size of data descriptor + /// + [Obsolete("Use DataDescriptorSize instead")] + public const int EXTHDR = 16; + + /// + /// Size of central header entry (excluding variable fields) + /// + public const int CentralHeaderBaseSize = 46; + + /// + /// Size of central header entry + /// + [Obsolete("Use CentralHeaderBaseSize instead")] + public const int CENHDR = 46; + + /// + /// Size of end of central record (excluding variable fields) + /// + public const int EndOfCentralRecordBaseSize = 22; + + /// + /// Size of end of central record (excluding variable fields) + /// + [Obsolete("Use EndOfCentralRecordBaseSize instead")] + public const int ENDHDR = 22; + + /// + /// Size of 'classic' cryptographic header stored before any entry data + /// + public const int CryptoHeaderSize = 12; + + /// + /// Size of cryptographic header stored before entry data + /// + [Obsolete("Use CryptoHeaderSize instead")] + public const int CRYPTO_HEADER_SIZE = 12; + + /// + /// The size of the Zip64 central directory locator. 
+ /// + public const int Zip64EndOfCentralDirectoryLocatorSize = 20; + + #endregion Header Sizes + + #region Header Signatures + + /// + /// Signature for local entry header + /// + public const int LocalHeaderSignature = 'P' | ('K' << 8) | (3 << 16) | (4 << 24); + + /// + /// Signature for local entry header + /// + [Obsolete("Use LocalHeaderSignature instead")] + public const int LOCSIG = 'P' | ('K' << 8) | (3 << 16) | (4 << 24); + + /// + /// Signature for spanning entry + /// + public const int SpanningSignature = 'P' | ('K' << 8) | (7 << 16) | (8 << 24); + + /// + /// Signature for spanning entry + /// + [Obsolete("Use SpanningSignature instead")] + public const int SPANNINGSIG = 'P' | ('K' << 8) | (7 << 16) | (8 << 24); + + /// + /// Signature for temporary spanning entry + /// + public const int SpanningTempSignature = 'P' | ('K' << 8) | ('0' << 16) | ('0' << 24); + + /// + /// Signature for temporary spanning entry + /// + [Obsolete("Use SpanningTempSignature instead")] + public const int SPANTEMPSIG = 'P' | ('K' << 8) | ('0' << 16) | ('0' << 24); + + /// + /// Signature for data descriptor + /// + /// + /// This is only used where the length, Crc, or compressed size isnt known when the + /// entry is created and the output stream doesnt support seeking. + /// The local entry cannot be 'patched' with the correct values in this case + /// so the values are recorded after the data prefixed by this header, as well as in the central directory. + /// + public const int DataDescriptorSignature = 'P' | ('K' << 8) | (7 << 16) | (8 << 24); + + /// + /// Signature for data descriptor + /// + /// + /// This is only used where the length, Crc, or compressed size isnt known when the + /// entry is created and the output stream doesnt support seeking. + /// The local entry cannot be 'patched' with the correct values in this case + /// so the values are recorded after the data prefixed by this header, as well as in the central directory. + /// + [Obsolete("Use DataDescriptorSignature instead")] + public const int EXTSIG = 'P' | ('K' << 8) | (7 << 16) | (8 << 24); + + /// + /// Signature for central header + /// + [Obsolete("Use CentralHeaderSignature instead")] + public const int CENSIG = 'P' | ('K' << 8) | (1 << 16) | (2 << 24); + + /// + /// Signature for central header + /// + public const int CentralHeaderSignature = 'P' | ('K' << 8) | (1 << 16) | (2 << 24); + + /// + /// Signature for Zip64 central file header + /// + public const int Zip64CentralFileHeaderSignature = 'P' | ('K' << 8) | (6 << 16) | (6 << 24); + + /// + /// Signature for Zip64 central file header + /// + [Obsolete("Use Zip64CentralFileHeaderSignature instead")] + public const int CENSIG64 = 'P' | ('K' << 8) | (6 << 16) | (6 << 24); + + /// + /// Signature for Zip64 central directory locator + /// + public const int Zip64CentralDirLocatorSignature = 'P' | ('K' << 8) | (6 << 16) | (7 << 24); + + /// + /// Signature for archive extra data signature (were headers are encrypted). 
+ /// + public const int ArchiveExtraDataSignature = 'P' | ('K' << 8) | (6 << 16) | (7 << 24); + + /// + /// Central header digital signature + /// + public const int CentralHeaderDigitalSignature = 'P' | ('K' << 8) | (5 << 16) | (5 << 24); + + /// + /// Central header digital signature + /// + [Obsolete("Use CentralHeaderDigitalSignaure instead")] + public const int CENDIGITALSIG = 'P' | ('K' << 8) | (5 << 16) | (5 << 24); + + /// + /// End of central directory record signature + /// + public const int EndOfCentralDirectorySignature = 'P' | ('K' << 8) | (5 << 16) | (6 << 24); + + /// + /// End of central directory record signature + /// + [Obsolete("Use EndOfCentralDirectorySignature instead")] + public const int ENDSIG = 'P' | ('K' << 8) | (5 << 16) | (6 << 24); + + #endregion Header Signatures + + /// + /// Default encoding used for string conversion. 0 gives the default system OEM code page. + /// Using the default code page isnt the full solution necessarily + /// there are many variable factors, codepage 850 is often a good choice for + /// European users, however be careful about compatability. + /// + [Obsolete("Use ZipStrings instead")] + public static int DefaultCodePage + { + get => ZipStrings.CodePage; + set => ZipStrings.CodePage = value; + } + + /// Deprecated wrapper for + [Obsolete("Use ZipStrings.ConvertToString instead")] + public static string ConvertToString(byte[] data, int count) + => ZipStrings.ConvertToString(data, count); + + /// Deprecated wrapper for + [Obsolete("Use ZipStrings.ConvertToString instead")] + public static string ConvertToString(byte[] data) + => ZipStrings.ConvertToString(data); + + /// Deprecated wrapper for + [Obsolete("Use ZipStrings.ConvertToStringExt instead")] + public static string ConvertToStringExt(int flags, byte[] data, int count) + => ZipStrings.ConvertToStringExt(flags, data, count); + + /// Deprecated wrapper for + [Obsolete("Use ZipStrings.ConvertToStringExt instead")] + public static string ConvertToStringExt(int flags, byte[] data) + => ZipStrings.ConvertToStringExt(flags, data); + + /// Deprecated wrapper for + [Obsolete("Use ZipStrings.ConvertToArray instead")] + public static byte[] ConvertToArray(string str) + => ZipStrings.ConvertToArray(str); + + /// Deprecated wrapper for + [Obsolete("Use ZipStrings.ConvertToArray instead")] + public static byte[] ConvertToArray(int flags, string str) + => ZipStrings.ConvertToArray(flags, str); + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipConstants.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipConstants.cs.meta new file mode 100644 index 0000000..a03b4fc --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipConstants.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 890b93a581974407aa480e49310bbec3 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntry.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntry.cs new file mode 100644 index 0000000..7d15b01 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntry.cs @@ -0,0 +1,1337 @@ +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// Defines known values for the property. 
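+ // The signature constants above pack the two "PK" marker bytes plus a two-byte type code
+ // into a little-endian int; a quick worked check (not part of the library):
+ //   int locsig = 'P' | ('K' << 8) | (3 << 16) | (4 << 24);
+ //   // locsig == 0x04034B50, i.e. the bytes 50 4B 03 04 ("PK\x03\x04") as stored on disk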
+ /// + public enum HostSystemID + { + /// + /// Host system = MSDOS + /// + Msdos = 0, + + /// + /// Host system = Amiga + /// + Amiga = 1, + + /// + /// Host system = Open VMS + /// + OpenVms = 2, + + /// + /// Host system = Unix + /// + Unix = 3, + + /// + /// Host system = VMCms + /// + VMCms = 4, + + /// + /// Host system = Atari ST + /// + AtariST = 5, + + /// + /// Host system = OS2 + /// + OS2 = 6, + + /// + /// Host system = Macintosh + /// + Macintosh = 7, + + /// + /// Host system = ZSystem + /// + ZSystem = 8, + + /// + /// Host system = Cpm + /// + Cpm = 9, + + /// + /// Host system = Windows NT + /// + WindowsNT = 10, + + /// + /// Host system = MVS + /// + MVS = 11, + + /// + /// Host system = VSE + /// + Vse = 12, + + /// + /// Host system = Acorn RISC + /// + AcornRisc = 13, + + /// + /// Host system = VFAT + /// + Vfat = 14, + + /// + /// Host system = Alternate MVS + /// + AlternateMvs = 15, + + /// + /// Host system = BEOS + /// + BeOS = 16, + + /// + /// Host system = Tandem + /// + Tandem = 17, + + /// + /// Host system = OS400 + /// + OS400 = 18, + + /// + /// Host system = OSX + /// + OSX = 19, + + /// + /// Host system = WinZIP AES + /// + WinZipAES = 99, + } + + /// + /// This class represents an entry in a zip archive. This can be a file + /// or a directory + /// ZipFile and ZipInputStream will give you instances of this class as + /// information about the members in an archive. ZipOutputStream + /// uses an instance of this class when creating an entry in a Zip file. + ///
+ /// Author of the original Java version: Jochen Hoenicke + ///
+ public class ZipEntry + { + [Flags] + private enum Known : byte + { + None = 0, + Size = 0x01, + CompressedSize = 0x02, + Crc = 0x04, + Time = 0x08, + ExternalAttributes = 0x10, + } + + #region Constructors + + /// + /// Creates a zip entry with the given name. + /// + /// + /// The name for this entry. Can include directory components. + /// The convention for names is 'unix' style paths with relative names only. + /// There are with no device names and path elements are separated by '/' characters. + /// + /// + /// The name passed is null + /// + public ZipEntry(string name) + : this(name, 0, ZipConstants.VersionMadeBy, CompressionMethod.Deflated) + { + } + + /// + /// Creates a zip entry with the given name and version required to extract + /// + /// + /// The name for this entry. Can include directory components. + /// The convention for names is 'unix' style paths with no device names and + /// path elements separated by '/' characters. This is not enforced see CleanName + /// on how to ensure names are valid if this is desired. + /// + /// + /// The minimum 'feature version' required this entry + /// + /// + /// The name passed is null + /// + internal ZipEntry(string name, int versionRequiredToExtract) + : this(name, versionRequiredToExtract, ZipConstants.VersionMadeBy, + CompressionMethod.Deflated) + { + } + + /// + /// Initializes an entry with the given name and made by information + /// + /// Name for this entry + /// Version and HostSystem Information + /// Minimum required zip feature version required to extract this entry + /// Compression method for this entry. + /// + /// The name passed is null + /// + /// + /// versionRequiredToExtract should be 0 (auto-calculate) or > 10 + /// + /// + /// This constructor is used by the ZipFile class when reading from the central header + /// It is not generally useful, use the constructor specifying the name only. + /// + internal ZipEntry(string name, int versionRequiredToExtract, int madeByInfo, + CompressionMethod method) + { + if (name == null) + { + throw new ArgumentNullException(nameof(name)); + } + + if (name.Length > 0xffff) + { + throw new ArgumentException("Name is too long", nameof(name)); + } + + if ((versionRequiredToExtract != 0) && (versionRequiredToExtract < 10)) + { + throw new ArgumentOutOfRangeException(nameof(versionRequiredToExtract)); + } + + this.DateTime = DateTime.Now; + this.name = name; + this.versionMadeBy = (ushort)madeByInfo; + this.versionToExtract = (ushort)versionRequiredToExtract; + this.method = method; + + IsUnicodeText = ZipStrings.UseUnicode; + } + + /// + /// Creates a deep copy of the given zip entry. + /// + /// + /// The entry to copy. 
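+ // Typical construction from calling code (sketch; zipOut is assumed to be a
+ // ZipOutputStream, the class FastZip writes through above):
+ //   var entry = new ZipEntry("docs/readme.txt") { DateTime = DateTime.Now };
+ //   zipOut.PutNextEntry(entry);
+ //   // ...write the entry data to zipOut, then zipOut.CloseEntry();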
+ /// + [Obsolete("Use Clone instead")] + public ZipEntry(ZipEntry entry) + { + if (entry == null) + { + throw new ArgumentNullException(nameof(entry)); + } + + known = entry.known; + name = entry.name; + size = entry.size; + compressedSize = entry.compressedSize; + crc = entry.crc; + dateTime = entry.DateTime; + method = entry.method; + comment = entry.comment; + versionToExtract = entry.versionToExtract; + versionMadeBy = entry.versionMadeBy; + externalFileAttributes = entry.externalFileAttributes; + flags = entry.flags; + + zipFileIndex = entry.zipFileIndex; + offset = entry.offset; + + forceZip64_ = entry.forceZip64_; + + if (entry.extra != null) + { + extra = new byte[entry.extra.Length]; + Array.Copy(entry.extra, 0, extra, 0, entry.extra.Length); + } + } + + #endregion Constructors + + /// + /// Get a value indicating whether the entry has a CRC value available. + /// + public bool HasCrc + { + get + { + return (known & Known.Crc) != 0; + } + } + + /// + /// Get/Set flag indicating if entry is encrypted. + /// A simple helper routine to aid interpretation of flags + /// + /// This is an assistant that interprets the flags property. + public bool IsCrypted + { + get + { + return (flags & 1) != 0; + } + set + { + if (value) + { + flags |= 1; + } + else + { + flags &= ~1; + } + } + } + + /// + /// Get / set a flag indicating whether entry name and comment text are + /// encoded in unicode UTF8. + /// + /// This is an assistant that interprets the flags property. + public bool IsUnicodeText + { + get + { + return (flags & (int)GeneralBitFlags.UnicodeText) != 0; + } + set + { + if (value) + { + flags |= (int)GeneralBitFlags.UnicodeText; + } + else + { + flags &= ~(int)GeneralBitFlags.UnicodeText; + } + } + } + + /// + /// Value used during password checking for PKZIP 2.0 / 'classic' encryption. + /// + internal byte CryptoCheckValue + { + get + { + return cryptoCheckValue_; + } + + set + { + cryptoCheckValue_ = value; + } + } + + /// + /// Get/Set general purpose bit flag for entry + /// + /// + /// General purpose bit flag
+ ///
+ /// Bit 0: If set, indicates the file is encrypted
+ /// Bits 1-2: Only used for compression type 6 (Imploding) and types 8, 9 (Deflating)
+ /// Imploding:
+ /// Bit 1 if set indicates an 8K sliding dictionary was used. If clear a 4k dictionary was used
+ /// Bit 2 if set indicates 3 Shannon-Fano trees were used to encode the sliding dictionary, 2 otherwise
+ ///
+ /// Deflating:
+ /// Bit 2 Bit 1
+ /// 0 0 Normal compression was used
+ /// 0 1 Maximum compression was used
+ /// 1 0 Fast compression was used
+ /// 1 1 Super fast compression was used
+ ///
+ /// Bit 3: If set, the fields crc-32, compressed size + /// and uncompressed size were not able to be written during zip file creation. + /// The correct values are held in a data descriptor immediately following the compressed data.
+ /// Bit 4: Reserved for use by PKZIP for enhanced deflating
+ /// Bit 5: If set indicates the file contains compressed patch data
+ /// Bit 6: If set indicates strong encryption was used.
+ /// Bit 7-10: Unused or reserved
+ /// Bit 11: If set, the name and comments for this entry are in Unicode.
+ /// Bit 12-15: Unused or reserved
+ ///
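+ // These are plain bit flags, so callers test them with a mask against the Flags
+ // property below (sketch; entry is any ZipEntry):
+ //   bool hasDescriptor = (entry.Flags & (int)GeneralBitFlags.Descriptor) != 0;
+ //   bool utf8Text = (entry.Flags & (int)GeneralBitFlags.UnicodeText) != 0;
+ //   // the IsUnicodeText property above wraps exactly this UnicodeText test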
+ /// + /// + public int Flags + { + get + { + return flags; + } + set + { + flags = value; + } + } + + /// + /// Get/Set index of this entry in Zip file + /// + /// This is only valid when the entry is part of a + public long ZipFileIndex + { + get + { + return zipFileIndex; + } + set + { + zipFileIndex = value; + } + } + + /// + /// Get/set offset for use in central header + /// + public long Offset + { + get + { + return offset; + } + set + { + offset = value; + } + } + + /// + /// Get/Set external file attributes as an integer. + /// The values of this are operating system dependant see + /// HostSystem for details + /// + public int ExternalFileAttributes + { + get + { + if ((known & Known.ExternalAttributes) == 0) + { + return -1; + } + else + { + return externalFileAttributes; + } + } + + set + { + externalFileAttributes = value; + known |= Known.ExternalAttributes; + } + } + + /// + /// Get the version made by for this entry or zero if unknown. + /// The value / 10 indicates the major version number, and + /// the value mod 10 is the minor version number + /// + public int VersionMadeBy + { + get + { + return (versionMadeBy & 0xff); + } + } + + /// + /// Get a value indicating this entry is for a DOS/Windows system. + /// + public bool IsDOSEntry + { + get + { + return ((HostSystem == (int)HostSystemID.Msdos) || + (HostSystem == (int)HostSystemID.WindowsNT)); + } + } + + /// + /// Test the external attributes for this to + /// see if the external attributes are Dos based (including WINNT and variants) + /// and match the values + /// + /// The attributes to test. + /// Returns true if the external attributes are known to be DOS/Windows + /// based and have the same attributes set as the value passed. + private bool HasDosAttributes(int attributes) + { + bool result = false; + if ((known & Known.ExternalAttributes) != 0) + { + result |= (((HostSystem == (int)HostSystemID.Msdos) || + (HostSystem == (int)HostSystemID.WindowsNT)) && + (ExternalFileAttributes & attributes) == attributes); + } + return result; + } + + /// + /// Gets the compatability information for the external file attribute + /// If the external file attributes are compatible with MS-DOS and can be read + /// by PKZIP for DOS version 2.04g then this value will be zero. Otherwise the value + /// will be non-zero and identify the host system on which the attributes are compatible. + /// + /// + /// + /// The values for this as defined in the Zip File format and by others are shown below. The values are somewhat + /// misleading in some cases as they are not all used as shown. You should consult the relevant documentation + /// to obtain up to date and correct information. The modified appnote by the infozip group is + /// particularly helpful as it documents a lot of peculiarities. The document is however a little dated. 
+ /// + /// 0 - MS-DOS and OS/2 (FAT / VFAT / FAT32 file systems) + /// 1 - Amiga + /// 2 - OpenVMS + /// 3 - Unix + /// 4 - VM/CMS + /// 5 - Atari ST + /// 6 - OS/2 HPFS + /// 7 - Macintosh + /// 8 - Z-System + /// 9 - CP/M + /// 10 - Windows NTFS + /// 11 - MVS (OS/390 - Z/OS) + /// 12 - VSE + /// 13 - Acorn Risc + /// 14 - VFAT + /// 15 - Alternate MVS + /// 16 - BeOS + /// 17 - Tandem + /// 18 - OS/400 + /// 19 - OS/X (Darwin) + /// 99 - WinZip AES + /// remainder - unused + /// + /// + public int HostSystem + { + get + { + return (versionMadeBy >> 8) & 0xff; + } + + set + { + versionMadeBy &= 0x00ff; + versionMadeBy |= (ushort)((value & 0xff) << 8); + } + } + + /// + /// Get minimum Zip feature version required to extract this entry + /// + /// + /// Minimum features are defined as:
+ /// 1.0 - Default value
+ /// 1.1 - File is a volume label
+ /// 2.0 - File is a folder/directory
+ /// 2.0 - File is compressed using Deflate compression
+ /// 2.0 - File is encrypted using traditional encryption
+ /// 2.1 - File is compressed using Deflate64
+ /// 2.5 - File is compressed using PKWARE DCL Implode
+ /// 2.7 - File is a patch data set
+ /// 4.5 - File uses Zip64 format extensions
+ /// 4.6 - File is compressed using BZIP2 compression
+ /// 5.0 - File is encrypted using DES
+ /// 5.0 - File is encrypted using 3DES
+ /// 5.0 - File is encrypted using original RC2 encryption
+ /// 5.0 - File is encrypted using RC4 encryption
+ /// 5.1 - File is encrypted using AES encryption
+ /// 5.1 - File is encrypted using corrected RC2 encryption
+ /// 5.1 - File is encrypted using corrected RC2-64 encryption
+ /// 6.1 - File is encrypted using non-OAEP key wrapping
+ /// 6.2 - Central directory encryption (not confirmed yet)
+ /// 6.3 - File is compressed using LZMA
+ /// 6.3 - File is compressed using PPMd+
+ /// 6.3 - File is encrypted using Blowfish
+ /// 6.3 - File is encrypted using Twofish
+ ///
+ /// + public int Version + { + get + { + // Return recorded version if known. + if (versionToExtract != 0) + { + return versionToExtract & 0x00ff; // Only lower order byte. High order is O/S file system. + } + else + { + int result = 10; + if (AESKeySize > 0) + { + result = ZipConstants.VERSION_AES; // Ver 5.1 = AES + } + else if (CentralHeaderRequiresZip64) + { + result = ZipConstants.VersionZip64; + } + else if (CompressionMethod.Deflated == method) + { + result = 20; + } + else if (IsDirectory == true) + { + result = 20; + } + else if (IsCrypted == true) + { + result = 20; + } + else if (HasDosAttributes(0x08)) + { + result = 11; + } + return result; + } + } + } + + /// + /// Get a value indicating whether this entry can be decompressed by the library. + /// + /// This is based on the and + /// whether the compression method is supported. + public bool CanDecompress + { + get + { + return (Version <= ZipConstants.VersionMadeBy) && + ((Version == 10) || + (Version == 11) || + (Version == 20) || + (Version == 45) || + (Version == 51)) && + IsCompressionMethodSupported(); + } + } + + /// + /// Force this entry to be recorded using Zip64 extensions. + /// + public void ForceZip64() + { + forceZip64_ = true; + } + + /// + /// Get a value indicating whether Zip64 extensions were forced. + /// + /// A value of true if Zip64 extensions have been forced on; false if not. + public bool IsZip64Forced() + { + return forceZip64_; + } + + /// + /// Gets a value indicating if the entry requires Zip64 extensions + /// to store the full entry values. + /// + /// A value of true if a local header requires Zip64 extensions; false if not. + public bool LocalHeaderRequiresZip64 + { + get + { + bool result = forceZip64_; + + if (!result) + { + ulong trueCompressedSize = compressedSize; + + if ((versionToExtract == 0) && IsCrypted) + { + trueCompressedSize += ZipConstants.CryptoHeaderSize; + } + + // TODO: A better estimation of the true limit based on compression overhead should be used + // to determine when an entry should use Zip64. + result = + ((this.size >= uint.MaxValue) || (trueCompressedSize >= uint.MaxValue)) && + ((versionToExtract == 0) || (versionToExtract >= ZipConstants.VersionZip64)); + } + + return result; + } + } + + /// + /// Get a value indicating whether the central directory entry requires Zip64 extensions to be stored. + /// + public bool CentralHeaderRequiresZip64 + { + get + { + return LocalHeaderRequiresZip64 || (offset >= uint.MaxValue); + } + } + + /// + /// Get/Set DosTime value. + /// + /// + /// The MS-DOS date format can only represent dates between 1/1/1980 and 12/31/2107. 
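+ // Worked example of the packing implemented below: 2024-06-15 10:30:00 packs as
+ //   ((2024 - 1980) & 0x7f) << 25 | (6 << 21) | (15 << 16) | (10 << 11) | (30 << 5) | (0 >> 1)
+ //   == 0x58CF53C0
+ // i.e. a 7-bit year offset from 1980, then 4-bit month, 5-bit day, 5-bit hour,
+ // 6-bit minute, and the seconds stored in 2-second units.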
+ /// + public long DosTime + { + get + { + if ((known & Known.Time) == 0) + { + return 0; + } + else + { + var year = (uint)DateTime.Year; + var month = (uint)DateTime.Month; + var day = (uint)DateTime.Day; + var hour = (uint)DateTime.Hour; + var minute = (uint)DateTime.Minute; + var second = (uint)DateTime.Second; + + if (year < 1980) + { + year = 1980; + month = 1; + day = 1; + hour = 0; + minute = 0; + second = 0; + } + else if (year > 2107) + { + year = 2107; + month = 12; + day = 31; + hour = 23; + minute = 59; + second = 59; + } + + return ((year - 1980) & 0x7f) << 25 | + (month << 21) | + (day << 16) | + (hour << 11) | + (minute << 5) | + (second >> 1); + } + } + + set + { + unchecked + { + var dosTime = (uint)value; + uint sec = Math.Min(59, 2 * (dosTime & 0x1f)); + uint min = Math.Min(59, (dosTime >> 5) & 0x3f); + uint hrs = Math.Min(23, (dosTime >> 11) & 0x1f); + uint mon = Math.Max(1, Math.Min(12, ((uint)(value >> 21) & 0xf))); + uint year = ((dosTime >> 25) & 0x7f) + 1980; + int day = Math.Max(1, Math.Min(DateTime.DaysInMonth((int)year, (int)mon), (int)((value >> 16) & 0x1f))); + DateTime = new DateTime((int)year, (int)mon, day, (int)hrs, (int)min, (int)sec, DateTimeKind.Utc); + } + } + } + + /// + /// Gets/Sets the time of last modification of the entry. + /// + /// + /// The property is updated to match this as far as possible. + /// + public DateTime DateTime + { + get + { + return dateTime; + } + + set + { + dateTime = value; + known |= Known.Time; + } + } + + /// + /// Returns the entry name. + /// + /// + /// The unix naming convention is followed. + /// Path components in the entry should always separated by forward slashes ('/'). + /// Dos device names like C: should also be removed. + /// See the class, or + /// + public string Name + { + get + { + return name; + } + } + + /// + /// Gets/Sets the size of the uncompressed data. + /// + /// + /// The size or -1 if unknown. + /// + /// Setting the size before adding an entry to an archive can help + /// avoid compatability problems with some archivers which dont understand Zip64 extensions. + public long Size + { + get + { + return (known & Known.Size) != 0 ? (long)size : -1L; + } + set + { + this.size = (ulong)value; + this.known |= Known.Size; + } + } + + /// + /// Gets/Sets the size of the compressed data. + /// + /// + /// The compressed entry size or -1 if unknown. + /// + public long CompressedSize + { + get + { + return (known & Known.CompressedSize) != 0 ? (long)compressedSize : -1L; + } + set + { + this.compressedSize = (ulong)value; + this.known |= Known.CompressedSize; + } + } + + /// + /// Gets/Sets the crc of the uncompressed data. + /// + /// + /// Crc is not in the range 0..0xffffffffL + /// + /// + /// The crc value or -1 if unknown. + /// + public long Crc + { + get + { + return (known & Known.Crc) != 0 ? crc & 0xffffffffL : -1L; + } + set + { + if (((ulong)crc & 0xffffffff00000000L) != 0) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + this.crc = (uint)value; + this.known |= Known.Crc; + } + } + + /// + /// Gets/Sets the compression method. Only Deflated and Stored are supported. + /// + /// + /// The compression method for this entry + /// + /// + /// + public CompressionMethod CompressionMethod + { + get + { + return method; + } + + set + { + if (!IsCompressionMethodSupported(value)) + { + throw new NotSupportedException("Compression method not supported"); + } + this.method = value; + } + } + + /// + /// Gets the compression method for outputting to the local or central header. 
+ /// Returns same value as CompressionMethod except when AES encrypting, which + /// places 99 in the method and places the real method in the extra data. + /// + internal CompressionMethod CompressionMethodForHeader + { + get + { + return (AESKeySize > 0) ? CompressionMethod.WinZipAES : method; + } + } + + /// + /// Gets/Sets the extra data. + /// + /// + /// Extra data is longer than 64KB (0xffff) bytes. + /// + /// + /// Extra data or null if not set. + /// + public byte[] ExtraData + { + get + { + // TODO: This is slightly safer but less efficient. Think about whether it should change. + // return (byte[]) extra.Clone(); + return extra; + } + + set + { + if (value == null) + { + extra = null; + } + else + { + if (value.Length > 0xffff) + { + throw new System.ArgumentOutOfRangeException(nameof(value)); + } + + extra = new byte[value.Length]; + Array.Copy(value, 0, extra, 0, value.Length); + } + } + } + + /// + /// For AES encrypted files returns or sets the number of bits of encryption (128, 192 or 256). + /// When setting, only 0 (off), 128 or 256 is supported. + /// + public int AESKeySize + { + get + { + // the strength (1 or 3) is in the entry header + switch (_aesEncryptionStrength) + { + case 0: + return 0; // Not AES + case 1: + return 128; + + case 2: + return 192; // Not used by WinZip + case 3: + return 256; + + default: + throw new ZipException("Invalid AESEncryptionStrength " + _aesEncryptionStrength); + } + } + set + { + switch (value) + { + case 0: + _aesEncryptionStrength = 0; + break; + + case 128: + _aesEncryptionStrength = 1; + break; + + case 256: + _aesEncryptionStrength = 3; + break; + + default: + throw new ZipException("AESKeySize must be 0, 128 or 256: " + value); + } + } + } + + /// + /// AES Encryption strength for storage in extra data in entry header. + /// 1 is 128 bit, 2 is 192 bit, 3 is 256 bit. + /// + internal byte AESEncryptionStrength + { + get + { + return (byte)_aesEncryptionStrength; + } + } + + /// + /// Returns the length of the salt, in bytes + /// + internal int AESSaltLen + { + get + { + // Key size -> Salt length: 128 bits = 8 bytes, 192 bits = 12 bytes, 256 bits = 16 bytes. + return AESKeySize / 16; + } + } + + /// + /// Number of extra bytes required to hold the AES Header fields (Salt, Pwd verify, AuthCode) + /// + internal int AESOverheadSize + { + get + { + // File format: + // Bytes Content + // Variable Salt value + // 2 Password verification value + // Variable Encrypted file data + // 10 Authentication code + return 12 + AESSaltLen; + } + } + + /// + /// Process extra data fields updating the entry based on the contents. + /// + /// True if the extra data fields should be handled + /// for a local header, rather than for a central header. + /// + internal void ProcessExtraData(bool localHeader) + { + var extraData = new ZipExtraData(this.extra); + + if (extraData.Find(0x0001)) + { + // Version required to extract is ignored here as some archivers dont set it correctly + // in theory it should be version 45 or higher + + // The recorded size will change but remember that this is zip64. + forceZip64_ = true; + + if (extraData.ValueLength < 4) + { + throw new ZipException("Extra data extended Zip64 information length is invalid"); + } + + // (localHeader ||) was deleted, because actually there is no specific difference with reading sizes between local header & central directory + // https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT + // ... + // 4.4 Explanation of fields + // ... 
+ // 4.4.8 compressed size: (4 bytes) + // 4.4.9 uncompressed size: (4 bytes) + // + // The size of the file compressed (4.4.8) and uncompressed, + // (4.4.9) respectively. When a decryption header is present it + // will be placed in front of the file data and the value of the + // compressed file size will include the bytes of the decryption + // header. If bit 3 of the general purpose bit flag is set, + // these fields are set to zero in the local header and the + // correct values are put in the data descriptor and + // in the central directory. If an archive is in ZIP64 format + // and the value in this field is 0xFFFFFFFF, the size will be + // in the corresponding 8 byte ZIP64 extended information + // extra field. When encrypting the central directory, if the + // local header is not in ZIP64 format and general purpose bit + // flag 13 is set indicating masking, the value stored for the + // uncompressed size in the Local Header will be zero. + // + // Otherwise there is problem with minizip implementation + if (size == uint.MaxValue) + { + size = (ulong)extraData.ReadLong(); + } + + if (compressedSize == uint.MaxValue) + { + compressedSize = (ulong)extraData.ReadLong(); + } + + if (!localHeader && (offset == uint.MaxValue)) + { + offset = extraData.ReadLong(); + } + + // Disk number on which file starts is ignored + } + else + { + if ( + ((versionToExtract & 0xff) >= ZipConstants.VersionZip64) && + ((size == uint.MaxValue) || (compressedSize == uint.MaxValue)) + ) + { + throw new ZipException("Zip64 Extended information required but is missing."); + } + } + + DateTime = GetDateTime(extraData) ?? DateTime; + if (method == CompressionMethod.WinZipAES) + { + ProcessAESExtraData(extraData); + } + } + + private DateTime? GetDateTime(ZipExtraData extraData) + { + // Check for NT timestamp + // NOTE: Disable by default to match behavior of InfoZIP +#if RESPECT_NT_TIMESTAMP + NTTaggedData ntData = extraData.GetData(); + if (ntData != null) + return ntData.LastModificationTime; +#endif + + // Check for Unix timestamp + ExtendedUnixData unixData = extraData.GetData(); + if (unixData != null && unixData.Include.HasFlag(ExtendedUnixData.Flags.ModificationTime)) + return unixData.ModificationTime; + + return null; + } + + // For AES the method in the entry is 99, and the real compression method is in the extradata + // + private void ProcessAESExtraData(ZipExtraData extraData) + { + if (extraData.Find(0x9901)) + { + // Set version for Zipfile.CreateAndInitDecryptionStream + versionToExtract = ZipConstants.VERSION_AES; // Ver 5.1 = AES see "Version" getter + + // + // Unpack AES extra data field see http://www.winzip.com/aes_info.htm + int length = extraData.ValueLength; // Data size currently 7 + if (length < 7) + throw new ZipException("AES Extra Data Length " + length + " invalid."); + int ver = extraData.ReadShort(); // Version number (1=AE-1 2=AE-2) + int vendorId = extraData.ReadShort(); // 2-character vendor ID 0x4541 = "AE" + int encrStrength = extraData.ReadByte(); // encryption strength 1 = 128 2 = 192 3 = 256 + int actualCompress = extraData.ReadShort(); // The actual compression method used to compress the file + _aesVer = ver; + _aesEncryptionStrength = encrStrength; + method = (CompressionMethod)actualCompress; + } + else + throw new ZipException("AES Extra Data missing"); + } + + /// + /// Gets/Sets the entry comment. + /// + /// + /// If comment is longer than 0xffff. + /// + /// + /// The comment or null if not set. 
+ /// + /// + /// A comment is only available for entries when read via the class. + /// The class doesnt have the comment data available. + /// + public string Comment + { + get + { + return comment; + } + set + { + // This test is strictly incorrect as the length is in characters + // while the storage limit is in bytes. + // While the test is partially correct in that a comment of this length or greater + // is definitely invalid, shorter comments may also have an invalid length + // where there are multi-byte characters + // The full test is not possible here however as the code page to apply conversions with + // isnt available. + if ((value != null) && (value.Length > 0xffff)) + { + throw new ArgumentOutOfRangeException(nameof(value), "cannot exceed 65535"); + } + + comment = value; + } + } + + /// + /// Gets a value indicating if the entry is a directory. + /// however. + /// + /// + /// A directory is determined by an entry name with a trailing slash '/'. + /// The external file attributes can also indicate an entry is for a directory. + /// Currently only dos/windows attributes are tested in this manner. + /// The trailing slash convention should always be followed. + /// + public bool IsDirectory + { + get + { + int nameLength = name.Length; + bool result = + ((nameLength > 0) && + ((name[nameLength - 1] == '/') || (name[nameLength - 1] == '\\'))) || + HasDosAttributes(16) + ; + return result; + } + } + + /// + /// Get a value of true if the entry appears to be a file; false otherwise + /// + /// + /// This only takes account of DOS/Windows attributes. Other operating systems are ignored. + /// For linux and others the result may be incorrect. + /// + public bool IsFile + { + get + { + return !IsDirectory && !HasDosAttributes(8); + } + } + + /// + /// Test entry to see if data can be extracted. + /// + /// Returns true if data can be extracted for this entry; false otherwise. + public bool IsCompressionMethodSupported() + { + return IsCompressionMethodSupported(CompressionMethod); + } + + #region ICloneable Members + + /// + /// Creates a copy of this zip entry. + /// + /// An that is a copy of the current instance. + public object Clone() + { + var result = (ZipEntry)this.MemberwiseClone(); + + // Ensure extra data is unique if it exists. + if (extra != null) + { + result.extra = new byte[extra.Length]; + Array.Copy(extra, 0, result.extra, 0, extra.Length); + } + + return result; + } + + #endregion ICloneable Members + + /// + /// Gets a string representation of this ZipEntry. + /// + /// A readable textual representation of this + public override string ToString() + { + return name; + } + + /// + /// Test a compression method to see if this library + /// supports extracting data compressed with that method + /// + /// The compression method to test. + /// Returns true if the compression method is supported; false otherwise + public static bool IsCompressionMethodSupported(CompressionMethod method) + { + return + (method == CompressionMethod.Deflated) || + (method == CompressionMethod.Stored); + } + + /// + /// Cleans a name making it conform to Zip file conventions. + /// Devices names ('c:\') and UNC share names ('\\server\share') are removed + /// and forward slashes ('\') are converted to back slashes ('/'). + /// Names are made relative by trimming leading slashes which is compatible + /// with the ZIP naming convention. + /// + /// The name to clean + /// The 'cleaned' name. + /// + /// The Zip name transform class is more flexible. 
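+ // Behaviour sketch for CleanName below (as it would run on a Windows host):
+ //   ZipEntry.CleanName(@"C:\zoom\beet.txt")  // -> "zoom/beet.txt" (root dropped, '\' -> '/')
+ //   ZipEntry.CleanName("/usr/local/readme")  // -> "usr/local/readme" (leading '/' trimmed)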
+ /// + public static string CleanName(string name) + { + if (name == null) + { + return string.Empty; + } + + if (Path.IsPathRooted(name)) + { + // NOTE: + // for UNC names... \\machine\share\zoom\beet.txt gives \zoom\beet.txt + name = name.Substring(Path.GetPathRoot(name).Length); + } + + name = name.Replace(@"\", "/"); + + while ((name.Length > 0) && (name[0] == '/')) + { + name = name.Remove(0, 1); + } + return name; + } + + #region Instance Fields + + private Known known; + private int externalFileAttributes = -1; // contains external attributes (O/S dependant) + + private ushort versionMadeBy; // Contains host system and version information + // only relevant for central header entries + + private string name; + private ulong size; + private ulong compressedSize; + private ushort versionToExtract; // Version required to extract (library handles <= 2.0) + private uint crc; + private DateTime dateTime; + + private CompressionMethod method = CompressionMethod.Deflated; + private byte[] extra; + private string comment; + + private int flags; // general purpose bit flags + + private long zipFileIndex = -1; // used by ZipFile + private long offset; // used by ZipFile and ZipOutputStream + + private bool forceZip64_; + private byte cryptoCheckValue_; + private int _aesVer; // Version number (2 = AE-2 ?). Assigned but not used. + private int _aesEncryptionStrength; // Encryption strength 1 = 128 2 = 192 3 = 256 + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntry.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntry.cs.meta new file mode 100644 index 0000000..c5a2d34 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntry.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 81e58720c7c5e4f608a0794c854f6559 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntryFactory.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntryFactory.cs new file mode 100644 index 0000000..e82eafc --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntryFactory.cs @@ -0,0 +1,375 @@ +using ICSharpCode.SharpZipLib.Core; +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// Basic implementation of + /// + public class ZipEntryFactory : IEntryFactory + { + #region Enumerations + + /// + /// Defines the possible values to be used for the . + /// + public enum TimeSetting + { + /// + /// Use the recorded LastWriteTime value for the file. + /// + LastWriteTime, + + /// + /// Use the recorded LastWriteTimeUtc value for the file + /// + LastWriteTimeUtc, + + /// + /// Use the recorded CreateTime value for the file. + /// + CreateTime, + + /// + /// Use the recorded CreateTimeUtc value for the file. + /// + CreateTimeUtc, + + /// + /// Use the recorded LastAccessTime value for the file. + /// + LastAccessTime, + + /// + /// Use the recorded LastAccessTimeUtc value for the file. + /// + LastAccessTimeUtc, + + /// + /// Use a fixed value. + /// + /// The actual value used can be + /// specified via the constructor or + /// using the with the setting set + /// to which will use the when this class was constructed. + /// The property can also be used to set this value. 
+ Fixed, + } + + #endregion Enumerations + + #region Constructors + + /// + /// Initialise a new instance of the class. + /// + /// A default , and the LastWriteTime for files is used. + public ZipEntryFactory() + { + nameTransform_ = new ZipNameTransform(); + isUnicodeText_ = ZipStrings.UseUnicode; + } + + /// + /// Initialise a new instance of using the specified + /// + /// The time setting to use when creating Zip entries. + public ZipEntryFactory(TimeSetting timeSetting) : this() + { + timeSetting_ = timeSetting; + } + + /// + /// Initialise a new instance of using the specified + /// + /// The time to set all values to. + public ZipEntryFactory(DateTime time) : this() + { + timeSetting_ = TimeSetting.Fixed; + FixedDateTime = time; + } + + #endregion Constructors + + #region Properties + + /// + /// Get / set the to be used when creating new values. + /// + /// + /// Setting this property to null will cause a default name transform to be used. + /// + public INameTransform NameTransform + { + get { return nameTransform_; } + set + { + if (value == null) + { + nameTransform_ = new ZipNameTransform(); + } + else + { + nameTransform_ = value; + } + } + } + + /// + /// Get / set the in use. + /// + public TimeSetting Setting + { + get { return timeSetting_; } + set { timeSetting_ = value; } + } + + /// + /// Get / set the value to use when is set to + /// + public DateTime FixedDateTime + { + get { return fixedDateTime_; } + set + { + if (value.Year < 1970) + { + throw new ArgumentException("Value is too old to be valid", nameof(value)); + } + fixedDateTime_ = value; + } + } + + /// + /// A bitmask defining the attributes to be retrieved from the actual file. + /// + /// The default is to get all possible attributes from the actual file. + public int GetAttributes + { + get { return getAttributes_; } + set { getAttributes_ = value; } + } + + /// + /// A bitmask defining which attributes are to be set on. + /// + /// By default no attributes are set on. + public int SetAttributes + { + get { return setAttributes_; } + set { setAttributes_ = value; } + } + + /// + /// Get set a value indicating whether unidoce text should be set on. + /// + public bool IsUnicodeText + { + get { return isUnicodeText_; } + set { isUnicodeText_ = value; } + } + + #endregion Properties + + #region IEntryFactory Members + + /// + /// Make a new for a file. + /// + /// The name of the file to create a new entry for. + /// Returns a new based on the . + public ZipEntry MakeFileEntry(string fileName) + { + return MakeFileEntry(fileName, null, true); + } + + /// + /// Make a new for a file. + /// + /// The name of the file to create a new entry for. + /// If true entry detail is retrieved from the file system if the file exists. + /// Returns a new based on the . + public ZipEntry MakeFileEntry(string fileName, bool useFileSystem) + { + return MakeFileEntry(fileName, null, useFileSystem); + } + + /// + /// Make a new from a name. + /// + /// The name of the file to create a new entry for. + /// An alternative name to be used for the new entry. Null if not applicable. + /// If true entry detail is retrieved from the file system if the file exists. + /// Returns a new based on the . + public ZipEntry MakeFileEntry(string fileName, string entryName, bool useFileSystem) + { + var result = new ZipEntry(nameTransform_.TransformFile(!string.IsNullOrEmpty(entryName) ? 
entryName : fileName)); + result.IsUnicodeText = isUnicodeText_; + + int externalAttributes = 0; + bool useAttributes = (setAttributes_ != 0); + + FileInfo fi = null; + if (useFileSystem) + { + fi = new FileInfo(fileName); + } + + if ((fi != null) && fi.Exists) + { + switch (timeSetting_) + { + case TimeSetting.CreateTime: + result.DateTime = fi.CreationTime; + break; + + case TimeSetting.CreateTimeUtc: + result.DateTime = fi.CreationTimeUtc; + break; + + case TimeSetting.LastAccessTime: + result.DateTime = fi.LastAccessTime; + break; + + case TimeSetting.LastAccessTimeUtc: + result.DateTime = fi.LastAccessTimeUtc; + break; + + case TimeSetting.LastWriteTime: + result.DateTime = fi.LastWriteTime; + break; + + case TimeSetting.LastWriteTimeUtc: + result.DateTime = fi.LastWriteTimeUtc; + break; + + case TimeSetting.Fixed: + result.DateTime = fixedDateTime_; + break; + + default: + throw new ZipException("Unhandled time setting in MakeFileEntry"); + } + + result.Size = fi.Length; + + useAttributes = true; + externalAttributes = ((int)fi.Attributes & getAttributes_); + } + else + { + if (timeSetting_ == TimeSetting.Fixed) + { + result.DateTime = fixedDateTime_; + } + } + + if (useAttributes) + { + externalAttributes |= setAttributes_; + result.ExternalFileAttributes = externalAttributes; + } + + return result; + } + + /// + /// Make a new for a directory. + /// + /// The raw untransformed name for the new directory + /// Returns a new representing a directory. + public ZipEntry MakeDirectoryEntry(string directoryName) + { + return MakeDirectoryEntry(directoryName, true); + } + + /// + /// Make a new for a directory. + /// + /// The raw untransformed name for the new directory + /// If true entry detail is retrieved from the file system if the file exists. + /// Returns a new representing a directory. + public ZipEntry MakeDirectoryEntry(string directoryName, bool useFileSystem) + { + var result = new ZipEntry(nameTransform_.TransformDirectory(directoryName)); + result.IsUnicodeText = isUnicodeText_; + result.Size = 0; + + int externalAttributes = 0; + + DirectoryInfo di = null; + + if (useFileSystem) + { + di = new DirectoryInfo(directoryName); + } + + if ((di != null) && di.Exists) + { + switch (timeSetting_) + { + case TimeSetting.CreateTime: + result.DateTime = di.CreationTime; + break; + + case TimeSetting.CreateTimeUtc: + result.DateTime = di.CreationTimeUtc; + break; + + case TimeSetting.LastAccessTime: + result.DateTime = di.LastAccessTime; + break; + + case TimeSetting.LastAccessTimeUtc: + result.DateTime = di.LastAccessTimeUtc; + break; + + case TimeSetting.LastWriteTime: + result.DateTime = di.LastWriteTime; + break; + + case TimeSetting.LastWriteTimeUtc: + result.DateTime = di.LastWriteTimeUtc; + break; + + case TimeSetting.Fixed: + result.DateTime = fixedDateTime_; + break; + + default: + throw new ZipException("Unhandled time setting in MakeDirectoryEntry"); + } + + externalAttributes = ((int)di.Attributes & getAttributes_); + } + else + { + if (timeSetting_ == TimeSetting.Fixed) + { + result.DateTime = fixedDateTime_; + } + } + + // Always set directory attribute on. 
+ externalAttributes |= (setAttributes_ | 16); + result.ExternalFileAttributes = externalAttributes; + + return result; + } + + #endregion IEntryFactory Members + + #region Instance Fields + + private INameTransform nameTransform_; + private DateTime fixedDateTime_ = DateTime.Now; + private TimeSetting timeSetting_; + private bool isUnicodeText_; + + private int getAttributes_ = -1; + private int setAttributes_; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntryFactory.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntryFactory.cs.meta new file mode 100644 index 0000000..e516f0d --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipEntryFactory.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: cbffce1f64a1e4e7d85f33b78384fa6c +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipException.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipException.cs new file mode 100644 index 0000000..ef8142b --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipException.cs @@ -0,0 +1,54 @@ +using System; +using System.Runtime.Serialization; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// ZipException represents exceptions specific to Zip classes and code. + /// + [Serializable] + public class ZipException : SharpZipBaseException + { + /// + /// Initialise a new instance of . + /// + public ZipException() + { + } + + /// + /// Initialise a new instance of with its message string. + /// + /// A that describes the error. + public ZipException(string message) + : base(message) + { + } + + /// + /// Initialise a new instance of . + /// + /// A that describes the error. + /// The that caused this exception. + public ZipException(string message, Exception innerException) + : base(message, innerException) + { + } + + /// + /// Initializes a new instance of the ZipException class with serialized data. + /// + /// + /// The System.Runtime.Serialization.SerializationInfo that holds the serialized + /// object data about the exception being thrown. + /// + /// + /// The System.Runtime.Serialization.StreamingContext that contains contextual information + /// about the source or destination. 
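A short usage sketch for the ZipEntryFactory above, pinning every entry to one fixed timestamp, which is useful for reproducible archives; the paths are hypothetical:

var factory = new ZipEntryFactory(new DateTime(2020, 1, 1));
ZipEntry file = factory.MakeFileEntry(@"data\readme.txt", "docs/readme.txt", true);
ZipEntry dir = factory.MakeDirectoryEntry("docs", false); // no file system lookup
// Both entries carry the fixed 2020-01-01 timestamp; dir also gets the DOS directory attribute (0x10).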
+ /// + protected ZipException(SerializationInfo info, StreamingContext context) + : base(info, context) + { + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipException.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipException.cs.meta new file mode 100644 index 0000000..2073dba --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipException.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: a3fbfc79132a94b11ad2090c88c5a54b +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipExtraData.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipExtraData.cs new file mode 100644 index 0000000..0535b12 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipExtraData.cs @@ -0,0 +1,979 @@ +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Zip +{ + // TODO: Sort out whether tagged data is useful and what a good implementation might look like. + // Its just a sketch of an idea at the moment. + + /// + /// ExtraData tagged value interface. + /// + public interface ITaggedData + { + /// + /// Get the ID for this tagged data value. + /// + short TagID { get; } + + /// + /// Set the contents of this instance from the data passed. + /// + /// The data to extract contents from. + /// The offset to begin extracting data from. + /// The number of bytes to extract. + void SetData(byte[] data, int offset, int count); + + /// + /// Get the data representing this instance. + /// + /// Returns the data for this instance. + byte[] GetData(); + } + + /// + /// A raw binary tagged value + /// + public class RawTaggedData : ITaggedData + { + /// + /// Initialise a new instance. + /// + /// The tag ID. + public RawTaggedData(short tag) + { + _tag = tag; + } + + #region ITaggedData Members + + /// + /// Get the ID for this tagged data value. + /// + public short TagID + { + get { return _tag; } + set { _tag = value; } + } + + /// + /// Set the data from the raw values provided. + /// + /// The raw data to extract values from. + /// The index to start extracting values from. + /// The number of bytes available. + public void SetData(byte[] data, int offset, int count) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + + _data = new byte[count]; + Array.Copy(data, offset, _data, 0, count); + } + + /// + /// Get the binary data representing this instance. + /// + /// The raw binary data representing this instance. + public byte[] GetData() + { + return _data; + } + + #endregion ITaggedData Members + + /// + /// Get /set the binary data representing this instance. + /// + /// The raw binary data representing this instance. + public byte[] Data + { + get { return _data; } + set { _data = value; } + } + + #region Instance Fields + + /// + /// The tag ID for this instance. + /// + private short _tag; + + private byte[] _data; + + #endregion Instance Fields + } + + /// + /// Class representing extended unix date time values. + /// + public class ExtendedUnixData : ITaggedData + { + /// + /// Flags indicate which values are included in this instance. 
+ /// + [Flags] + public enum Flags : byte + { + /// + /// The modification time is included + /// + ModificationTime = 0x01, + + /// + /// The access time is included + /// + AccessTime = 0x02, + + /// + /// The create time is included. + /// + CreateTime = 0x04, + } + + #region ITaggedData Members + + /// + /// Get the ID + /// + public short TagID + { + get { return 0x5455; } + } + + /// + /// Set the data from the raw values provided. + /// + /// The raw data to extract values from. + /// The index to start extracting values from. + /// The number of bytes available. + public void SetData(byte[] data, int index, int count) + { + using (MemoryStream ms = new MemoryStream(data, index, count, false)) + using (ZipHelperStream helperStream = new ZipHelperStream(ms)) + { + // bit 0 if set, modification time is present + // bit 1 if set, access time is present + // bit 2 if set, creation time is present + + _flags = (Flags)helperStream.ReadByte(); + if (((_flags & Flags.ModificationTime) != 0)) + { + int iTime = helperStream.ReadLEInt(); + + _modificationTime = new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc) + + new TimeSpan(0, 0, 0, iTime, 0); + + // Central-header version is truncated after modification time + if (count <= 5) return; + } + + if ((_flags & Flags.AccessTime) != 0) + { + int iTime = helperStream.ReadLEInt(); + + _lastAccessTime = new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc) + + new TimeSpan(0, 0, 0, iTime, 0); + } + + if ((_flags & Flags.CreateTime) != 0) + { + int iTime = helperStream.ReadLEInt(); + + _createTime = new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc) + + new TimeSpan(0, 0, 0, iTime, 0); + } + } + } + + /// + /// Get the binary data representing this instance. + /// + /// The raw binary data representing this instance. + public byte[] GetData() + { + using (MemoryStream ms = new MemoryStream()) + using (ZipHelperStream helperStream = new ZipHelperStream(ms)) + { + helperStream.IsStreamOwner = false; + helperStream.WriteByte((byte)_flags); // Flags + if ((_flags & Flags.ModificationTime) != 0) + { + TimeSpan span = _modificationTime - new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc); + var seconds = (int)span.TotalSeconds; + helperStream.WriteLEInt(seconds); + } + if ((_flags & Flags.AccessTime) != 0) + { + TimeSpan span = _lastAccessTime - new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc); + var seconds = (int)span.TotalSeconds; + helperStream.WriteLEInt(seconds); + } + if ((_flags & Flags.CreateTime) != 0) + { + TimeSpan span = _createTime - new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc); + var seconds = (int)span.TotalSeconds; + helperStream.WriteLEInt(seconds); + } + return ms.ToArray(); + } + } + + #endregion ITaggedData Members + + /// + /// Test a value to see if is valid and can be represented here. + /// + /// The value to test. + /// Returns true if the value is valid and can be represented; false if not. + /// The standard Unix time is a signed integer data type, directly encoding the Unix time number, + /// which is the number of seconds since 1970-01-01. + /// Being 32 bits means the values here cover a range of about 136 years. + /// The minimum representable time is 1901-12-13 20:45:52, + /// and the maximum representable time is 2038-01-19 03:14:07. 
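The epoch arithmetic used by SetData and GetData above, shown in isolation; the seconds value is arbitrary:

DateTime epoch = new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc);
int unixSeconds = 1000000000;
DateTime utc = epoch + new TimeSpan(0, 0, 0, unixSeconds, 0); // 2001-09-09 01:46:40 UTC
int roundTrip = (int)(utc - epoch).TotalSeconds;              // back to 1000000000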
+ /// + public static bool IsValidValue(DateTime value) + { + // Both bounds must hold: the value has to lie inside the representable 32-bit Unix time range. + return ((value >= new DateTime(1901, 12, 13, 20, 45, 52)) && + (value <= new DateTime(2038, 1, 19, 03, 14, 07))); + } + + /// + /// Get/set the Modification Time + /// + /// + /// + public DateTime ModificationTime + { + get { return _modificationTime; } + set + { + if (!IsValidValue(value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + + _flags |= Flags.ModificationTime; + _modificationTime = value; + } + } + + /// + /// Get/set the Access Time + /// + /// + /// + public DateTime AccessTime + { + get { return _lastAccessTime; } + set + { + if (!IsValidValue(value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + + _flags |= Flags.AccessTime; + _lastAccessTime = value; + } + } + + /// + /// Get/set the Create Time + /// + /// + /// + public DateTime CreateTime + { + get { return _createTime; } + set + { + if (!IsValidValue(value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + + _flags |= Flags.CreateTime; + _createTime = value; + } + } + + /// + /// Get/set the values to include. + /// + public Flags Include + { + get { return _flags; } + set { _flags = value; } + } + + #region Instance Fields + + private Flags _flags; + private DateTime _modificationTime = new DateTime(1970, 1, 1); + private DateTime _lastAccessTime = new DateTime(1970, 1, 1); + private DateTime _createTime = new DateTime(1970, 1, 1); + + #endregion Instance Fields + } + + /// + /// Class handling NT date time values. + /// + public class NTTaggedData : ITaggedData + { + /// + /// Get the ID for this tagged data value. + /// + public short TagID + { + get { return 10; } + } + + /// + /// Set the data from the raw values provided. + /// + /// The raw data to extract values from. + /// The index to start extracting values from. + /// The number of bytes available. + public void SetData(byte[] data, int index, int count) + { + using (MemoryStream ms = new MemoryStream(data, index, count, false)) + using (ZipHelperStream helperStream = new ZipHelperStream(ms)) + { + helperStream.ReadLEInt(); // Reserved + while (helperStream.Position < helperStream.Length) + { + int ntfsTag = helperStream.ReadLEShort(); + int ntfsLength = helperStream.ReadLEShort(); + if (ntfsTag == 1) + { + if (ntfsLength >= 24) + { + long lastModificationTicks = helperStream.ReadLELong(); + _lastModificationTime = DateTime.FromFileTimeUtc(lastModificationTicks); + + long lastAccessTicks = helperStream.ReadLELong(); + _lastAccessTime = DateTime.FromFileTimeUtc(lastAccessTicks); + + long createTimeTicks = helperStream.ReadLELong(); + _createTime = DateTime.FromFileTimeUtc(createTimeTicks); + } + break; + } + else + { + // An unknown NTFS tag so simply skip it. + helperStream.Seek(ntfsLength, SeekOrigin.Current); + } + } + } + } + + /// + /// Get the binary data representing this instance. + /// + /// The raw binary data representing this instance. + public byte[] GetData() + { + using (MemoryStream ms = new MemoryStream()) + using (ZipHelperStream helperStream = new ZipHelperStream(ms)) + { + helperStream.IsStreamOwner = false; + helperStream.WriteLEInt(0); // Reserved + helperStream.WriteLEShort(1); // Tag + helperStream.WriteLEShort(24); // Length = 3 x 8.
+ helperStream.WriteLELong(_lastModificationTime.ToFileTimeUtc()); + helperStream.WriteLELong(_lastAccessTime.ToFileTimeUtc()); + helperStream.WriteLELong(_createTime.ToFileTimeUtc()); + return ms.ToArray(); + } + } + + /// + /// Test a value to see if it is valid and can be represented here. + /// + /// The value to test. + /// Returns true if the value is valid and can be represented; false if not. + /// + /// NTFS filetimes are 64-bit unsigned integers, stored in Intel + /// (least significant byte first) byte order. They determine the + /// number of 1.0E-07 seconds (1/10th microseconds!) past WinNT "epoch", + /// which is "01-Jan-1601 00:00:00 UTC". 28 May 60056 is the upper limit. + /// + public static bool IsValidValue(DateTime value) + { + bool result = true; + try + { + value.ToFileTimeUtc(); + } + catch + { + result = false; + } + return result; + } + + /// + /// Get/set the last modification time. + /// + public DateTime LastModificationTime + { + get { return _lastModificationTime; } + set + { + if (!IsValidValue(value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + _lastModificationTime = value; + } + } + + /// + /// Get/set the create time. + /// + public DateTime CreateTime + { + get { return _createTime; } + set + { + if (!IsValidValue(value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + _createTime = value; + } + } + + /// + /// Get/set the last access time. + /// + public DateTime LastAccessTime + { + get { return _lastAccessTime; } + set + { + if (!IsValidValue(value)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + _lastAccessTime = value; + } + } + + #region Instance Fields + + private DateTime _lastAccessTime = DateTime.FromFileTimeUtc(0); + private DateTime _lastModificationTime = DateTime.FromFileTimeUtc(0); + private DateTime _createTime = DateTime.FromFileTimeUtc(0); + + #endregion Instance Fields + } + + /// + /// A factory that creates tagged data instances. + /// + internal interface ITaggedDataFactory + { + /// + /// Get data for a specific tag value. + /// + /// The tag ID to find. + /// The data to search. + /// The offset to begin extracting data from. + /// The number of bytes to extract. + /// The located value found, or null if not found. + ITaggedData Create(short tag, byte[] data, int offset, int count); + } + + /// + /// + /// A class to handle the extra data field for Zip entries + /// + /// + /// Extra data contains 0 or more values each prefixed by a header tag and length. + /// They contain zero or more bytes of actual data. + /// The data is held internally using a copy on write strategy. This is more efficient but + /// means that extra data created by passing in a byte array can have its values modified by the caller + /// in some circumstances. + /// + sealed public class ZipExtraData : IDisposable + { + #region Constructors + + /// + /// Initialise a default instance. + /// + public ZipExtraData() + { + Clear(); + } + + /// + /// Initialise with known extra data. + /// + /// The extra data. + public ZipExtraData(byte[] data) + { + if (data == null) + { + _data = new byte[0]; + } + else + { + _data = data; + } + } + + #endregion Constructors + + /// + /// Get the raw extra data value + /// + /// Returns the raw byte[] extra data this instance represents. + public byte[] GetEntryData() + { + if (Length > ushort.MaxValue) + { + throw new ZipException("Data exceeds maximum length"); + } + + return (byte[])_data.Clone(); + } + + /// + /// Clear the stored data.
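A sketch of round-tripping one of the tagged types above through ZipExtraData, using the AddEntry and generic GetData members defined below:

var extra = new ZipExtraData();
var unixTimes = new ExtendedUnixData();
unixTimes.ModificationTime = new DateTime(2020, 1, 1); // setter also sets the ModificationTime flag
extra.AddEntry(unixTimes);                             // stores tag 0x5455 plus its payload
ExtendedUnixData readBack = extra.GetData<ExtendedUnixData>(); // non-null, flag and time restored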
+ /// + public void Clear() + { + if ((_data == null) || (_data.Length != 0)) + { + _data = new byte[0]; + } + } + + /// + /// Gets the current extra data length. + /// + public int Length + { + get { return _data.Length; } + } + + /// + /// Get a read-only Stream for the associated tag. + /// + /// The tag to locate data for. + /// Returns a Stream containing tag data or null if no tag was found. + public Stream GetStreamForTag(int tag) + { + Stream result = null; + if (Find(tag)) + { + result = new MemoryStream(_data, _index, _readValueLength, false); + } + return result; + } + + /// + /// Get the tagged data for a tag. + /// + /// The tag to search for. + /// Returns a tagged value or null if none found. + public T GetData<T>() + where T : class, ITaggedData, new() + { + T result = new T(); + if (Find(result.TagID)) + { + result.SetData(_data, _readValueStart, _readValueLength); + return result; + } + else return null; + } + + /// + /// Get the length of the last value found by Find + /// + /// This is only valid if Find has previously returned true. + public int ValueLength + { + get { return _readValueLength; } + } + + /// + /// Get the index for the current read value. + /// + /// This is only valid if Find has previously returned true. + /// Initially the result will be the index of the first byte of actual data. The value is updated after calls to the Read methods. + public int CurrentReadIndex + { + get { return _index; } + } + + /// + /// Get the number of bytes remaining to be read for the current value. + /// + public int UnreadCount + { + get + { + if ((_readValueStart > _data.Length) || + (_readValueStart < 4)) + { + throw new ZipException("Find must be called before calling a Read method"); + } + + return _readValueStart + _readValueLength - _index; + } + } + + /// + /// Find an extra data value + /// + /// The identifier for the value to find. + /// Returns true if the value was found; false otherwise. + public bool Find(int headerID) + { + _readValueStart = _data.Length; + _readValueLength = 0; + _index = 0; + + int localLength = _readValueStart; + int localTag = headerID - 1; + + // Trailing bytes that can't make up an entry (as there aren't enough + // bytes for a tag and length) are ignored! + while ((localTag != headerID) && (_index < _data.Length - 3)) + { + localTag = ReadShortInternal(); + localLength = ReadShortInternal(); + if (localTag != headerID) + { + _index += localLength; + } + } + + bool result = (localTag == headerID) && ((_index + localLength) <= _data.Length); + + if (result) + { + _readValueStart = _index; + _readValueLength = localLength; + } + + return result; + } + + /// + /// Add a new entry to extra data. + /// + /// The value to add. + public void AddEntry(ITaggedData taggedData) + { + if (taggedData == null) + { + throw new ArgumentNullException(nameof(taggedData)); + } + AddEntry(taggedData.TagID, taggedData.GetData()); + } + + /// + /// Add a new entry to extra data + /// + /// The ID for this entry. + /// The data to add. + /// If the ID already exists its contents are replaced. + public void AddEntry(int headerID, byte[] fieldData) + { + if ((headerID > ushort.MaxValue) || (headerID < 0)) + { + throw new ArgumentOutOfRangeException(nameof(headerID)); + } + + int addLength = (fieldData == null) ? 0 : fieldData.Length; + + if (addLength > ushort.MaxValue) + { + throw new ArgumentOutOfRangeException(nameof(fieldData), "exceeds maximum length"); + } + + // Test for new length before adjusting data.
+ int newLength = _data.Length + addLength + 4; + + if (Find(headerID)) + { + newLength -= (ValueLength + 4); + } + + if (newLength > ushort.MaxValue) + { + throw new ZipException("Data exceeds maximum length"); + } + + Delete(headerID); + + byte[] newData = new byte[newLength]; + _data.CopyTo(newData, 0); + int index = _data.Length; + _data = newData; + SetShort(ref index, headerID); + SetShort(ref index, addLength); + if (fieldData != null) + { + fieldData.CopyTo(newData, index); + } + } + + /// + /// Start adding a new entry. + /// + /// Add data using , , , or . + /// The new entry is completed and actually added by calling + /// + public void StartNewEntry() + { + _newEntry = new MemoryStream(); + } + + /// + /// Add entry data added since using the ID passed. + /// + /// The identifier to use for this entry. + public void AddNewEntry(int headerID) + { + byte[] newData = _newEntry.ToArray(); + _newEntry = null; + AddEntry(headerID, newData); + } + + /// + /// Add a byte of data to the pending new entry. + /// + /// The byte to add. + /// + public void AddData(byte data) + { + _newEntry.WriteByte(data); + } + + /// + /// Add data to a pending new entry. + /// + /// The data to add. + /// + public void AddData(byte[] data) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + + _newEntry.Write(data, 0, data.Length); + } + + /// + /// Add a short value in little endian order to the pending new entry. + /// + /// The data to add. + /// + public void AddLeShort(int toAdd) + { + unchecked + { + _newEntry.WriteByte((byte)toAdd); + _newEntry.WriteByte((byte)(toAdd >> 8)); + } + } + + /// + /// Add an integer value in little endian order to the pending new entry. + /// + /// The data to add. + /// + public void AddLeInt(int toAdd) + { + unchecked + { + AddLeShort((short)toAdd); + AddLeShort((short)(toAdd >> 16)); + } + } + + /// + /// Add a long value in little endian order to the pending new entry. + /// + /// The data to add. + /// + public void AddLeLong(long toAdd) + { + unchecked + { + AddLeInt((int)(toAdd & 0xffffffff)); + AddLeInt((int)(toAdd >> 32)); + } + } + + /// + /// Delete an extra data field. + /// + /// The identifier of the field to delete. + /// Returns true if the field was found and deleted. + public bool Delete(int headerID) + { + bool result = false; + + if (Find(headerID)) + { + result = true; + int trueStart = _readValueStart - 4; + + byte[] newData = new byte[_data.Length - (ValueLength + 4)]; + Array.Copy(_data, 0, newData, 0, trueStart); + + int trueEnd = trueStart + ValueLength + 4; + Array.Copy(_data, trueEnd, newData, trueStart, _data.Length - trueEnd); + _data = newData; + } + return result; + } + + #region Reading Support + + /// + /// Read a long in little endian form from the last found data value + /// + /// Returns the long value read. + public long ReadLong() + { + ReadCheck(8); + return (ReadInt() & 0xffffffff) | (((long)ReadInt()) << 32); + } + + /// + /// Read an integer in little endian form from the last found data value. + /// + /// Returns the integer read. + public int ReadInt() + { + ReadCheck(4); + + int result = _data[_index] + (_data[_index + 1] << 8) + + (_data[_index + 2] << 16) + (_data[_index + 3] << 24); + _index += 4; + return result; + } + + /// + /// Read a short value in little endian form from the last found data value. + /// + /// Returns the short value read. 
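Composing a custom record with the builder methods above and locating it again with Find; the tag value 0x4242 is a hypothetical private ID:

var extra = new ZipExtraData();
extra.StartNewEntry();
extra.AddLeInt(42);              // little endian payload
extra.AddNewEntry(0x4242);       // completes the pending entry under this tag
bool found = extra.Find(0x4242); // true, and ValueLength is now 4
int value = extra.ReadInt();     // 42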
+ public int ReadShort() + { + ReadCheck(2); + int result = _data[_index] + (_data[_index + 1] << 8); + _index += 2; + return result; + } + + /// + /// Read a byte from an extra data + /// + /// The byte value read or -1 if the end of data has been reached. + public int ReadByte() + { + int result = -1; + if ((_index < _data.Length) && (_readValueStart + _readValueLength > _index)) + { + result = _data[_index]; + _index += 1; + } + return result; + } + + /// + /// Skip data during reading. + /// + /// The number of bytes to skip. + public void Skip(int amount) + { + ReadCheck(amount); + _index += amount; + } + + private void ReadCheck(int length) + { + if ((_readValueStart > _data.Length) || + (_readValueStart < 4)) + { + throw new ZipException("Find must be called before calling a Read method"); + } + + if (_index > _readValueStart + _readValueLength - length) + { + throw new ZipException("End of extra data"); + } + + if (_index + length < 4) + { + throw new ZipException("Cannot read before start of tag"); + } + } + + /// + /// Internal form of that reads data at any location. + /// + /// Returns the short value read. + private int ReadShortInternal() + { + if (_index > _data.Length - 2) + { + throw new ZipException("End of extra data"); + } + + int result = _data[_index] + (_data[_index + 1] << 8); + _index += 2; + return result; + } + + private void SetShort(ref int index, int source) + { + _data[index] = (byte)source; + _data[index + 1] = (byte)(source >> 8); + index += 2; + } + + #endregion Reading Support + + #region IDisposable Members + + /// + /// Dispose of this instance. + /// + public void Dispose() + { + if (_newEntry != null) + { + _newEntry.Dispose(); + } + } + + #endregion IDisposable Members + + #region Instance Fields + + private int _index; + private int _readValueStart; + private int _readValueLength; + + private MemoryStream _newEntry; + private byte[] _data; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipExtraData.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipExtraData.cs.meta new file mode 100644 index 0000000..0ea3c7e --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipExtraData.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 4d82abd82be7642ca99b0c678f8fb9eb +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipFile.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipFile.cs new file mode 100644 index 0000000..c12a53d --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipFile.cs @@ -0,0 +1,4916 @@ +using ICSharpCode.SharpZipLib.Checksum; +using ICSharpCode.SharpZipLib.Core; +using ICSharpCode.SharpZipLib.Encryption; +using ICSharpCode.SharpZipLib.Zip.Compression; +using ICSharpCode.SharpZipLib.Zip.Compression.Streams; +using System; +using System.Collections; +using System.Collections.Generic; +using System.IO; +using System.Security.Cryptography; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Zip +{ + #region Keys Required Event Args + + /// + /// Arguments used with KeysRequiredEvent + /// + public class KeysRequiredEventArgs : EventArgs + { + #region Constructors + + /// + /// Initialise a new instance of + /// + /// The name of the file for which keys are required. 
+ public KeysRequiredEventArgs(string name) + { + fileName = name; + } + + /// + /// Initialise a new instance of + /// + /// The name of the file for which keys are required. + /// The current key value. + public KeysRequiredEventArgs(string name, byte[] keyValue) + { + fileName = name; + key = keyValue; + } + + #endregion Constructors + + #region Properties + + /// + /// Gets the name of the file for which keys are required. + /// + public string FileName + { + get { return fileName; } + } + + /// + /// Gets or sets the key value + /// + public byte[] Key + { + get { return key; } + set { key = value; } + } + + #endregion Properties + + #region Instance Fields + + private readonly string fileName; + private byte[] key; + + #endregion Instance Fields + } + + #endregion Keys Required Event Args + + #region Test Definitions + + /// + /// The strategy to apply to testing. + /// + public enum TestStrategy + { + /// + /// Find the first error only. + /// + FindFirstError, + + /// + /// Find all possible errors. + /// + FindAllErrors, + } + + /// + /// The operation in progress reported by a during testing. + /// + /// TestArchive + public enum TestOperation + { + /// + /// Setting up testing. + /// + Initialising, + + /// + /// Testing an individual entries header + /// + EntryHeader, + + /// + /// Testing an individual entries data + /// + EntryData, + + /// + /// Testing an individual entry has completed. + /// + EntryComplete, + + /// + /// Running miscellaneous tests + /// + MiscellaneousTests, + + /// + /// Testing is complete + /// + Complete, + } + + /// + /// Status returned by during testing. + /// + /// TestArchive + public class TestStatus + { + #region Constructors + + /// + /// Initialise a new instance of + /// + /// The this status applies to. + public TestStatus(ZipFile file) + { + file_ = file; + } + + #endregion Constructors + + #region Properties + + /// + /// Get the current in progress. + /// + public TestOperation Operation + { + get { return operation_; } + } + + /// + /// Get the this status is applicable to. + /// + public ZipFile File + { + get { return file_; } + } + + /// + /// Get the current/last entry tested. + /// + public ZipEntry Entry + { + get { return entry_; } + } + + /// + /// Get the number of errors detected so far. + /// + public int ErrorCount + { + get { return errorCount_; } + } + + /// + /// Get the number of bytes tested so far for the current entry. + /// + public long BytesTested + { + get { return bytesTested_; } + } + + /// + /// Get a value indicating whether the last entry test was valid. + /// + public bool EntryValid + { + get { return entryValid_; } + } + + #endregion Properties + + #region Internal API + + internal void AddError() + { + errorCount_++; + entryValid_ = false; + } + + internal void SetOperation(TestOperation operation) + { + operation_ = operation; + } + + internal void SetEntry(ZipEntry entry) + { + entry_ = entry; + entryValid_ = true; + bytesTested_ = 0; + } + + internal void SetBytesTested(long value) + { + bytesTested_ = value; + } + + #endregion Internal API + + #region Instance Fields + + private readonly ZipFile file_; + private ZipEntry entry_; + private bool entryValid_; + private int errorCount_; + private long bytesTested_; + private TestOperation operation_; + + #endregion Instance Fields + } + + /// + /// Delegate invoked during testing if supplied indicating current progress and status. + /// + /// If the message is non-null an error has occured. 
If the message is null + /// the operation as found in status has started. + public delegate void ZipTestResultHandler(TestStatus status, string message); + + #endregion Test Definitions + + #region Update Definitions + + /// + /// The possible ways of applying updates to an archive. + /// + public enum FileUpdateMode + { + /// + /// Perform all updates on temporary files ensuring that the original file is saved. + /// + Safe, + + /// + /// Update the archive directly, which is faster but less safe. + /// + Direct, + } + + #endregion Update Definitions + + #region ZipFile Class + + /// + /// This class represents a Zip archive. You can ask for the contained + /// entries, or get an input stream for a file entry. The entry is + /// automatically decompressed. + /// + /// You can also update the archive adding or deleting entries. + /// + /// This class is thread safe for input: You can open input streams for arbitrary + /// entries in different threads. + ///
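A handler matching the ZipTestResultHandler contract above (a non-null message signals an error, a null message reports progress); the logging target is illustrative:

void OnTestResult(TestStatus status, string message)
{
    if (message != null)
        Console.WriteLine("Error in '" + (status.Entry?.Name ?? "<archive>") + "': " + message);
    else if (status.Operation == TestOperation.EntryData)
        Console.WriteLine(status.Entry.Name + ": " + status.BytesTested + " bytes tested");
}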
+ ///
Author of the original java version : Jochen Hoenicke + ///
+ /// + /// + /// using System; + /// using System.Text; + /// using System.Collections; + /// using System.IO; + /// + /// using ICSharpCode.SharpZipLib.Zip; + /// + /// class MainClass + /// { + /// static public void Main(string[] args) + /// { + /// using (ZipFile zFile = new ZipFile(args[0])) { + /// Console.WriteLine("Listing of : " + zFile.Name); + /// Console.WriteLine(""); + /// Console.WriteLine("Raw Size Size Date Time Name"); + /// Console.WriteLine("-------- -------- -------- ------ ---------"); + /// foreach (ZipEntry e in zFile) { + /// if ( e.IsFile ) { + /// DateTime d = e.DateTime; + /// Console.WriteLine("{0, -10}{1, -10}{2} {3} {4}", e.Size, e.CompressedSize, + /// d.ToString("dd-MM-yy"), d.ToString("HH:mm"), + /// e.Name); + /// } + /// } + /// } + /// } + /// } + /// + /// + public class ZipFile : IEnumerable, IDisposable + { + #region KeyHandling + + /// + /// Delegate for handling keys/password setting during compression/decompression. + /// + public delegate void KeysRequiredEventHandler( + object sender, + KeysRequiredEventArgs e + ); + + /// + /// Event handler for handling encryption keys. + /// + public KeysRequiredEventHandler KeysRequired; + + /// + /// Handles getting of encryption keys when required. + /// + /// The file for which encryption keys are required. + private void OnKeysRequired(string fileName) + { + if (KeysRequired != null) + { + var krea = new KeysRequiredEventArgs(fileName, key); + KeysRequired(this, krea); + key = krea.Key; + } + } + + /// + /// Get/set the encryption key value. + /// + private byte[] Key + { + get { return key; } + set { key = value; } + } + + /// + /// Password to be used for encrypting/decrypting files. + /// + /// Set to null if no password is required. + public string Password + { + set + { + if (string.IsNullOrEmpty(value)) + { + key = null; + } + else + { + key = PkzipClassic.GenerateKeys(ZipStrings.ConvertToArray(value)); + } + + rawPassword_ = value; + } + } + + /// + /// Get a value indicating whether encryption keys are currently available. + /// + private bool HaveKeys + { + get { return key != null; } + } + + #endregion KeyHandling + + #region Constructors + + /// + /// Opens a Zip file with the given name for reading. + /// + /// The name of the file to open. + /// The argument supplied is null. + /// + /// An i/o error occurs + /// + /// + /// The file doesn't contain a valid zip archive. + /// + public ZipFile(string name) + { + name_ = name ?? throw new ArgumentNullException(nameof(name)); + + baseStream_ = File.Open(name, FileMode.Open, FileAccess.Read, FileShare.Read); + isStreamOwner = true; + + try + { + ReadEntries(); + } + catch + { + DisposeInternal(true); + throw; + } + } + + /// + /// Opens a Zip file reading the given . + /// + /// The to read archive data from. + /// The supplied argument is null. + /// + /// An i/o error occurs. + /// + /// + /// The file doesn't contain a valid zip archive. + /// + public ZipFile(FileStream file) : + this(file, false) + { + + } + + /// + /// Opens a Zip file reading the given . + /// + /// The to read archive data from. + /// true to leave the file open when the ZipFile is disposed, false to dispose of it + /// The supplied argument is null. + /// + /// An i/o error occurs. + /// + /// + /// The file doesn't contain a valid zip archive. 
+ /// + public ZipFile(FileStream file, bool leaveOpen) + { + if (file == null) + { + throw new ArgumentNullException(nameof(file)); + } + + if (!file.CanSeek) + { + throw new ArgumentException("Stream is not seekable", nameof(file)); + } + + baseStream_ = file; + name_ = file.Name; + isStreamOwner = !leaveOpen; + + try + { + ReadEntries(); + } + catch + { + DisposeInternal(true); + throw; + } + } + + /// + /// Opens a Zip file reading the given . + /// + /// The to read archive data from. + /// + /// An i/o error occurs + /// + /// + /// The stream doesn't contain a valid zip archive.
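A usage sketch for the constructors above together with the write-only Password property; the archive name, entry name and password are hypothetical, and GetEntry/GetInputStream are defined further down in this file:

using (var zip = new ZipFile("archive.zip"))
{
    zip.Password = "secret"; // null or empty clears the keys
    ZipEntry entry = zip.GetEntry("docs/readme.txt");
    if (entry != null)
    {
        using (Stream stream = zip.GetInputStream(entry))
        {
            // read decompressed (and, where needed, decrypted) data here
        }
    }
}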
+ ///
+ /// + /// The stream doesnt support seeking. + /// + /// + /// The stream argument is null. + /// + public ZipFile(Stream stream) : + this(stream, false) + { + + } + + /// + /// Opens a Zip file reading the given . + /// + /// The to read archive data from. + /// true to leave the stream open when the ZipFile is disposed, false to dispose of it + /// + /// An i/o error occurs + /// + /// + /// The stream doesn't contain a valid zip archive.
+ ///
+ /// + /// The stream doesnt support seeking. + /// + /// + /// The stream argument is null. + /// + public ZipFile(Stream stream, bool leaveOpen) + { + if (stream == null) + { + throw new ArgumentNullException(nameof(stream)); + } + + if (!stream.CanSeek) + { + throw new ArgumentException("Stream is not seekable", nameof(stream)); + } + + baseStream_ = stream; + isStreamOwner = !leaveOpen; + + if (baseStream_.Length > 0) + { + try + { + ReadEntries(); + } + catch + { + DisposeInternal(true); + throw; + } + } + else + { + entries_ = new ZipEntry[0]; + isNewArchive_ = true; + } + } + + /// + /// Initialises a default instance with no entries and no file storage. + /// + internal ZipFile() + { + entries_ = new ZipEntry[0]; + isNewArchive_ = true; + } + + #endregion Constructors + + #region Destructors and Closing + + /// + /// Finalize this instance. + /// + ~ZipFile() + { + Dispose(false); + } + + /// + /// Closes the ZipFile. If the stream is owned then this also closes the underlying input stream. + /// Once closed, no further instance methods should be called. + /// + /// + /// An i/o error occurs. + /// + public void Close() + { + DisposeInternal(true); + GC.SuppressFinalize(this); + } + + #endregion Destructors and Closing + + #region Creators + + /// + /// Create a new whose data will be stored in a file. + /// + /// The name of the archive to create. + /// Returns the newly created + /// is null + public static ZipFile Create(string fileName) + { + if (fileName == null) + { + throw new ArgumentNullException(nameof(fileName)); + } + + FileStream fs = File.Create(fileName); + + return new ZipFile + { + name_ = fileName, + baseStream_ = fs, + isStreamOwner = true + }; + } + + /// + /// Create a new whose data will be stored on a stream. + /// + /// The stream providing data storage. + /// Returns the newly created + /// is null + /// doesnt support writing. + public static ZipFile Create(Stream outStream) + { + if (outStream == null) + { + throw new ArgumentNullException(nameof(outStream)); + } + + if (!outStream.CanWrite) + { + throw new ArgumentException("Stream is not writeable", nameof(outStream)); + } + + if (!outStream.CanSeek) + { + throw new ArgumentException("Stream is not seekable", nameof(outStream)); + } + + var result = new ZipFile + { + baseStream_ = outStream + }; + return result; + } + + #endregion Creators + + #region Properties + + /// + /// Get/set a flag indicating if the underlying stream is owned by the ZipFile instance. + /// If the flag is true then the stream will be closed when Close is called. + /// + /// + /// The default value is true in all cases. + /// + public bool IsStreamOwner + { + get { return isStreamOwner; } + set { isStreamOwner = value; } + } + + /// + /// Get a value indicating whether + /// this archive is embedded in another file or not. + /// + public bool IsEmbeddedArchive + { + // Not strictly correct in all circumstances currently + get { return offsetOfFirstEntry > 0; } + } + + /// + /// Get a value indicating that this archive is a new one. + /// + public bool IsNewArchive + { + get { return isNewArchive_; } + } + + /// + /// Gets the comment for the zip file. + /// + public string ZipFileComment + { + get { return comment_; } + } + + /// + /// Gets the name of this zip file. + /// + public string Name + { + get { return name_; } + } + + /// + /// Gets the number of entries in this zip file. + /// + /// + /// The Zip file has been closed. 
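The two Create overloads above in use; Create(Stream) requires a writable, seekable stream, which MemoryStream satisfies (the file path is hypothetical):

using (ZipFile fileBacked = ZipFile.Create("new.zip")) // file-backed archive
{
    // entries would be added through the update API before disposing
}

var buffer = new MemoryStream();
using (ZipFile streamBacked = ZipFile.Create(buffer))
{
    streamBacked.IsStreamOwner = false; // keep the MemoryStream usable afterwards
}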
+ /// + [Obsolete("Use the Count property instead")] + public int Size + { + get + { + return entries_.Length; + } + } + + /// + /// Get the number of entries contained in this . + /// + public long Count + { + get + { + return entries_.Length; + } + } + + /// + /// Indexer property for ZipEntries + /// + [System.Runtime.CompilerServices.IndexerNameAttribute("EntryByIndex")] + public ZipEntry this[int index] + { + get + { + return (ZipEntry)entries_[index].Clone(); + } + } + + #endregion Properties + + #region Input Handling + + /// + /// Gets an enumerator for the Zip entries in this Zip file. + /// + /// Returns an for this archive. + /// + /// The Zip file has been closed. + /// + public IEnumerator GetEnumerator() + { + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + return new ZipEntryEnumerator(entries_); + } + + /// + /// Return the index of the entry with a matching name + /// + /// Entry name to find + /// If true the comparison is case insensitive + /// The index position of the matching entry or -1 if not found + /// + /// The Zip file has been closed. + /// + public int FindEntry(string name, bool ignoreCase) + { + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + // TODO: This will be slow as the next ice age for huge archives! + for (int i = 0; i < entries_.Length; i++) + { + if (string.Compare(name, entries_[i].Name, ignoreCase ? StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal) == 0) + { + return i; + } + } + return -1; + } + + /// + /// Searches for a zip entry in this archive with the given name. + /// String comparisons are case insensitive + /// + /// + /// The name to find. May contain directory components separated by slashes ('/'). + /// + /// + /// A clone of the zip entry, or null if no entry with that name exists. + /// + /// + /// The Zip file has been closed. + /// + public ZipEntry GetEntry(string name) + { + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + int index = FindEntry(name, true); + return (index >= 0) ? (ZipEntry)entries_[index].Clone() : null; + } + + /// + /// Gets an input stream for reading the given zip entry data in an uncompressed form. + /// Normally the should be an entry returned by GetEntry(). + /// + /// The to obtain a data for + /// An input containing data for this + /// + /// The ZipFile has already been closed + /// + /// + /// The compression method for the entry is unknown + /// + /// + /// The entry is not found in the ZipFile + /// + public Stream GetInputStream(ZipEntry entry) + { + if (entry == null) + { + throw new ArgumentNullException(nameof(entry)); + } + + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + long index = entry.ZipFileIndex; + if ((index < 0) || (index >= entries_.Length) || (entries_[index].Name != entry.Name)) + { + index = FindEntry(entry.Name, true); + if (index < 0) + { + throw new ZipException("Entry cannot be found"); + } + } + return GetInputStream(index); + } + + /// + /// Creates an input stream reading a zip entry + /// + /// The index of the entry to obtain an input stream for. 
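FindEntry above is a linear scan and the indexer returns clones, so an index is worth caching for repeated access; a short sketch with hypothetical names:

using (var zip = new ZipFile("archive.zip"))
{
    int index = zip.FindEntry("docs/readme.txt", true); // case-insensitive lookup
    if (index >= 0)
    {
        ZipEntry entry = zip[index]; // a clone of the stored entry
        using (Stream stream = zip.GetInputStream(index))
        {
            // stream yields the uncompressed entry data
        }
    }
}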
+ /// + /// An input containing data for this + /// + /// + /// The ZipFile has already been closed + /// + /// + /// The compression method for the entry is unknown + /// + /// + /// The entry is not found in the ZipFile + /// + public Stream GetInputStream(long entryIndex) + { + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + long start = LocateEntry(entries_[entryIndex]); + CompressionMethod method = entries_[entryIndex].CompressionMethod; + Stream result = new PartialInputStream(this, start, entries_[entryIndex].CompressedSize); + + if (entries_[entryIndex].IsCrypted == true) + { + result = CreateAndInitDecryptionStream(result, entries_[entryIndex]); + if (result == null) + { + throw new ZipException("Unable to decrypt this entry"); + } + } + + switch (method) + { + case CompressionMethod.Stored: + // read as is. + break; + + case CompressionMethod.Deflated: + // No need to worry about ownership and closing as underlying stream close does nothing. + result = new InflaterInputStream(result, new Inflater(true)); + break; + + default: + throw new ZipException("Unsupported compression method " + method); + } + + return result; + } + + #endregion Input Handling + + #region Archive Testing + + /// + /// Test an archive for integrity/validity + /// + /// Perform low level data Crc check + /// true if all tests pass, false otherwise + /// Testing will terminate on the first error found. + public bool TestArchive(bool testData) + { + return TestArchive(testData, TestStrategy.FindFirstError, null); + } + + /// + /// Test an archive for integrity/validity + /// + /// Perform low level data Crc check + /// The to apply. + /// The handler to call during testing. + /// true if all tests pass, false otherwise + /// The object has already been closed. + public bool TestArchive(bool testData, TestStrategy strategy, ZipTestResultHandler resultHandler) + { + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + var status = new TestStatus(this); + + resultHandler?.Invoke(status, null); + + HeaderTest test = testData ? 
(HeaderTest.Header | HeaderTest.Extract) : HeaderTest.Header; + + bool testing = true; + + try + { + int entryIndex = 0; + + while (testing && (entryIndex < Count)) + { + if (resultHandler != null) + { + status.SetEntry(this[entryIndex]); + status.SetOperation(TestOperation.EntryHeader); + resultHandler(status, null); + } + + try + { + TestLocalHeader(this[entryIndex], test); + } + catch (ZipException ex) + { + status.AddError(); + + resultHandler?.Invoke(status, $"Exception during test - '{ex.Message}'"); + + testing &= strategy != TestStrategy.FindFirstError; + } + + if (testing && testData && this[entryIndex].IsFile) + { + if (resultHandler != null) + { + status.SetOperation(TestOperation.EntryData); + resultHandler(status, null); + } + + var crc = new Crc32(); + + using (Stream entryStream = this.GetInputStream(this[entryIndex])) + { + byte[] buffer = new byte[4096]; + long totalBytes = 0; + int bytesRead; + while ((bytesRead = entryStream.Read(buffer, 0, buffer.Length)) > 0) + { + crc.Update(new ArraySegment<byte>(buffer, 0, bytesRead)); + + if (resultHandler != null) + { + totalBytes += bytesRead; + status.SetBytesTested(totalBytes); + resultHandler(status, null); + } + } + } + + if (this[entryIndex].Crc != crc.Value) + { + status.AddError(); + + resultHandler?.Invoke(status, "CRC mismatch"); + + testing &= strategy != TestStrategy.FindFirstError; + } + + if ((this[entryIndex].Flags & (int)GeneralBitFlags.Descriptor) != 0) + { + var helper = new ZipHelperStream(baseStream_); + var data = new DescriptorData(); + helper.ReadDataDescriptor(this[entryIndex].LocalHeaderRequiresZip64, data); + if (this[entryIndex].Crc != data.Crc) + { + status.AddError(); + } + + if (this[entryIndex].CompressedSize != data.CompressedSize) + { + status.AddError(); + } + + if (this[entryIndex].Size != data.Size) + { + status.AddError(); + } + } + } + + if (resultHandler != null) + { + status.SetOperation(TestOperation.EntryComplete); + resultHandler(status, null); + } + + entryIndex += 1; + } + + if (resultHandler != null) + { + status.SetOperation(TestOperation.MiscellaneousTests); + resultHandler(status, null); + } + + // TODO: the 'Corrina Johns' test where local headers are missing from + // the central directory. They are therefore invisible to many archivers. + } + catch (Exception ex) + { + status.AddError(); + + resultHandler?.Invoke(status, $"Exception during test - '{ex.Message}'"); + } + + if (resultHandler != null) + { + status.SetOperation(TestOperation.Complete); + status.SetEntry(null); + resultHandler(status, null); + } + + return (status.ErrorCount == 0); + } + + [Flags] + private enum HeaderTest + { + Extract = 0x01, // Check that this header represents an entry whose data can be extracted + Header = 0x02, // Check that this header's contents are valid + } + + /// + /// Test a local header against that provided from the central directory. + /// + /// + /// The entry to test against + /// + /// The type of tests to carry out.
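Driving the full TestArchive overload above with data checks enabled; OnTestResult refers to the handler sketched earlier, and FindAllErrors keeps testing past the first failure:

using (var zip = new ZipFile("archive.zip"))
{
    bool ok = zip.TestArchive(true, TestStrategy.FindAllErrors, OnTestResult);
    Console.WriteLine(ok ? "Archive passed" : "Archive has errors");
}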
+ /// The offset of the entries data in the file + private long TestLocalHeader(ZipEntry entry, HeaderTest tests) + { + lock (baseStream_) + { + bool testHeader = (tests & HeaderTest.Header) != 0; + bool testData = (tests & HeaderTest.Extract) != 0; + + var entryAbsOffset = offsetOfFirstEntry + entry.Offset; + + baseStream_.Seek(entryAbsOffset, SeekOrigin.Begin); + var signature = (int)ReadLEUint(); + + if (signature != ZipConstants.LocalHeaderSignature) + { + throw new ZipException(string.Format("Wrong local header signature at 0x{0:x}, expected 0x{1:x8}, actual 0x{2:x8}", + entryAbsOffset, ZipConstants.LocalHeaderSignature, signature)); + } + + var extractVersion = (short)(ReadLEUshort() & 0x00ff); + var localFlags = (short)ReadLEUshort(); + var compressionMethod = (short)ReadLEUshort(); + var fileTime = (short)ReadLEUshort(); + var fileDate = (short)ReadLEUshort(); + uint crcValue = ReadLEUint(); + long compressedSize = ReadLEUint(); + long size = ReadLEUint(); + int storedNameLength = ReadLEUshort(); + int extraDataLength = ReadLEUshort(); + + byte[] nameData = new byte[storedNameLength]; + StreamUtils.ReadFully(baseStream_, nameData); + + byte[] extraData = new byte[extraDataLength]; + StreamUtils.ReadFully(baseStream_, extraData); + + var localExtraData = new ZipExtraData(extraData); + + // Extra data / zip64 checks + if (localExtraData.Find(1)) + { + // 2010-03-04 Forum 10512: removed checks for version >= ZipConstants.VersionZip64 + // and size or compressedSize = MaxValue, due to rogue creators. + + size = localExtraData.ReadLong(); + compressedSize = localExtraData.ReadLong(); + + if ((localFlags & (int)GeneralBitFlags.Descriptor) != 0) + { + // These may be valid if patched later + if ((size != -1) && (size != entry.Size)) + { + throw new ZipException("Size invalid for descriptor"); + } + + if ((compressedSize != -1) && (compressedSize != entry.CompressedSize)) + { + throw new ZipException("Compressed size invalid for descriptor"); + } + } + } + else + { + // No zip64 extra data but entry requires it. + if ((extractVersion >= ZipConstants.VersionZip64) && + (((uint)size == uint.MaxValue) || ((uint)compressedSize == uint.MaxValue))) + { + throw new ZipException("Required Zip64 extended information missing"); + } + } + + if (testData) + { + if (entry.IsFile) + { + if (!entry.IsCompressionMethodSupported()) + { + throw new ZipException("Compression method not supported"); + } + + if ((extractVersion > ZipConstants.VersionMadeBy) + || ((extractVersion > 20) && (extractVersion < ZipConstants.VersionZip64))) + { + throw new ZipException(string.Format("Version required to extract this entry not supported ({0})", extractVersion)); + } + + if ((localFlags & (int)(GeneralBitFlags.Patched | GeneralBitFlags.StrongEncryption | GeneralBitFlags.EnhancedCompress | GeneralBitFlags.HeaderMasked)) != 0) + { + throw new ZipException("The library does not support the zip version required to extract this entry"); + } + } + } + + if (testHeader) + { + if ((extractVersion <= 63) && // Ignore later versions as we dont know about them.. 
+ (extractVersion != 10) && + (extractVersion != 11) && + (extractVersion != 20) && + (extractVersion != 21) && + (extractVersion != 25) && + (extractVersion != 27) && + (extractVersion != 45) && + (extractVersion != 46) && + (extractVersion != 50) && + (extractVersion != 51) && + (extractVersion != 52) && + (extractVersion != 61) && + (extractVersion != 62) && + (extractVersion != 63) + ) + { + throw new ZipException(string.Format("Version required to extract this entry is invalid ({0})", extractVersion)); + } + + // Local entry flags dont have reserved bit set on. + if ((localFlags & (int)(GeneralBitFlags.ReservedPKware4 | GeneralBitFlags.ReservedPkware14 | GeneralBitFlags.ReservedPkware15)) != 0) + { + throw new ZipException("Reserved bit flags cannot be set."); + } + + // Encryption requires extract version >= 20 + if (((localFlags & (int)GeneralBitFlags.Encrypted) != 0) && (extractVersion < 20)) + { + throw new ZipException(string.Format("Version required to extract this entry is too low for encryption ({0})", extractVersion)); + } + + // Strong encryption requires encryption flag to be set and extract version >= 50. + if ((localFlags & (int)GeneralBitFlags.StrongEncryption) != 0) + { + if ((localFlags & (int)GeneralBitFlags.Encrypted) == 0) + { + throw new ZipException("Strong encryption flag set but encryption flag is not set"); + } + + if (extractVersion < 50) + { + throw new ZipException(string.Format("Version required to extract this entry is too low for encryption ({0})", extractVersion)); + } + } + + // Patched entries require extract version >= 27 + if (((localFlags & (int)GeneralBitFlags.Patched) != 0) && (extractVersion < 27)) + { + throw new ZipException(string.Format("Patched data requires higher version than ({0})", extractVersion)); + } + + // Central header flags match local entry flags. + if (localFlags != entry.Flags) + { + throw new ZipException("Central header/local header flags mismatch"); + } + + // Central header compression method matches local entry + if (entry.CompressionMethod != (CompressionMethod)compressionMethod) + { + throw new ZipException("Central header/local header compression method mismatch"); + } + + if (entry.Version != extractVersion) + { + throw new ZipException("Extract version mismatch"); + } + + // Strong encryption and extract version match + if ((localFlags & (int)GeneralBitFlags.StrongEncryption) != 0) + { + if (extractVersion < 62) + { + throw new ZipException("Strong encryption flag set but version not high enough"); + } + } + + if ((localFlags & (int)GeneralBitFlags.HeaderMasked) != 0) + { + if ((fileTime != 0) || (fileDate != 0)) + { + throw new ZipException("Header masked set but date/time values non-zero"); + } + } + + if ((localFlags & (int)GeneralBitFlags.Descriptor) == 0) + { + if (crcValue != (uint)entry.Crc) + { + throw new ZipException("Central header/local header crc mismatch"); + } + } + + // Crc valid for empty entry. + // This will also apply to streamed entries where size isnt known and the header cant be patched + if ((size == 0) && (compressedSize == 0)) + { + if (crcValue != 0) + { + throw new ZipException("Invalid CRC for empty entry"); + } + } + + // TODO: make test more correct... can't compare lengths as was done originally as this can fail for MBCS strings + // Assuming a code page at this point is not valid? 
Best is to store the name length in the ZipEntry probably + if (entry.Name.Length > storedNameLength) + { + throw new ZipException("File name length mismatch"); + } + + // Name data has already been read convert it and compare. + string localName = ZipStrings.ConvertToStringExt(localFlags, nameData); + + // Central directory and local entry name match + if (localName != entry.Name) + { + throw new ZipException("Central header and local header file name mismatch"); + } + + // Directories have zero actual size but can have compressed size + if (entry.IsDirectory) + { + if (size > 0) + { + throw new ZipException("Directory cannot have size"); + } + + // There may be other cases where the compressed size can be greater than this? + // If so until details are known we will be strict. + if (entry.IsCrypted) + { + if (compressedSize > ZipConstants.CryptoHeaderSize + 2) + { + throw new ZipException("Directory compressed size invalid"); + } + } + else if (compressedSize > 2) + { + // When not compressed the directory size can validly be 2 bytes + // if the true size wasnt known when data was originally being written. + // NOTE: Versions of the library 0.85.4 and earlier always added 2 bytes + throw new ZipException("Directory compressed size invalid"); + } + } + + if (!ZipNameTransform.IsValidName(localName, true)) + { + throw new ZipException("Name is invalid"); + } + } + + // Tests that apply to both data and header. + + // Size can be verified only if it is known in the local header. + // it will always be known in the central header. + if (((localFlags & (int)GeneralBitFlags.Descriptor) == 0) || + ((size > 0 || compressedSize > 0) && entry.Size > 0)) + { + if ((size != 0) + && (size != entry.Size)) + { + throw new ZipException( + string.Format("Size mismatch between central header({0}) and local header({1})", + entry.Size, size)); + } + + if ((compressedSize != 0) + && (compressedSize != entry.CompressedSize && compressedSize != 0xFFFFFFFF && compressedSize != -1)) + { + throw new ZipException( + string.Format("Compressed size mismatch between central header({0}) and local header({1})", + entry.CompressedSize, compressedSize)); + } + } + + int extraLength = storedNameLength + extraDataLength; + return offsetOfFirstEntry + entry.Offset + ZipConstants.LocalHeaderBaseSize + extraLength; + } + } + + #endregion Archive Testing + + #region Updating + + private const int DefaultBufferSize = 4096; + + /// + /// The kind of update to apply. + /// + private enum UpdateCommand + { + Copy, // Copy original file contents. + Modify, // Change encryption, compression, attributes, name, time etc, of an existing file. + Add, // Add a new file to the archive. + } + + #region Properties + + /// + /// Get / set the to apply to names when updating. + /// + public INameTransform NameTransform + { + get + { + return updateEntryFactory_.NameTransform; + } + + set + { + updateEntryFactory_.NameTransform = value; + } + } + + /// + /// Get/set the used to generate values + /// during updates. + /// + public IEntryFactory EntryFactory + { + get + { + return updateEntryFactory_; + } + + set + { + if (value == null) + { + updateEntryFactory_ = new ZipEntryFactory(); + } + else + { + updateEntryFactory_ = value; + } + } + } + + /// + /// Get /set the buffer size to be used when updating this zip file. 
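// Editor's note: a minimal sketch of configuring the update properties above before calling
// BeginUpdate. The path, prefix and buffer size are illustrative assumptions, not values
// from this patch:
//
//   using (var zip = new ZipFile("archive.zip"))
//   {
//       zip.NameTransform = new ZipNameTransform(@"C:\staging");  // strip this prefix from added names
//       zip.BufferSize = 64 * 1024;                               // copy buffer used during updates
//       zip.UseZip64 = UseZip64.Dynamic;                          // enable Zip64 only when needed
//       // ... BeginUpdate / Add / CommitUpdate as sketched further below ...
//   }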
+ /// + public int BufferSize + { + get { return bufferSize_; } + set + { + if (value < 1024) + { + throw new ArgumentOutOfRangeException(nameof(value), "cannot be below 1024"); + } + + if (bufferSize_ != value) + { + bufferSize_ = value; + copyBuffer_ = null; + } + } + } + + /// + /// Get a value indicating an update has been started. + /// + public bool IsUpdating + { + get { return updates_ != null; } + } + + /// + /// Get / set a value indicating how Zip64 Extension usage is determined when adding entries. + /// + public UseZip64 UseZip64 + { + get { return useZip64_; } + set { useZip64_ = value; } + } + + #endregion Properties + + #region Immediate updating + + // TBD: Direct form of updating + // + // public void Update(IEntryMatcher deleteMatcher) + // { + // } + // + // public void Update(IScanner addScanner) + // { + // } + + #endregion Immediate updating + + #region Deferred Updating + + /// + /// Begin updating this archive. + /// + /// The archive storage for use during the update. + /// The data source to utilise during updating. + /// ZipFile has been closed. + /// One of the arguments provided is null + /// ZipFile has been closed. + public void BeginUpdate(IArchiveStorage archiveStorage, IDynamicDataSource dataSource) + { + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + if (IsEmbeddedArchive) + { + throw new ZipException("Cannot update embedded/SFX archives"); + } + + archiveStorage_ = archiveStorage ?? throw new ArgumentNullException(nameof(archiveStorage)); + updateDataSource_ = dataSource ?? throw new ArgumentNullException(nameof(dataSource)); + + // NOTE: the baseStream_ may not currently support writing or seeking. + + updateIndex_ = new Dictionary(); + + updates_ = new List(entries_.Length); + foreach (ZipEntry entry in entries_) + { + int index = updates_.Count; + updates_.Add(new ZipUpdate(entry)); + updateIndex_.Add(entry.Name, index); + } + + // We must sort by offset before using offset's calculated sizes + updates_.Sort(new UpdateComparer()); + + int idx = 0; + foreach (ZipUpdate update in updates_) + { + //If last entry, there is no next entry offset to use + if (idx == updates_.Count - 1) + break; + + update.OffsetBasedSize = ((ZipUpdate)updates_[idx + 1]).Entry.Offset - update.Entry.Offset; + idx++; + } + updateCount_ = updates_.Count; + + contentsEdited_ = false; + commentEdited_ = false; + newComment_ = null; + } + + /// + /// Begin updating to this archive. + /// + /// The storage to use during the update. + public void BeginUpdate(IArchiveStorage archiveStorage) + { + BeginUpdate(archiveStorage, new DynamicDiskDataSource()); + } + + /// + /// Begin updating this archive. + /// + /// + /// + /// + public void BeginUpdate() + { + if (Name == null) + { + BeginUpdate(new MemoryArchiveStorage(), new DynamicDiskDataSource()); + } + else + { + BeginUpdate(new DiskArchiveStorage(this), new DynamicDiskDataSource()); + } + } + + /// + /// Commit current updates, updating this archive. + /// + /// + /// + /// ZipFile has been closed. + public void CommitUpdate() + { + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + CheckUpdating(); + + try + { + updateIndex_.Clear(); + updateIndex_ = null; + + if (contentsEdited_) + { + RunUpdates(); + } + else if (commentEdited_) + { + UpdateCommentOnly(); + } + else + { + // Create an empty archive if none existed originally. + if (entries_.Length == 0) + { + byte[] theComment = (newComment_ != null) ? 
newComment_.RawComment : ZipStrings.ConvertToArray(comment_); + using (ZipHelperStream zhs = new ZipHelperStream(baseStream_)) + { + zhs.WriteEndOfCentralDirectory(0, 0, 0, theComment); + } + } + } + } + finally + { + PostUpdateCleanup(); + } + } + + /// + /// Abort updating leaving the archive unchanged. + /// + /// + /// + public void AbortUpdate() + { + PostUpdateCleanup(); + } + + /// + /// Set the file comment to be recorded when the current update is commited. + /// + /// The comment to record. + /// ZipFile has been closed. + public void SetComment(string comment) + { + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + CheckUpdating(); + + newComment_ = new ZipString(comment); + + if (newComment_.RawLength > 0xffff) + { + newComment_ = null; + throw new ZipException("Comment length exceeds maximum - 65535"); + } + + // We dont take account of the original and current comment appearing to be the same + // as encoding may be different. + commentEdited_ = true; + } + + #endregion Deferred Updating + + #region Adding Entries + + private void AddUpdate(ZipUpdate update) + { + contentsEdited_ = true; + + int index = FindExistingUpdate(update.Entry.Name); + + if (index >= 0) + { + if (updates_[index] == null) + { + updateCount_ += 1; + } + + // Direct replacement is faster than delete and add. + updates_[index] = update; + } + else + { + index = updates_.Count; + updates_.Add(update); + updateCount_ += 1; + updateIndex_.Add(update.Entry.Name, index); + } + } + + /// + /// Add a new entry to the archive. + /// + /// The name of the file to add. + /// The compression method to use. + /// Ensure Unicode text is used for name and comment for this entry. + /// Argument supplied is null. + /// ZipFile has been closed. + /// Compression method is not supported for creating entries. + public void Add(string fileName, CompressionMethod compressionMethod, bool useUnicodeText) + { + if (fileName == null) + { + throw new ArgumentNullException(nameof(fileName)); + } + + if (isDisposed_) + { + throw new ObjectDisposedException("ZipFile"); + } + + CheckSupportedCompressionMethod(compressionMethod); + CheckUpdating(); + contentsEdited_ = true; + + ZipEntry entry = EntryFactory.MakeFileEntry(fileName); + entry.IsUnicodeText = useUnicodeText; + entry.CompressionMethod = compressionMethod; + + AddUpdate(new ZipUpdate(fileName, entry)); + } + + /// + /// Add a new entry to the archive. + /// + /// The name of the file to add. + /// The compression method to use. + /// ZipFile has been closed. + /// Compression method is not supported for creating entries. + public void Add(string fileName, CompressionMethod compressionMethod) + { + if (fileName == null) + { + throw new ArgumentNullException(nameof(fileName)); + } + + CheckSupportedCompressionMethod(compressionMethod); + CheckUpdating(); + contentsEdited_ = true; + + ZipEntry entry = EntryFactory.MakeFileEntry(fileName); + entry.CompressionMethod = compressionMethod; + AddUpdate(new ZipUpdate(fileName, entry)); + } + + /// + /// Add a file to the archive. + /// + /// The name of the file to add. + /// Argument supplied is null. + public void Add(string fileName) + { + if (fileName == null) + { + throw new ArgumentNullException(nameof(fileName)); + } + + CheckUpdating(); + AddUpdate(new ZipUpdate(fileName, EntryFactory.MakeFileEntry(fileName))); + } + + /// + /// Add a file to the archive. + /// + /// The name of the file to add. + /// The name to use for the on the Zip file created. + /// Argument supplied is null. 
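// Editor's note: the deferred-update sequence built from the members above
// (BeginUpdate / Add / Delete / SetComment / CommitUpdate / AbortUpdate). The file names
// are illustrative assumptions:
//
//   using (var zip = new ZipFile("archive.zip"))
//   {
//       zip.BeginUpdate();
//       try
//       {
//           zip.Add("readme.txt");              // add or replace an entry
//           zip.Delete("obsolete.txt");         // throws if the entry is not found
//           zip.SetComment("updated nightly");  // recorded on commit
//           zip.CommitUpdate();
//       }
//       catch
//       {
//           zip.AbortUpdate();                  // leave the archive unchanged
//           throw;
//       }
//   }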
+ public void Add(string fileName, string entryName) + { + if (fileName == null) + { + throw new ArgumentNullException(nameof(fileName)); + } + + if (entryName == null) + { + throw new ArgumentNullException(nameof(entryName)); + } + + CheckUpdating(); + AddUpdate(new ZipUpdate(fileName, EntryFactory.MakeFileEntry(fileName, entryName, true))); + } + + /// + /// Add a file entry with data. + /// + /// The source of the data for this entry. + /// The name to give to the entry. + public void Add(IStaticDataSource dataSource, string entryName) + { + if (dataSource == null) + { + throw new ArgumentNullException(nameof(dataSource)); + } + + if (entryName == null) + { + throw new ArgumentNullException(nameof(entryName)); + } + + CheckUpdating(); + AddUpdate(new ZipUpdate(dataSource, EntryFactory.MakeFileEntry(entryName, false))); + } + + /// + /// Add a file entry with data. + /// + /// The source of the data for this entry. + /// The name to give to the entry. + /// The compression method to use. + /// Compression method is not supported for creating entries. + public void Add(IStaticDataSource dataSource, string entryName, CompressionMethod compressionMethod) + { + if (dataSource == null) + { + throw new ArgumentNullException(nameof(dataSource)); + } + + if (entryName == null) + { + throw new ArgumentNullException(nameof(entryName)); + } + + CheckSupportedCompressionMethod(compressionMethod); + CheckUpdating(); + + ZipEntry entry = EntryFactory.MakeFileEntry(entryName, false); + entry.CompressionMethod = compressionMethod; + + AddUpdate(new ZipUpdate(dataSource, entry)); + } + + /// + /// Add a file entry with data. + /// + /// The source of the data for this entry. + /// The name to give to the entry. + /// The compression method to use. + /// Ensure Unicode text is used for name and comments for this entry. + /// Compression method is not supported for creating entries. + public void Add(IStaticDataSource dataSource, string entryName, CompressionMethod compressionMethod, bool useUnicodeText) + { + if (dataSource == null) + { + throw new ArgumentNullException(nameof(dataSource)); + } + + if (entryName == null) + { + throw new ArgumentNullException(nameof(entryName)); + } + + CheckSupportedCompressionMethod(compressionMethod); + CheckUpdating(); + + ZipEntry entry = EntryFactory.MakeFileEntry(entryName, false); + entry.IsUnicodeText = useUnicodeText; + entry.CompressionMethod = compressionMethod; + + AddUpdate(new ZipUpdate(dataSource, entry)); + } + + /// + /// Add a that contains no data. + /// + /// The entry to add. + /// This can be used to add directories, volume labels, or empty file entries. + public void Add(ZipEntry entry) + { + if (entry == null) + { + throw new ArgumentNullException(nameof(entry)); + } + + CheckUpdating(); + + if ((entry.Size != 0) || (entry.CompressedSize != 0)) + { + throw new ZipException("Entry cannot have any data"); + } + + AddUpdate(new ZipUpdate(UpdateCommand.Add, entry)); + } + + /// + /// Add a with data. + /// + /// The source of the data for this entry. + /// The entry to add. + /// This can be used to add file entries with a custom data source. + /// + /// The encryption method specified in is unsupported. + /// + /// Compression method is not supported for creating entries. 
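// Editor's note: a minimal IStaticDataSource sketch for the data-source overloads in this
// region; the class name and payload are hypothetical. GetSource() must return a readable
// stream positioned at the start of the entry data.
//
//   class StringDataSource : IStaticDataSource
//   {
//       private readonly byte[] data_;
//
//       public StringDataSource(string text)
//       {
//           data_ = Encoding.UTF8.GetBytes(text);
//       }
//
//       public Stream GetSource()
//       {
//           return new MemoryStream(data_);
//       }
//   }
//
//   // zip.BeginUpdate();
//   // zip.Add(new StringDataSource("hello world"), "hello.txt");
//   // zip.CommitUpdate();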
+ public void Add(IStaticDataSource dataSource, ZipEntry entry) + { + if (entry == null) + { + throw new ArgumentNullException(nameof(entry)); + } + + if (dataSource == null) + { + throw new ArgumentNullException(nameof(dataSource)); + } + + // We don't currently support adding entries with AES encryption, so throw + // up front instead of failing or falling back to ZipCrypto later on + if (entry.AESKeySize > 0) + { + throw new NotSupportedException("Creation of AES encrypted entries is not supported"); + } + + CheckSupportedCompressionMethod(entry.CompressionMethod); + CheckUpdating(); + + AddUpdate(new ZipUpdate(dataSource, entry)); + } + + /// + /// Add a directory entry to the archive. + /// + /// The directory to add. + public void AddDirectory(string directoryName) + { + if (directoryName == null) + { + throw new ArgumentNullException(nameof(directoryName)); + } + + CheckUpdating(); + + ZipEntry dirEntry = EntryFactory.MakeDirectoryEntry(directoryName); + AddUpdate(new ZipUpdate(UpdateCommand.Add, dirEntry)); + } + + /// + /// Check if the specified compression method is supported for adding a new entry. + /// + /// The compression method for the new entry. + private void CheckSupportedCompressionMethod(CompressionMethod compressionMethod) + { + if (compressionMethod != CompressionMethod.Deflated && compressionMethod != CompressionMethod.Stored) + { + throw new NotImplementedException("Compression method not supported"); + } + } + + #endregion Adding Entries + + #region Modifying Entries + + /* Modify not yet ready for public consumption. + Direct modification of an entry should not overwrite original data before its read. + Safe mode is trivial in this sense. + public void Modify(ZipEntry original, ZipEntry updated) + { + if ( original == null ) { + throw new ArgumentNullException("original"); + } + + if ( updated == null ) { + throw new ArgumentNullException("updated"); + } + + CheckUpdating(); + contentsEdited_ = true; + updates_.Add(new ZipUpdate(original, updated)); + } + */ + + #endregion Modifying Entries + + #region Deleting Entries + + /// + /// Delete an entry by name + /// + /// The filename to delete + /// True if the entry was found and deleted; false otherwise. + public bool Delete(string fileName) + { + if (fileName == null) + { + throw new ArgumentNullException(nameof(fileName)); + } + + CheckUpdating(); + + bool result = false; + int index = FindExistingUpdate(fileName); + if ((index >= 0) && (updates_[index] != null)) + { + result = true; + contentsEdited_ = true; + updates_[index] = null; + updateCount_ -= 1; + } + else + { + throw new ZipException("Cannot find entry to delete"); + } + return result; + } + + /// + /// Delete a from the archive. + /// + /// The entry to delete. + public void Delete(ZipEntry entry) + { + if (entry == null) + { + throw new ArgumentNullException(nameof(entry)); + } + + CheckUpdating(); + + int index = FindExistingUpdate(entry); + if (index >= 0) + { + contentsEdited_ = true; + updates_[index] = null; + updateCount_ -= 1; + } + else + { + throw new ZipException("Cannot find entry to delete"); + } + } + + #endregion Deleting Entries + + #region Update Support + + #region Writing Values/Headers + + private void WriteLEShort(int value) + { + baseStream_.WriteByte((byte)(value & 0xff)); + baseStream_.WriteByte((byte)((value >> 8) & 0xff)); + } + + /// + /// Write an unsigned short in little endian byte order. 
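// Editor's note: a small self-contained illustration (assumed helper, not part of this
// patch) of the little-endian byte order produced by the writers in this region: least
// significant byte first. For example the local header signature 0x04034b50 is written
// as the bytes 50 4B 03 04 ("PK\3\4").
//
//   static byte[] ToLittleEndian(int value)
//   {
//       return new byte[]
//       {
//           (byte)(value & 0xff),
//           (byte)((value >> 8) & 0xff),
//           (byte)((value >> 16) & 0xff),
//           (byte)((value >> 24) & 0xff),
//       };
//   }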
+ /// + private void WriteLEUshort(ushort value) + { + baseStream_.WriteByte((byte)(value & 0xff)); + baseStream_.WriteByte((byte)(value >> 8)); + } + + /// + /// Write an int in little endian byte order. + /// + private void WriteLEInt(int value) + { + WriteLEShort(value & 0xffff); + WriteLEShort(value >> 16); + } + + /// + /// Write an unsigned int in little endian byte order. + /// + private void WriteLEUint(uint value) + { + WriteLEUshort((ushort)(value & 0xffff)); + WriteLEUshort((ushort)(value >> 16)); + } + + /// + /// Write a long in little endian byte order. + /// + private void WriteLeLong(long value) + { + WriteLEInt((int)(value & 0xffffffff)); + WriteLEInt((int)(value >> 32)); + } + + private void WriteLEUlong(ulong value) + { + WriteLEUint((uint)(value & 0xffffffff)); + WriteLEUint((uint)(value >> 32)); + } + + private void WriteLocalEntryHeader(ZipUpdate update) + { + ZipEntry entry = update.OutEntry; + + // TODO: Local offset will require adjusting for multi-disk zip files. + entry.Offset = baseStream_.Position; + + // TODO: Need to clear any entry flags that dont make sense or throw an exception here. + if (update.Command != UpdateCommand.Copy) + { + if (entry.CompressionMethod == CompressionMethod.Deflated) + { + if (entry.Size == 0) + { + // No need to compress - no data. + entry.CompressedSize = entry.Size; + entry.Crc = 0; + entry.CompressionMethod = CompressionMethod.Stored; + } + } + else if (entry.CompressionMethod == CompressionMethod.Stored) + { + entry.Flags &= ~(int)GeneralBitFlags.Descriptor; + } + + if (HaveKeys) + { + entry.IsCrypted = true; + if (entry.Crc < 0) + { + entry.Flags |= (int)GeneralBitFlags.Descriptor; + } + } + else + { + entry.IsCrypted = false; + } + + switch (useZip64_) + { + case UseZip64.Dynamic: + if (entry.Size < 0) + { + entry.ForceZip64(); + } + break; + + case UseZip64.On: + entry.ForceZip64(); + break; + + case UseZip64.Off: + // Do nothing. The entry itself may be using Zip64 independently. + break; + } + } + + // Write the local file header + WriteLEInt(ZipConstants.LocalHeaderSignature); + + WriteLEShort(entry.Version); + WriteLEShort(entry.Flags); + + WriteLEShort((byte)entry.CompressionMethodForHeader); + WriteLEInt((int)entry.DosTime); + + if (!entry.HasCrc) + { + // Note patch address for updating CRC later. + update.CrcPatchOffset = baseStream_.Position; + WriteLEInt((int)0); + } + else + { + WriteLEInt(unchecked((int)entry.Crc)); + } + + if (entry.LocalHeaderRequiresZip64) + { + WriteLEInt(-1); + WriteLEInt(-1); + } + else + { + if ((entry.CompressedSize < 0) || (entry.Size < 0)) + { + update.SizePatchOffset = baseStream_.Position; + } + + WriteLEInt((int)entry.CompressedSize); + WriteLEInt((int)entry.Size); + } + + byte[] name = ZipStrings.ConvertToArray(entry.Flags, entry.Name); + + if (name.Length > 0xFFFF) + { + throw new ZipException("Entry name too long."); + } + + var ed = new ZipExtraData(entry.ExtraData); + + if (entry.LocalHeaderRequiresZip64) + { + ed.StartNewEntry(); + + // Local entry header always includes size and compressed size. + // NOTE the order of these fields is reversed when compared to the normal headers! 
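// Editor's note: per the APPNOTE Zip64 extended information layout, the field written
// here is: header ID 0x0001 (2 bytes), data size (2 bytes), then the 8-byte uncompressed
// size followed by the 8-byte compressed size. The plain local header stores compressed
// size before uncompressed size, hence the "reversed" order remarked on above.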
+ ed.AddLeLong(entry.Size); + ed.AddLeLong(entry.CompressedSize); + ed.AddNewEntry(1); + } + else + { + ed.Delete(1); + } + + entry.ExtraData = ed.GetEntryData(); + + WriteLEShort(name.Length); + WriteLEShort(entry.ExtraData.Length); + + if (name.Length > 0) + { + baseStream_.Write(name, 0, name.Length); + } + + if (entry.LocalHeaderRequiresZip64) + { + if (!ed.Find(1)) + { + throw new ZipException("Internal error cannot find extra data"); + } + + update.SizePatchOffset = baseStream_.Position + ed.CurrentReadIndex; + } + + if (entry.ExtraData.Length > 0) + { + baseStream_.Write(entry.ExtraData, 0, entry.ExtraData.Length); + } + } + + private int WriteCentralDirectoryHeader(ZipEntry entry) + { + if (entry.CompressedSize < 0) + { + throw new ZipException("Attempt to write central directory entry with unknown csize"); + } + + if (entry.Size < 0) + { + throw new ZipException("Attempt to write central directory entry with unknown size"); + } + + if (entry.Crc < 0) + { + throw new ZipException("Attempt to write central directory entry with unknown crc"); + } + + // Write the central file header + WriteLEInt(ZipConstants.CentralHeaderSignature); + + // Version made by + WriteLEShort((entry.HostSystem << 8) | entry.VersionMadeBy); + + // Version required to extract + WriteLEShort(entry.Version); + + WriteLEShort(entry.Flags); + + unchecked + { + WriteLEShort((byte)entry.CompressionMethodForHeader); + WriteLEInt((int)entry.DosTime); + WriteLEInt((int)entry.Crc); + } + + bool useExtraCompressedSize = false; //Do we want to store the compressed size in the extra data? + if ((entry.IsZip64Forced()) || (entry.CompressedSize >= 0xffffffff)) + { + useExtraCompressedSize = true; + WriteLEInt(-1); + } + else + { + WriteLEInt((int)(entry.CompressedSize & 0xffffffff)); + } + + bool useExtraUncompressedSize = false; //Do we want to store the uncompressed size in the extra data? + if ((entry.IsZip64Forced()) || (entry.Size >= 0xffffffff)) + { + useExtraUncompressedSize = true; + WriteLEInt(-1); + } + else + { + WriteLEInt((int)entry.Size); + } + + byte[] name = ZipStrings.ConvertToArray(entry.Flags, entry.Name); + + if (name.Length > 0xFFFF) + { + throw new ZipException("Entry name is too long."); + } + + WriteLEShort(name.Length); + + // Central header extra data is different to local header version so regenerate. + var ed = new ZipExtraData(entry.ExtraData); + + if (entry.CentralHeaderRequiresZip64) + { + ed.StartNewEntry(); + + if (useExtraUncompressedSize) + { + ed.AddLeLong(entry.Size); + } + + if (useExtraCompressedSize) + { + ed.AddLeLong(entry.CompressedSize); + } + + if (entry.Offset >= 0xffffffff) + { + ed.AddLeLong(entry.Offset); + } + + // Number of disk on which this file starts isnt supported and is never written here. + ed.AddNewEntry(1); + } + else + { + // Should have already be done when local header was added. + ed.Delete(1); + } + + byte[] centralExtraData = ed.GetEntryData(); + + WriteLEShort(centralExtraData.Length); + WriteLEShort(entry.Comment != null ? entry.Comment.Length : 0); + + WriteLEShort(0); // disk number + WriteLEShort(0); // internal file attributes + + // External file attributes... 
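// Editor's note: for DOS/Windows hosts the low byte of the external file attributes holds
// the FAT attribute bits; 0x10 (decimal 16) is the directory bit, which is why directories
// fall back to 16 below when no explicit attributes are available.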
+ if (entry.ExternalFileAttributes != -1) + { + WriteLEInt(entry.ExternalFileAttributes); + } + else + { + if (entry.IsDirectory) + { + WriteLEUint(16); + } + else + { + WriteLEUint(0); + } + } + + if (entry.Offset >= 0xffffffff) + { + WriteLEUint(0xffffffff); + } + else + { + WriteLEUint((uint)(int)entry.Offset); + } + + if (name.Length > 0) + { + baseStream_.Write(name, 0, name.Length); + } + + if (centralExtraData.Length > 0) + { + baseStream_.Write(centralExtraData, 0, centralExtraData.Length); + } + + byte[] rawComment = (entry.Comment != null) ? Encoding.ASCII.GetBytes(entry.Comment) : new byte[0]; + + if (rawComment.Length > 0) + { + baseStream_.Write(rawComment, 0, rawComment.Length); + } + + return ZipConstants.CentralHeaderBaseSize + name.Length + centralExtraData.Length + rawComment.Length; + } + + #endregion Writing Values/Headers + + private void PostUpdateCleanup() + { + updateDataSource_ = null; + updates_ = null; + updateIndex_ = null; + + if (archiveStorage_ != null) + { + archiveStorage_.Dispose(); + archiveStorage_ = null; + } + } + + private string GetTransformedFileName(string name) + { + INameTransform transform = NameTransform; + return (transform != null) ? + transform.TransformFile(name) : + name; + } + + private string GetTransformedDirectoryName(string name) + { + INameTransform transform = NameTransform; + return (transform != null) ? + transform.TransformDirectory(name) : + name; + } + + /// + /// Get a raw memory buffer. + /// + /// Returns a raw memory buffer. + private byte[] GetBuffer() + { + if (copyBuffer_ == null) + { + copyBuffer_ = new byte[bufferSize_]; + } + return copyBuffer_; + } + + private void CopyDescriptorBytes(ZipUpdate update, Stream dest, Stream source) + { + int bytesToCopy = GetDescriptorSize(update); + + if (bytesToCopy > 0) + { + byte[] buffer = GetBuffer(); + + while (bytesToCopy > 0) + { + int readSize = Math.Min(buffer.Length, bytesToCopy); + + int bytesRead = source.Read(buffer, 0, readSize); + if (bytesRead > 0) + { + dest.Write(buffer, 0, bytesRead); + bytesToCopy -= bytesRead; + } + else + { + throw new ZipException("Unexpected end of stream"); + } + } + } + } + + private void CopyBytes(ZipUpdate update, Stream destination, Stream source, + long bytesToCopy, bool updateCrc) + { + if (destination == source) + { + throw new InvalidOperationException("Destination and source are the same"); + } + + // NOTE: Compressed size is updated elsewhere. + var crc = new Crc32(); + byte[] buffer = GetBuffer(); + + long targetBytes = bytesToCopy; + long totalBytesRead = 0; + + int bytesRead; + do + { + int readSize = buffer.Length; + + if (bytesToCopy < readSize) + { + readSize = (int)bytesToCopy; + } + + bytesRead = source.Read(buffer, 0, readSize); + if (bytesRead > 0) + { + if (updateCrc) + { + crc.Update(new ArraySegment<byte>(buffer, 0, bytesRead)); + } + destination.Write(buffer, 0, bytesRead); + bytesToCopy -= bytesRead; + totalBytesRead += bytesRead; + } + } + while ((bytesRead > 0) && (bytesToCopy > 0)); + + if (totalBytesRead != targetBytes) + { + throw new ZipException(string.Format("Failed to copy bytes expected {0} read {1}", targetBytes, totalBytesRead)); + } + + if (updateCrc) + { + update.OutEntry.Crc = crc.Value; + } + } + + /// + /// Get the size of the source descriptor for a ZipUpdate. + /// + /// The update to get the size for. + /// The descriptor size, zero if there isn't one.
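// Editor's note: the copy/CRC pattern used by CopyBytes above, reduced to a standalone
// sketch (assumed helper; the buffer size is an arbitrary choice):
//
//   static long CopyWithCrc(Stream source, Stream destination)
//   {
//       var crc = new Crc32();
//       byte[] buffer = new byte[4096];
//       int bytesRead;
//       while ((bytesRead = source.Read(buffer, 0, buffer.Length)) > 0)
//       {
//           crc.Update(new ArraySegment<byte>(buffer, 0, bytesRead));
//           destination.Write(buffer, 0, bytesRead);
//       }
//       return crc.Value;   // CRC-32 of everything copied
//   }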
+ private int GetDescriptorSize(ZipUpdate update) + { + int result = 0; + if ((update.Entry.Flags & (int)GeneralBitFlags.Descriptor) != 0) + { + result = ZipConstants.DataDescriptorSize - 4; + if (update.Entry.LocalHeaderRequiresZip64) + { + result = ZipConstants.Zip64DataDescriptorSize - 4; + } + } + return result; + } + + private void CopyDescriptorBytesDirect(ZipUpdate update, Stream stream, ref long destinationPosition, long sourcePosition) + { + int bytesToCopy = GetDescriptorSize(update); + + while (bytesToCopy > 0) + { + var readSize = (int)bytesToCopy; + byte[] buffer = GetBuffer(); + + stream.Position = sourcePosition; + int bytesRead = stream.Read(buffer, 0, readSize); + if (bytesRead > 0) + { + stream.Position = destinationPosition; + stream.Write(buffer, 0, bytesRead); + bytesToCopy -= bytesRead; + destinationPosition += bytesRead; + sourcePosition += bytesRead; + } + else + { + throw new ZipException("Unexpected end of stream"); + } + } + } + + private void CopyEntryDataDirect(ZipUpdate update, Stream stream, bool updateCrc, ref long destinationPosition, ref long sourcePosition) + { + long bytesToCopy = update.Entry.CompressedSize; + + // NOTE: Compressed size is updated elsewhere. + var crc = new Crc32(); + byte[] buffer = GetBuffer(); + + long targetBytes = bytesToCopy; + long totalBytesRead = 0; + + int bytesRead; + do + { + int readSize = buffer.Length; + + if (bytesToCopy < readSize) + { + readSize = (int)bytesToCopy; + } + + stream.Position = sourcePosition; + bytesRead = stream.Read(buffer, 0, readSize); + if (bytesRead > 0) + { + if (updateCrc) + { + crc.Update(new ArraySegment<byte>(buffer, 0, bytesRead)); + } + stream.Position = destinationPosition; + stream.Write(buffer, 0, bytesRead); + + destinationPosition += bytesRead; + sourcePosition += bytesRead; + bytesToCopy -= bytesRead; + totalBytesRead += bytesRead; + } + } + while ((bytesRead > 0) && (bytesToCopy > 0)); + + if (totalBytesRead != targetBytes) + { + throw new ZipException(string.Format("Failed to copy bytes expected {0} read {1}", targetBytes, totalBytesRead)); + } + + if (updateCrc) + { + update.OutEntry.Crc = crc.Value; + } + } + + private int FindExistingUpdate(ZipEntry entry) + { + int result = -1; + string convertedName = entry.IsDirectory + ? GetTransformedDirectoryName(entry.Name) + : GetTransformedFileName(entry.Name); + + if (updateIndex_.ContainsKey(convertedName)) + { + result = (int)updateIndex_[convertedName]; + } + /* + // This is slow like the coming of the next ice age but takes less storage and may be useful + // for CF? + for (int index = 0; index < updates_.Count; ++index) + { + ZipUpdate zu = ( ZipUpdate )updates_[index]; + if ( (zu.Entry.ZipFileIndex == entry.ZipFileIndex) && + (string.Compare(convertedName, zu.Entry.Name, true, CultureInfo.InvariantCulture) == 0) ) { + result = index; + break; + } + } + */ + return result; + } + + private int FindExistingUpdate(string fileName) + { + int result = -1; + + string convertedName = GetTransformedFileName(fileName); + + if (updateIndex_.ContainsKey(convertedName)) + { + result = (int)updateIndex_[convertedName]; + } + + /* + // This is slow like the coming of the next ice age but takes less storage and may be useful + // for CF?
+ for ( int index = 0; index < updates_.Count; ++index ) { + if ( string.Compare(convertedName, (( ZipUpdate )updates_[index]).Entry.Name, + true, CultureInfo.InvariantCulture) == 0 ) { + result = index; + break; + } + } + */ + + return result; + } + + /// + /// Get an output stream for the specified ZipEntry + /// + /// The entry to get an output stream for. + /// The output stream obtained for the entry. + private Stream GetOutputStream(ZipEntry entry) + { + Stream result = baseStream_; + + if (entry.IsCrypted == true) + { + result = CreateAndInitEncryptionStream(result, entry); + } + + switch (entry.CompressionMethod) + { + case CompressionMethod.Stored: + result = new UncompressedStream(result); + break; + + case CompressionMethod.Deflated: + var dos = new DeflaterOutputStream(result, new Deflater(9, true)) + { + IsStreamOwner = false + }; + result = dos; + break; + + default: + throw new ZipException("Unknown compression method " + entry.CompressionMethod); + } + return result; + } + + private void AddEntry(ZipFile workFile, ZipUpdate update) + { + Stream source = null; + + if (update.Entry.IsFile) + { + source = update.GetSource(); + + if (source == null) + { + source = updateDataSource_.GetSource(update.Entry, update.Filename); + } + } + + if (source != null) + { + using (source) + { + long sourceStreamLength = source.Length; + if (update.OutEntry.Size < 0) + { + update.OutEntry.Size = sourceStreamLength; + } + else + { + // Check for errant entries. + if (update.OutEntry.Size != sourceStreamLength) + { + throw new ZipException("Entry size/stream size mismatch"); + } + } + + workFile.WriteLocalEntryHeader(update); + + long dataStart = workFile.baseStream_.Position; + + using (Stream output = workFile.GetOutputStream(update.OutEntry)) + { + CopyBytes(update, output, source, sourceStreamLength, true); + } + + long dataEnd = workFile.baseStream_.Position; + update.OutEntry.CompressedSize = dataEnd - dataStart; + + if ((update.OutEntry.Flags & (int)GeneralBitFlags.Descriptor) == (int)GeneralBitFlags.Descriptor) + { + var helper = new ZipHelperStream(workFile.baseStream_); + helper.WriteDataDescriptor(update.OutEntry); + } + } + } + else + { + workFile.WriteLocalEntryHeader(update); + update.OutEntry.CompressedSize = 0; + } + } + + private void ModifyEntry(ZipFile workFile, ZipUpdate update) + { + workFile.WriteLocalEntryHeader(update); + long dataStart = workFile.baseStream_.Position; + + // TODO: This is slow if the changes don't affect the data!! + if (update.Entry.IsFile && (update.Filename != null)) + { + using (Stream output = workFile.GetOutputStream(update.OutEntry)) + { + using (Stream source = this.GetInputStream(update.Entry)) + { + CopyBytes(update, output, source, source.Length, true); + } + } + } + + long dataEnd = workFile.baseStream_.Position; + update.Entry.CompressedSize = dataEnd - dataStart; + } + + private void CopyEntryDirect(ZipFile workFile, ZipUpdate update, ref long destinationPosition) + { + bool skipOver = update.Entry.Offset == destinationPosition; + + if (!skipOver) + { + baseStream_.Position = destinationPosition; + workFile.WriteLocalEntryHeader(update); + destinationPosition = baseStream_.Position; + } + + long sourcePosition = 0; + + const int NameLengthOffset = 26; + + // TODO: Add base for SFX friendly handling + long entryDataOffset = update.Entry.Offset + NameLengthOffset; + + baseStream_.Seek(entryDataOffset, SeekOrigin.Begin); + + // Clumsy way of handling retrieving the original name and extra data length for now.
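// Editor's note: NameLengthOffset = 26 comes from the fixed local file header layout
// (offsets in bytes):
//   0  signature "PK\3\4" (4)    14 crc-32 (4)
//   4  version needed (2)        18 compressed size (4)
//   6  general flags (2)         22 uncompressed size (4)
//   8  method (2)                26 file name length (2)
//   10 mod time (2)              28 extra field length (2)
//   12 mod date (2)              30 file name, then extra field
// so seeking to entry offset + 26 lands on the two length fields read below.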
+ // TODO: Stop re-reading name and data length in CopyEntryDirect. + uint nameLength = ReadLEUshort(); + uint extraLength = ReadLEUshort(); + + sourcePosition = baseStream_.Position + nameLength + extraLength; + + if (skipOver) + { + if (update.OffsetBasedSize != -1) + destinationPosition += update.OffsetBasedSize; + else + // TODO: Find out why this calculation comes up 4 bytes short on some entries in ODT (Office Document Text) archives. + // WinZip produces a warning on these entries: + // "caution: value of lrec.csize (compressed size) changed from ..." + destinationPosition += + (sourcePosition - entryDataOffset) + NameLengthOffset + // Header size + update.Entry.CompressedSize + GetDescriptorSize(update); + } + else + { + if (update.Entry.CompressedSize > 0) + { + CopyEntryDataDirect(update, baseStream_, false, ref destinationPosition, ref sourcePosition); + } + CopyDescriptorBytesDirect(update, baseStream_, ref destinationPosition, sourcePosition); + } + } + + private void CopyEntry(ZipFile workFile, ZipUpdate update) + { + workFile.WriteLocalEntryHeader(update); + + if (update.Entry.CompressedSize > 0) + { + const int NameLengthOffset = 26; + + long entryDataOffset = update.Entry.Offset + NameLengthOffset; + + // TODO: This wont work for SFX files! + baseStream_.Seek(entryDataOffset, SeekOrigin.Begin); + + uint nameLength = ReadLEUshort(); + uint extraLength = ReadLEUshort(); + + baseStream_.Seek(nameLength + extraLength, SeekOrigin.Current); + + CopyBytes(update, workFile.baseStream_, baseStream_, update.Entry.CompressedSize, false); + } + CopyDescriptorBytes(update, workFile.baseStream_, baseStream_); + } + + private void Reopen(Stream source) + { + isNewArchive_ = false; + baseStream_ = source ?? throw new ZipException("Failed to reopen archive - no source"); + ReadEntries(); + } + + private void Reopen() + { + if (Name == null) + { + throw new InvalidOperationException("Name is not known cannot Reopen"); + } + + Reopen(File.Open(Name, FileMode.Open, FileAccess.Read, FileShare.Read)); + } + + private void UpdateCommentOnly() + { + long baseLength = baseStream_.Length; + + ZipHelperStream updateFile = null; + + if (archiveStorage_.UpdateMode == FileUpdateMode.Safe) + { + Stream copyStream = archiveStorage_.MakeTemporaryCopy(baseStream_); + updateFile = new ZipHelperStream(copyStream) + { + IsStreamOwner = true + }; + + baseStream_.Dispose(); + baseStream_ = null; + } + else + { + if (archiveStorage_.UpdateMode == FileUpdateMode.Direct) + { + // TODO: archiveStorage wasnt originally intended for this use. + // Need to revisit this to tidy up handling as archive storage currently doesnt + // handle the original stream well. + // The problem is when using an existing zip archive with an in memory archive storage. + // The open stream wont support writing but the memory storage should open the same file not an in memory one. + + // Need to tidy up the archive storage interface and contract basically. 
+ baseStream_ = archiveStorage_.OpenForDirectUpdate(baseStream_); + updateFile = new ZipHelperStream(baseStream_); + } + else + { + baseStream_.Dispose(); + baseStream_ = null; + updateFile = new ZipHelperStream(Name); + } + } + + using (updateFile) + { + long locatedCentralDirOffset = + updateFile.LocateBlockWithSignature(ZipConstants.EndOfCentralDirectorySignature, + baseLength, ZipConstants.EndOfCentralRecordBaseSize, 0xffff); + if (locatedCentralDirOffset < 0) + { + throw new ZipException("Cannot find central directory"); + } + + const int CentralHeaderCommentSizeOffset = 16; + updateFile.Position += CentralHeaderCommentSizeOffset; + + byte[] rawComment = newComment_.RawComment; + + updateFile.WriteLEShort(rawComment.Length); + updateFile.Write(rawComment, 0, rawComment.Length); + updateFile.SetLength(updateFile.Position); + } + + if (archiveStorage_.UpdateMode == FileUpdateMode.Safe) + { + Reopen(archiveStorage_.ConvertTemporaryToFinal()); + } + else + { + ReadEntries(); + } + } + + /// + /// Class used to sort updates. + /// + private class UpdateComparer : IComparer + { + /// + /// Compares two objects and returns a value indicating whether one is + /// less than, equal to or greater than the other. + /// + /// First object to compare + /// Second object to compare. + /// Compare result. + public int Compare(ZipUpdate x, ZipUpdate y) + { + int result; + + if (x == null) + { + if (y == null) + { + result = 0; + } + else + { + result = -1; + } + } + else if (y == null) + { + result = 1; + } + else + { + int xCmdValue = ((x.Command == UpdateCommand.Copy) || (x.Command == UpdateCommand.Modify)) ? 0 : 1; + int yCmdValue = ((y.Command == UpdateCommand.Copy) || (y.Command == UpdateCommand.Modify)) ? 0 : 1; + + result = xCmdValue - yCmdValue; + if (result == 0) + { + long offsetDiff = x.Entry.Offset - y.Entry.Offset; + if (offsetDiff < 0) + { + result = -1; + } + else if (offsetDiff == 0) + { + result = 0; + } + else + { + result = 1; + } + } + } + return result; + } + } + + private void RunUpdates() + { + long sizeEntries = 0; + long endOfStream = 0; + bool directUpdate = false; + long destinationPosition = 0; // NOT SFX friendly + + ZipFile workFile; + + if (IsNewArchive) + { + workFile = this; + workFile.baseStream_.Position = 0; + directUpdate = true; + } + else if (archiveStorage_.UpdateMode == FileUpdateMode.Direct) + { + workFile = this; + workFile.baseStream_.Position = 0; + directUpdate = true; + + // Sort the updates by offset within copies/modifies, then adds. + // This ensures that data required by copies will not be overwritten. + updates_.Sort(new UpdateComparer()); + } + else + { + workFile = ZipFile.Create(archiveStorage_.GetTemporaryOutput()); + workFile.UseZip64 = UseZip64; + + if (key != null) + { + workFile.key = (byte[])key.Clone(); + } + } + + try + { + foreach (ZipUpdate update in updates_) + { + if (update != null) + { + switch (update.Command) + { + case UpdateCommand.Copy: + if (directUpdate) + { + CopyEntryDirect(workFile, update, ref destinationPosition); + } + else + { + CopyEntry(workFile, update); + } + break; + + case UpdateCommand.Modify: + // TODO: Direct modifying of an entry will take some legwork. 
+ ModifyEntry(workFile, update); + break; + + case UpdateCommand.Add: + if (!IsNewArchive && directUpdate) + { + workFile.baseStream_.Position = destinationPosition; + } + + AddEntry(workFile, update); + + if (directUpdate) + { + destinationPosition = workFile.baseStream_.Position; + } + break; + } + } + } + + if (!IsNewArchive && directUpdate) + { + workFile.baseStream_.Position = destinationPosition; + } + + long centralDirOffset = workFile.baseStream_.Position; + + foreach (ZipUpdate update in updates_) + { + if (update != null) + { + sizeEntries += workFile.WriteCentralDirectoryHeader(update.OutEntry); + } + } + + byte[] theComment = (newComment_ != null) ? newComment_.RawComment : ZipStrings.ConvertToArray(comment_); + using (ZipHelperStream zhs = new ZipHelperStream(workFile.baseStream_)) + { + zhs.WriteEndOfCentralDirectory(updateCount_, sizeEntries, centralDirOffset, theComment); + } + + endOfStream = workFile.baseStream_.Position; + + // And now patch entries... + foreach (ZipUpdate update in updates_) + { + if (update != null) + { + // If the size of the entry is zero leave the crc as 0 as well. + // The calculated crc will be all bits on... + if ((update.CrcPatchOffset > 0) && (update.OutEntry.CompressedSize > 0)) + { + workFile.baseStream_.Position = update.CrcPatchOffset; + workFile.WriteLEInt((int)update.OutEntry.Crc); + } + + if (update.SizePatchOffset > 0) + { + workFile.baseStream_.Position = update.SizePatchOffset; + if (update.OutEntry.LocalHeaderRequiresZip64) + { + workFile.WriteLeLong(update.OutEntry.Size); + workFile.WriteLeLong(update.OutEntry.CompressedSize); + } + else + { + workFile.WriteLEInt((int)update.OutEntry.CompressedSize); + workFile.WriteLEInt((int)update.OutEntry.Size); + } + } + } + } + } + catch + { + workFile.Close(); + if (!directUpdate && (workFile.Name != null)) + { + File.Delete(workFile.Name); + } + throw; + } + + if (directUpdate) + { + workFile.baseStream_.SetLength(endOfStream); + workFile.baseStream_.Flush(); + isNewArchive_ = false; + ReadEntries(); + } + else + { + baseStream_.Dispose(); + Reopen(archiveStorage_.ConvertTemporaryToFinal()); + } + } + + private void CheckUpdating() + { + if (updates_ == null) + { + throw new InvalidOperationException("BeginUpdate has not been called"); + } + } + + #endregion Update Support + + #region ZipUpdate class + + /// + /// Represents a pending update to a Zip file. + /// + private class ZipUpdate + { + #region Constructors + + public ZipUpdate(string fileName, ZipEntry entry) + { + command_ = UpdateCommand.Add; + entry_ = entry; + filename_ = fileName; + } + + [Obsolete] + public ZipUpdate(string fileName, string entryName, CompressionMethod compressionMethod) + { + command_ = UpdateCommand.Add; + entry_ = new ZipEntry(entryName) + { + CompressionMethod = compressionMethod + }; + filename_ = fileName; + } + + [Obsolete] + public ZipUpdate(string fileName, string entryName) + : this(fileName, entryName, CompressionMethod.Deflated) + { + // Do nothing. 
+ } + + [Obsolete] + public ZipUpdate(IStaticDataSource dataSource, string entryName, CompressionMethod compressionMethod) + { + command_ = UpdateCommand.Add; + entry_ = new ZipEntry(entryName) + { + CompressionMethod = compressionMethod + }; + dataSource_ = dataSource; + } + + public ZipUpdate(IStaticDataSource dataSource, ZipEntry entry) + { + command_ = UpdateCommand.Add; + entry_ = entry; + dataSource_ = dataSource; + } + + public ZipUpdate(ZipEntry original, ZipEntry updated) + { + throw new ZipException("Modify not currently supported"); + /* + command_ = UpdateCommand.Modify; + entry_ = ( ZipEntry )original.Clone(); + outEntry_ = ( ZipEntry )updated.Clone(); + */ + } + + public ZipUpdate(UpdateCommand command, ZipEntry entry) + { + command_ = command; + entry_ = (ZipEntry)entry.Clone(); + } + + /// + /// Copy an existing entry. + /// + /// The existing entry to copy. + public ZipUpdate(ZipEntry entry) + : this(UpdateCommand.Copy, entry) + { + // Do nothing. + } + + #endregion Constructors + + /// + /// Get the for this update. + /// + /// This is the source or original entry. + public ZipEntry Entry + { + get { return entry_; } + } + + /// + /// Get the that will be written to the updated/new file. + /// + public ZipEntry OutEntry + { + get + { + if (outEntry_ == null) + { + outEntry_ = (ZipEntry)entry_.Clone(); + } + + return outEntry_; + } + } + + /// + /// Get the command for this update. + /// + public UpdateCommand Command + { + get { return command_; } + } + + /// + /// Get the filename if any for this update. Null if none exists. + /// + public string Filename + { + get { return filename_; } + } + + /// + /// Get/set the location of the size patch for this update. + /// + public long SizePatchOffset + { + get { return sizePatchOffset_; } + set { sizePatchOffset_ = value; } + } + + /// + /// Get /set the location of the crc patch for this update. + /// + public long CrcPatchOffset + { + get { return crcPatchOffset_; } + set { crcPatchOffset_ = value; } + } + + /// + /// Get/set the size calculated by offset. + /// Specifically, the difference between this and next entry's starting offset. + /// + public long OffsetBasedSize + { + get { return _offsetBasedSize; } + set { _offsetBasedSize = value; } + } + + public Stream GetSource() + { + Stream result = null; + if (dataSource_ != null) + { + result = dataSource_.GetSource(); + } + + return result; + } + + #region Instance Fields + + private ZipEntry entry_; + private ZipEntry outEntry_; + private readonly UpdateCommand command_; + private IStaticDataSource dataSource_; + private readonly string filename_; + private long sizePatchOffset_ = -1; + private long crcPatchOffset_ = -1; + private long _offsetBasedSize = -1; + + #endregion Instance Fields + } + + #endregion ZipUpdate class + + #endregion Updating + + #region Disposing + + #region IDisposable Members + + void IDisposable.Dispose() + { + Close(); + } + + #endregion IDisposable Members + + private void DisposeInternal(bool disposing) + { + if (!isDisposed_) + { + isDisposed_ = true; + entries_ = new ZipEntry[0]; + + if (IsStreamOwner && (baseStream_ != null)) + { + lock (baseStream_) + { + baseStream_.Dispose(); + } + } + + PostUpdateCleanup(); + } + } + + /// + /// Releases the unmanaged resources used by the this instance and optionally releases the managed resources. + /// + /// true to release both managed and unmanaged resources; + /// false to release only unmanaged resources. 
+ protected virtual void Dispose(bool disposing) + { + DisposeInternal(disposing); + } + + #endregion Disposing + + #region Internal routines + + #region Reading + + /// + /// Read an unsigned short in little endian byte order. + /// + /// Returns the value read. + /// + /// The stream ends prematurely + /// + private ushort ReadLEUshort() + { + int data1 = baseStream_.ReadByte(); + + if (data1 < 0) + { + throw new EndOfStreamException("End of stream"); + } + + int data2 = baseStream_.ReadByte(); + + if (data2 < 0) + { + throw new EndOfStreamException("End of stream"); + } + + return unchecked((ushort)((ushort)data1 | (ushort)(data2 << 8))); + } + + /// + /// Read a uint in little endian byte order. + /// + /// Returns the value read. + /// + /// An i/o error occurs. + /// + /// + /// The file ends prematurely + /// + private uint ReadLEUint() + { + return (uint)(ReadLEUshort() | (ReadLEUshort() << 16)); + } + + private ulong ReadLEUlong() + { + return ReadLEUint() | ((ulong)ReadLEUint() << 32); + } + + #endregion Reading + + // NOTE this returns the offset of the first byte after the signature. + private long LocateBlockWithSignature(int signature, long endLocation, int minimumBlockSize, int maximumVariableData) + { + using (ZipHelperStream les = new ZipHelperStream(baseStream_)) + { + return les.LocateBlockWithSignature(signature, endLocation, minimumBlockSize, maximumVariableData); + } + } + + /// + /// Search for and read the central directory of a zip file filling the entries array. + /// + /// + /// An i/o error occurs. + /// + /// + /// The central directory is malformed or cannot be found + /// + private void ReadEntries() + { + // Search for the End Of Central Directory. When a zip comment is + // present the directory will start earlier + // + // The search is limited to 64K which is the maximum size of a trailing comment field to aid speed. + // This should be compatible with both SFX and ZIP files but has only been tested for Zip files + // If a SFX file has the Zip data attached as a resource and there are other resources occurring later then + // this could be invalid. + // Could also speed this up by reading memory in larger blocks. + + if (baseStream_.CanSeek == false) + { + throw new ZipException("ZipFile stream must be seekable"); + } + + long locatedEndOfCentralDir = LocateBlockWithSignature(ZipConstants.EndOfCentralDirectorySignature, + baseStream_.Length, ZipConstants.EndOfCentralRecordBaseSize, 0xffff); + + if (locatedEndOfCentralDir < 0) + { + throw new ZipException("Cannot find central directory"); + } + + // Read end of central directory record + ushort thisDiskNumber = ReadLEUshort(); + ushort startCentralDirDisk = ReadLEUshort(); + ulong entriesForThisDisk = ReadLEUshort(); + ulong entriesForWholeCentralDir = ReadLEUshort(); + ulong centralDirSize = ReadLEUint(); + long offsetOfCentralDir = ReadLEUint(); + uint commentSize = ReadLEUshort(); + + if (commentSize > 0) + { + byte[] comment = new byte[commentSize]; + + StreamUtils.ReadFully(baseStream_, comment); + comment_ = ZipStrings.ConvertToString(comment); + } + else + { + comment_ = string.Empty; + } + + bool isZip64 = false; + bool requireZip64 = false; + + // Check if zip64 header information is required. 
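// Editor's note: per the zip specification, any end-of-central-directory field at its
// maximum value (0xffff for the 16-bit disk/entry counts, 0xffffffff for the 32-bit
// size/offset fields) is a sentinel meaning the real value lives in the Zip64 end of
// central directory record, which is what the checks below detect.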
+ if ((thisDiskNumber == 0xffff) || + (startCentralDirDisk == 0xffff) || + (entriesForThisDisk == 0xffff) || + (entriesForWholeCentralDir == 0xffff) || + (centralDirSize == 0xffffffff) || + (offsetOfCentralDir == 0xffffffff)) + { + requireZip64 = true; + } + + // #357 - always check for the existance of the Zip64 central directory. + // #403 - Take account of the fixed size of the locator when searching. + // Subtract from locatedEndOfCentralDir so that the endLocation is the location of EndOfCentralDirectorySignature, + // rather than the data following the signature. + long locatedZip64EndOfCentralDirLocator = LocateBlockWithSignature( + ZipConstants.Zip64CentralDirLocatorSignature, + locatedEndOfCentralDir - 4, + ZipConstants.Zip64EndOfCentralDirectoryLocatorSize, + 0); + + if (locatedZip64EndOfCentralDirLocator < 0) + { + if (requireZip64) + { + // This is only an error in cases where the Zip64 directory is required. + throw new ZipException("Cannot find Zip64 locator"); + } + } + else + { + isZip64 = true; + + // number of the disk with the start of the zip64 end of central directory 4 bytes + // relative offset of the zip64 end of central directory record 8 bytes + // total number of disks 4 bytes + ReadLEUint(); // startDisk64 is not currently used + ulong offset64 = ReadLEUlong(); + uint totalDisks = ReadLEUint(); + + baseStream_.Position = (long)offset64; + long sig64 = ReadLEUint(); + + if (sig64 != ZipConstants.Zip64CentralFileHeaderSignature) + { + throw new ZipException(string.Format("Invalid Zip64 Central directory signature at {0:X}", offset64)); + } + + // NOTE: Record size = SizeOfFixedFields + SizeOfVariableData - 12. + ulong recordSize = ReadLEUlong(); + int versionMadeBy = ReadLEUshort(); + int versionToExtract = ReadLEUshort(); + uint thisDisk = ReadLEUint(); + uint centralDirDisk = ReadLEUint(); + entriesForThisDisk = ReadLEUlong(); + entriesForWholeCentralDir = ReadLEUlong(); + centralDirSize = ReadLEUlong(); + offsetOfCentralDir = (long)ReadLEUlong(); + + // NOTE: zip64 extensible data sector (variable size) is ignored. + } + + entries_ = new ZipEntry[entriesForThisDisk]; + + // SFX/embedded support, find the offset of the first entry vis the start of the stream + // This applies to Zip files that are appended to the end of an SFX stub. + // Or are appended as a resource to an executable. + // Zip files created by some archivers have the offsets altered to reflect the true offsets + // and so dont require any adjustment here... + // TODO: Difficulty with Zip64 and SFX offset handling needs resolution - maths? 
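// Editor's note on the arithmetic below: locatedEndOfCentralDir points just past the
// end-record signature, so (locatedEndOfCentralDir - 4) is where the central directory
// ends, and subtracting centralDirSize gives where it actually starts in the stream.
// The recorded offsetOfCentralDir is relative to the start of the zip data, so the
// difference between the two is the length of any SFX stub preceding the archive.
// E.g. with a 1,000-byte stub the directory is found 1,000 bytes later than recorded,
// and offsetOfFirstEntry becomes 1,000.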
+ if (!isZip64 && (offsetOfCentralDir < locatedEndOfCentralDir - (4 + (long)centralDirSize))) + { + offsetOfFirstEntry = locatedEndOfCentralDir - (4 + (long)centralDirSize + offsetOfCentralDir); + if (offsetOfFirstEntry <= 0) + { + throw new ZipException("Invalid embedded zip archive"); + } + } + + baseStream_.Seek(offsetOfFirstEntry + offsetOfCentralDir, SeekOrigin.Begin); + + for (ulong i = 0; i < entriesForThisDisk; i++) + { + if (ReadLEUint() != ZipConstants.CentralHeaderSignature) + { + throw new ZipException("Wrong Central Directory signature"); + } + + int versionMadeBy = ReadLEUshort(); + int versionToExtract = ReadLEUshort(); + int bitFlags = ReadLEUshort(); + int method = ReadLEUshort(); + uint dostime = ReadLEUint(); + uint crc = ReadLEUint(); + var csize = (long)ReadLEUint(); + var size = (long)ReadLEUint(); + int nameLen = ReadLEUshort(); + int extraLen = ReadLEUshort(); + int commentLen = ReadLEUshort(); + + int diskStartNo = ReadLEUshort(); // Not currently used + int internalAttributes = ReadLEUshort(); // Not currently used + + uint externalAttributes = ReadLEUint(); + long offset = ReadLEUint(); + + byte[] buffer = new byte[Math.Max(nameLen, commentLen)]; + + StreamUtils.ReadFully(baseStream_, buffer, 0, nameLen); + string name = ZipStrings.ConvertToStringExt(bitFlags, buffer, nameLen); + + var entry = new ZipEntry(name, versionToExtract, versionMadeBy, (CompressionMethod)method) + { + Crc = crc & 0xffffffffL, + Size = size & 0xffffffffL, + CompressedSize = csize & 0xffffffffL, + Flags = bitFlags, + DosTime = dostime, + ZipFileIndex = (long)i, + Offset = offset, + ExternalFileAttributes = (int)externalAttributes + }; + + if ((bitFlags & 8) == 0) + { + entry.CryptoCheckValue = (byte)(crc >> 24); + } + else + { + entry.CryptoCheckValue = (byte)((dostime >> 8) & 0xff); + } + + if (extraLen > 0) + { + byte[] extra = new byte[extraLen]; + StreamUtils.ReadFully(baseStream_, extra); + entry.ExtraData = extra; + } + + entry.ProcessExtraData(false); + + if (commentLen > 0) + { + StreamUtils.ReadFully(baseStream_, buffer, 0, commentLen); + entry.Comment = ZipStrings.ConvertToStringExt(bitFlags, buffer, commentLen); + } + + entries_[i] = entry; + } + } + + /// + /// Locate the data for a given entry. + /// + /// + /// The start offset of the data. + /// + /// + /// The stream ends prematurely + /// + /// + /// The local header signature is invalid, the entry and central header file name lengths are different + /// or the local and entry compression methods dont match + /// + private long LocateEntry(ZipEntry entry) + { + return TestLocalHeader(entry, HeaderTest.Extract); + } + + private Stream CreateAndInitDecryptionStream(Stream baseStream, ZipEntry entry) + { + CryptoStream result = null; + + if (entry.CompressionMethodForHeader == CompressionMethod.WinZipAES) + { + if (entry.Version >= ZipConstants.VERSION_AES) + { + // Issue #471 - accept an empty string as a password, but reject null. 
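// Editor's note: for WinZip AES entries the stored data is laid out as: salt
// (8/12/16 bytes for AES-128/192/256), a 2-byte password verifier, the encrypted
// payload, then a 10-byte authentication code; the salt and verifier reads below
// follow that layout.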
+ OnKeysRequired(entry.Name); + if (rawPassword_ == null) + { + throw new ZipException("No password available for AES encrypted stream"); + } + int saltLen = entry.AESSaltLen; + byte[] saltBytes = new byte[saltLen]; + int saltIn = StreamUtils.ReadRequestedBytes(baseStream, saltBytes, 0, saltLen); + if (saltIn != saltLen) + throw new ZipException("AES Salt expected " + saltLen + " got " + saltIn); + // + byte[] pwdVerifyRead = new byte[2]; + StreamUtils.ReadFully(baseStream, pwdVerifyRead); + int blockSize = entry.AESKeySize / 8; // bits to bytes + + var decryptor = new ZipAESTransform(rawPassword_, saltBytes, blockSize, false); + byte[] pwdVerifyCalc = decryptor.PwdVerifier; + if (pwdVerifyCalc[0] != pwdVerifyRead[0] || pwdVerifyCalc[1] != pwdVerifyRead[1]) + throw new ZipException("Invalid password for AES"); + result = new ZipAESStream(baseStream, decryptor, CryptoStreamMode.Read); + } + else + { + throw new ZipException("Decryption method not supported"); + } + } + else + { + if ((entry.Version < ZipConstants.VersionStrongEncryption) + || (entry.Flags & (int)GeneralBitFlags.StrongEncryption) == 0) + { + var classicManaged = new PkzipClassicManaged(); + + OnKeysRequired(entry.Name); + if (HaveKeys == false) + { + throw new ZipException("No password available for encrypted stream"); + } + + result = new CryptoStream(baseStream, classicManaged.CreateDecryptor(key, null), CryptoStreamMode.Read); + CheckClassicPassword(result, entry); + } + else + { + // We don't support PKWare strong encryption + throw new ZipException("Decryption method not supported"); + } + } + + return result; + } + + private Stream CreateAndInitEncryptionStream(Stream baseStream, ZipEntry entry) + { + CryptoStream result = null; + if ((entry.Version < ZipConstants.VersionStrongEncryption) + || (entry.Flags & (int)GeneralBitFlags.StrongEncryption) == 0) + { + var classicManaged = new PkzipClassicManaged(); + + OnKeysRequired(entry.Name); + if (HaveKeys == false) + { + throw new ZipException("No password available for encrypted stream"); + } + + // Closing a CryptoStream will close the base stream as well so wrap it in an UncompressedStream + // which doesnt do this. 
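+ // The 12 byte classic PKZIP encryption header written below is itself encrypted and
+ // its final byte acts as the password check value. When the CRC is not yet known
+ // (bit 3 set for a streamed entry) the high byte of DosTime is used instead, which
+ // mirrors the CryptoCheckValue assignment made when entries are read back.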
+ result = new CryptoStream(new UncompressedStream(baseStream),
+ classicManaged.CreateEncryptor(key, null), CryptoStreamMode.Write);
+
+ if ((entry.Crc < 0) || (entry.Flags & 8) != 0)
+ {
+ WriteEncryptionHeader(result, entry.DosTime << 16);
+ }
+ else
+ {
+ WriteEncryptionHeader(result, entry.Crc);
+ }
+ }
+ return result;
+ }
+
+ private static void CheckClassicPassword(CryptoStream classicCryptoStream, ZipEntry entry)
+ {
+ byte[] cryptbuffer = new byte[ZipConstants.CryptoHeaderSize];
+ StreamUtils.ReadFully(classicCryptoStream, cryptbuffer);
+ if (cryptbuffer[ZipConstants.CryptoHeaderSize - 1] != entry.CryptoCheckValue)
+ {
+ throw new ZipException("Invalid password");
+ }
+ }
+
+ private static void WriteEncryptionHeader(Stream stream, long crcValue)
+ {
+ byte[] cryptBuffer = new byte[ZipConstants.CryptoHeaderSize];
+ var rnd = new Random();
+ rnd.NextBytes(cryptBuffer);
+ cryptBuffer[11] = (byte)(crcValue >> 24);
+ stream.Write(cryptBuffer, 0, cryptBuffer.Length);
+ }
+
+ #endregion Internal routines
+
+ #region Instance Fields
+
+ private bool isDisposed_;
+ private string name_;
+ private string comment_;
+ private string rawPassword_;
+ private Stream baseStream_;
+ private bool isStreamOwner;
+ private long offsetOfFirstEntry;
+ private ZipEntry[] entries_;
+ private byte[] key;
+ private bool isNewArchive_;
+
+ // Default is dynamic, which is not backwards compatible and can cause problems
+ // with XP's built-in compression which can't read Zip64 archives.
+ // However it does avoid the situation where a large file is added and cannot be completed correctly.
+ // Hint: always set the ZipEntry size before entries are added to an archive and this setting isn't needed.
+ private UseZip64 useZip64_ = UseZip64.Dynamic;
+
+ #region Zip Update Instance Fields
+
+ private List<ZipUpdate> updates_;
+ private long updateCount_; // Count is managed manually as updates_ can contain nulls!
+ private Dictionary<string, int> updateIndex_;
+ private IArchiveStorage archiveStorage_;
+ private IDynamicDataSource updateDataSource_;
+ private bool contentsEdited_;
+ private int bufferSize_ = DefaultBufferSize;
+ private byte[] copyBuffer_;
+ private ZipString newComment_;
+ private bool commentEdited_;
+ private IEntryFactory updateEntryFactory_ = new ZipEntryFactory();
+
+ #endregion Zip Update Instance Fields
+
+ #endregion Instance Fields
+
+ #region Support Classes
+
+ ///
+ /// Represents a string from a ZipFile which is stored as an array of bytes.
+ ///
+ private class ZipString
+ {
+ #region Constructors
+
+ ///
+ /// Initialise a ZipString with a string.
+ ///
+ /// The textual string form.
+ public ZipString(string comment)
+ {
+ comment_ = comment;
+ isSourceString_ = true;
+ }
+
+ ///
+ /// Initialise a ZipString using a string in its binary 'raw' form.
+ ///
+ ///
+ public ZipString(byte[] rawString)
+ {
+ rawComment_ = rawString;
+ }
+
+ #endregion Constructors
+
+ ///
+ /// Get a value indicating the original source of data for this instance.
+ /// True if the source was a string; false if the source was binary data.
+ ///
+ public bool IsSourceString
+ {
+ get { return isSourceString_; }
+ }
+
+ ///
+ /// Get the length of the comment when represented as raw bytes.
+ ///
+ public int RawLength
+ {
+ get
+ {
+ MakeBytesAvailable();
+ return rawComment_.Length;
+ }
+ }
+
+ ///
+ /// Get the comment in its 'raw' form as plain bytes.
+ ///
+ public byte[] RawComment
+ {
+ get
+ {
+ MakeBytesAvailable();
+ return (byte[])rawComment_.Clone();
+ }
+ }
+
+ ///
+ /// Reset the comment to its initial state.
+ /// + public void Reset() + { + if (isSourceString_) + { + rawComment_ = null; + } + else + { + comment_ = null; + } + } + + private void MakeTextAvailable() + { + if (comment_ == null) + { + comment_ = ZipStrings.ConvertToString(rawComment_); + } + } + + private void MakeBytesAvailable() + { + if (rawComment_ == null) + { + rawComment_ = ZipStrings.ConvertToArray(comment_); + } + } + + /// + /// Implicit conversion of comment to a string. + /// + /// The to convert to a string. + /// The textual equivalent for the input value. + static public implicit operator string(ZipString zipString) + { + zipString.MakeTextAvailable(); + return zipString.comment_; + } + + #region Instance Fields + + private string comment_; + private byte[] rawComment_; + private readonly bool isSourceString_; + + #endregion Instance Fields + } + + /// + /// An enumerator for Zip entries + /// + private class ZipEntryEnumerator : IEnumerator + { + #region Constructors + + public ZipEntryEnumerator(ZipEntry[] entries) + { + array = entries; + } + + #endregion Constructors + + #region IEnumerator Members + + public object Current + { + get + { + return array[index]; + } + } + + public void Reset() + { + index = -1; + } + + public bool MoveNext() + { + return (++index < array.Length); + } + + #endregion IEnumerator Members + + #region Instance Fields + + private ZipEntry[] array; + private int index = -1; + + #endregion Instance Fields + } + + /// + /// An is a stream that you can write uncompressed data + /// to and flush, but cannot read, seek or do anything else to. + /// + private class UncompressedStream : Stream + { + #region Constructors + + public UncompressedStream(Stream baseStream) + { + baseStream_ = baseStream; + } + + #endregion Constructors + + /// + /// Gets a value indicating whether the current stream supports reading. + /// + public override bool CanRead + { + get + { + return false; + } + } + + /// + /// Write any buffered data to underlying storage. + /// + public override void Flush() + { + baseStream_.Flush(); + } + + /// + /// Gets a value indicating whether the current stream supports writing. + /// + public override bool CanWrite + { + get + { + return baseStream_.CanWrite; + } + } + + /// + /// Gets a value indicating whether the current stream supports seeking. + /// + public override bool CanSeek + { + get + { + return false; + } + } + + /// + /// Get the length in bytes of the stream. + /// + public override long Length + { + get + { + return 0; + } + } + + /// + /// Gets or sets the position within the current stream. + /// + public override long Position + { + get + { + return baseStream_.Position; + } + set + { + throw new NotImplementedException(); + } + } + + /// + /// Reads a sequence of bytes from the current stream and advances the position within the stream by the number of bytes read. + /// + /// An array of bytes. When this method returns, the buffer contains the specified byte array with the values between offset and (offset + count - 1) replaced by the bytes read from the current source. + /// The zero-based byte offset in buffer at which to begin storing the data read from the current stream. + /// The maximum number of bytes to be read from the current stream. + /// + /// The total number of bytes read into the buffer. This can be less than the number of bytes requested if that many bytes are not currently available, or zero (0) if the end of the stream has been reached. + /// + /// The sum of offset and count is larger than the buffer length. 
+ /// Methods were called after the stream was closed. + /// The stream does not support reading. + /// buffer is null. + /// An I/O error occurs. + /// offset or count is negative. + public override int Read(byte[] buffer, int offset, int count) + { + return 0; + } + + /// + /// Sets the position within the current stream. + /// + /// A byte offset relative to the origin parameter. + /// A value of type indicating the reference point used to obtain the new position. + /// + /// The new position within the current stream. + /// + /// An I/O error occurs. + /// The stream does not support seeking, such as if the stream is constructed from a pipe or console output. + /// Methods were called after the stream was closed. + public override long Seek(long offset, SeekOrigin origin) + { + return 0; + } + + /// + /// Sets the length of the current stream. + /// + /// The desired length of the current stream in bytes. + /// The stream does not support both writing and seeking, such as if the stream is constructed from a pipe or console output. + /// An I/O error occurs. + /// Methods were called after the stream was closed. + public override void SetLength(long value) + { + } + + /// + /// Writes a sequence of bytes to the current stream and advances the current position within this stream by the number of bytes written. + /// + /// An array of bytes. This method copies count bytes from buffer to the current stream. + /// The zero-based byte offset in buffer at which to begin copying bytes to the current stream. + /// The number of bytes to be written to the current stream. + /// An I/O error occurs. + /// The stream does not support writing. + /// Methods were called after the stream was closed. + /// buffer is null. + /// The sum of offset and count is greater than the buffer length. + /// offset or count is negative. + public override void Write(byte[] buffer, int offset, int count) + { + baseStream_.Write(buffer, offset, count); + } + + private readonly + + #region Instance Fields + + Stream baseStream_; + + #endregion Instance Fields + } + + /// + /// A is an + /// whose data is only a part or subsection of a file. + /// + private class PartialInputStream : Stream + { + #region Constructors + + /// + /// Initialise a new instance of the class. + /// + /// The containing the underlying stream to use for IO. + /// The start of the partial data. + /// The length of the partial data. + public PartialInputStream(ZipFile zipFile, long start, long length) + { + start_ = start; + length_ = length; + + // Although this is the only time the zipfile is used + // keeping a reference here prevents premature closure of + // this zip file and thus the baseStream_. + + // Code like this will cause apparently random failures depending + // on the size of the files and when garbage is collected. + // + // ZipFile z = new ZipFile (stream); + // Stream reader = z.GetInputStream(0); + // uses reader here.... + zipFile_ = zipFile; + baseStream_ = zipFile_.baseStream_; + readPos_ = start; + end_ = start + length; + } + + #endregion Constructors + + /// + /// Read a byte from this stream. + /// + /// Returns the byte read or -1 on end of stream. + public override int ReadByte() + { + if (readPos_ >= end_) + { + // -1 is the correct value at end of stream. 
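+ // (Returning -1 here follows the Stream.ReadByte contract rather than throwing.)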
+ return -1; + } + + lock (baseStream_) + { + baseStream_.Seek(readPos_++, SeekOrigin.Begin); + return baseStream_.ReadByte(); + } + } + + /// + /// Reads a sequence of bytes from the current stream and advances the position within the stream by the number of bytes read. + /// + /// An array of bytes. When this method returns, the buffer contains the specified byte array with the values between offset and (offset + count - 1) replaced by the bytes read from the current source. + /// The zero-based byte offset in buffer at which to begin storing the data read from the current stream. + /// The maximum number of bytes to be read from the current stream. + /// + /// The total number of bytes read into the buffer. This can be less than the number of bytes requested if that many bytes are not currently available, or zero (0) if the end of the stream has been reached. + /// + /// The sum of offset and count is larger than the buffer length. + /// Methods were called after the stream was closed. + /// The stream does not support reading. + /// buffer is null. + /// An I/O error occurs. + /// offset or count is negative. + public override int Read(byte[] buffer, int offset, int count) + { + lock (baseStream_) + { + if (count > end_ - readPos_) + { + count = (int)(end_ - readPos_); + if (count == 0) + { + return 0; + } + } + // Protect against Stream implementations that throw away their buffer on every Seek + // (for example, Mono FileStream) + if (baseStream_.Position != readPos_) + { + baseStream_.Seek(readPos_, SeekOrigin.Begin); + } + int readCount = baseStream_.Read(buffer, offset, count); + if (readCount > 0) + { + readPos_ += readCount; + } + return readCount; + } + } + + /// + /// Writes a sequence of bytes to the current stream and advances the current position within this stream by the number of bytes written. + /// + /// An array of bytes. This method copies count bytes from buffer to the current stream. + /// The zero-based byte offset in buffer at which to begin copying bytes to the current stream. + /// The number of bytes to be written to the current stream. + /// An I/O error occurs. + /// The stream does not support writing. + /// Methods were called after the stream was closed. + /// buffer is null. + /// The sum of offset and count is greater than the buffer length. + /// offset or count is negative. + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + + /// + /// When overridden in a derived class, sets the length of the current stream. + /// + /// The desired length of the current stream in bytes. + /// The stream does not support both writing and seeking, such as if the stream is constructed from a pipe or console output. + /// An I/O error occurs. + /// Methods were called after the stream was closed. + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + /// + /// When overridden in a derived class, sets the position within the current stream. + /// + /// A byte offset relative to the origin parameter. + /// A value of type indicating the reference point used to obtain the new position. + /// + /// The new position within the current stream. + /// + /// An I/O error occurs. + /// The stream does not support seeking, such as if the stream is constructed from a pipe or console output. + /// Methods were called after the stream was closed. 
+ public override long Seek(long offset, SeekOrigin origin) + { + long newPos = readPos_; + + switch (origin) + { + case SeekOrigin.Begin: + newPos = start_ + offset; + break; + + case SeekOrigin.Current: + newPos = readPos_ + offset; + break; + + case SeekOrigin.End: + newPos = end_ + offset; + break; + } + + if (newPos < start_) + { + throw new ArgumentException("Negative position is invalid"); + } + + if (newPos > end_) + { + throw new IOException("Cannot seek past end"); + } + readPos_ = newPos; + return readPos_; + } + + /// + /// Clears all buffers for this stream and causes any buffered data to be written to the underlying device. + /// + /// An I/O error occurs. + public override void Flush() + { + // Nothing to do. + } + + /// + /// Gets or sets the position within the current stream. + /// + /// + /// The current position within the stream. + /// An I/O error occurs. + /// The stream does not support seeking. + /// Methods were called after the stream was closed. + public override long Position + { + get { return readPos_ - start_; } + set + { + long newPos = start_ + value; + + if (newPos < start_) + { + throw new ArgumentException("Negative position is invalid"); + } + + if (newPos > end_) + { + throw new InvalidOperationException("Cannot seek past end"); + } + readPos_ = newPos; + } + } + + /// + /// Gets the length in bytes of the stream. + /// + /// + /// A long value representing the length of the stream in bytes. + /// A class derived from Stream does not support seeking. + /// Methods were called after the stream was closed. + public override long Length + { + get { return length_; } + } + + /// + /// Gets a value indicating whether the current stream supports writing. + /// + /// false + /// true if the stream supports writing; otherwise, false. + public override bool CanWrite + { + get { return false; } + } + + /// + /// Gets a value indicating whether the current stream supports seeking. + /// + /// true + /// true if the stream supports seeking; otherwise, false. + public override bool CanSeek + { + get { return true; } + } + + /// + /// Gets a value indicating whether the current stream supports reading. + /// + /// true. + /// true if the stream supports reading; otherwise, false. + public override bool CanRead + { + get { return true; } + } + + /// + /// Gets a value that determines whether the current stream can time out. + /// + /// + /// A value that determines whether the current stream can time out. + public override bool CanTimeout + { + get { return baseStream_.CanTimeout; } + } + + #region Instance Fields + + private ZipFile zipFile_; + private Stream baseStream_; + private readonly long start_; + private readonly long length_; + private long readPos_; + private readonly long end_; + + #endregion Instance Fields + } + + #endregion Support Classes + } + + #endregion ZipFile Class + + #region DataSources + + /// + /// Provides a static way to obtain a source of data for an entry. + /// + public interface IStaticDataSource + { + /// + /// Get a source of data by creating a new stream. + /// + /// Returns a to use for compression input. + /// Ideally a new stream is created and opened to achieve this, to avoid locking problems. + Stream GetSource(); + } + + /// + /// Represents a source of data that can dynamically provide + /// multiple data sources based on the parameters passed. + /// + public interface IDynamicDataSource + { + /// + /// Get a data source. + /// + /// The to get a source for. + /// The name for data if known. 
+ /// Returns a to use for compression input. + /// Ideally a new stream is created and opened to achieve this, to avoid locking problems. + Stream GetSource(ZipEntry entry, string name); + } + + /// + /// Default implementation of a for use with files stored on disk. + /// + public class StaticDiskDataSource : IStaticDataSource + { + /// + /// Initialise a new instance of + /// + /// The name of the file to obtain data from. + public StaticDiskDataSource(string fileName) + { + fileName_ = fileName; + } + + #region IDataSource Members + + /// + /// Get a providing data. + /// + /// Returns a providing data. + public Stream GetSource() + { + return File.Open(fileName_, FileMode.Open, FileAccess.Read, FileShare.Read); + } + + private readonly + + #endregion IDataSource Members + + #region Instance Fields + + string fileName_; + + #endregion Instance Fields + } + + /// + /// Default implementation of for files stored on disk. + /// + public class DynamicDiskDataSource : IDynamicDataSource + { + #region IDataSource Members + + /// + /// Get a providing data for an entry. + /// + /// The entry to provide data for. + /// The file name for data if known. + /// Returns a stream providing data; or null if not available + public Stream GetSource(ZipEntry entry, string name) + { + Stream result = null; + + if (name != null) + { + result = File.Open(name, FileMode.Open, FileAccess.Read, FileShare.Read); + } + + return result; + } + + #endregion IDataSource Members + } + + #endregion DataSources + + #region Archive Storage + + /// + /// Defines facilities for data storage when updating Zip Archives. + /// + public interface IArchiveStorage + { + /// + /// Get the to apply during updates. + /// + FileUpdateMode UpdateMode { get; } + + /// + /// Get an empty that can be used for temporary output. + /// + /// Returns a temporary output + /// + Stream GetTemporaryOutput(); + + /// + /// Convert a temporary output stream to a final stream. + /// + /// The resulting final + /// + Stream ConvertTemporaryToFinal(); + + /// + /// Make a temporary copy of the original stream. + /// + /// The to copy. + /// Returns a temporary output that is a copy of the input. + Stream MakeTemporaryCopy(Stream stream); + + /// + /// Return a stream suitable for performing direct updates on the original source. + /// + /// The current stream. + /// Returns a stream suitable for direct updating. + /// This may be the current stream passed. + Stream OpenForDirectUpdate(Stream stream); + + /// + /// Dispose of this instance. + /// + void Dispose(); + } + + /// + /// An abstract suitable for extension by inheritance. + /// + abstract public class BaseArchiveStorage : IArchiveStorage + { + #region Constructors + + /// + /// Initializes a new instance of the class. + /// + /// The update mode. + protected BaseArchiveStorage(FileUpdateMode updateMode) + { + updateMode_ = updateMode; + } + + #endregion Constructors + + #region IArchiveStorage Members + + /// + /// Gets a temporary output + /// + /// Returns the temporary output stream. + /// + public abstract Stream GetTemporaryOutput(); + + /// + /// Converts the temporary to its final form. + /// + /// Returns a that can be used to read + /// the final storage for the archive. + /// + public abstract Stream ConvertTemporaryToFinal(); + + /// + /// Make a temporary copy of a . + /// + /// The to make a copy of. + /// Returns a temporary output that is a copy of the input. 
+ public abstract Stream MakeTemporaryCopy(Stream stream); + + /// + /// Return a stream suitable for performing direct updates on the original source. + /// + /// The to open for direct update. + /// Returns a stream suitable for direct updating. + public abstract Stream OpenForDirectUpdate(Stream stream); + + /// + /// Disposes this instance. + /// + public abstract void Dispose(); + + /// + /// Gets the update mode applicable. + /// + /// The update mode. + public FileUpdateMode UpdateMode + { + get + { + return updateMode_; + } + } + + #endregion IArchiveStorage Members + + #region Instance Fields + + private readonly FileUpdateMode updateMode_; + + #endregion Instance Fields + } + + /// + /// An implementation suitable for hard disks. + /// + public class DiskArchiveStorage : BaseArchiveStorage + { + #region Constructors + + /// + /// Initializes a new instance of the class. + /// + /// The file. + /// The update mode. + public DiskArchiveStorage(ZipFile file, FileUpdateMode updateMode) + : base(updateMode) + { + if (file.Name == null) + { + throw new ZipException("Cant handle non file archives"); + } + + fileName_ = file.Name; + } + + /// + /// Initializes a new instance of the class. + /// + /// The file. + public DiskArchiveStorage(ZipFile file) + : this(file, FileUpdateMode.Safe) + { + } + + #endregion Constructors + + #region IArchiveStorage Members + + /// + /// Gets a temporary output for performing updates on. + /// + /// Returns the temporary output stream. + public override Stream GetTemporaryOutput() + { + if (temporaryName_ != null) + { + temporaryName_ = GetTempFileName(temporaryName_, true); + temporaryStream_ = File.Open(temporaryName_, FileMode.OpenOrCreate, FileAccess.Write, FileShare.None); + } + else + { + // Determine where to place files based on internal strategy. + // Currently this is always done in system temp directory. + temporaryName_ = Path.GetTempFileName(); + temporaryStream_ = File.Open(temporaryName_, FileMode.OpenOrCreate, FileAccess.Write, FileShare.None); + } + + return temporaryStream_; + } + + /// + /// Converts a temporary to its final form. + /// + /// Returns a that can be used to read + /// the final storage for the archive. + public override Stream ConvertTemporaryToFinal() + { + if (temporaryStream_ == null) + { + throw new ZipException("No temporary stream has been created"); + } + + Stream result = null; + + string moveTempName = GetTempFileName(fileName_, false); + bool newFileCreated = false; + + try + { + temporaryStream_.Dispose(); + File.Move(fileName_, moveTempName); + File.Move(temporaryName_, fileName_); + newFileCreated = true; + File.Delete(moveTempName); + + result = File.Open(fileName_, FileMode.Open, FileAccess.Read, FileShare.Read); + } + catch (Exception) + { + result = null; + + // Try to roll back changes... + if (!newFileCreated) + { + File.Move(moveTempName, fileName_); + File.Delete(temporaryName_); + } + + throw; + } + + return result; + } + + /// + /// Make a temporary copy of a stream. + /// + /// The to copy. + /// Returns a temporary output that is a copy of the input. + public override Stream MakeTemporaryCopy(Stream stream) + { + stream.Dispose(); + + temporaryName_ = GetTempFileName(fileName_, true); + File.Copy(fileName_, temporaryName_, true); + + temporaryStream_ = new FileStream(temporaryName_, + FileMode.Open, + FileAccess.ReadWrite); + return temporaryStream_; + } + + /// + /// Return a stream suitable for performing direct updates on the original source. + /// + /// The current stream. 
+ /// Returns a stream suitable for direct updating. + /// If the is not null this is used as is. + public override Stream OpenForDirectUpdate(Stream stream) + { + Stream result; + if ((stream == null) || !stream.CanWrite) + { + if (stream != null) + { + stream.Dispose(); + } + + result = new FileStream(fileName_, + FileMode.Open, + FileAccess.ReadWrite); + } + else + { + result = stream; + } + + return result; + } + + /// + /// Disposes this instance. + /// + public override void Dispose() + { + if (temporaryStream_ != null) + { + temporaryStream_.Dispose(); + } + } + + #endregion IArchiveStorage Members + + #region Internal routines + + private static string GetTempFileName(string original, bool makeTempFile) + { + string result = null; + + if (original == null) + { + result = Path.GetTempFileName(); + } + else + { + int counter = 0; + int suffixSeed = DateTime.Now.Second; + + while (result == null) + { + counter += 1; + string newName = string.Format("{0}.{1}{2}.tmp", original, suffixSeed, counter); + if (!File.Exists(newName)) + { + if (makeTempFile) + { + try + { + // Try and create the file. + using (FileStream stream = File.Create(newName)) + { + } + result = newName; + } + catch + { + suffixSeed = DateTime.Now.Second; + } + } + else + { + result = newName; + } + } + } + } + return result; + } + + #endregion Internal routines + + #region Instance Fields + + private Stream temporaryStream_; + private readonly string fileName_; + private string temporaryName_; + + #endregion Instance Fields + } + + /// + /// An implementation suitable for in memory streams. + /// + public class MemoryArchiveStorage : BaseArchiveStorage + { + #region Constructors + + /// + /// Initializes a new instance of the class. + /// + public MemoryArchiveStorage() + : base(FileUpdateMode.Direct) + { + } + + /// + /// Initializes a new instance of the class. + /// + /// The to use + /// This constructor is for testing as memory streams dont really require safe mode. + public MemoryArchiveStorage(FileUpdateMode updateMode) + : base(updateMode) + { + } + + #endregion Constructors + + #region Properties + + /// + /// Get the stream returned by if this was in fact called. + /// + public MemoryStream FinalStream + { + get { return finalStream_; } + } + + #endregion Properties + + #region IArchiveStorage Members + + /// + /// Gets the temporary output + /// + /// Returns the temporary output stream. + public override Stream GetTemporaryOutput() + { + temporaryStream_ = new MemoryStream(); + return temporaryStream_; + } + + /// + /// Converts the temporary to its final form. + /// + /// Returns a that can be used to read + /// the final storage for the archive. + public override Stream ConvertTemporaryToFinal() + { + if (temporaryStream_ == null) + { + throw new ZipException("No temporary stream has been created"); + } + + finalStream_ = new MemoryStream(temporaryStream_.ToArray()); + return finalStream_; + } + + /// + /// Make a temporary copy of the original stream. + /// + /// The to copy. + /// Returns a temporary output that is a copy of the input. + public override Stream MakeTemporaryCopy(Stream stream) + { + temporaryStream_ = new MemoryStream(); + stream.Position = 0; + StreamUtils.Copy(stream, temporaryStream_, new byte[4096]); + return temporaryStream_; + } + + /// + /// Return a stream suitable for performing direct updates on the original source. + /// + /// The original source stream + /// Returns a stream suitable for direct updating. 
+ /// If the passed is not null this is used; + /// otherwise a new is returned. + public override Stream OpenForDirectUpdate(Stream stream) + { + Stream result; + if ((stream == null) || !stream.CanWrite) + { + result = new MemoryStream(); + + if (stream != null) + { + stream.Position = 0; + StreamUtils.Copy(stream, result, new byte[4096]); + + stream.Dispose(); + } + } + else + { + result = stream; + } + + return result; + } + + /// + /// Disposes this instance. + /// + public override void Dispose() + { + if (temporaryStream_ != null) + { + temporaryStream_.Dispose(); + } + } + + #endregion IArchiveStorage Members + + #region Instance Fields + + private MemoryStream temporaryStream_; + private MemoryStream finalStream_; + + #endregion Instance Fields + } + + #endregion Archive Storage +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipFile.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipFile.cs.meta new file mode 100644 index 0000000..816055f --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipFile.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 645a2965a39dd4ad08e8abe497056cf0 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipHelperStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipHelperStream.cs new file mode 100644 index 0000000..dd7d25d --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipHelperStream.cs @@ -0,0 +1,629 @@ +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// Holds data pertinent to a data descriptor. + /// + public class DescriptorData + { + /// + /// Get /set the compressed size of data. + /// + public long CompressedSize + { + get { return compressedSize; } + set { compressedSize = value; } + } + + /// + /// Get / set the uncompressed size of data + /// + public long Size + { + get { return size; } + set { size = value; } + } + + /// + /// Get /set the crc value. + /// + public long Crc + { + get { return crc; } + set { crc = (value & 0xffffffff); } + } + + #region Instance Fields + + private long size; + private long compressedSize; + private long crc; + + #endregion Instance Fields + } + + internal class EntryPatchData + { + public long SizePatchOffset + { + get { return sizePatchOffset_; } + set { sizePatchOffset_ = value; } + } + + public long CrcPatchOffset + { + get { return crcPatchOffset_; } + set { crcPatchOffset_ = value; } + } + + #region Instance Fields + + private long sizePatchOffset_; + private long crcPatchOffset_; + + #endregion Instance Fields + } + + /// + /// This class assists with writing/reading from Zip files. + /// + internal class ZipHelperStream : Stream + { + #region Constructors + + /// + /// Initialise an instance of this class. + /// + /// The name of the file to open. + public ZipHelperStream(string name) + { + stream_ = new FileStream(name, FileMode.Open, FileAccess.ReadWrite); + isOwner_ = true; + } + + /// + /// Initialise a new instance of . + /// + /// The stream to use. + public ZipHelperStream(Stream stream) + { + stream_ = stream; + } + + #endregion Constructors + + /// + /// Get / set a value indicating whether the underlying stream is owned or not. + /// + /// If the stream is owned it is closed when this instance is closed. 
+ public bool IsStreamOwner + { + get { return isOwner_; } + set { isOwner_ = value; } + } + + #region Base Stream Methods + + public override bool CanRead + { + get { return stream_.CanRead; } + } + + public override bool CanSeek + { + get { return stream_.CanSeek; } + } + + public override bool CanTimeout + { + get { return stream_.CanTimeout; } + } + + public override long Length + { + get { return stream_.Length; } + } + + public override long Position + { + get { return stream_.Position; } + set { stream_.Position = value; } + } + + public override bool CanWrite + { + get { return stream_.CanWrite; } + } + + public override void Flush() + { + stream_.Flush(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + return stream_.Seek(offset, origin); + } + + public override void SetLength(long value) + { + stream_.SetLength(value); + } + + public override int Read(byte[] buffer, int offset, int count) + { + return stream_.Read(buffer, offset, count); + } + + public override void Write(byte[] buffer, int offset, int count) + { + stream_.Write(buffer, offset, count); + } + + /// + /// Close the stream. + /// + /// + /// The underlying stream is closed only if is true. + /// + protected override void Dispose(bool disposing) + { + Stream toClose = stream_; + stream_ = null; + if (isOwner_ && (toClose != null)) + { + isOwner_ = false; + toClose.Dispose(); + } + } + + #endregion Base Stream Methods + + // Write the local file header + // TODO: ZipHelperStream.WriteLocalHeader is not yet used and needs checking for ZipFile and ZipOuptutStream usage + private void WriteLocalHeader(ZipEntry entry, EntryPatchData patchData) + { + CompressionMethod method = entry.CompressionMethod; + bool headerInfoAvailable = true; // How to get this? + bool patchEntryHeader = false; + + WriteLEInt(ZipConstants.LocalHeaderSignature); + + WriteLEShort(entry.Version); + WriteLEShort(entry.Flags); + WriteLEShort((byte)method); + WriteLEInt((int)entry.DosTime); + + if (headerInfoAvailable == true) + { + WriteLEInt((int)entry.Crc); + if (entry.LocalHeaderRequiresZip64) + { + WriteLEInt(-1); + WriteLEInt(-1); + } + else + { + WriteLEInt(entry.IsCrypted ? 
(int)entry.CompressedSize + ZipConstants.CryptoHeaderSize : (int)entry.CompressedSize); + WriteLEInt((int)entry.Size); + } + } + else + { + if (patchData != null) + { + patchData.CrcPatchOffset = stream_.Position; + } + WriteLEInt(0); // Crc + + if (patchData != null) + { + patchData.SizePatchOffset = stream_.Position; + } + + // For local header both sizes appear in Zip64 Extended Information + if (entry.LocalHeaderRequiresZip64 && patchEntryHeader) + { + WriteLEInt(-1); + WriteLEInt(-1); + } + else + { + WriteLEInt(0); // Compressed size + WriteLEInt(0); // Uncompressed size + } + } + + byte[] name = ZipStrings.ConvertToArray(entry.Flags, entry.Name); + + if (name.Length > 0xFFFF) + { + throw new ZipException("Entry name too long."); + } + + var ed = new ZipExtraData(entry.ExtraData); + + if (entry.LocalHeaderRequiresZip64 && (headerInfoAvailable || patchEntryHeader)) + { + ed.StartNewEntry(); + if (headerInfoAvailable) + { + ed.AddLeLong(entry.Size); + ed.AddLeLong(entry.CompressedSize); + } + else + { + ed.AddLeLong(-1); + ed.AddLeLong(-1); + } + ed.AddNewEntry(1); + + if (!ed.Find(1)) + { + throw new ZipException("Internal error cant find extra data"); + } + + if (patchData != null) + { + patchData.SizePatchOffset = ed.CurrentReadIndex; + } + } + else + { + ed.Delete(1); + } + + byte[] extra = ed.GetEntryData(); + + WriteLEShort(name.Length); + WriteLEShort(extra.Length); + + if (name.Length > 0) + { + stream_.Write(name, 0, name.Length); + } + + if (entry.LocalHeaderRequiresZip64 && patchEntryHeader) + { + patchData.SizePatchOffset += stream_.Position; + } + + if (extra.Length > 0) + { + stream_.Write(extra, 0, extra.Length); + } + } + + /// + /// Locates a block with the desired . + /// + /// The signature to find. + /// Location, marking the end of block. + /// Minimum size of the block. + /// The maximum variable data. + /// Returns the offset of the first byte after the signature; -1 if not found + public long LocateBlockWithSignature(int signature, long endLocation, int minimumBlockSize, int maximumVariableData) + { + long pos = endLocation - minimumBlockSize; + if (pos < 0) + { + return -1; + } + + long giveUpMarker = Math.Max(pos - maximumVariableData, 0); + + // TODO: This loop could be optimised for speed. + do + { + if (pos < giveUpMarker) + { + return -1; + } + Seek(pos--, SeekOrigin.Begin); + } while (ReadLEInt() != signature); + + return Position; + } + + /// + /// Write Zip64 end of central directory records (File header and locator). + /// + /// The number of entries in the central directory. + /// The size of entries in the central directory. + /// The offset of the central directory. 
+ public void WriteZip64EndOfCentralDirectory(long noOfEntries, long sizeEntries, long centralDirOffset) + { + long centralSignatureOffset = centralDirOffset + sizeEntries; + WriteLEInt(ZipConstants.Zip64CentralFileHeaderSignature); + WriteLELong(44); // Size of this record (total size of remaining fields in header or full size - 12) + WriteLEShort(ZipConstants.VersionMadeBy); // Version made by + WriteLEShort(ZipConstants.VersionZip64); // Version to extract + WriteLEInt(0); // Number of this disk + WriteLEInt(0); // number of the disk with the start of the central directory + WriteLELong(noOfEntries); // No of entries on this disk + WriteLELong(noOfEntries); // Total No of entries in central directory + WriteLELong(sizeEntries); // Size of the central directory + WriteLELong(centralDirOffset); // offset of start of central directory + // zip64 extensible data sector not catered for here (variable size) + + // Write the Zip64 end of central directory locator + WriteLEInt(ZipConstants.Zip64CentralDirLocatorSignature); + + // no of the disk with the start of the zip64 end of central directory + WriteLEInt(0); + + // relative offset of the zip64 end of central directory record + WriteLELong(centralSignatureOffset); + + // total number of disks + WriteLEInt(1); + } + + /// + /// Write the required records to end the central directory. + /// + /// The number of entries in the directory. + /// The size of the entries in the directory. + /// The start of the central directory. + /// The archive comment. (This can be null). + public void WriteEndOfCentralDirectory(long noOfEntries, long sizeEntries, + long startOfCentralDirectory, byte[] comment) + { + if ((noOfEntries >= 0xffff) || + (startOfCentralDirectory >= 0xffffffff) || + (sizeEntries >= 0xffffffff)) + { + WriteZip64EndOfCentralDirectory(noOfEntries, sizeEntries, startOfCentralDirectory); + } + + WriteLEInt(ZipConstants.EndOfCentralDirectorySignature); + + // TODO: ZipFile Multi disk handling not done + WriteLEShort(0); // number of this disk + WriteLEShort(0); // no of disk with start of central dir + + // Number of entries + if (noOfEntries >= 0xffff) + { + WriteLEUshort(0xffff); // Zip64 marker + WriteLEUshort(0xffff); + } + else + { + WriteLEShort((short)noOfEntries); // entries in central dir for this disk + WriteLEShort((short)noOfEntries); // total entries in central directory + } + + // Size of the central directory + if (sizeEntries >= 0xffffffff) + { + WriteLEUint(0xffffffff); // Zip64 marker + } + else + { + WriteLEInt((int)sizeEntries); + } + + // offset of start of central directory + if (startOfCentralDirectory >= 0xffffffff) + { + WriteLEUint(0xffffffff); // Zip64 marker + } + else + { + WriteLEInt((int)startOfCentralDirectory); + } + + int commentLength = (comment != null) ? comment.Length : 0; + + if (commentLength > 0xffff) + { + throw new ZipException(string.Format("Comment length({0}) is too long can only be 64K", commentLength)); + } + + WriteLEShort(commentLength); + + if (commentLength > 0) + { + Write(comment, 0, comment.Length); + } + } + + #region LE value reading/writing + + /// + /// Read an unsigned short in little endian byte order. + /// + /// Returns the value read. + /// + /// An i/o error occurs. 
+ /// + /// + /// The file ends prematurely + /// + public int ReadLEShort() + { + int byteValue1 = stream_.ReadByte(); + + if (byteValue1 < 0) + { + throw new EndOfStreamException(); + } + + int byteValue2 = stream_.ReadByte(); + if (byteValue2 < 0) + { + throw new EndOfStreamException(); + } + + return byteValue1 | (byteValue2 << 8); + } + + /// + /// Read an int in little endian byte order. + /// + /// Returns the value read. + /// + /// An i/o error occurs. + /// + /// + /// The file ends prematurely + /// + public int ReadLEInt() + { + return ReadLEShort() | (ReadLEShort() << 16); + } + + /// + /// Read a long in little endian byte order. + /// + /// The value read. + public long ReadLELong() + { + return (uint)ReadLEInt() | ((long)ReadLEInt() << 32); + } + + /// + /// Write an unsigned short in little endian byte order. + /// + /// The value to write. + public void WriteLEShort(int value) + { + stream_.WriteByte((byte)(value & 0xff)); + stream_.WriteByte((byte)((value >> 8) & 0xff)); + } + + /// + /// Write a ushort in little endian byte order. + /// + /// The value to write. + public void WriteLEUshort(ushort value) + { + stream_.WriteByte((byte)(value & 0xff)); + stream_.WriteByte((byte)(value >> 8)); + } + + /// + /// Write an int in little endian byte order. + /// + /// The value to write. + public void WriteLEInt(int value) + { + WriteLEShort(value); + WriteLEShort(value >> 16); + } + + /// + /// Write a uint in little endian byte order. + /// + /// The value to write. + public void WriteLEUint(uint value) + { + WriteLEUshort((ushort)(value & 0xffff)); + WriteLEUshort((ushort)(value >> 16)); + } + + /// + /// Write a long in little endian byte order. + /// + /// The value to write. + public void WriteLELong(long value) + { + WriteLEInt((int)value); + WriteLEInt((int)(value >> 32)); + } + + /// + /// Write a ulong in little endian byte order. + /// + /// The value to write. + public void WriteLEUlong(ulong value) + { + WriteLEUint((uint)(value & 0xffffffff)); + WriteLEUint((uint)(value >> 32)); + } + + #endregion LE value reading/writing + + /// + /// Write a data descriptor. + /// + /// The entry to write a descriptor for. + /// Returns the number of descriptor bytes written. + public int WriteDataDescriptor(ZipEntry entry) + { + if (entry == null) + { + throw new ArgumentNullException(nameof(entry)); + } + + int result = 0; + + // Add data descriptor if flagged as required + if ((entry.Flags & (int)GeneralBitFlags.Descriptor) != 0) + { + // The signature is not PKZIP originally but is now described as optional + // in the PKZIP Appnote documenting trhe format. + WriteLEInt(ZipConstants.DataDescriptorSignature); + WriteLEInt(unchecked((int)(entry.Crc))); + + result += 8; + + if (entry.LocalHeaderRequiresZip64) + { + WriteLELong(entry.CompressedSize); + WriteLELong(entry.Size); + result += 16; + } + else + { + WriteLEInt((int)entry.CompressedSize); + WriteLEInt((int)entry.Size); + result += 8; + } + } + + return result; + } + + /// + /// Read data descriptor at the end of compressed data. + /// + /// if set to true [zip64]. + /// The data to fill in. + /// Returns the number of bytes read in the descriptor. + public void ReadDataDescriptor(bool zip64, DescriptorData data) + { + int intValue = ReadLEInt(); + + // In theory this may not be a descriptor according to PKZIP appnote. + // In practise its always there. 
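+ // After the optional signature come the CRC-32 and then the compressed and
+ // uncompressed sizes: 4 bytes each in the classic form, 8 bytes each under zip64,
+ // matching what WriteDataDescriptor emits above.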
+ if (intValue != ZipConstants.DataDescriptorSignature) + { + throw new ZipException("Data descriptor signature not found"); + } + + data.Crc = ReadLEInt(); + + if (zip64) + { + data.CompressedSize = ReadLELong(); + data.Size = ReadLELong(); + } + else + { + data.CompressedSize = ReadLEInt(); + data.Size = ReadLEInt(); + } + } + + #region Instance Fields + + private bool isOwner_; + private Stream stream_; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipHelperStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipHelperStream.cs.meta new file mode 100644 index 0000000..e662add --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipHelperStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 511adc13eff874baf8fe157b49d6bc33 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipInputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipInputStream.cs new file mode 100644 index 0000000..147d404 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipInputStream.cs @@ -0,0 +1,693 @@ +using ICSharpCode.SharpZipLib.Checksum; +using ICSharpCode.SharpZipLib.Encryption; +using ICSharpCode.SharpZipLib.Zip.Compression; +using ICSharpCode.SharpZipLib.Zip.Compression.Streams; +using System; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// This is an InflaterInputStream that reads the files baseInputStream an zip archive + /// one after another. It has a special method to get the zip entry of + /// the next file. The zip entry contains information about the file name + /// size, compressed size, Crc, etc. + /// It includes support for Stored and Deflated entries. + ///
+ ///
+ /// Author of the original Java version: Jochen Hoenicke
+ ///
+ /// This sample shows how to read a zip file
+ ///
+ /// using System;
+ /// using System.Text;
+ /// using System.IO;
+ ///
+ /// using ICSharpCode.SharpZipLib.Zip;
+ ///
+ /// class MainClass
+ /// {
+ /// public static void Main(string[] args)
+ /// {
+ /// using (ZipInputStream s = new ZipInputStream(File.OpenRead(args[0]))) {
+ ///
+ /// ZipEntry theEntry;
+ /// int size;
+ /// byte[] data = new byte[2048];
+ ///
+ /// while ((theEntry = s.GetNextEntry()) != null) {
+ /// if (theEntry.IsFile) {
+ /// Console.Write("Show contents (y/n) ?");
+ /// if (Console.ReadLine() == "y") {
+ /// while (true) {
+ /// size = s.Read(data, 0, data.Length);
+ /// if (size > 0) {
+ /// Console.Write(new ASCIIEncoding().GetString(data, 0, size));
+ /// } else {
+ /// break;
+ /// }
+ /// }
+ /// }
+ /// }
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ public class ZipInputStream : InflaterInputStream
+ {
+ #region Instance Fields
+
+ ///
+ /// Delegate for reading bytes from a stream.
+ ///
+ private delegate int ReadDataHandler(byte[] b, int offset, int length);
+
+ ///
+ /// The current reader for this instance.
+ ///
+ private ReadDataHandler internalReader;
+
+ private Crc32 crc = new Crc32();
+ private ZipEntry entry;
+
+ private long size;
+ private CompressionMethod method;
+ private int flags;
+ private string password;
+
+ #endregion Instance Fields
+
+ #region Constructors
+
+ ///
+ /// Creates a new Zip input stream, for reading a zip archive.
+ ///
+ /// The underlying Stream providing data.
+ public ZipInputStream(Stream baseInputStream)
+ : base(baseInputStream, new Inflater(true))
+ {
+ internalReader = new ReadDataHandler(ReadingNotAvailable);
+ }
+
+ ///
+ /// Creates a new Zip input stream, for reading a zip archive.
+ ///
+ /// The underlying Stream providing data.
+ /// Size of the buffer.
+ public ZipInputStream(Stream baseInputStream, int bufferSize)
+ : base(baseInputStream, new Inflater(true), bufferSize)
+ {
+ internalReader = new ReadDataHandler(ReadingNotAvailable);
+ }
+
+ #endregion Constructors
+
+ ///
+ /// Optional password used for encryption when non-null
+ ///
+ /// A password for all encrypted entries in this ZipInputStream
+ public string Password
+ {
+ get
+ {
+ return password;
+ }
+ set
+ {
+ password = value;
+ }
+ }
+
+ ///
+ /// Gets a value indicating if there is a current entry and it can be decompressed
+ ///
+ ///
+ /// The entry can only be decompressed if the library supports the zip features required to extract it.
+ /// See the ZipEntry Version property for more details.
+ ///
+ public bool CanDecompressEntry
+ {
+ get
+ {
+ return (entry != null) && entry.CanDecompress;
+ }
+ }
+
+ ///
+ /// Advances to the next entry in the archive
+ ///
+ ///
+ /// The next entry in the archive or null if there are no more entries.
+ ///
+ ///
+ /// If the previous entry is still open CloseEntry is called.
+ /// + /// + /// Input stream is closed + /// + /// + /// Password is not set, password is invalid, compression method is invalid, + /// version required to extract is not supported + /// + public ZipEntry GetNextEntry() + { + if (crc == null) + { + throw new InvalidOperationException("Closed."); + } + + if (entry != null) + { + CloseEntry(); + } + + int header = inputBuffer.ReadLeInt(); + + if (header == ZipConstants.CentralHeaderSignature || + header == ZipConstants.EndOfCentralDirectorySignature || + header == ZipConstants.CentralHeaderDigitalSignature || + header == ZipConstants.ArchiveExtraDataSignature || + header == ZipConstants.Zip64CentralFileHeaderSignature) + { + // No more individual entries exist + Dispose(); + return null; + } + + // -jr- 07-Dec-2003 Ignore spanning temporary signatures if found + // Spanning signature is same as descriptor signature and is untested as yet. + if ((header == ZipConstants.SpanningTempSignature) || (header == ZipConstants.SpanningSignature)) + { + header = inputBuffer.ReadLeInt(); + } + + if (header != ZipConstants.LocalHeaderSignature) + { + throw new ZipException("Wrong Local header signature: 0x" + String.Format("{0:X}", header)); + } + + var versionRequiredToExtract = (short)inputBuffer.ReadLeShort(); + + flags = inputBuffer.ReadLeShort(); + method = (CompressionMethod)inputBuffer.ReadLeShort(); + var dostime = (uint)inputBuffer.ReadLeInt(); + int crc2 = inputBuffer.ReadLeInt(); + csize = inputBuffer.ReadLeInt(); + size = inputBuffer.ReadLeInt(); + int nameLen = inputBuffer.ReadLeShort(); + int extraLen = inputBuffer.ReadLeShort(); + + bool isCrypted = (flags & 1) == 1; + + byte[] buffer = new byte[nameLen]; + inputBuffer.ReadRawBuffer(buffer); + + string name = ZipStrings.ConvertToStringExt(flags, buffer); + + entry = new ZipEntry(name, versionRequiredToExtract, ZipConstants.VersionMadeBy, method) + { + Flags = flags, + }; + + if ((flags & 8) == 0) + { + entry.Crc = crc2 & 0xFFFFFFFFL; + entry.Size = size & 0xFFFFFFFFL; + entry.CompressedSize = csize & 0xFFFFFFFFL; + + entry.CryptoCheckValue = (byte)((crc2 >> 24) & 0xff); + } + else + { + // This allows for GNU, WinZip and possibly other archives, the PKZIP spec + // says these values are zero under these circumstances. + if (crc2 != 0) + { + entry.Crc = crc2 & 0xFFFFFFFFL; + } + + if (size != 0) + { + entry.Size = size & 0xFFFFFFFFL; + } + + if (csize != 0) + { + entry.CompressedSize = csize & 0xFFFFFFFFL; + } + + entry.CryptoCheckValue = (byte)((dostime >> 8) & 0xff); + } + + entry.DosTime = dostime; + + // If local header requires Zip64 is true then the extended header should contain + // both values. + + // Handle extra data if present. This can set/alter some fields of the entry. + if (extraLen > 0) + { + byte[] extra = new byte[extraLen]; + inputBuffer.ReadRawBuffer(extra); + entry.ExtraData = extra; + } + + entry.ProcessExtraData(true); + if (entry.CompressedSize >= 0) + { + csize = entry.CompressedSize; + } + + if (entry.Size >= 0) + { + size = entry.Size; + } + + if (method == CompressionMethod.Stored && (!isCrypted && csize != size || (isCrypted && csize - ZipConstants.CryptoHeaderSize != size))) + { + throw new ZipException("Stored, but compressed != uncompressed"); + } + + // Determine how to handle reading of data if this is attempted. 
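+ // internalReader acts as a small state machine: ReadingNotAvailable until an entry
+ // is current, InitialRead for the first read of an entry (crypto header handling and
+ // inflater setup), then BodyRead for the remainder of the entry's data.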
+ if (entry.IsCompressionMethodSupported()) + { + internalReader = new ReadDataHandler(InitialRead); + } + else + { + internalReader = new ReadDataHandler(ReadingNotSupported); + } + + return entry; + } + + /// + /// Read data descriptor at the end of compressed data. + /// + private void ReadDataDescriptor() + { + if (inputBuffer.ReadLeInt() != ZipConstants.DataDescriptorSignature) + { + throw new ZipException("Data descriptor signature not found"); + } + + entry.Crc = inputBuffer.ReadLeInt() & 0xFFFFFFFFL; + + if (entry.LocalHeaderRequiresZip64) + { + csize = inputBuffer.ReadLeLong(); + size = inputBuffer.ReadLeLong(); + } + else + { + csize = inputBuffer.ReadLeInt(); + size = inputBuffer.ReadLeInt(); + } + entry.CompressedSize = csize; + entry.Size = size; + } + + /// + /// Complete cleanup as the final part of closing. + /// + /// True if the crc value should be tested + private void CompleteCloseEntry(bool testCrc) + { + StopDecrypting(); + + if ((flags & 8) != 0) + { + ReadDataDescriptor(); + } + + size = 0; + + if (testCrc && + ((crc.Value & 0xFFFFFFFFL) != entry.Crc) && (entry.Crc != -1)) + { + throw new ZipException("CRC mismatch"); + } + + crc.Reset(); + + if (method == CompressionMethod.Deflated) + { + inf.Reset(); + } + entry = null; + } + + /// + /// Closes the current zip entry and moves to the next one. + /// + /// + /// The stream is closed + /// + /// + /// The Zip stream ends early + /// + public void CloseEntry() + { + if (crc == null) + { + throw new InvalidOperationException("Closed"); + } + + if (entry == null) + { + return; + } + + if (method == CompressionMethod.Deflated) + { + if ((flags & 8) != 0) + { + // We don't know how much we must skip, read until end. + byte[] tmp = new byte[4096]; + + // Read will close this entry + while (Read(tmp, 0, tmp.Length) > 0) + { + } + return; + } + + csize -= inf.TotalIn; + inputBuffer.Available += inf.RemainingInput; + } + + if ((inputBuffer.Available > csize) && (csize >= 0)) + { + inputBuffer.Available = (int)((long)inputBuffer.Available - csize); + } + else + { + csize -= inputBuffer.Available; + inputBuffer.Available = 0; + while (csize != 0) + { + long skipped = Skip(csize); + + if (skipped <= 0) + { + throw new ZipException("Zip archive ends early."); + } + + csize -= skipped; + } + } + + CompleteCloseEntry(false); + } + + /// + /// Returns 1 if there is an entry available + /// Otherwise returns 0. + /// + public override int Available + { + get + { + return entry != null ? 1 : 0; + } + } + + /// + /// Returns the current size that can be read from the current entry if available + /// + /// Thrown if the entry size is not known. + /// Thrown if no entry is currently available. + public override long Length + { + get + { + if (entry != null) + { + if (entry.Size >= 0) + { + return entry.Size; + } + else + { + throw new ZipException("Length not available for the current entry"); + } + } + else + { + throw new InvalidOperationException("No current entry"); + } + } + } + + /// + /// Reads a byte from the current zip entry. + /// + /// + /// The byte or -1 if end of stream is reached. + /// + public override int ReadByte() + { + byte[] b = new byte[1]; + if (Read(b, 0, 1) <= 0) + { + return -1; + } + return b[0] & 0xff; + } + + /// + /// Handle attempts to read by throwing an . + /// + /// The destination array to store data in. + /// The offset at which data read should be stored. + /// The maximum number of bytes to read. + /// Returns the number of bytes actually read. 
+ private int ReadingNotAvailable(byte[] destination, int offset, int count) + { + throw new InvalidOperationException("Unable to read from this stream"); + } + + /// + /// Handle attempts to read from this entry by throwing an exception + /// + private int ReadingNotSupported(byte[] destination, int offset, int count) + { + throw new ZipException("The compression method for this entry is not supported"); + } + + /// + /// Perform the initial read on an entry which may include + /// reading encryption headers and setting up inflation. + /// + /// The destination to fill with data read. + /// The offset to start reading at. + /// The maximum number of bytes to read. + /// The actual number of bytes read. + private int InitialRead(byte[] destination, int offset, int count) + { + if (!CanDecompressEntry) + { + throw new ZipException("Library cannot extract this entry. Version required is (" + entry.Version + ")"); + } + + // Handle encryption if required. + if (entry.IsCrypted) + { + if (password == null) + { + throw new ZipException("No password set."); + } + + // Generate and set crypto transform... + var managed = new PkzipClassicManaged(); + byte[] key = PkzipClassic.GenerateKeys(ZipStrings.ConvertToArray(password)); + + inputBuffer.CryptoTransform = managed.CreateDecryptor(key, null); + + byte[] cryptbuffer = new byte[ZipConstants.CryptoHeaderSize]; + inputBuffer.ReadClearTextBuffer(cryptbuffer, 0, ZipConstants.CryptoHeaderSize); + + if (cryptbuffer[ZipConstants.CryptoHeaderSize - 1] != entry.CryptoCheckValue) + { + throw new ZipException("Invalid password"); + } + + if (csize >= ZipConstants.CryptoHeaderSize) + { + csize -= ZipConstants.CryptoHeaderSize; + } + else if ((entry.Flags & (int)GeneralBitFlags.Descriptor) == 0) + { + throw new ZipException(string.Format("Entry compressed size {0} too small for encryption", csize)); + } + } + else + { + inputBuffer.CryptoTransform = null; + } + + if ((csize > 0) || ((flags & (int)GeneralBitFlags.Descriptor) != 0)) + { + if ((method == CompressionMethod.Deflated) && (inputBuffer.Available > 0)) + { + inputBuffer.SetInflaterInput(inf); + } + + internalReader = new ReadDataHandler(BodyRead); + return BodyRead(destination, offset, count); + } + else + { + internalReader = new ReadDataHandler(ReadingNotAvailable); + return 0; + } + } + + /// + /// Read a block of bytes from the stream. + /// + /// The destination for the bytes. + /// The index to start storing data. + /// The number of bytes to attempt to read. + /// Returns the number of bytes read. + /// Zero bytes read means end of stream. + public override int Read(byte[] buffer, int offset, int count) + { + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (offset < 0) + { + throw new ArgumentOutOfRangeException(nameof(offset), "Cannot be negative"); + } + + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count), "Cannot be negative"); + } + + if ((buffer.Length - offset) < count) + { + throw new ArgumentException("Invalid offset/count combination"); + } + + return internalReader(buffer, offset, count); + } + + /// + /// Reads a block of bytes from the current zip entry. + /// + /// + /// The number of bytes read (this may be less than the length requested, even before the end of stream), or 0 on end of stream. + /// + /// + /// An i/o error occurred. + /// + /// + /// The deflated stream is corrupted. + /// + /// + /// The stream is not open. 
+ /// + private int BodyRead(byte[] buffer, int offset, int count) + { + if (crc == null) + { + throw new InvalidOperationException("Closed"); + } + + if ((entry == null) || (count <= 0)) + { + return 0; + } + + if (offset + count > buffer.Length) + { + throw new ArgumentException("Offset + count exceeds buffer size"); + } + + bool finished = false; + + switch (method) + { + case CompressionMethod.Deflated: + count = base.Read(buffer, offset, count); + if (count <= 0) + { + if (!inf.IsFinished) + { + throw new ZipException("Inflater not finished!"); + } + inputBuffer.Available = inf.RemainingInput; + + // A csize of -1 is from an unpatched local header + if ((flags & 8) == 0 && + (inf.TotalIn != csize && csize != 0xFFFFFFFF && csize != -1 || inf.TotalOut != size)) + { + throw new ZipException("Size mismatch: " + csize + ";" + size + " <-> " + inf.TotalIn + ";" + inf.TotalOut); + } + inf.Reset(); + finished = true; + } + break; + + case CompressionMethod.Stored: + if ((count > csize) && (csize >= 0)) + { + count = (int)csize; + } + + if (count > 0) + { + count = inputBuffer.ReadClearTextBuffer(buffer, offset, count); + if (count > 0) + { + csize -= count; + size -= count; + } + } + + if (csize == 0) + { + finished = true; + } + else + { + if (count < 0) + { + throw new ZipException("EOF in stored block"); + } + } + break; + } + + if (count > 0) + { + crc.Update(new ArraySegment(buffer, offset, count)); + } + + if (finished) + { + CompleteCloseEntry(true); + } + + return count; + } + + /// + /// Closes the zip input stream + /// + protected override void Dispose(bool disposing) + { + internalReader = new ReadDataHandler(ReadingNotAvailable); + crc = null; + entry = null; + + base.Dispose(disposing); + } + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipInputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipInputStream.cs.meta new file mode 100644 index 0000000..a99876b --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipInputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 9976f2378abf34adfa74bd9f76a01486 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipNameTransform.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipNameTransform.cs new file mode 100644 index 0000000..1b5e01a --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipNameTransform.cs @@ -0,0 +1,250 @@ +using ICSharpCode.SharpZipLib.Core; +using System; +using System.IO; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// ZipNameTransform transforms names as per the Zip file naming convention. + /// + /// The use of absolute names is supported although its use is not valid + /// according to Zip naming conventions, and should not be used if maximum compatability is desired. + public class ZipNameTransform : INameTransform + { + #region Constructors + + /// + /// Initialize a new instance of + /// + public ZipNameTransform() + { + } + + /// + /// Initialize a new instance of + /// + /// The string to trim from the front of paths if found. + public ZipNameTransform(string trimPrefix) + { + TrimPrefix = trimPrefix; + } + + #endregion Constructors + + /// + /// Static constructor. 
+ /// + static ZipNameTransform() + { + char[] invalidPathChars; + invalidPathChars = Path.GetInvalidPathChars(); + int howMany = invalidPathChars.Length + 2; + + InvalidEntryCharsRelaxed = new char[howMany]; + Array.Copy(invalidPathChars, 0, InvalidEntryCharsRelaxed, 0, invalidPathChars.Length); + InvalidEntryCharsRelaxed[howMany - 1] = '*'; + InvalidEntryCharsRelaxed[howMany - 2] = '?'; + + howMany = invalidPathChars.Length + 4; + InvalidEntryChars = new char[howMany]; + Array.Copy(invalidPathChars, 0, InvalidEntryChars, 0, invalidPathChars.Length); + InvalidEntryChars[howMany - 1] = ':'; + InvalidEntryChars[howMany - 2] = '\\'; + InvalidEntryChars[howMany - 3] = '*'; + InvalidEntryChars[howMany - 4] = '?'; + } + + /// + /// Transform a windows directory name according to the Zip file naming conventions. + /// + /// The directory name to transform. + /// The transformed name. + public string TransformDirectory(string name) + { + name = TransformFile(name); + if (name.Length > 0) + { + if (!name.EndsWith("/", StringComparison.Ordinal)) + { + name += "/"; + } + } + else + { + throw new ZipException("Cannot have an empty directory name"); + } + return name; + } + + /// + /// Transform a windows file name according to the Zip file naming conventions. + /// + /// The file name to transform. + /// The transformed name. + public string TransformFile(string name) + { + if (name != null) + { + string lowerName = name.ToLower(); + if ((trimPrefix_ != null) && (lowerName.IndexOf(trimPrefix_, StringComparison.Ordinal) == 0)) + { + name = name.Substring(trimPrefix_.Length); + } + + name = name.Replace(@"\", "/"); + name = WindowsPathUtils.DropPathRoot(name); + + // Drop any leading slashes. + while ((name.Length > 0) && (name[0] == '/')) + { + name = name.Remove(0, 1); + } + + // Drop any trailing slashes. + while ((name.Length > 0) && (name[name.Length - 1] == '/')) + { + name = name.Remove(name.Length - 1, 1); + } + + // Convert consecutive // characters to / + int index = name.IndexOf("//", StringComparison.Ordinal); + while (index >= 0) + { + name = name.Remove(index, 1); + index = name.IndexOf("//", StringComparison.Ordinal); + } + + name = MakeValidName(name, '_'); + } + else + { + name = string.Empty; + } + return name; + } + + /// + /// Get/set the path prefix to be trimmed from paths if present. + /// + /// The prefix is trimmed before any conversion from + /// a windows path is done. + public string TrimPrefix + { + get { return trimPrefix_; } + set + { + trimPrefix_ = value; + if (trimPrefix_ != null) + { + trimPrefix_ = trimPrefix_.ToLower(); + } + } + } + + /// + /// Force a name to be valid by replacing invalid characters with a fixed value + /// + /// The name to force valid + /// The replacement character to use. + /// Returns a valid name + private static string MakeValidName(string name, char replacement) + { + int index = name.IndexOfAny(InvalidEntryChars); + if (index >= 0) + { + var builder = new StringBuilder(name); + + while (index >= 0) + { + builder[index] = replacement; + + if (index >= name.Length) + { + index = -1; + } + else + { + index = name.IndexOfAny(InvalidEntryChars, index + 1); + } + } + name = builder.ToString(); + } + + if (name.Length > 0xffff) + { + throw new PathTooLongException(); + } + + return name; + } + + /// + /// Test a name to see if it is a valid name for a zip entry. + /// + /// The name to test. + /// If true checking is relaxed about windows file names and absolute paths. + /// Returns true if the name is a valid zip name; false otherwise. 
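// --- Editor's note: worked TransformFile example, not part of the sources above ---
// Applying the rules just shown (case-insensitive prefix trim, '\' -> '/', path root and
// edge slashes dropped, "//" collapsed, invalid characters replaced):
var transform = new ZipNameTransform(@"C:\data\");
string name = transform.TransformFile(@"C:\data\sub dir\report?.txt");
// name == "sub dir/report_.txt"   ('?' is in InvalidEntryChars, so MakeValidName maps it to '_')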
+ /// Zip path names are actually in Unix format, and should only contain relative paths. + /// This means that any path stored should not contain a drive or + /// device letter, or a leading slash. All slashes should forward slashes '/'. + /// An empty name is valid for a file where the input comes from standard input. + /// A null name is not considered valid. + /// + public static bool IsValidName(string name, bool relaxed) + { + bool result = (name != null); + + if (result) + { + if (relaxed) + { + result = name.IndexOfAny(InvalidEntryCharsRelaxed) < 0; + } + else + { + result = + (name.IndexOfAny(InvalidEntryChars) < 0) && + (name.IndexOf('/') != 0); + } + } + + return result; + } + + /// + /// Test a name to see if it is a valid name for a zip entry. + /// + /// The name to test. + /// Returns true if the name is a valid zip name; false otherwise. + /// Zip path names are actually in unix format, + /// and should only contain relative paths if a path is present. + /// This means that the path stored should not contain a drive or + /// device letter, or a leading slash. All slashes should forward slashes '/'. + /// An empty name is valid where the input comes from standard input. + /// A null name is not considered valid. + /// + public static bool IsValidName(string name) + { + bool result = + (name != null) && + (name.IndexOfAny(InvalidEntryChars) < 0) && + (name.IndexOf('/') != 0) + ; + return result; + } + + #region Instance Fields + + private string trimPrefix_; + + #endregion Instance Fields + + #region Class Fields + + private static readonly char[] InvalidEntryChars; + private static readonly char[] InvalidEntryCharsRelaxed; + + #endregion Class Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipNameTransform.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipNameTransform.cs.meta new file mode 100644 index 0000000..05cf548 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipNameTransform.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 0989892d7e2414032ac50a3d11d452f1 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipOutputStream.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipOutputStream.cs new file mode 100644 index 0000000..bfd308d --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipOutputStream.cs @@ -0,0 +1,975 @@ +using ICSharpCode.SharpZipLib.Checksum; +using ICSharpCode.SharpZipLib.Zip.Compression; +using ICSharpCode.SharpZipLib.Zip.Compression.Streams; +using System; +using System.Collections.Generic; +using System.IO; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// This is a DeflaterOutputStream that writes the files into a zip + /// archive one after another. It has a special method to start a new + /// zip entry. The zip entries contains information about the file name + /// size, compressed size, CRC, etc. + /// + /// It includes support for Stored and Deflated entries. + /// This class is not thread safe. + ///
+ ///
Author of the original Java version: Jochen Hoenicke
+ /// This sample shows how to create a zip file + /// + /// using System; + /// using System.IO; + /// + /// using ICSharpCode.SharpZipLib.Core; + /// using ICSharpCode.SharpZipLib.Zip; + /// + /// class MainClass + /// { + /// public static void Main(string[] args) + /// { + /// string[] filenames = Directory.GetFiles(args[0]); + /// byte[] buffer = new byte[4096]; + /// + /// using ( ZipOutputStream s = new ZipOutputStream(File.Create(args[1])) ) { + /// + /// s.SetLevel(9); // 0 - store only to 9 - means best compression + /// + /// foreach (string file in filenames) { + /// ZipEntry entry = new ZipEntry(file); + /// s.PutNextEntry(entry); + /// + /// using (FileStream fs = File.OpenRead(file)) { + /// StreamUtils.Copy(fs, s, buffer); + /// } + /// } + /// } + /// } + /// } + /// + /// + public class ZipOutputStream : DeflaterOutputStream + { + #region Constructors + + /// + /// Creates a new Zip output stream, writing a zip archive. + /// + /// + /// The output stream to which the archive contents are written. + /// + public ZipOutputStream(Stream baseOutputStream) + : base(baseOutputStream, new Deflater(Deflater.DEFAULT_COMPRESSION, true)) + { + } + + /// + /// Creates a new Zip output stream, writing a zip archive. + /// + /// The output stream to which the archive contents are written. + /// Size of the buffer to use. + public ZipOutputStream(Stream baseOutputStream, int bufferSize) + : base(baseOutputStream, new Deflater(Deflater.DEFAULT_COMPRESSION, true), bufferSize) + { + } + + #endregion Constructors + + /// + /// Gets a flag value of true if the central header has been added for this archive; false if it has not been added. + /// + /// No further entries can be added once this has been done. + public bool IsFinished + { + get + { + return entries == null; + } + } + + /// + /// Set the zip file comment. + /// + /// + /// The comment text for the entire archive. + /// + /// + /// The converted comment is longer than 0xffff bytes. + /// + public void SetComment(string comment) + { + // TODO: Its not yet clear how to handle unicode comments here. + byte[] commentBytes = ZipStrings.ConvertToArray(comment); + if (commentBytes.Length > 0xffff) + { + throw new ArgumentOutOfRangeException(nameof(comment)); + } + zipComment = commentBytes; + } + + /// + /// Sets the compression level. The new level will be activated + /// immediately. + /// + /// The new compression level (1 to 9). + /// + /// Level specified is not supported. + /// + /// + public void SetLevel(int level) + { + deflater_.SetLevel(level); + defaultCompressionLevel = level; + } + + /// + /// Get the current deflater compression level + /// + /// The current compression level + public int GetLevel() + { + return deflater_.GetLevel(); + } + + /// + /// Get / set a value indicating how Zip64 Extension usage is determined when adding entries. + /// + /// Older archivers may not understand Zip64 extensions. + /// If backwards compatability is an issue be careful when adding entries to an archive. + /// Setting this property to off is workable but less desirable as in those circumstances adding a file + /// larger then 4GB will fail. + public UseZip64 UseZip64 + { + get { return useZip64_; } + set { useZip64_ = value; } + } + + /// + /// Write an unsigned short in little endian byte order. + /// + private void WriteLeShort(int value) + { + unchecked + { + baseOutputStream_.WriteByte((byte)(value & 0xff)); + baseOutputStream_.WriteByte((byte)((value >> 8) & 0xff)); + } + } + + /// + /// Write an int in little endian byte order. 
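// --- Editor's note: byte-order check for the LE writers, derived from the code below ---
// WriteLeShort emits the low byte first, so WriteLeInt(0x11223344) produces the byte
// sequence 44 33 22 11, and WriteLeLong writes the low 32 bits before the high 32 bits,
// which is the little-endian layout the Zip format requires (compare
// BitConverter.GetBytes(0x11223344) on a little-endian host).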
+ /// + private void WriteLeInt(int value) + { + unchecked + { + WriteLeShort(value); + WriteLeShort(value >> 16); + } + } + + /// + /// Write an int in little endian byte order. + /// + private void WriteLeLong(long value) + { + unchecked + { + WriteLeInt((int)value); + WriteLeInt((int)(value >> 32)); + } + } + + /// + /// Starts a new Zip entry. It automatically closes the previous + /// entry if present. + /// All entry elements bar name are optional, but must be correct if present. + /// If the compression method is stored and the output is not patchable + /// the compression for that entry is automatically changed to deflate level 0 + /// + /// + /// the entry. + /// + /// + /// if entry passed is null. + /// + /// + /// if an I/O error occured. + /// + /// + /// if stream was finished + /// + /// + /// Too many entries in the Zip file
+ /// Entry name is too long
+ /// Finish has already been called
+ ///
+ /// + /// The Compression method specified for the entry is unsupported. + /// + public void PutNextEntry(ZipEntry entry) + { + if (entry == null) + { + throw new ArgumentNullException(nameof(entry)); + } + + if (entries == null) + { + throw new InvalidOperationException("ZipOutputStream was finished"); + } + + if (curEntry != null) + { + CloseEntry(); + } + + if (entries.Count == int.MaxValue) + { + throw new ZipException("Too many entries for Zip file"); + } + + CompressionMethod method = entry.CompressionMethod; + + // Check that the compression is one that we support + if (method != CompressionMethod.Deflated && method != CompressionMethod.Stored) + { + throw new NotImplementedException("Compression method not supported"); + } + + int compressionLevel = defaultCompressionLevel; + + // Clear flags that the library manages internally + entry.Flags &= (int)GeneralBitFlags.UnicodeText; + patchEntryHeader = false; + + bool headerInfoAvailable; + + // No need to compress - definitely no data. + if (entry.Size == 0) + { + entry.CompressedSize = entry.Size; + entry.Crc = 0; + method = CompressionMethod.Stored; + headerInfoAvailable = true; + } + else + { + headerInfoAvailable = (entry.Size >= 0) && entry.HasCrc && entry.CompressedSize >= 0; + + // Switch to deflation if storing isnt possible. + if (method == CompressionMethod.Stored) + { + if (!headerInfoAvailable) + { + if (!CanPatchEntries) + { + // Can't patch entries so storing is not possible. + method = CompressionMethod.Deflated; + compressionLevel = 0; + } + } + else // entry.size must be > 0 + { + entry.CompressedSize = entry.Size; + headerInfoAvailable = entry.HasCrc; + } + } + } + + if (headerInfoAvailable == false) + { + if (CanPatchEntries == false) + { + // Only way to record size and compressed size is to append a data descriptor + // after compressed data. + + // Stored entries of this form have already been converted to deflating. + entry.Flags |= 8; + } + else + { + patchEntryHeader = true; + } + } + + if (Password != null) + { + entry.IsCrypted = true; + if (entry.Crc < 0) + { + // Need to append a data descriptor as the crc isnt available for use + // with encryption, the date is used instead. Setting the flag + // indicates this to the decompressor. + entry.Flags |= 8; + } + } + + entry.Offset = offset; + entry.CompressionMethod = (CompressionMethod)method; + + curMethod = method; + sizePatchPos = -1; + + if ((useZip64_ == UseZip64.On) || ((entry.Size < 0) && (useZip64_ == UseZip64.Dynamic))) + { + entry.ForceZip64(); + } + + // Write the local file header + WriteLeInt(ZipConstants.LocalHeaderSignature); + + WriteLeShort(entry.Version); + WriteLeShort(entry.Flags); + WriteLeShort((byte)entry.CompressionMethodForHeader); + WriteLeInt((int)entry.DosTime); + + // TODO: Refactor header writing. Its done in several places. + if (headerInfoAvailable) + { + WriteLeInt((int)entry.Crc); + if (entry.LocalHeaderRequiresZip64) + { + WriteLeInt(-1); + WriteLeInt(-1); + } + else + { + WriteLeInt(entry.IsCrypted ? 
(int)entry.CompressedSize + ZipConstants.CryptoHeaderSize : (int)entry.CompressedSize); + WriteLeInt((int)entry.Size); + } + } + else + { + if (patchEntryHeader) + { + crcPatchPos = baseOutputStream_.Position; + } + WriteLeInt(0); // Crc + + if (patchEntryHeader) + { + sizePatchPos = baseOutputStream_.Position; + } + + // For local header both sizes appear in Zip64 Extended Information + if (entry.LocalHeaderRequiresZip64 || patchEntryHeader) + { + WriteLeInt(-1); + WriteLeInt(-1); + } + else + { + WriteLeInt(0); // Compressed size + WriteLeInt(0); // Uncompressed size + } + } + + byte[] name = ZipStrings.ConvertToArray(entry.Flags, entry.Name); + + if (name.Length > 0xFFFF) + { + throw new ZipException("Entry name too long."); + } + + var ed = new ZipExtraData(entry.ExtraData); + + if (entry.LocalHeaderRequiresZip64) + { + ed.StartNewEntry(); + if (headerInfoAvailable) + { + ed.AddLeLong(entry.Size); + ed.AddLeLong(entry.CompressedSize); + } + else + { + ed.AddLeLong(-1); + ed.AddLeLong(-1); + } + ed.AddNewEntry(1); + + if (!ed.Find(1)) + { + throw new ZipException("Internal error cant find extra data"); + } + + if (patchEntryHeader) + { + sizePatchPos = ed.CurrentReadIndex; + } + } + else + { + ed.Delete(1); + } + + if (entry.AESKeySize > 0) + { + AddExtraDataAES(entry, ed); + } + byte[] extra = ed.GetEntryData(); + + WriteLeShort(name.Length); + WriteLeShort(extra.Length); + + if (name.Length > 0) + { + baseOutputStream_.Write(name, 0, name.Length); + } + + if (entry.LocalHeaderRequiresZip64 && patchEntryHeader) + { + sizePatchPos += baseOutputStream_.Position; + } + + if (extra.Length > 0) + { + baseOutputStream_.Write(extra, 0, extra.Length); + } + + offset += ZipConstants.LocalHeaderBaseSize + name.Length + extra.Length; + // Fix offsetOfCentraldir for AES + if (entry.AESKeySize > 0) + offset += entry.AESOverheadSize; + + // Activate the entry. + curEntry = entry; + crc.Reset(); + if (method == CompressionMethod.Deflated) + { + deflater_.Reset(); + deflater_.SetLevel(compressionLevel); + } + size = 0; + + if (entry.IsCrypted) + { + if (entry.AESKeySize > 0) + { + WriteAESHeader(entry); + } + else + { + if (entry.Crc < 0) + { // so testing Zip will says its ok + WriteEncryptionHeader(entry.DosTime << 16); + } + else + { + WriteEncryptionHeader(entry.Crc); + } + } + } + } + + /// + /// Closes the current entry, updating header and footer information as required + /// + /// + /// An I/O error occurs. + /// + /// + /// No entry is active. 
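// --- Editor's note: shape of the data descriptor CloseEntry writes, read from the code ---
// When bit 3 of the entry flags is set, CloseEntry appends after the entry data:
//   DataDescriptorSignature (4 bytes), CRC-32 (4 bytes), then compressed and
//   uncompressed sizes (4 bytes each, or 8 bytes each when Zip64 is required).
// ReadDataDescriptor in ZipInputStream, earlier in this diff, parses exactly this record.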
+ /// + public void CloseEntry() + { + if (curEntry == null) + { + throw new InvalidOperationException("No open entry"); + } + + long csize = size; + + // First finish the deflater, if appropriate + if (curMethod == CompressionMethod.Deflated) + { + if (size >= 0) + { + base.Finish(); + csize = deflater_.TotalOut; + } + else + { + deflater_.Reset(); + } + } + else if (curMethod == CompressionMethod.Stored) + { + // This is done by Finsh() for Deflated entries, but we need to do it + // ourselves for Stored ones + base.GetAuthCodeIfAES(); + } + + // Write the AES Authentication Code (a hash of the compressed and encrypted data) + if (curEntry.AESKeySize > 0) + { + baseOutputStream_.Write(AESAuthCode, 0, 10); + } + + if (curEntry.Size < 0) + { + curEntry.Size = size; + } + else if (curEntry.Size != size) + { + throw new ZipException("size was " + size + ", but I expected " + curEntry.Size); + } + + if (curEntry.CompressedSize < 0) + { + curEntry.CompressedSize = csize; + } + else if (curEntry.CompressedSize != csize) + { + throw new ZipException("compressed size was " + csize + ", but I expected " + curEntry.CompressedSize); + } + + if (curEntry.Crc < 0) + { + curEntry.Crc = crc.Value; + } + else if (curEntry.Crc != crc.Value) + { + throw new ZipException("crc was " + crc.Value + ", but I expected " + curEntry.Crc); + } + + offset += csize; + + if (curEntry.IsCrypted) + { + if (curEntry.AESKeySize > 0) + { + curEntry.CompressedSize += curEntry.AESOverheadSize; + } + else + { + curEntry.CompressedSize += ZipConstants.CryptoHeaderSize; + } + } + + // Patch the header if possible + if (patchEntryHeader) + { + patchEntryHeader = false; + + long curPos = baseOutputStream_.Position; + baseOutputStream_.Seek(crcPatchPos, SeekOrigin.Begin); + WriteLeInt((int)curEntry.Crc); + + if (curEntry.LocalHeaderRequiresZip64) + { + if (sizePatchPos == -1) + { + throw new ZipException("Entry requires zip64 but this has been turned off"); + } + + baseOutputStream_.Seek(sizePatchPos, SeekOrigin.Begin); + WriteLeLong(curEntry.Size); + WriteLeLong(curEntry.CompressedSize); + } + else + { + WriteLeInt((int)curEntry.CompressedSize); + WriteLeInt((int)curEntry.Size); + } + baseOutputStream_.Seek(curPos, SeekOrigin.Begin); + } + + // Add data descriptor if flagged as required + if ((curEntry.Flags & 8) != 0) + { + WriteLeInt(ZipConstants.DataDescriptorSignature); + WriteLeInt(unchecked((int)curEntry.Crc)); + + if (curEntry.LocalHeaderRequiresZip64) + { + WriteLeLong(curEntry.CompressedSize); + WriteLeLong(curEntry.Size); + offset += ZipConstants.Zip64DataDescriptorSize; + } + else + { + WriteLeInt((int)curEntry.CompressedSize); + WriteLeInt((int)curEntry.Size); + offset += ZipConstants.DataDescriptorSize; + } + } + + entries.Add(curEntry); + curEntry = null; + } + + private void WriteEncryptionHeader(long crcValue) + { + offset += ZipConstants.CryptoHeaderSize; + + InitializePassword(Password); + + byte[] cryptBuffer = new byte[ZipConstants.CryptoHeaderSize]; + var rnd = new Random(); + rnd.NextBytes(cryptBuffer); + cryptBuffer[11] = (byte)(crcValue >> 24); + + EncryptBlock(cryptBuffer, 0, cryptBuffer.Length); + baseOutputStream_.Write(cryptBuffer, 0, cryptBuffer.Length); + } + + private static void AddExtraDataAES(ZipEntry entry, ZipExtraData extraData) + { + // Vendor Version: AE-1 IS 1. AE-2 is 2. With AE-2 no CRC is required and 0 is stored. + const int VENDOR_VERSION = 2; + // Vendor ID is the two ASCII characters "AE". 
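// Editor's note (worked check, not part of the sources): AddLeShort stores the low byte
// first, so VENDOR_ID = 0x4541 is written as 0x41 0x45 -- the ASCII characters 'A' then
// 'E', i.e. the "AE" marker the WinZip AES specification expects.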
+ const int VENDOR_ID = 0x4541; //not 6965; + extraData.StartNewEntry(); + // Pack AES extra data field see http://www.winzip.com/aes_info.htm + //extraData.AddLeShort(7); // Data size (currently 7) + extraData.AddLeShort(VENDOR_VERSION); // 2 = AE-2 + extraData.AddLeShort(VENDOR_ID); // "AE" + extraData.AddData(entry.AESEncryptionStrength); // 1 = 128, 2 = 192, 3 = 256 + extraData.AddLeShort((int)entry.CompressionMethod); // The actual compression method used to compress the file + extraData.AddNewEntry(0x9901); + } + + // Replaces WriteEncryptionHeader for AES + // + private void WriteAESHeader(ZipEntry entry) + { + byte[] salt; + byte[] pwdVerifier; + InitializeAESPassword(entry, Password, out salt, out pwdVerifier); + // File format for AES: + // Size (bytes) Content + // ------------ ------- + // Variable Salt value + // 2 Password verification value + // Variable Encrypted file data + // 10 Authentication code + // + // Value in the "compressed size" fields of the local file header and the central directory entry + // is the total size of all the items listed above. In other words, it is the total size of the + // salt value, password verification value, encrypted data, and authentication code. + baseOutputStream_.Write(salt, 0, salt.Length); + baseOutputStream_.Write(pwdVerifier, 0, pwdVerifier.Length); + } + + /// + /// Writes the given buffer to the current entry. + /// + /// The buffer containing data to write. + /// The offset of the first byte to write. + /// The number of bytes to write. + /// Archive size is invalid + /// No entry is active. + public override void Write(byte[] buffer, int offset, int count) + { + if (curEntry == null) + { + throw new InvalidOperationException("No open entry."); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (offset < 0) + { + throw new ArgumentOutOfRangeException(nameof(offset), "Cannot be negative"); + } + + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count), "Cannot be negative"); + } + + if ((buffer.Length - offset) < count) + { + throw new ArgumentException("Invalid offset/count combination"); + } + + crc.Update(new ArraySegment(buffer, offset, count)); + size += count; + + switch (curMethod) + { + case CompressionMethod.Deflated: + base.Write(buffer, offset, count); + break; + + case CompressionMethod.Stored: + if (Password != null) + { + CopyAndEncrypt(buffer, offset, count); + } + else + { + baseOutputStream_.Write(buffer, offset, count); + } + break; + } + } + + private void CopyAndEncrypt(byte[] buffer, int offset, int count) + { + const int CopyBufferSize = 4096; + byte[] localBuffer = new byte[CopyBufferSize]; + while (count > 0) + { + int bufferCount = (count < CopyBufferSize) ? count : CopyBufferSize; + + Array.Copy(buffer, offset, localBuffer, 0, bufferCount); + EncryptBlock(localBuffer, 0, bufferCount); + baseOutputStream_.Write(localBuffer, 0, bufferCount); + count -= bufferCount; + offset += bufferCount; + } + } + + /// + /// Finishes the stream. This will write the central directory at the + /// end of the zip file and flush the stream. + /// + /// + /// This is automatically called when the stream is closed. + /// + /// + /// An I/O error occurs. + /// + /// + /// Comment exceeds the maximum length
+ /// Entry name exceeds the maximum length + ///
+ public override void Finish() + { + if (entries == null) + { + return; + } + + if (curEntry != null) + { + CloseEntry(); + } + + long numEntries = entries.Count; + long sizeEntries = 0; + + foreach (ZipEntry entry in entries) + { + WriteLeInt(ZipConstants.CentralHeaderSignature); + WriteLeShort((entry.HostSystem << 8) | entry.VersionMadeBy); + WriteLeShort(entry.Version); + WriteLeShort(entry.Flags); + WriteLeShort((short)entry.CompressionMethodForHeader); + WriteLeInt((int)entry.DosTime); + WriteLeInt((int)entry.Crc); + + if (entry.IsZip64Forced() || + (entry.CompressedSize >= uint.MaxValue)) + { + WriteLeInt(-1); + } + else + { + WriteLeInt((int)entry.CompressedSize); + } + + if (entry.IsZip64Forced() || + (entry.Size >= uint.MaxValue)) + { + WriteLeInt(-1); + } + else + { + WriteLeInt((int)entry.Size); + } + + byte[] name = ZipStrings.ConvertToArray(entry.Flags, entry.Name); + + if (name.Length > 0xffff) + { + throw new ZipException("Name too long."); + } + + var ed = new ZipExtraData(entry.ExtraData); + + if (entry.CentralHeaderRequiresZip64) + { + ed.StartNewEntry(); + if (entry.IsZip64Forced() || + (entry.Size >= 0xffffffff)) + { + ed.AddLeLong(entry.Size); + } + + if (entry.IsZip64Forced() || + (entry.CompressedSize >= 0xffffffff)) + { + ed.AddLeLong(entry.CompressedSize); + } + + if (entry.Offset >= 0xffffffff) + { + ed.AddLeLong(entry.Offset); + } + + ed.AddNewEntry(1); + } + else + { + ed.Delete(1); + } + + if (entry.AESKeySize > 0) + { + AddExtraDataAES(entry, ed); + } + byte[] extra = ed.GetEntryData(); + + byte[] entryComment = + (entry.Comment != null) ? + ZipStrings.ConvertToArray(entry.Flags, entry.Comment) : + new byte[0]; + + if (entryComment.Length > 0xffff) + { + throw new ZipException("Comment too long."); + } + + WriteLeShort(name.Length); + WriteLeShort(extra.Length); + WriteLeShort(entryComment.Length); + WriteLeShort(0); // disk number + WriteLeShort(0); // internal file attributes + // external file attributes + + if (entry.ExternalFileAttributes != -1) + { + WriteLeInt(entry.ExternalFileAttributes); + } + else + { + if (entry.IsDirectory) + { // mark entry as directory (from nikolam.AT.perfectinfo.com) + WriteLeInt(16); + } + else + { + WriteLeInt(0); + } + } + + if (entry.Offset >= uint.MaxValue) + { + WriteLeInt(-1); + } + else + { + WriteLeInt((int)entry.Offset); + } + + if (name.Length > 0) + { + baseOutputStream_.Write(name, 0, name.Length); + } + + if (extra.Length > 0) + { + baseOutputStream_.Write(extra, 0, extra.Length); + } + + if (entryComment.Length > 0) + { + baseOutputStream_.Write(entryComment, 0, entryComment.Length); + } + + sizeEntries += ZipConstants.CentralHeaderBaseSize + name.Length + extra.Length + entryComment.Length; + } + + using (ZipHelperStream zhs = new ZipHelperStream(baseOutputStream_)) + { + zhs.WriteEndOfCentralDirectory(numEntries, sizeEntries, offset, zipComment); + } + + entries = null; + } + + /// + /// Flushes the stream by calling Flush on the deflater stream unless + /// the current compression method is . Then it flushes the underlying output stream. + /// + public override void Flush() + { + if(curMethod == CompressionMethod.Stored) + { + baseOutputStream_.Flush(); + } + else + { + base.Flush(); + } + } + + #region Instance Fields + + /// + /// The entries for the archive. + /// + private List entries = new List(); + + /// + /// Used to track the crc of data added to entries. + /// + private Crc32 crc = new Crc32(); + + /// + /// The current entry being added. 
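// --- Editor's note: end-of-archive sequence, summarising Finish above ---
// Finish emits one central directory header per collected entry and then delegates the
// end-of-central-directory record to ZipHelperStream.WriteEndOfCentralDirectory(numEntries,
// sizeEntries, offset, zipComment). Per the XML docs, it also runs automatically when the
// stream is closed, so an explicit call mainly matters when the base stream must stay open.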
+ /// + private ZipEntry curEntry; + + private int defaultCompressionLevel = Deflater.DEFAULT_COMPRESSION; + + private CompressionMethod curMethod = CompressionMethod.Deflated; + + /// + /// Used to track the size of data for an entry during writing. + /// + private long size; + + /// + /// Offset to be recorded for each entry in the central header. + /// + private long offset; + + /// + /// Comment for the entire archive recorded in central header. + /// + private byte[] zipComment = new byte[0]; + + /// + /// Flag indicating that header patching is required for the current entry. + /// + private bool patchEntryHeader; + + /// + /// Position to patch crc + /// + private long crcPatchPos = -1; + + /// + /// Position to patch size. + /// + private long sizePatchPos = -1; + + // Default is dynamic which is not backwards compatible and can cause problems + // with XP's built in compression which cant read Zip64 archives. + // However it does avoid the situation were a large file is added and cannot be completed correctly. + // NOTE: Setting the size for entries before they are added is the best solution! + private UseZip64 useZip64_ = UseZip64.Dynamic; + + #endregion Instance Fields + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipOutputStream.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipOutputStream.cs.meta new file mode 100644 index 0000000..a9eaa84 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipOutputStream.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: d015af389d9a1407184d31918de7de0c +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipStrings.cs b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipStrings.cs new file mode 100644 index 0000000..6ef523b --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipStrings.cs @@ -0,0 +1,193 @@ +using System; +using System.Text; + +namespace ICSharpCode.SharpZipLib.Zip +{ + /// + /// This static class contains functions for encoding and decoding zip file strings + /// + public static class ZipStrings + { + static ZipStrings() + { + try + { + var platformCodepage = Encoding.GetEncoding(0).CodePage; + SystemDefaultCodePage = (platformCodepage == 1 || platformCodepage == 2 || platformCodepage == 3 || platformCodepage == 42) ? FallbackCodePage : platformCodepage; + } + catch + { + SystemDefaultCodePage = FallbackCodePage; + } + } + + /// Code page backing field + /// + /// The original Zip specification (https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT) states + /// that file names should only be encoded with IBM Code Page 437 or UTF-8. + /// In practice, most zip apps use OEM or system encoding (typically cp437 on Windows). + /// Let's be good citizens and default to UTF-8 http://utf8everywhere.org/ + /// + private static int codePage = AutomaticCodePage; + + /// Automatically select codepage while opening archive + /// see https://github.com/icsharpcode/SharpZipLib/pull/280#issuecomment-433608324 + /// + private const int AutomaticCodePage = -1; + + /// + /// Encoding used for string conversion. 
Setting this to 65001 (UTF-8) will + /// also set the Language encoding flag to indicate UTF-8 encoded file names. + /// + public static int CodePage + { + get + { + return codePage == AutomaticCodePage? Encoding.UTF8.CodePage:codePage; + } + set + { + if ((value < 0) || (value > 65535) || + (value == 1) || (value == 2) || (value == 3) || (value == 42)) + { + throw new ArgumentOutOfRangeException(nameof(value)); + } + + codePage = value; + } + } + + private const int FallbackCodePage = 437; + + /// + /// Attempt to get the operating system default codepage, or failing that, to + /// the fallback code page IBM 437. + /// + public static int SystemDefaultCodePage { get; } + + /// + /// Get whether the default codepage is set to UTF-8. Setting this property to false will + /// set the to + /// + /// + /// /// Get OEM codepage from NetFX, which parses the NLP file with culture info table etc etc. + /// But sometimes it yields the special value of 1 which is nicknamed CodePageNoOEM in sources (might also mean CP_OEMCP, but Encoding puts it so). + /// This was observed on Ukranian and Hindu systems. + /// Given this value, throws an . + /// So replace it with , (IBM 437 which is the default code page in a default Windows installation console. + /// + public static bool UseUnicode + { + get + { + return codePage == Encoding.UTF8.CodePage; + } + set + { + if (value) + { + codePage = Encoding.UTF8.CodePage; + } + else + { + codePage = SystemDefaultCodePage; + } + } + } + + /// + /// Convert a portion of a byte array to a string using + /// + /// + /// Data to convert to string + /// + /// + /// Number of bytes to convert starting from index 0 + /// + /// + /// data[0]..data[count - 1] converted to a string + /// + public static string ConvertToString(byte[] data, int count) + => data == null + ? string.Empty + : Encoding.GetEncoding(CodePage).GetString(data, 0, count); + + /// + /// Convert a byte array to a string using + /// + /// + /// Byte array to convert + /// + /// + /// dataconverted to a string + /// + public static string ConvertToString(byte[] data) + => ConvertToString(data, data.Length); + + private static Encoding EncodingFromFlag(int flags) + => ((flags & (int)GeneralBitFlags.UnicodeText) != 0) + ? Encoding.UTF8 + : Encoding.GetEncoding( + // if CodePage wasn't set manually and no utf flag present + // then we must use SystemDefault (old behavior) + // otherwise, CodePage should be preferred over SystemDefault + // see https://github.com/icsharpcode/SharpZipLib/issues/274 + codePage == AutomaticCodePage? + SystemDefaultCodePage: + codePage); + + /// + /// Convert a byte array to a string using + /// + /// The applicable general purpose bits flags + /// + /// Byte array to convert + /// + /// The number of bytes to convert. + /// + /// dataconverted to a string + /// + public static string ConvertToStringExt(int flags, byte[] data, int count) + => (data == null) + ? string.Empty + : EncodingFromFlag(flags).GetString(data, 0, count); + + /// + /// Convert a byte array to a string using + /// + /// + /// Byte array to convert + /// + /// The applicable general purpose bits flags + /// + /// dataconverted to a string + /// + public static string ConvertToStringExt(int flags, byte[] data) + => ConvertToStringExt(flags, data, data.Length); + + /// + /// Convert a string to a byte array using + /// + /// + /// String to convert to an array + /// + /// Converted array + public static byte[] ConvertToArray(string str) + => str == null + ? 
new byte[0] + : Encoding.GetEncoding(CodePage).GetBytes(str); + + /// + /// Convert a string to a byte array using + /// + /// The applicable general purpose bits flags + /// + /// String to convert to an array + /// + /// Converted array + public static byte[] ConvertToArray(int flags, string str) + => (string.IsNullOrEmpty(str)) + ? new byte[0] + : EncodingFromFlag(flags).GetBytes(str); + } +} diff --git a/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipStrings.cs.meta b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipStrings.cs.meta new file mode 100644 index 0000000..a98a393 --- /dev/null +++ b/FirClient/Assets/Libraries/ICSharpCode.SharpZipLib/Zip/ZipStrings.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: c6ef9f9eb21cc4a349764deb68593e8d +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/Android/libs/armeabi-v7a/libtolua.so b/FirClient/Assets/Plugins/Android/libs/armeabi-v7a/libtolua.so deleted file mode 100644 index 0e74ac3..0000000 Binary files a/FirClient/Assets/Plugins/Android/libs/armeabi-v7a/libtolua.so and /dev/null differ diff --git a/FirClient/Assets/Plugins/Editor.meta b/FirClient/Assets/Plugins/Editor.meta deleted file mode 100644 index 1f7bd98..0000000 --- a/FirClient/Assets/Plugins/Editor.meta +++ /dev/null @@ -1,8 +0,0 @@ -fileFormatVersion: 2 -guid: 4090bdad1592f4aac9619b904fc84bb0 -folderAsset: yes -DefaultImporter: - externalObjects: {} - userData: - assetBundleName: - assetBundleVariant: diff --git a/FirClient/Assets/Plugins/ICSharpCode.SharpZipLib.dll b/FirClient/Assets/Plugins/ICSharpCode.SharpZipLib.dll deleted file mode 100644 index e829ebf..0000000 Binary files a/FirClient/Assets/Plugins/ICSharpCode.SharpZipLib.dll and /dev/null differ diff --git a/FirClient/Assets/Plugins/ICSharpCode.SharpZipLib.dll.meta b/FirClient/Assets/Plugins/ICSharpCode.SharpZipLib.dll.meta deleted file mode 100644 index 5184a26..0000000 --- a/FirClient/Assets/Plugins/ICSharpCode.SharpZipLib.dll.meta +++ /dev/null @@ -1,33 +0,0 @@ -fileFormatVersion: 2 -guid: 5319cbf45a9ffb84f8e7b904e1675b88 -PluginImporter: - externalObjects: {} - serializedVersion: 2 - iconMap: {} - executionOrder: {} - defineConstraints: [] - isPreloaded: 0 - isOverridable: 0 - isExplicitlyReferenced: 0 - validateReferences: 1 - platformData: - - first: - Any: - second: - enabled: 1 - settings: {} - - first: - Editor: Editor - second: - enabled: 0 - settings: - DefaultValueInitialized: true - - first: - Windows Store Apps: WindowsStoreApps - second: - enabled: 0 - settings: - CPU: AnyCPU - userData: - assetBundleName: - assetBundleVariant: diff --git a/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEditor/Sirenix.Serialization.dll.meta b/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEditor/Sirenix.Serialization.dll.meta index df04da5..697fc6d 100644 --- a/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEditor/Sirenix.Serialization.dll.meta +++ b/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEditor/Sirenix.Serialization.dll.meta @@ -1,18 +1,49 @@ fileFormatVersion: 2 guid: 5651992cdad94894a3af7dc3f1da9170 -timeCreated: 1488812592 -licenseType: Store PluginImporter: - serializedVersion: 1 + externalObjects: {} + serializedVersion: 2 iconMap: {} executionOrder: {} + defineConstraints: [] isPreloaded: 0 isOverridable: 0 + isExplicitlyReferenced: 0 + validateReferences: 1 platformData: - Android: + - first: + : Linux + second: enabled: 
1 settings: {} - Any: + - first: + : LinuxUniversal + second: + enabled: 1 + settings: {} + - first: + : OSXIntel + second: + enabled: 1 + settings: {} + - first: + : OSXIntel64 + second: + enabled: 1 + settings: {} + - first: + : PSM + second: + enabled: 0 + settings: {} + - first: + Android: Android + second: + enabled: 0 + settings: {} + - first: + Any: + second: enabled: 0 settings: Exclude Android: 0 @@ -37,38 +68,39 @@ PluginImporter: Exclude XboxOne: 1 Exclude iOS: 1 Exclude tvOS: 1 - Editor: + - first: + Editor: Editor + second: enabled: 0 settings: DefaultValueInitialized: true - Linux: - enabled: 1 - settings: {} - Linux64: - enabled: 1 - settings: {} - LinuxUniversal: + - first: + Standalone: Linux64 + second: enabled: 1 - settings: {} - OSXIntel: - enabled: 1 - settings: {} - OSXIntel64: - enabled: 1 - settings: {} - OSXUniversal: + settings: + CPU: + - first: + Standalone: OSXUniversal + second: enabled: 1 - settings: {} - PSM: - enabled: 0 - settings: {} - Win: + settings: + CPU: + - first: + Standalone: Win + second: enabled: 1 - settings: {} - Win64: + settings: + CPU: + - first: + Standalone: Win64 + second: enabled: 1 - settings: {} - WindowsStoreApps: + settings: + CPU: + - first: + Windows Store Apps: WindowsStoreApps + second: enabled: 0 settings: CPU: AnyCPU diff --git a/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEditor/Sirenix.Utilities.dll.meta b/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEditor/Sirenix.Utilities.dll.meta index 647e74e..dcbedf4 100644 --- a/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEditor/Sirenix.Utilities.dll.meta +++ b/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEditor/Sirenix.Utilities.dll.meta @@ -1,18 +1,49 @@ fileFormatVersion: 2 guid: 5978f8f3dd274e848fbb7a123bde1fb9 -timeCreated: 1488812592 -licenseType: Store PluginImporter: - serializedVersion: 1 + externalObjects: {} + serializedVersion: 2 iconMap: {} executionOrder: {} + defineConstraints: [] isPreloaded: 0 isOverridable: 0 + isExplicitlyReferenced: 0 + validateReferences: 1 platformData: - Android: + - first: + : Linux + second: enabled: 1 settings: {} - Any: + - first: + : LinuxUniversal + second: + enabled: 1 + settings: {} + - first: + : OSXIntel + second: + enabled: 1 + settings: {} + - first: + : OSXIntel64 + second: + enabled: 1 + settings: {} + - first: + : PSM + second: + enabled: 0 + settings: {} + - first: + Android: Android + second: + enabled: 0 + settings: {} + - first: + Any: + second: enabled: 0 settings: Exclude Android: 0 @@ -37,38 +68,39 @@ PluginImporter: Exclude XboxOne: 1 Exclude iOS: 1 Exclude tvOS: 1 - Editor: + - first: + Editor: Editor + second: enabled: 0 settings: DefaultValueInitialized: true - Linux: - enabled: 1 - settings: {} - Linux64: - enabled: 1 - settings: {} - LinuxUniversal: + - first: + Standalone: Linux64 + second: enabled: 1 - settings: {} - OSXIntel: - enabled: 1 - settings: {} - OSXIntel64: - enabled: 1 - settings: {} - OSXUniversal: + settings: + CPU: + - first: + Standalone: OSXUniversal + second: enabled: 1 - settings: {} - PSM: - enabled: 0 - settings: {} - Win: + settings: + CPU: + - first: + Standalone: Win + second: enabled: 1 - settings: {} - Win64: + settings: + CPU: + - first: + Standalone: Win64 + second: enabled: 1 - settings: {} - WindowsStoreApps: + settings: + CPU: + - first: + Windows Store Apps: WindowsStoreApps + second: enabled: 0 settings: CPU: AnyCPU diff --git a/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEmitAndNoEditor/Sirenix.Serialization.dll.meta 
b/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEmitAndNoEditor/Sirenix.Serialization.dll.meta index 4320e60..6c285fb 100644 --- a/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEmitAndNoEditor/Sirenix.Serialization.dll.meta +++ b/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEmitAndNoEditor/Sirenix.Serialization.dll.meta @@ -1,15 +1,54 @@ fileFormatVersion: 2 guid: d2a8f0021d6b47c5923d8972dfb81ef1 -timeCreated: 1488812607 -licenseType: Store PluginImporter: - serializedVersion: 1 + externalObjects: {} + serializedVersion: 2 iconMap: {} executionOrder: {} + defineConstraints: [] isPreloaded: 0 isOverridable: 0 + isExplicitlyReferenced: 0 + validateReferences: 1 platformData: - Any: + - first: + : N3DS + second: + enabled: 1 + settings: {} + - first: + : PSM + second: + enabled: 1 + settings: {} + - first: + : PSP2 + second: + enabled: 1 + settings: {} + - first: + : SamsungTV + second: + enabled: 1 + settings: {} + - first: + : Tizen + second: + enabled: 1 + settings: {} + - first: + : WiiU + second: + enabled: 1 + settings: {} + - first: + Android: Android + second: + enabled: 1 + settings: {} + - first: + Any: + second: enabled: 0 settings: Exclude Android: 1 @@ -34,45 +73,41 @@ PluginImporter: Exclude XboxOne: 0 Exclude iOS: 0 Exclude tvOS: 0 - Editor: + - first: + Editor: Editor + second: enabled: 0 settings: DefaultValueInitialized: true - N3DS: - enabled: 1 - settings: {} - PS4: - enabled: 1 - settings: {} - PSM: - enabled: 1 - settings: {} - PSP2: - enabled: 1 - settings: {} - SamsungTV: - enabled: 1 - settings: {} - Tizen: - enabled: 1 - settings: {} - WebGL: + - first: + PS4: PS4 + second: enabled: 1 settings: {} - WiiU: + - first: + WebGL: WebGL + second: enabled: 1 settings: {} - WindowsStoreApps: + - first: + Windows Store Apps: WindowsStoreApps + second: enabled: 1 settings: CPU: AnyCPU - XboxOne: + - first: + XboxOne: XboxOne + second: enabled: 1 settings: {} - iOS: + - first: + iPhone: iOS + second: enabled: 1 settings: {} - tvOS: + - first: + tvOS: tvOS + second: enabled: 1 settings: {} userData: diff --git a/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEmitAndNoEditor/Sirenix.Utilities.dll.meta b/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEmitAndNoEditor/Sirenix.Utilities.dll.meta index 378a385..ecf6cbc 100644 --- a/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEmitAndNoEditor/Sirenix.Utilities.dll.meta +++ b/FirClient/Assets/Plugins/Sirenix/Assemblies/NoEmitAndNoEditor/Sirenix.Utilities.dll.meta @@ -1,15 +1,54 @@ fileFormatVersion: 2 guid: 1e0a9643dc0d4b46bf2321f72c4e503e -timeCreated: 1488812607 -licenseType: Store PluginImporter: - serializedVersion: 1 + externalObjects: {} + serializedVersion: 2 iconMap: {} executionOrder: {} + defineConstraints: [] isPreloaded: 0 isOverridable: 0 + isExplicitlyReferenced: 0 + validateReferences: 1 platformData: - Any: + - first: + : N3DS + second: + enabled: 1 + settings: {} + - first: + : PSM + second: + enabled: 1 + settings: {} + - first: + : PSP2 + second: + enabled: 1 + settings: {} + - first: + : SamsungTV + second: + enabled: 1 + settings: {} + - first: + : Tizen + second: + enabled: 1 + settings: {} + - first: + : WiiU + second: + enabled: 1 + settings: {} + - first: + Android: Android + second: + enabled: 1 + settings: {} + - first: + Any: + second: enabled: 0 settings: Exclude Android: 1 @@ -34,45 +73,41 @@ PluginImporter: Exclude XboxOne: 0 Exclude iOS: 0 Exclude tvOS: 0 - Editor: + - first: + Editor: Editor + second: enabled: 0 settings: DefaultValueInitialized: true - N3DS: - enabled: 1 - settings: {} - PS4: - enabled: 
1 - settings: {} - PSM: - enabled: 1 - settings: {} - PSP2: - enabled: 1 - settings: {} - SamsungTV: - enabled: 1 - settings: {} - Tizen: - enabled: 1 - settings: {} - WebGL: + - first: + PS4: PS4 + second: enabled: 1 settings: {} - WiiU: + - first: + WebGL: WebGL + second: enabled: 1 settings: {} - WindowsStoreApps: + - first: + Windows Store Apps: WindowsStoreApps + second: enabled: 1 settings: CPU: AnyCPU - XboxOne: + - first: + XboxOne: XboxOne + second: enabled: 1 settings: {} - iOS: + - first: + iPhone: iOS + second: enabled: 1 settings: {} - tvOS: + - first: + tvOS: tvOS + second: enabled: 1 settings: {} userData: diff --git a/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Assets/Editor/Odin Inspector Logo.png.meta b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Assets/Editor/Odin Inspector Logo.png.meta index 79aebd3..63da7c9 100644 --- a/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Assets/Editor/Odin Inspector Logo.png.meta +++ b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Assets/Editor/Odin Inspector Logo.png.meta @@ -3,7 +3,7 @@ guid: a78bffbeb81b48ae9ec71ad7969613e5 TextureImporter: internalIDToNameTable: [] externalObjects: {} - serializedVersion: 10 + serializedVersion: 11 mipmaps: mipMapMode: 0 enableMipMap: 0 @@ -57,6 +57,7 @@ TextureImporter: maxTextureSizeSet: 0 compressionQualitySet: 0 textureFormatSet: 0 + applyGammaDecoding: 1 platformSettings: - serializedVersion: 3 buildTarget: DefaultTexturePlatform @@ -70,6 +71,30 @@ TextureImporter: overridden: 0 androidETC2FallbackOverride: 0 forceMaximumCompressionQuality_BC6H_BC7: 0 + - serializedVersion: 3 + buildTarget: Android + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 47 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + androidETC2FallbackOverride: 1 + forceMaximumCompressionQuality_BC6H_BC7: 0 + - serializedVersion: 3 + buildTarget: iPhone + maxTextureSize: 1024 + resizeAlgorithm: 0 + textureFormat: 50 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + allowsAlphaSplitting: 0 + overridden: 1 + androidETC2FallbackOverride: 0 + forceMaximumCompressionQuality_BC6H_BC7: 0 spriteSheet: serializedVersion: 2 sprites: [] diff --git a/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/AOTGenerationConfig.asset b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/AOTGenerationConfig.asset new file mode 100644 index 0000000..91f67d8 --- /dev/null +++ b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/AOTGenerationConfig.asset @@ -0,0 +1,20 @@ +%YAML 1.1 +%TAG !u! 
tag:unity3d.com,2011: +--- !u!114 &11400000 +MonoBehaviour: + m_ObjectHideFlags: 0 + m_CorrespondingSourceObject: {fileID: 0} + m_PrefabInstance: {fileID: 0} + m_PrefabAsset: {fileID: 0} + m_GameObject: {fileID: 0} + m_Enabled: 1 + m_EditorHideFlags: 0 + m_Script: {fileID: 1726182683, guid: a4865f1ab4504ed8a368670db22f409c, type: 3} + m_Name: AOTGenerationConfig + m_EditorClassIdentifier: + automateBeforeBuilds: 0 + deleteDllAfterBuilds: 1 + AutomateForAllAOTPlatforms: 1 + automateForPlatforms: 0900000014000000 + lastScan: 0 + supportSerializedTypes: [] diff --git a/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/AOTGenerationConfig.asset.meta b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/AOTGenerationConfig.asset.meta new file mode 100644 index 0000000..88371a7 --- /dev/null +++ b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/AOTGenerationConfig.asset.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 08ed6aa3a709e4b40913b07b1301136d +NativeFormatImporter: + externalObjects: {} + mainObjectFileID: 0 + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/ImportSettingsConfig.asset b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/ImportSettingsConfig.asset new file mode 100644 index 0000000..338ffe3 --- /dev/null +++ b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/ImportSettingsConfig.asset @@ -0,0 +1,15 @@ +%YAML 1.1 +%TAG !u! tag:unity3d.com,2011: +--- !u!114 &11400000 +MonoBehaviour: + m_ObjectHideFlags: 0 + m_CorrespondingSourceObject: {fileID: 0} + m_PrefabInstance: {fileID: 0} + m_PrefabAsset: {fileID: 0} + m_GameObject: {fileID: 0} + m_Enabled: 1 + m_EditorHideFlags: 0 + m_Script: {fileID: 188390376, guid: a4865f1ab4504ed8a368670db22f409c, type: 3} + m_Name: ImportSettingsConfig + m_EditorClassIdentifier: + automateBeforeBuild: 1 diff --git a/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/ImportSettingsConfig.asset.meta b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/ImportSettingsConfig.asset.meta new file mode 100644 index 0000000..6e08cad --- /dev/null +++ b/FirClient/Assets/Plugins/Sirenix/Odin Inspector/Config/Editor/ImportSettingsConfig.asset.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: c293bb33022c2a245adccd164efb78e9 +NativeFormatImporter: + externalObjects: {} + mainObjectFileID: 0 + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/iOS/libtolua.a b/FirClient/Assets/Plugins/iOS/libtolua.a deleted file mode 100644 index 10c70b3..0000000 Binary files a/FirClient/Assets/Plugins/iOS/libtolua.a and /dev/null differ diff --git a/FirClient/Assets/Plugins/protobuf-net.dll b/FirClient/Assets/Plugins/protobuf-net.dll deleted file mode 100644 index 346f6f1..0000000 Binary files a/FirClient/Assets/Plugins/protobuf-net.dll and /dev/null differ diff --git a/FirClient/Assets/Plugins/protobuf-net.dll.meta b/FirClient/Assets/Plugins/protobuf-net.dll.meta deleted file mode 100644 index 4e7b7c4..0000000 --- a/FirClient/Assets/Plugins/protobuf-net.dll.meta +++ /dev/null @@ -1,33 +0,0 @@ -fileFormatVersion: 2 -guid: a458919a58f7d684a8e7b62b9d779e86 -PluginImporter: - externalObjects: {} - serializedVersion: 2 - iconMap: {} - executionOrder: {} - defineConstraints: [] - isPreloaded: 0 - isOverridable: 0 - isExplicitlyReferenced: 0 - validateReferences: 1 - platformData: - - first: - Any: - second: - enabled: 1 - settings: {} - - first: - Editor: Editor - second: - enabled: 0 
- settings: - DefaultValueInitialized: true - - first: - Windows Store Apps: WindowsStoreApps - second: - enabled: 0 - settings: - CPU: AnyCPU - userData: - assetBundleName: - assetBundleVariant: diff --git a/FirClient/Assets/Plugins/protobuff-net.meta b/FirClient/Assets/Plugins/protobuff-net.meta new file mode 100644 index 0000000..95fba10 --- /dev/null +++ b/FirClient/Assets/Plugins/protobuff-net.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 3ccd0fcc2ede87d43a62fbc8e99acba5 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.dll b/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.dll new file mode 100644 index 0000000..44f5c89 Binary files /dev/null and b/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.dll differ diff --git a/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.dll.meta b/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.dll.meta new file mode 100644 index 0000000..5084e35 --- /dev/null +++ b/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.dll.meta @@ -0,0 +1,33 @@ +fileFormatVersion: 2 +guid: e25f891e05c177c4885852ee1a984041 +PluginImporter: + externalObjects: {} + serializedVersion: 2 + iconMap: {} + executionOrder: {} + defineConstraints: [] + isPreloaded: 0 + isOverridable: 0 + isExplicitlyReferenced: 0 + validateReferences: 1 + platformData: + - first: + Any: + second: + enabled: 1 + settings: {} + - first: + Editor: Editor + second: + enabled: 0 + settings: + DefaultValueInitialized: true + - first: + Windows Store Apps: WindowsStoreApps + second: + enabled: 0 + settings: + CPU: AnyCPU + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.xml b/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.xml new file mode 100644 index 0000000..6bfc071 --- /dev/null +++ b/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.xml @@ -0,0 +1,6674 @@ + + + + Google.Protobuf + + + + + Class containing helpful workarounds for various platform compatibility + + + + + Stream implementation which proxies another stream, only allowing a certain amount + of data to be read. Note that this is only used to read delimited streams, so it + doesn't attempt to implement everything. + + + + + Thrown when an attempt is made to parse invalid JSON, e.g. using + a non-string property key, or including a redundant comma. Parsing a protocol buffer + message represented in JSON using can throw both this + exception and depending on the situation. This + exception is only thrown for "pure JSON" errors, whereas InvalidProtocolBufferException + is thrown when the JSON may be valid in and of itself, but cannot be parsed as a protocol buffer + message. + + + + + Encodes and writes protocol message fields. + + + + This class is generally used by generated code to write appropriate + primitives to the stream. It effectively encapsulates the lowest + levels of protocol buffer format. Unlike some other implementations, + this does not include combined "write tag and value" methods. Generated + code knows the exact byte representations of the tags they're going to write, + so there's no need to re-encode them each time. Manually-written code calling + this class should just call one of the WriteTag overloads before each value. + + + Repeated fields and map fields are not handled by this class; use RepeatedField<T> + and MapField<TKey, TValue> to serialize such fields. 
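// --- Editor's note: sketch of the "write tag, then value" pattern the remarks describe ---
// Illustrative only; it assumes the public Google.Protobuf API this XML file documents,
// and 'stream' is a hypothetical System.IO.Stream:
using (var output = new CodedOutputStream(stream))
{
    output.WriteTag(1, WireFormat.WireType.Varint);   // field number 1, varint wire type
    output.WriteInt32(42);                            // the value for field 1
    output.Flush();                                   // push buffered bytes to 'stream'
}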
+ + + + + + The buffer size used by CreateInstance(Stream). + + + + + Creates a new CodedOutputStream that writes directly to the given + byte array. If more bytes are written than fit in the array, + OutOfSpaceException will be thrown. + + + + + Creates a new CodedOutputStream that writes directly to the given + byte array slice. If more bytes are written than fit in the array, + OutOfSpaceException will be thrown. + + + + + Creates a new which write to the given stream, and disposes of that + stream when the returned CodedOutputStream is disposed. + + The stream to write to. It will be disposed when the returned CodedOutputStream is disposed. + + + + Creates a new CodedOutputStream which write to the given stream and uses + the specified buffer size. + + The stream to write to. It will be disposed when the returned CodedOutputStream is disposed. + The size of buffer to use internally. + + + + Creates a new CodedOutputStream which write to the given stream. + + The stream to write to. + If true, is left open when the returned CodedOutputStream is disposed; + if false, the provided stream is disposed as well. + + + + Creates a new CodedOutputStream which write to the given stream and uses + the specified buffer size. + + The stream to write to. + The size of buffer to use internally. + If true, is left open when the returned CodedOutputStream is disposed; + if false, the provided stream is disposed as well. + + + + Returns the current position in the stream, or the position in the output buffer + + + + + Writes a double field value, without a tag, to the stream. + + The value to write + + + + Writes a float field value, without a tag, to the stream. + + The value to write + + + + Writes a uint64 field value, without a tag, to the stream. + + The value to write + + + + Writes an int64 field value, without a tag, to the stream. + + The value to write + + + + Writes an int32 field value, without a tag, to the stream. + + The value to write + + + + Writes a fixed64 field value, without a tag, to the stream. + + The value to write + + + + Writes a fixed32 field value, without a tag, to the stream. + + The value to write + + + + Writes a bool field value, without a tag, to the stream. + + The value to write + + + + Writes a string field value, without a tag, to the stream. + The data is length-prefixed. + + The value to write + + + + Writes a message, without a tag, to the stream. + The data is length-prefixed. + + The value to write + + + + Write a byte string, without a tag, to the stream. + The data is length-prefixed. + + The value to write + + + + Writes a uint32 value, without a tag, to the stream. + + The value to write + + + + Writes an enum value, without a tag, to the stream. + + The value to write + + + + Writes an sfixed32 value, without a tag, to the stream. + + The value to write. + + + + Writes an sfixed64 value, without a tag, to the stream. + + The value to write + + + + Writes an sint32 value, without a tag, to the stream. + + The value to write + + + + Writes an sint64 value, without a tag, to the stream. + + The value to write + + + + Writes a length (in bytes) for length-delimited data. + + + This method simply writes a rawint, but exists for clarity in calling code. + + Length value, in bytes. + + + + Encodes and writes a tag. + + The number of the field to write the tag for + The wire format type of the tag to write + + + + Writes an already-encoded tag. + + The encoded tag + + + + Writes the given single-byte tag directly to the stream. 
+ + The encoded tag + + + + Writes the given two-byte tag directly to the stream. + + The first byte of the encoded tag + The second byte of the encoded tag + + + + Writes the given three-byte tag directly to the stream. + + The first byte of the encoded tag + The second byte of the encoded tag + The third byte of the encoded tag + + + + Writes the given four-byte tag directly to the stream. + + The first byte of the encoded tag + The second byte of the encoded tag + The third byte of the encoded tag + The fourth byte of the encoded tag + + + + Writes the given five-byte tag directly to the stream. + + The first byte of the encoded tag + The second byte of the encoded tag + The third byte of the encoded tag + The fourth byte of the encoded tag + The fifth byte of the encoded tag + + + + Writes a 32 bit value as a varint. The fast route is taken when + there's enough buffer space left to whizz through without checking + for each byte; otherwise, we resort to calling WriteRawByte each time. + + + + + Writes out an array of bytes. + + + + + Writes out part of an array of bytes. + + + + + Encode a 32-bit value with ZigZag encoding. + + + ZigZag encodes signed integers into values that can be efficiently + encoded with varint. (Otherwise, negative values must be + sign-extended to 64 bits to be varint encoded, thus always taking + 10 bytes on the wire.) + + + + + Encode a 64-bit value with ZigZag encoding. + + + ZigZag encodes signed integers into values that can be efficiently + encoded with varint. (Otherwise, negative values must be + sign-extended to 64 bits to be varint encoded, thus always taking + 10 bytes on the wire.) + + + + + Indicates that a CodedOutputStream wrapping a flat byte array + ran out of space. + + + + + Flushes any buffered data and optionally closes the underlying stream, if any. + + + + By default, any underlying stream is closed by this method. To configure this behaviour, + use a constructor overload with a leaveOpen parameter. If this instance does not + have an underlying stream, this method does nothing. + + + For the sake of efficiency, calling this method does not prevent future write calls - but + if a later write ends up writing to a stream which has been disposed, that is likely to + fail. It is recommend that you not call any other methods after this. + + + + + + Flushes any buffered data to the underlying stream (if there is one). + + + + + Verifies that SpaceLeft returns zero. It's common to create a byte array + that is exactly big enough to hold a message, then write to it with + a CodedOutputStream. Calling CheckNoSpaceLeft after writing verifies that + the message was actually as big as expected, which can help bugs. + + + + + If writing to a flat array, returns the space left in the array. Otherwise, + throws an InvalidOperationException. + + + + + Computes the number of bytes that would be needed to encode a + double field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a + float field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a + uint64 field, including the tag. + + + + + Computes the number of bytes that would be needed to encode an + int64 field, including the tag. + + + + + Computes the number of bytes that would be needed to encode an + int32 field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a + fixed64 field, including the tag. 
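The ZigZag remarks above describe the mapping without showing it. A sketch of the standard 32-bit transform (the helper names are ours, not the library's internals): 0, -1, 1, -2, 2, ... map to 0, 1, 2, 3, 4, ..., so small negative numbers stay small on the wire.

    // Encode: interleave negative and non-negative values.
    static uint EncodeZigZag32(int n) => (uint)((n << 1) ^ (n >> 31));

    // Decode: the inverse transform, applied when reading an sint32 back.
    static int DecodeZigZag32(uint n) => (int)(n >> 1) ^ -(int)(n & 1);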
+ + + + + Computes the number of bytes that would be needed to encode a + fixed32 field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a + bool field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a + string field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a + group field, including the tag. + + + + + Computes the number of bytes that would be needed to encode an + embedded message field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a + bytes field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a + uint32 field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a + enum field, including the tag. The caller is responsible for + converting the enum value to its numeric value. + + + + + Computes the number of bytes that would be needed to encode an + sfixed32 field, including the tag. + + + + + Computes the number of bytes that would be needed to encode an + sfixed64 field, including the tag. + + + + + Computes the number of bytes that would be needed to encode an + sint32 field, including the tag. + + + + + Computes the number of bytes that would be needed to encode an + sint64 field, including the tag. + + + + + Computes the number of bytes that would be needed to encode a length, + as written by . + + + + + Computes the number of bytes that would be needed to encode a varint. + + + + + Computes the number of bytes that would be needed to encode a varint. + + + + + Computes the number of bytes that would be needed to encode a tag. + + + + + A general message parser, typically used by reflection-based code as all the methods + return simple . + + + + + Creates a template instance ready for population. + + An empty message. + + + + Parses a message from a byte array. + + The byte array containing the message. Must not be null. + The newly parsed message. + + + + Parses a message from the given byte string. + + The data to parse. + The parsed message. + + + + Parses a message from the given stream. + + The stream to parse. + The parsed message. + + + + Parses a length-delimited message from the given stream. + + + The stream is expected to contain a length and then the data. Only the amount of data + specified by the length will be consumed. + + The stream to parse. + The parsed message. + + + + Parses a message from the given coded input stream. + + The stream to parse. + The parsed message. + + + + Parses a message from the given JSON. + + The JSON to parse. + The parsed message. + The JSON does not comply with RFC 7159 + The JSON does not represent a Protocol Buffers message correctly + + + + A parser for a specific message type. + + +

+ This delegates most behavior to the + implementation within the original type, but + provides convenient overloads to parse from a variety of sources. + + Most applications will never need to create their own instances of this type; + instead, use the static Parser property of a generated message type to obtain a + parser for that type. + + The type of message to be parsed. +
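As those remarks say, a parser is normally obtained from the generated type rather than constructed directly. A sketch, where MyMessage stands in for any generated message type (a hypothetical name):

    using Google.Protobuf;

    // MyMessage is a placeholder for any generated message type.
    static MyMessage RoundTrip(MyMessage original)
    {
        byte[] data = original.ToByteArray();      // MessageExtensions helper
        return MyMessage.Parser.ParseFrom(data);   // static generated parser
    }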
+ + + Creates a new parser. + + + The factory method is effectively an optimization over using a generic constraint + to require a parameterless constructor: delegates are significantly faster to execute. + + Function to invoke when a new, empty message is required. + + + + Creates a template instance ready for population. + + An empty message. + + + + Parses a message from a byte array. + + The byte array containing the message. Must not be null. + The newly parsed message. + + + + Parses a message from the given byte string. + + The data to parse. + The parsed message. + + + + Parses a message from the given stream. + + The stream to parse. + The parsed message. + + + + Parses a length-delimited message from the given stream. + + + The stream is expected to contain a length and then the data. Only the amount of data + specified by the length will be consumed. + + The stream to parse. + The parsed message. + + + + Parses a message from the given coded input stream. + + The stream to parse. + The parsed message. + + + + Parses a message from the given JSON. + + The JSON to parse. + The parsed message. + The JSON does not comply with RFC 7159 + The JSON does not represent a Protocol Buffers message correctly + + + + Immutable array of bytes. + + + + + Unsafe operations that can cause IO Failure and/or other catestrophic side-effects. + + + + + Constructs a new ByteString from the given byte array. The array is + *not* copied, and must not be modified after this constructor is called. + + + + + Provides direct, unrestricted access to the bytes contained in this instance. + You must not modify or resize the byte array returned by this method. + + + + + Internal use only. Ensure that the provided array is not mutated and belongs to this instance. + + + + + Constructs a new ByteString from the given byte array. The array is + *not* copied, and must not be modified after this constructor is called. + + + + + Returns an empty ByteString. + + + + + Returns the length of this ByteString in bytes. + + + + + Returns true if this byte string is empty, false otherwise. + + + + + Converts this into a byte array. + + The data is copied - changes to the returned array will not be reflected in this ByteString. + A byte array with the same data as this ByteString. + + + + Converts this into a standard base64 representation. + + A base64 representation of this ByteString. + + + + Constructs a from the Base64 Encoded String. + + + + + Constructs a from the given array. The contents + are copied, so further modifications to the array will not + be reflected in the returned ByteString. + This method can also be invoked in ByteString.CopyFrom(0xaa, 0xbb, ...) form + which is primarily useful for testing. + + + + + Constructs a from a portion of a byte array. + + + + + Creates a new by encoding the specified text with + the given encoding. + + + + + Creates a new by encoding the specified text in UTF-8. + + + + + Retuns the byte at the given index. + + + + + Converts this into a string by applying the given encoding. + + + This method should only be used to convert binary data which was the result of encoding + text with the given encoding. + + The encoding to use to decode the binary data into text. + The result of decoding the binary data with the given decoding. + + + + Converts this into a string by applying the UTF-8 encoding. + + + This method should only be used to convert binary data which was the result of encoding + text with UTF-8. + + The result of decoding the binary data with the given decoding. 
+ + + + Returns an iterator over the bytes in this . + + An iterator over the bytes in this object. + + + + Returns an iterator over the bytes in this . + + An iterator over the bytes in this object. + + + + Creates a CodedInputStream from this ByteString's data. + + + + + Compares two byte strings for equality. + + The first byte string to compare. + The second byte string to compare. + true if the byte strings are equal; false otherwise. + + + + Compares two byte strings for inequality. + + The first byte string to compare. + The second byte string to compare. + false if the byte strings are equal; true otherwise. + + + + Compares this byte string with another object. + + The object to compare this with. + true if refers to an equal ; false otherwise. + + + + Returns a hash code for this object. Two equal byte strings + will return the same hash code. + + A hash code for this object. + + + + Compares this byte string with another. + + The to compare this with. + true if refers to an equal byte string; false otherwise. + + + + Used internally by CodedOutputStream to avoid creating a copy for the write + + + + + Copies the entire byte array to the destination array provided at the offset specified. + + + + + Writes the entire byte array to the provided stream + + + + + Interface for a Protocol Buffers message, supporting + basic operations required for serialization. + + + + + Merges the data from the specified coded input stream with the current message. + + See the user guide for precise merge semantics. + + + + + Writes the data to the given coded output stream. + + Coded output stream to write the data to. Must not be null. + + + + Calculates the size of this message in Protocol Buffer wire format, in bytes. + + The number of bytes required to write this message + to a coded output stream. + + + + Descriptor for this message. All instances are expected to return the same descriptor, + and for generated types this will be an explicitly-implemented member, returning the + same value as the static property declared on the type. + + + + + Generic interface for a Protocol Buffers message, + where the type parameter is expected to be the same type as + the implementation class. + + The message type. + + + + Merges the given message into this one. + + See the user guide for precise merge semantics. + The message to merge with this one. Must not be null. + + + + This class is used internally by the Protocol Buffer Library and generated + message implementations. It is public only for the sake of those generated + messages. Others should not use this class directly. + + This class contains constants and helper functions useful for dealing with + the Protocol Buffer wire format. + + + + + + Wire types within protobuf encoding. + + + + + Variable-length integer. + + + + + A fixed-length 64-bit value. + + + + + A length-delimited value, i.e. a length followed by that many bytes of data. + + + + + A "start group" value - not supported by this implementation. + + + + + An "end group" value - not supported by this implementation. + + + + + A fixed-length 32-bit value. + + + + + Given a tag value, determines the wire type (lower 3 bits). + + + + + Given a tag value, determines the field number (the upper 29 bits). + + + + + Makes a tag value given a field number and wire type. 
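The WireFormat summary above pins down the tag layout: wire type in the lower 3 bits, field number in the upper 29. Written out directly (our helper names, since the library's own tag helpers may not be publicly accessible):

    // Pack: tag = (fieldNumber << 3) | wireType. Field 1 + varint (0) => 8.
    static uint MakeTag(int fieldNumber, uint wireType) =>
        (uint)(fieldNumber << 3) | wireType;

    static int GetFieldNumber(uint tag) => (int)(tag >> 3);
    static uint GetWireType(uint tag) => tag & 7;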
+ + + + + Provides a utility routine to copy small arrays much more quickly than Buffer.BlockCopy + + + + + The threshold above which you should use Buffer.BlockCopy rather than ByteArray.Copy + + + + + Determines which copy routine to use based on the number of bytes to be copied. + + + + + Reverses the order of bytes in the array + + + + + Reads and decodes protocol message fields. + + + + This class is generally used by generated code to read appropriate + primitives from the stream. It effectively encapsulates the lowest + levels of protocol buffer format. + + + Repeated fields and map fields are not handled by this class; use + and to serialize such fields. + + + + + + Whether to leave the underlying stream open when disposing of this stream. + This is always true when there's no stream. + + + + + Buffer of data read from the stream or provided at construction time. + + + + + The index of the buffer at which we need to refill from the stream (if there is one). + + + + + The position within the current buffer (i.e. the next byte to read) + + + + + The stream to read further input from, or null if the byte array buffer was provided + directly on construction, with no further data available. + + + + + The last tag we read. 0 indicates we've read to the end of the stream + (or haven't read anything yet). + + + + + The next tag, used to store the value read by PeekTag. + + + + + The total number of bytes read before the current buffer. The + total bytes read up to the current position can be computed as + totalBytesRetired + bufferPos. + + + + + The absolute position of the end of the current message. + + + + + Creates a new CodedInputStream reading data from the given byte array. + + + + + Creates a new that reads from the given byte array slice. + + + + + Creates a new reading data from the given stream, which will be disposed + when the returned object is disposed. + + The stream to read from. + + + + Creates a new reading data from the given stream. + + The stream to read from. + true to leave open when the returned + is disposed; false to dispose of the given stream when the + returned object is disposed. + + + + Creates a new CodedInputStream reading data from the given + stream and buffer, using the default limits. + + + + + Creates a new CodedInputStream reading data from the given + stream and buffer, using the specified limits. + + + This chains to the version with the default limits instead of vice versa to avoid + having to check that the default values are valid every time. + + + + + Creates a with the specified size and recursion limits, reading + from an input stream. + + + This method exists separately from the constructor to reduce the number of constructor overloads. + It is likely to be used considerably less frequently than the constructors, as the default limits + are suitable for most use cases. + + The input stream to read from + The total limit of data to read from the stream. + The maximum recursion depth to allow while reading. + A CodedInputStream reading from with the specified size + and recursion limits. + + + + Returns the current position in the input stream, or the position in the input buffer + + + + + Returns the last tag read, or 0 if no tags have been read or we've read beyond + the end of the stream. + + + + + Returns the size limit for this stream. + + + This limit is applied when reading from the underlying stream, as a sanity check. It is + not applied when reading from a byte array data source without an underlying stream. 
+ The default value is 64MB. + + + The size limit. + + + + + Returns the recursion limit for this stream. This limit is applied whilst reading messages, + to avoid maliciously-recursive data. + + + The default limit is 64. + + + The recursion limit for this stream. + + + + + Disposes of this instance, potentially closing any underlying stream. + + + As there is no flushing to perform here, disposing of a which + was constructed with the leaveOpen option parameter set to true (or one which + was constructed to read from a byte array) has no effect. + + + + + Verifies that the last call to ReadTag() returned tag 0 - in other words, + we've reached the end of the stream when we expected to. + + The + tag read was not the one specified + + + + Peeks at the next field tag. This is like calling , but the + tag is not consumed. (So a subsequent call to will return the + same value.) + + + + + Reads a field tag, returning the tag of 0 for "end of stream". + + + If this method returns 0, it doesn't necessarily mean the end of all + the data in this CodedInputStream; it may be the end of the logical stream + for an embedded message, for example. + + The next field tag, or 0 for end of stream. (0 is never a valid tag.) + + + + Skips the data for the field with the tag we've just read. + This should be called directly after , when + the caller wishes to skip an unknown field. + + + This method throws if the last-read tag was an end-group tag. + If a caller wishes to skip a group, they should skip the whole group, by calling this method after reading the + start-group tag. This behavior allows callers to call this method on any field they don't understand, correctly + resulting in an error if an end-group tag has not been paired with an earlier start-group tag. + + The last tag was an end-group tag + The last read operation read to the end of the logical stream + + + + Reads a double field from the stream. + + + + + Reads a float field from the stream. + + + + + Reads a uint64 field from the stream. + + + + + Reads an int64 field from the stream. + + + + + Reads an int32 field from the stream. + + + + + Reads a fixed64 field from the stream. + + + + + Reads a fixed32 field from the stream. + + + + + Reads a bool field from the stream. + + + + + Reads a string field from the stream. + + + + + Reads an embedded message field value from the stream. + + + + + Reads a bytes field value from the stream. + + + + + Reads a uint32 field value from the stream. + + + + + Reads an enum field value from the stream. + + + + + Reads an sfixed32 field value from the stream. + + + + + Reads an sfixed64 field value from the stream. + + + + + Reads an sint32 field value from the stream. + + + + + Reads an sint64 field value from the stream. + + + + + Reads a length for length-delimited data. + + + This is internally just reading a varint, but this method exists + to make the calling code clearer. + + + + + Peeks at the next tag in the stream. If it matches , + the tag is consumed and the method returns true; otherwise, the + stream is left in the original position and the method returns false. + + + + + Same code as ReadRawVarint32, but read each byte individually, checking for + buffer overflow. + + + + + Reads a raw Varint from the stream. If larger than 32 bits, discard the upper bits. + This method is optimised for the case where we've got lots of data in the buffer. + That means we can check the size just once, then just read directly from the buffer + without constant rechecking of the buffer length. 
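ReadTag and SkipLastField, documented above, combine into the canonical manual read loop. A hedged sketch (field number 1 is an illustrative choice):

    using System;
    using Google.Protobuf;

    static void ScanFields(byte[] data)
    {
        var input = new CodedInputStream(data);
        uint tag;
        // ReadTag returns 0 at the end of the stream; 0 is never a valid tag.
        while ((tag = input.ReadTag()) != 0)
        {
            if ((int)(tag >> 3) == 1)
            {
                Console.WriteLine(input.ReadInt32());
            }
            else
            {
                // Unknown field: skip the value whose tag we just read.
                input.SkipLastField();
            }
        }
    }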
+ + + + + Reads a varint from the input one byte at a time, so that it does not + read any bytes after the end of the varint. If you simply wrapped the + stream in a CodedInputStream and used ReadRawVarint32(Stream) + then you would probably end up reading past the end of the varint since + CodedInputStream buffers its input. + + + + + + + Reads a raw varint from the stream. + + + + + Reads a 32-bit little-endian integer from the stream. + + + + + Reads a 64-bit little-endian integer from the stream. + + + + + Decode a 32-bit value with ZigZag encoding. + + + ZigZag encodes signed integers into values that can be efficiently + encoded with varint. (Otherwise, negative values must be + sign-extended to 64 bits to be varint encoded, thus always taking + 10 bytes on the wire.) + + + + + Decode a 32-bit value with ZigZag encoding. + + + ZigZag encodes signed integers into values that can be efficiently + encoded with varint. (Otherwise, negative values must be + sign-extended to 64 bits to be varint encoded, thus always taking + 10 bytes on the wire.) + + + + + Sets currentLimit to (current position) + byteLimit. This is called + when descending into a length-delimited embedded message. The previous + limit is returned. + + The old limit. + + + + Discards the current limit, returning the previous limit. + + + + + Returns whether or not all the data before the limit has been read. + + + + + + Returns true if the stream has reached the end of the input. This is the + case if either the end of the underlying input source has been reached or + the stream has reached a limit created using PushLimit. + + + + + Called when buffer is empty to read more bytes from the + input. If is true, RefillBuffer() gurantees that + either there will be at least one byte in the buffer when it returns + or it will throw an exception. If is false, + RefillBuffer() returns false if no more bytes were available. + + + + + + + Read one byte from the input. + + + the end of the stream or the current limit was reached + + + + + Reads a fixed size of bytes from the input. + + + the end of the stream or the current limit was reached + + + + + Reads and discards bytes. + + the end of the stream + or the current limit was reached + + + + Abstraction of skipping to cope with streams which can't really skip. + + + + + A message type that has a custom string format for diagnostic purposes. + + + + Calling on a generated message type normally + returns the JSON representation. If a message type implements this interface, + then the method will be called instead of the regular + JSON formatting code, but only when ToString() is called either on the message itself + or on another message which contains it. This does not affect the normal JSON formatting of + the message. + + + For example, if you create a proto message representing a GUID, the internal + representation may be a bytes field or four fixed32 fields. However, when debugging + it may be more convenient to see a result in the same format as provides. + + This interface extends to avoid it accidentally being implemented + on types other than messages, where it would not be used by anything in the framework. + + + + + Returns a string representation of this object, for diagnostic purposes. + + + This method is called when a message is formatted as part of a + call. It does not affect the JSON representation used by other than + in calls to . While it is recommended + that the result is valid JSON, this is never assumed by the Protobuf library. 
+ + A string representation of this object, for diagnostic purposes. + + + + Simple but strict JSON tokenizer, rigidly following RFC 7159. + + + + This tokenizer is stateful, and only returns "useful" tokens - names, values etc. + It does not create tokens for the separator between names and values, or for the comma + between values. It validates the token stream as it goes - so callers can assume that the + tokens it produces are appropriate. For example, it would never produce "start object, end array." + + Implementation details: the base class handles single token push-back and + Not thread-safe. + + + + + Creates a tokenizer that reads from the given text reader. + + + + + Creates a tokenizer that first replays the given list of tokens, then continues reading + from another tokenizer. Note that if the returned tokenizer is "pushed back", that does not push back + on the continuation tokenizer, or vice versa. Care should be taken when using this method - it was + created for the sake of Any parsing. + + + + + Returns the depth of the stack, purely in objects (not collections). + Informally, this is the number of remaining unclosed '{' characters we have. + + + + + Returns the next JSON token in the stream. An EndDocument token is returned to indicate the end of the stream, + after which point Next() should not be called again. + + This implementation provides single-token buffering, and calls if there is no buffered token. + The next token in the stream. This is never null. + This method is called after an EndDocument token has been returned + The input text does not comply with RFC 7159 + + + + Returns the next JSON token in the stream, when requested by the base class. (The method delegates + to this if it doesn't have a buffered token.) + + This method is called after an EndDocument token has been returned + The input text does not comply with RFC 7159 + + + + Tokenizer which first exhausts a list of tokens, then consults another tokenizer. + + + + + Tokenizer which does all the *real* work of parsing JSON. + + + + + This method essentially just loops through characters skipping whitespace, validating and + changing state (e.g. from ObjectBeforeColon to ObjectAfterColon) + until it reaches something which will be a genuine token (e.g. a start object, or a value) at which point + it returns the token. Although the method is large, it would be relatively hard to break down further... most + of it is the large switch statement, which sometimes returns and sometimes doesn't. + + + + + Reads a string token. It is assumed that the opening " has already been read. + + + + + Reads an escaped character. It is assumed that the leading backslash has already been read. + + + + + Reads an escaped Unicode 4-nybble hex sequence. It is assumed that the leading \u has already been read. + + + + + Consumes a text-only literal, throwing an exception if the read text doesn't match it. + It is assumed that the first letter of the literal has already been read. + + + + + Validates that we're in a valid state to read a value (using the given error prefix if necessary) + and changes the state to the appropriate one, e.g. ObjectAfterColon to ObjectAfterProperty. + + + + + Pops the top-most container, and sets the state to the appropriate one for the end of a value + in the parent container. + + + + + Possible states of the tokenizer. + + + This is a flags enum purely so we can simply and efficiently represent a set of valid states + for checking. 
+ + Each is documented with an example, + where ^ represents the current position within the text stream. The examples all use string values, + but could be any value, including nested objects/arrays. + The complete state of the tokenizer also includes a stack to indicate the contexts (arrays/objects). + Any additional notional state of "AfterValue" indicates that a value has been completed, at which + point there's an immediate transition to ExpectedEndOfDocument, ObjectAfterProperty or ArrayAfterValue. + + + These states were derived manually by reading RFC 7159 carefully. + + + + + + ^ { "foo": "bar" } + Before the value in a document. Next states: ObjectStart, ArrayStart, "AfterValue" + + + + + { "foo": "bar" } ^ + After the value in a document. Next states: ReaderExhausted + + + + + { "foo": "bar" } ^ (and already read to the end of the reader) + Terminal state. + + + + + { ^ "foo": "bar" } + Before the *first* property in an object. + Next states: + "AfterValue" (empty object) + ObjectBeforeColon (read a name) + + + + + { "foo" ^ : "bar", "x": "y" } + Next state: ObjectAfterColon + + + + + { "foo" : ^ "bar", "x": "y" } + Before any property other than the first in an object. + (Equivalently: after any property in an object) + Next states: + "AfterValue" (value is simple) + ObjectStart (value is object) + ArrayStart (value is array) + + + + + { "foo" : "bar" ^ , "x" : "y" } + At the end of a property, so expecting either a comma or end-of-object + Next states: ObjectAfterComma or "AfterValue" + + + + + { "foo":"bar", ^ "x":"y" } + Read the comma after the previous property, so expecting another property. + This is like ObjectStart, but closing brace isn't valid here + Next state: ObjectBeforeColon. + + + + + [ ^ "foo", "bar" ] + Before the *first* value in an array. + Next states: + "AfterValue" (read a value) + "AfterValue" (end of array; will pop stack) + + + + + [ "foo" ^ , "bar" ] + After any value in an array, so expecting either a comma or end-of-array + Next states: ArrayAfterComma or "AfterValue" + + + + + [ "foo", ^ "bar" ] + After a comma in an array, so there *must* be another value (simple or complex). + Next states: "AfterValue" (simple value), StartObject, StartArray + + + + + Wrapper around a text reader allowing small amounts of buffering and location handling. + + + + + The buffered next character, if we have one. + + + + + Returns the next character in the stream, or null if we have reached the end. + + + + + + Creates a new exception appropriate for the current state of the reader. + + + + + Factory methods for . + + + + + Retrieves a codec suitable for a string field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for a bytes field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for a bool field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for an int32 field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for an sint32 field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for a fixed32 field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for an sfixed32 field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for a uint32 field with the given tag. + + The tag. + A codec for the given tag. 
+ + + + Retrieves a codec suitable for an int64 field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for an sint64 field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for a fixed64 field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for an sfixed64 field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for a uint64 field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for a float field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for a double field with the given tag. + + The tag. + A codec for the given tag. + + + + Retrieves a codec suitable for an enum field with the given tag. + + The tag. + A conversion function from to the enum type. + A conversion function from the enum type to . + A codec for the given tag. + + + + Retrieves a codec suitable for a message field with the given tag. + + The tag. + A parser to use for the message type. + A codec for the given tag. + + + + Creates a codec for a wrapper type of a class - which must be string or ByteString. + + + + + Creates a codec for a wrapper type of a struct - which must be Int32, Int64, UInt32, UInt64, + Bool, Single or Double. + + + + + Helper code to create codecs for wrapper types. + + + Somewhat ugly with all the static methods, but the conversions involved to/from nullable types make it + slightly tricky to improve. So long as we keep the public API (ForClassWrapper, ForStructWrapper) in place, + we can refactor later if we come up with something cleaner. + + + + + Returns a field codec which effectively wraps a value of type T in a message. + + + + + + + An encode/decode pair for a single field. This effectively encapsulates + all the information needed to read or write the field value from/to a coded + stream. + + + This class is public and has to be as it is used by generated code, but its public + API is very limited - just what the generated code needs to call directly. + + + + This never writes default values to the stream, and does not address "packedness" + in repeated fields itself, other than to know whether or not the field *should* be packed. + + + + + Returns a delegate to write a value (unconditionally) to a coded output stream. + + + + + Returns the size calculator for just a value. + + + + + Returns a delegate to read a value from a coded input stream. It is assumed that + the stream is already positioned on the appropriate tag. + + + + + Returns the fixed size for an entry, or 0 if sizes vary. + + + + + Gets the tag of the codec. + + + The tag of the codec. + + + + + Default value for this codec. Usually the same for every instance of the same type, but + for string/ByteString wrapper fields the codec's default value is null, whereas for + other string/ByteString fields it's "" or ByteString.Empty. + + + The default value of the codec's type. + + + + + Write a tag and the given value, *if* the value is not the default. + + + + + Reads a value of the codec type from the given . + + The input stream to read from. + The value read from the stream. + + + + Calculates the size required to write the given value, with a tag, + if the value is not the default. + + + + + Thrown when a protocol message being parsed is invalid in some way, + e.g. it contains a malformed varint or a negative byte length. 
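The For* factories above produce the codec objects that RepeatedField uses to read and write repeated (here, packed) data. A sketch under those assumptions; the raw tag 10 (field 1, length-delimited wire type) is an illustrative choice:

    using Google.Protobuf;
    using Google.Protobuf.Collections;

    static void PackedRoundTrip()
    {
        var codec = FieldCodec.ForInt32(10);        // raw tag: field 1, wire type 2
        var values = new RepeatedField<int> { 1, 2, 3 };

        var buffer = new byte[values.CalculateSize(codec)];
        var output = new CodedOutputStream(buffer);
        values.WriteTo(output, codec);
        output.Flush();

        var readBack = new RepeatedField<int>();
        var input = new CodedInputStream(buffer);
        input.ReadTag();                            // consume the field's tag
        readBack.AddEntriesFrom(input, codec);      // reads the packed blob
    }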
+ + + + + Creates an exception for an error condition of an invalid tag being encountered. + + + + + Generic interface for a deeply cloneable type. + + + + All generated messages implement this interface, but so do some non-message types. + Additionally, due to the type constraint on T in , + it is simpler to keep this as a separate interface. + + + The type itself, returned by the method. + + + + Creates a deep clone of this object. + + A deep clone of this object. + + + + Extension methods on and . + + + + + Merges data from the given byte array into an existing message. + + The message to merge the data into. + The data to merge, which must be protobuf-encoded binary data. + + + + Merges data from the given byte string into an existing message. + + The message to merge the data into. + The data to merge, which must be protobuf-encoded binary data. + + + + Merges data from the given stream into an existing message. + + The message to merge the data into. + Stream containing the data to merge, which must be protobuf-encoded binary data. + + + + Merges length-delimited data from the given stream into an existing message. + + + The stream is expected to contain a length and then the data. Only the amount of data + specified by the length will be consumed. + + The message to merge the data into. + Stream containing the data to merge, which must be protobuf-encoded binary data. + + + + Converts the given message into a byte array in protobuf encoding. + + The message to convert. + The message data as a byte array. + + + + Writes the given message data to the given stream in protobuf encoding. + + The message to write to the stream. + The stream to write to. + + + + Writes the length and then data of the given message to a stream. + + The message to write. + The output stream to write to. + + + + Converts the given message into a byte string in protobuf encoding. + + The message to convert. + The message data as a byte string. + + + + Reflection-based converter from JSON to messages. + + + + Instances of this class are thread-safe, with no mutable state. + + + This is a simple start to get JSON parsing working. As it's reflection-based, + it's not as quick as baking calls into generated messages - but is a simpler implementation. + (This code is generally not heavily optimized.) + + + + + + Returns a formatter using the default settings. + + + + + Creates a new formatted with the given settings. + + The settings. + + + + Parses and merges the information into the given message. + + The message to merge the JSON information into. + The JSON to parse. + + + + Parses JSON read from and merges the information into the given message. + + The message to merge the JSON information into. + Reader providing the JSON to parse. + + + + Merges the given message using data from the given tokenizer. In most cases, the next + token should be a "start object" token, but wrapper types and nullity can invalidate + that assumption. This is implemented as an LL(1) recursive descent parser over the stream + of tokens provided by the tokenizer. This token stream is assumed to be valid JSON, with the + tokenizer performing that validation - but not every token stream is valid "protobuf JSON". + + + + + Parses into a new message. + + The type of message to create. + The JSON to parse. + The JSON does not comply with RFC 7159 + The JSON does not represent a Protocol Buffers message correctly + + + + Parses JSON read from into a new message. + + The type of message to create. + Reader providing the JSON to parse. 
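WriteDelimitedTo above is the writing half of the length-prefixed framing that ParseDelimitedFrom consumes, which is how several messages can share one stream. A sketch (MyMessage is again a hypothetical generated type):

    using System.IO;
    using Google.Protobuf;

    static void RoundTripTwo(MyMessage a, MyMessage b)
    {
        var stream = new MemoryStream();
        a.WriteDelimitedTo(stream);
        b.WriteDelimitedTo(stream);

        stream.Position = 0;
        MyMessage first = MyMessage.Parser.ParseDelimitedFrom(stream);
        MyMessage second = MyMessage.Parser.ParseDelimitedFrom(stream);
    }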
+ The JSON does not comply with RFC 7159 + The JSON does not represent a Protocol Buffers message correctly + + + + Parses into a new message. + + The JSON to parse. + Descriptor of message type to parse. + The JSON does not comply with RFC 7159 + The JSON does not represent a Protocol Buffers message correctly + + + + Parses JSON read from into a new message. + + Reader providing the JSON to parse. + Descriptor of message type to parse. + The JSON does not comply with RFC 7159 + The JSON does not represent a Protocol Buffers message correctly + + + + Creates a new instance of the message type for the given field. + + + + + Checks that any infinite/NaN values originated from the correct text. + This corrects the lenient whitespace handling of double.Parse/float.Parse, as well as the + way that Mono parses out-of-range values as infinity. + + + + + Settings controlling JSON parsing. + + + + + Default settings, as used by . This has the same default + recursion limit as , and an empty type registry. + + + + + The maximum depth of messages to parse. Note that this limit only applies to parsing + messages, not collections - so a message within a collection within a message only counts as + depth 2, not 3. + + + + + The type registry used to parse messages. + + + + + Creates a new object with the specified recursion limit. + + The maximum depth of messages to parse + + + + Creates a new object with the specified recursion limit and type registry. + + The maximum depth of messages to parse + The type registry used to parse messages + + + + Helper methods for throwing exceptions when preconditions are not met. + + + This class is used internally and by generated code; it is not particularly + expected to be used from application code, although nothing prevents it + from being used that way. + + + + + Throws an ArgumentNullException if the given value is null, otherwise + return the value to the caller. + + + + + Throws an ArgumentNullException if the given value is null, otherwise + return the value to the caller. + + + This is equivalent to but without the type parameter + constraint. In most cases, the constraint is useful to prevent you from calling CheckNotNull + with a value type - but it gets in the way if either you want to use it with a nullable + value type, or you want to use it with an unconstrained type parameter. + + + + + Reflection-based converter from messages to JSON. + + + + Instances of this class are thread-safe, with no mutable state. + + + This is a simple start to get JSON formatting working. As it's reflection-based, + it's not as quick as baking calls into generated messages - but is a simpler implementation. + (This code is generally not heavily optimized.) + + + + + + Returns a formatter using the default settings. + + + + + The JSON representation of the first 160 characters of Unicode. + Empty strings are replaced by the static constructor. + + + + + Creates a new formatted with the given settings. + + The settings. + + + + Formats the specified message as JSON. + + The message to format. + The formatted message. + + + + Formats the specified message as JSON. + + The message to format. + The TextWriter to write the formatted message to. + The formatted message. + + + + Converts a message to JSON for diagnostic purposes with no extra context. + + + + This differs from calling on the default JSON + formatter in its handling of . As no type registry is available + in calls, the normal way of resolving the type of + an Any message cannot be applied. 
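The JsonFormatter and JsonParser documented above are symmetric; a minimal round trip through their default instances (MyMessage is a hypothetical generated type):

    using Google.Protobuf;

    static MyMessage JsonRoundTrip(MyMessage message)
    {
        string json = JsonFormatter.Default.Format(message);
        return JsonParser.Default.Parse<MyMessage>(json);
    }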
Instead, a JSON property named @value + is included with the base64 data from the property of the message. + + The value returned by this method is only designed to be used for diagnostic + purposes. It may not be parsable by , and may not be parsable + by other Protocol Buffer implementations. + + The message to format for diagnostic purposes. + The diagnostic-only JSON representation of the message + + + + Camel-case converter with added strictness for field mask formatting. + + The field mask is invalid for JSON representation + + + + Writes a single value to the given writer as JSON. Only types understood by + Protocol Buffers can be written in this way. This method is only exposed for + advanced use cases; most users should be using + or . + + The writer to write the value to. Must not be null. + The value to write. May be null. + + + + Central interception point for well-known type formatting. Any well-known types which + don't need special handling can fall back to WriteMessage. We avoid assuming that the + values are using the embedded well-known types, in order to allow for dynamic messages + in the future. + + + + + Writes a string (including leading and trailing double quotes) to a builder, escaping as required. + + + Other than surrogate pair handling, this code is mostly taken from src/google/protobuf/util/internal/json_escaping.cc. + + + + + Settings controlling JSON formatting. + + + + + Default settings, as used by + + + + + Whether fields whose values are the default for the field type (e.g. 0 for integers) + should be formatted (true) or omitted (false). + + + + + The type registry used to format messages. + + + + + Creates a new object with the specified formatting of default values + and an empty type registry. + + true if default values (0, empty strings etc) should be formatted; false otherwise. + + + + Creates a new object with the specified formatting of default values + and type registry. + + true if default values (0, empty strings etc) should be formatted; false otherwise. + The to use when formatting messages. + + + + Accessor for map fields. + + + + + Describes a single method in a service. + + + + + The service this method belongs to. + + + + + The method's input type. + + + + + The method's input type. + + + + + Indicates if client streams multiple requests. + + + + + Indicates if server streams multiple responses. + + + + + The brief name of the descriptor's target. + + + + + Describes a field within a message. + + + + Field number for the "name" field. + + + Field number for the "number" field. + + + Field number for the "label" field. + + + Field number for the "type" field. + + + + If type_name is set, this need not be set. If both this and type_name + are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + + + + Field number for the "type_name" field. + + + + For message and enum types, this is the name of the type. If the name + starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + rules are used to find the type (i.e. first the nested types within this + message are searched, then within the parent, on up to the root + namespace). + + + + Field number for the "extendee" field. + + + + For extensions, this is the name of the type being extended. It is + resolved in the same manner as type_name. + + + + Field number for the "default_value" field. + + + + For numeric types, contains the original text representation of the value. + For booleans, "true" or "false". 
+ For strings, contains the default text contents (not escaped in any way). + For bytes, contains the C escaped value. All bytes >= 128 are escaped. + TODO(kenton): Base-64 encode? + + + + Field number for the "oneof_index" field. + + + + If set, gives the index of a oneof in the containing type's oneof_decl + list. This field is a member of that oneof. + + + + Field number for the "json_name" field. + + + + JSON name of this field. The value is set by protocol compiler. If the + user has set a "json_name" option on this field, that option's value + will be used. Otherwise, it's deduced from the field's name by converting + it to camelCase. + + + + Field number for the "options" field. + + + Container for nested types declared in the FieldDescriptorProto message type. + + + + 0 is reserved for errors. + Order is weird for historical reasons. + + + + + Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + negative values are likely. + + + + + Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + negative values are likely. + + + + + Tag-delimited aggregate. + + + + + Length-delimited aggregate. + + + + + New in version 2. + + + + + Uses ZigZag encoding. + + + + + Uses ZigZag encoding. + + + + + 0 is reserved for errors + + + + + TODO(sanjay): Should we add LABEL_MAP? + + + + Field number for the "ctype" field. + + + + The ctype option instructs the C++ code generator to use a different + representation of the field than it normally would. See the specific + options below. This option is not yet implemented in the open source + release -- sorry, we'll try to include it in a future version! + + + + Field number for the "packed" field. + + + + The packed option can be enabled for repeated primitive fields to enable + a more efficient representation on the wire. Rather than repeatedly + writing the tag and type for each element, the entire array is encoded as + a single length-delimited blob. In proto3, only explicit setting it to + false will avoid using packed encoding. + + + + Field number for the "jstype" field. + + + + The jstype option determines the JavaScript type used for values of the + field. The option is permitted only for 64 bit integral and fixed types + (int64, uint64, sint64, fixed64, sfixed64). By default these types are + represented as JavaScript strings. This avoids loss of precision that can + happen when a large value is converted to a floating point JavaScript + numbers. Specifying JS_NUMBER for the jstype causes the generated + JavaScript code to use the JavaScript "number" type instead of strings. + This option is an enum to permit additional types to be added, + e.g. goog.math.Integer. + + + + Field number for the "lazy" field. + + + + Should this field be parsed lazily? Lazy applies only to message-type + fields. It means that when the outer message is initially parsed, the + inner message's contents will not be parsed but instead stored in encoded + form. The inner message will actually be parsed when it is first accessed. + + This is only a hint. Implementations are free to choose whether to use + eager or lazy parsing regardless of the value of this option. However, + setting this option true suggests that the protocol author believes that + using lazy parsing on this field is worth the additional bookkeeping + overhead typically needed to implement it. + + This option does not affect the public interface of any generated code; + all method signatures remain the same. 
Furthermore, thread-safety of the + interface is not affected by this option; const methods remain safe to + call from multiple threads concurrently, while non-const methods continue + to require exclusive access. + + Note that implementations may choose not to check required fields within + a lazy sub-message. That is, calling IsInitialized() on the outher message + may return true even if the inner message has missing required fields. + This is necessary because otherwise the inner message would have to be + parsed in order to perform the check, defeating the purpose of lazy + parsing. An implementation which chooses not to check required fields + must be consistent about it. That is, for any particular sub-message, the + implementation must either *always* check its required fields, or *never* + check its required fields, regardless of whether or not the message has + been parsed. + + + + Field number for the "deprecated" field. + + + + Is this field deprecated? + Depending on the target platform, this can emit Deprecated annotations + for accessors, or it will be completely ignored; in the very least, this + is a formalization for deprecating fields. + + + + Field number for the "weak" field. + + + + For Google-internal migration only. Do not use. + + + + Field number for the "uninterpreted_option" field. + + + + The parser stores options it doesn't recognize here. See above. + + + + Container for nested types declared in the FieldOptions message type. + + + + Default mode. + + + + + Use the default type. + + + + + Use JavaScript strings. + + + + + Use JavaScript numbers. + + + + + Describes a service type. + + + + + The brief name of the descriptor's target. + + + + + An unmodifiable list of methods in this service. + + + + + Finds a method by name. + + The unqualified name of the method (e.g. "Foo"). + The method's decsriptor, or null if not found. + + + + An immutable registry of types which can be looked up by their full name. + + + + + An empty type registry, containing no types. + + + + + Attempts to find a message descriptor by its full name. + + The full name of the message, which is the dot-separated + combination of package, containing messages and message name + The message descriptor corresponding to or null + if there is no such message descriptor. + + + + Creates a type registry from the specified set of file descriptors. + + + This is a convenience overload for + to allow calls such as TypeRegistry.FromFiles(descriptor1, descriptor2). + + The set of files to include in the registry. Must not contain null values. + A type registry for the given files. + + + + Creates a type registry from the specified set of file descriptors. + + + All message types within all the specified files are added to the registry, and + the dependencies of the specified files are also added, recursively. + + The set of files to include in the registry. Must not contain null values. + A type registry for the given files. + + + + Creates a type registry from the file descriptor parents of the specified set of message descriptors. + + + This is a convenience overload for + to allow calls such as TypeRegistry.FromFiles(descriptor1, descriptor2). + + The set of message descriptors to use to identify file descriptors to include in the registry. + Must not contain null values. + A type registry for the given files. + + + + Creates a type registry from the file descriptor parents of the specified set of message descriptors. 
+ + + The specified message descriptors are only used to identify their file descriptors; the returned registry + contains all the types within the file descriptors which contain the specified message descriptors (and + the dependencies of those files), not just the specified messages. + + The set of message descriptors to use to identify file descriptors to include in the registry. + Must not contain null values. + A type registry for the given files. + + + + Builder class which isn't exposed, but acts as a convenient alternative to passing round two dictionaries in recursive calls. + + + + Holder for reflection information generated from google/protobuf/descriptor.proto + + + File descriptor for google/protobuf/descriptor.proto + + + + The protocol compiler can output a FileDescriptorSet containing the .proto + files it parses. + + + + Field number for the "file" field. + + + + Describes a complete .proto file. + + + + Field number for the "name" field. + + + + file name, relative to root of source tree + + + + Field number for the "package" field. + + + + e.g. "foo", "foo.bar", etc. + + + + Field number for the "dependency" field. + + + + Names of files imported by this file. + + + + Field number for the "public_dependency" field. + + + + Indexes of the public imported files in the dependency list above. + + + + Field number for the "weak_dependency" field. + + + + Indexes of the weak imported files in the dependency list. + For Google-internal migration only. Do not use. + + + + Field number for the "message_type" field. + + + + All top-level definitions in this file. + + + + Field number for the "enum_type" field. + + + Field number for the "service" field. + + + Field number for the "extension" field. + + + Field number for the "options" field. + + + Field number for the "source_code_info" field. + + + + This field contains optional information about the original source code. + You may safely remove this entire field without harming runtime + functionality of the descriptors -- the information is needed only by + development tools. + + + + Field number for the "syntax" field. + + + + The syntax of the proto file. + The supported values are "proto2" and "proto3". + + + + + Describes a message type. + + + + Field number for the "name" field. + + + Field number for the "field" field. + + + Field number for the "extension" field. + + + Field number for the "nested_type" field. + + + Field number for the "enum_type" field. + + + Field number for the "extension_range" field. + + + Field number for the "oneof_decl" field. + + + Field number for the "options" field. + + + Field number for the "reserved_range" field. + + + Field number for the "reserved_name" field. + + + + Reserved field names, which may not be used by fields in the same message. + A given name may only be reserved once. + + + + Container for nested types declared in the DescriptorProto message type. + + + Field number for the "start" field. + + + Field number for the "end" field. + + + + Range of reserved tag numbers. Reserved tag numbers may not be used by + fields or extension ranges in the same message. Reserved ranges may + not overlap. + + + + Field number for the "start" field. + + + + Inclusive. + + + + Field number for the "end" field. + + + + Exclusive. + + + + + Describes a oneof. + + + + Field number for the "name" field. + + + Field number for the "options" field. + + + + Describes an enum type. + + + + Field number for the "name" field. + + + Field number for the "value" field. 
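A TypeRegistry like the one documented above is what lets the JSON formatter and parser resolve Any payloads. A sketch wiring one through the Settings constructors described earlier (MyMessage is a hypothetical generated type; 64 mirrors the documented default recursion limit):

    using Google.Protobuf;
    using Google.Protobuf.Reflection;

    static void ConfigureJson()
    {
        var registry = TypeRegistry.FromMessages(MyMessage.Descriptor);
        var formatter = new JsonFormatter(new JsonFormatter.Settings(false, registry));
        var parser = new JsonParser(new JsonParser.Settings(64, registry));
    }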
+ + + Field number for the "options" field. + + + + Describes a value within an enum. + + + + Field number for the "name" field. + + + Field number for the "number" field. + + + Field number for the "options" field. + + + + Describes a service. + + + + Field number for the "name" field. + + + Field number for the "method" field. + + + Field number for the "options" field. + + + + Describes a method of a service. + + + + Field number for the "name" field. + + + Field number for the "input_type" field. + + + + Input and output type names. These are resolved in the same way as + FieldDescriptorProto.type_name, but must refer to a message type. + + + + Field number for the "output_type" field. + + + Field number for the "options" field. + + + Field number for the "client_streaming" field. + + + + Identifies if the client streams multiple client messages. + + + + Field number for the "server_streaming" field. + + + + Identifies if the server streams multiple server messages. + + + + Field number for the "java_package" field. + + + + Sets the Java package where classes generated from this .proto will be + placed. By default, the proto package is used, but this is often + inappropriate because proto packages do not normally start with backwards + domain names. + + + + Field number for the "java_outer_classname" field. + + + + If set, all the classes from the .proto file are wrapped in a single + outer class with the given name. This applies to both Proto1 + (equivalent to the old "--one_java_file" option) and Proto2 (where + a .proto always translates to a single class, but you may want to + explicitly choose the class name). + + + + Field number for the "java_multiple_files" field. + + + + If set true, then the Java code generator will generate a separate .java + file for each top-level message, enum, and service defined in the .proto + file. Thus, these types will *not* be nested inside the outer class + named by java_outer_classname. However, the outer class will still be + generated to contain the file's getDescriptor() method as well as any + top-level extensions defined in the file. + + + + Field number for the "java_generate_equals_and_hash" field. + + + + If set true, then the Java code generator will generate equals() and + hashCode() methods for all messages defined in the .proto file. + This increases generated code size, potentially substantially for large + protos, which may harm a memory-constrained application. + - In the full runtime this is a speed optimization, as the + AbstractMessage base class includes reflection-based implementations of + these methods. + - In the lite runtime, setting this option changes the semantics of + equals() and hashCode() to more closely match those of the full runtime; + the generated methods compute their results based on field values rather + than object identity. (Implementations should not assume that hashcodes + will be consistent across runtimes or versions of the protocol compiler.) + + + + Field number for the "java_string_check_utf8" field. + + + + If set true, then the Java2 code generator will generate code that + throws an exception whenever an attempt is made to assign a non-UTF-8 + byte sequence to a string field. + Message reflection will do the same. + However, an extension field still accepts non-UTF-8 byte sequences. + This option has no effect when used with the lite runtime. + + + + Field number for the "optimize_for" field. + + + Field number for the "go_package" field.
+ + + + Sets the Go package where structs generated from this .proto will be + placed. If omitted, the Go package will be derived from the following: + - The basename of the package import path, if provided. + - Otherwise, the package statement in the .proto file, if present. + - Otherwise, the basename of the .proto file, without extension. + + + + Field number for the "cc_generic_services" field. + + + + Should generic services be generated in each language? "Generic" services + are not specific to any particular RPC system. They are generated by the + main code generators in each language (without additional plugins). + Generic services were the only kind of service generation supported by + early versions of google.protobuf. + + Generic services are now considered deprecated in favor of using plugins + that generate code specific to your particular RPC system. Therefore, + these default to false. Old code which depends on generic services should + explicitly set them to true. + + + + Field number for the "java_generic_services" field. + + + Field number for the "py_generic_services" field. + + + Field number for the "deprecated" field. + + + + Is this file deprecated? + Depending on the target platform, this can emit Deprecated annotations + for everything in the file, or it will be completely ignored; in the very + least, this is a formalization for deprecating files. + + + + Field number for the "cc_enable_arenas" field. + + + + Enables the use of arenas for the proto messages in this file. This applies + only to generated classes for C++. + + + + Field number for the "objc_class_prefix" field. + + + + Sets the objective c class prefix which is prepended to all objective c + generated classes from this .proto. There is no default. + + + + Field number for the "csharp_namespace" field. + + + + Namespace for generated classes; defaults to the package. + + + + Field number for the "uninterpreted_option" field. + + + + The parser stores options it doesn't recognize here. See above. + + + + Container for nested types declared in the FileOptions message type. + + + + Generated classes can be optimized for speed or code size. + + + + + Generate complete code for parsing, serialization, + + + + + etc. + + + + + Generate code using MessageLite and the lite runtime. + + + + Field number for the "message_set_wire_format" field. + + + + Set true to use the old proto1 MessageSet wire format for extensions. + This is provided for backwards-compatibility with the MessageSet wire + format. You should not use this for any other reason: It's less + efficient, has fewer features, and is more complicated. + + The message must be defined exactly as follows: + message Foo { + option message_set_wire_format = true; + extensions 4 to max; + } + Note that the message cannot have any defined fields; MessageSets only + have extensions. + + All extensions of your type must be singular messages; e.g. they cannot + be int32s, enums, or repeated messages. + + Because this is an option, the above two restrictions are not enforced by + the protocol compiler. + + + + Field number for the "no_standard_descriptor_accessor" field. + + + + Disables the generation of the standard "descriptor()" accessor, which can + conflict with a field of the same name. This is meant to make migration + from proto1 easier; new code should avoid fields named "descriptor". + + + + Field number for the "deprecated" field. + + + + Is this message deprecated? 
+ Depending on the target platform, this can emit Deprecated annotations + for the message, or it will be completely ignored; in the very least, + this is a formalization for deprecating messages. + + + + Field number for the "map_entry" field. + + + + Whether the message is an automatically generated map entry type for the + maps field. + + For maps fields: + map<KeyType, ValueType> map_field = 1; + The parsed descriptor looks like: + message MapFieldEntry { + option map_entry = true; + optional KeyType key = 1; + optional ValueType value = 2; + } + repeated MapFieldEntry map_field = 1; + + Implementations may choose not to generate the map_entry=true message, but + use a native map in the target language to hold the keys and values. + The reflection APIs in such implementations still need to work as + if the field is a repeated message field. + + NOTE: Do not set the option in .proto files. Always use the maps syntax + instead. The option should only be implicitly set by the proto compiler + parser. + + + + Field number for the "uninterpreted_option" field. + + + + The parser stores options it doesn't recognize here. See above. + + + + Field number for the "uninterpreted_option" field. + + + + The parser stores options it doesn't recognize here. See above. + + + + Field number for the "allow_alias" field. + + + + Set this option to true to allow mapping different tag names to the same + value. + + + + Field number for the "deprecated" field. + + + + Is this enum deprecated? + Depending on the target platform, this can emit Deprecated annotations + for the enum, or it will be completely ignored; in the very least, this + is a formalization for deprecating enums. + + + + Field number for the "uninterpreted_option" field. + + + + The parser stores options it doesn't recognize here. See above. + + + + Field number for the "deprecated" field. + + + + Is this enum value deprecated? + Depending on the target platform, this can emit Deprecated annotations + for the enum value, or it will be completely ignored; in the very least, + this is a formalization for deprecating enum values. + + + + Field number for the "uninterpreted_option" field. + + + + The parser stores options it doesn't recognize here. See above. + + + + Field number for the "deprecated" field. + + + + Is this service deprecated? + Depending on the target platform, this can emit Deprecated annotations + for the service, or it will be completely ignored; in the very least, + this is a formalization for deprecating services. + + + + Field number for the "uninterpreted_option" field. + + + + The parser stores options it doesn't recognize here. See above. + + + + Field number for the "deprecated" field. + + + + Is this method deprecated? + Depending on the target platform, this can emit Deprecated annotations + for the method, or it will be completely ignored; in the very least, + this is a formalization for deprecating methods. + + + + Field number for the "uninterpreted_option" field. + + + + The parser stores options it doesn't recognize here. See above. + + + + + A message representing an option the parser does not recognize. This only + appears in options protos created by the compiler::Parser class. + DescriptorPool resolves these when building Descriptor objects. Therefore, + options protos in descriptor objects (e.g. returned by Descriptor::options(), + or produced by Descriptor::CopyTo()) will never have UninterpretedOptions + in them. + + + + Field number for the "name" field.
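+ The map_entry behaviour described above is what the C# runtime exposes as IsMap on a field descriptor; a sketch, assuming a hypothetical message with a map<string, int32> field named "counts":
+
+     using Google.Protobuf.Reflection;
+
+     static bool IsCountsAMap(MessageDescriptor descriptor)
+     {
+         // "counts" is a hypothetical map<string, int32> field.
+         FieldDescriptor counts = descriptor.FindFieldByName("counts");
+         // Reflection still models the field as a repeated message of
+         // auto-generated key/value entries, but IsMap reports the intent.
+         return counts != null && counts.IsMap;
+     }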
+ + + Field number for the "identifier_value" field. + + + + The value of the uninterpreted option, in whatever type the tokenizer + identified it as during parsing. Exactly one of these should be set. + + + + Field number for the "positive_int_value" field. + + + Field number for the "negative_int_value" field. + + + Field number for the "double_value" field. + + + Field number for the "string_value" field. + + + Field number for the "aggregate_value" field. + + + Container for nested types declared in the UninterpretedOption message type. + + + + The name of the uninterpreted option. Each string represents a segment in + a dot-separated name. is_extension is true iff a segment represents an + extension (denoted with parentheses in options specs in .proto files). + E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + "foo.(bar.baz).qux". + + + + Field number for the "name_part" field. + + + Field number for the "is_extension" field. + + + + Encapsulates information about the original source file from which a + FileDescriptorProto was generated. + + + + Field number for the "location" field. + + + + A Location identifies a piece of source code in a .proto file which + corresponds to a particular definition. This information is intended + to be useful to IDEs, code indexers, documentation generators, and similar + tools. + + For example, say we have a file like: + message Foo { + optional string foo = 1; + } + Let's look at just the field definition: + optional string foo = 1; + ^ ^^ ^^ ^ ^^^ + a bc de f ghi + We have the following locations: + span path represents + [a,i) [ 4, 0, 2, 0 ] The whole field definition. + [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + + Notes: + - A location may refer to a repeated field itself (i.e. not to any + particular index within it). This is used whenever a set of elements are + logically enclosed in a single code segment. For example, an entire + extend block (possibly containing multiple extension definitions) will + have an outer location whose path refers to the "extensions" repeated + field without an index. + - Multiple locations may have the same path. This happens when a single + logical declaration is spread out across multiple places. The most + obvious example is the "extend" block again -- there may be multiple + extend blocks in the same scope, each of which will have the same path. + - A location's span is not always a subset of its parent's span. For + example, the "extendee" of an extension declaration appears at the + beginning of the "extend" block and is shared by all extensions within + the block. + - Just because a location's span is a subset of some other location's span + does not mean that it is a descendent. For example, a "group" defines + both a type and a field in a single declaration. Thus, the locations + corresponding to the type and field and their components will overlap. + - Code which tries to interpret locations should probably be designed to + ignore those that it doesn't understand, as more types of locations could + be recorded in the future. + + + + Container for nested types declared in the SourceCodeInfo message type. + + + Field number for the "path" field. + + + + Identifies which part of the FileDescriptorProto was defined at this + location. + + Each element is a field number or an index. 
They form a path from + the root FileDescriptorProto to the place where the definition appears. For + example, this path: + [ 4, 3, 2, 7, 1 ] + refers to: + file.message_type(3) // 4, 3 + .field(7) // 2, 7 + .name() // 1 + This is because FileDescriptorProto.message_type has field number 4: + repeated DescriptorProto message_type = 4; + and DescriptorProto.field has field number 2: + repeated FieldDescriptorProto field = 2; + and FieldDescriptorProto.name has field number 1: + optional string name = 1; + + Thus, the above path gives the location of a field name. If we removed + the last element: + [ 4, 3, 2, 7 ] + this path refers to the whole field declaration (from the beginning + of the label to the terminating semicolon). + + + + Field number for the "span" field. + + + + Always has exactly three or four elements: start line, start column, + end line (optional, otherwise assumed same as start line), end column. + These are packed into a single field for efficiency. Note that line + and column numbers are zero-based -- typically you will want to add + 1 to each before displaying to a user. + + + + Field number for the "leading_comments" field. + + + + If this SourceCodeInfo represents a complete declaration, these are any + comments appearing before and after the declaration which appear to be + attached to the declaration. + + A series of line comments appearing on consecutive lines, with no other + tokens appearing on those lines, will be treated as a single comment. + + leading_detached_comments will keep paragraphs of comments that appear + before (but not connected to) the current element. Each paragraph, + separated by empty lines, will be one comment element in the repeated + field. + + Only the comment content is provided; comment markers (e.g. //) are + stripped out. For block comments, leading whitespace and an asterisk + will be stripped from the beginning of each line other than the first. + Newlines are included in the output. + + Examples: + + optional int32 foo = 1; // Comment attached to foo. + // Comment attached to bar. + optional int32 bar = 2; + + optional string baz = 3; + // Comment attached to baz. + // Another line attached to baz. + + // Comment attached to qux. + // + // Another line attached to qux. + optional double qux = 4; + + // Detached comment for corge. This is not leading or trailing comments + // to qux or corge because there are blank lines separating it from + // both. + + // Detached comment for corge paragraph 2. + + optional string corge = 5; + /* Block comment attached + * to corge. Leading asterisks + * will be removed. */ + /* Block comment attached to + * grault. */ + optional int32 grault = 6; + + // ignored detached comments. + + + + Field number for the "trailing_comments" field. + + + Field number for the "leading_detached_comments" field. + + + + Describes the relationship between generated code and its original source + file. A GeneratedCodeInfo message is associated with only one generated + source file, but may contain references to different source .proto files. + + + + Field number for the "annotation" field. + + + + An Annotation connects some span of text in generated code to an element + of its generating .proto file. + + + + Container for nested types declared in the GeneratedCodeInfo message type. + + + Field number for the "path" field. + + + + Identifies the element in the original source .proto file. This field + is formatted the same as SourceCodeInfo.Location.path. + + + + Field number for the "source_file" field.
+ + + + Identifies the filesystem path to the original source .proto. + + + + Field number for the "begin" field. + + + + Identifies the starting offset in bytes in the generated code + that relates to the identified object. + + + + Field number for the "end" field. + + + + Identifies the ending offset in bytes in the generated code that + relates to the identified offset. The end offset should be one past + the last relevant byte (so the length of the text = end - begin). + + + + + Interface implemented by all descriptor types. + + + + + Returns the name of the entity (message, field etc) being described. + + + + + Returns the fully-qualified name of the entity being described. + + + + + Returns the descriptor for the .proto file that this entity is part of. + + + + + Thrown when building descriptors fails because the source DescriptorProtos + are not valid. + + + + + The full name of the descriptor where the error occurred. + + + + + A human-readable description of the error. (The Message property + is made up of the descriptor's name and this description.) + + + + + Descriptor for a field or extension within a message in a .proto file. + + + + + Get the field's containing message type. + + + + + Returns the oneof containing this field, or null if it is not part of a oneof. + + + + + The effective JSON name for this field. This is usually the lower-camel-cased form of the field name, + but can be overridden using the json_name option in the .proto file. + + + + + The brief name of the descriptor's target. + + + + + Returns the accessor for this field. + + + + While a describes the field, it does not provide + any way of obtaining or changing the value of the field within a specific message; + that is the responsibility of the accessor. + + + The value returned by this property will be non-null for all regular fields. However, + if a message containing a map field is introspected, the list of nested messages will include + an auto-generated nested key/value pair message for the field. This is not represented in any + generated type, and the value of the map field itself is represented by a dictionary in the + reflection API. There are never instances of those "hidden" messages, so no accessor is provided + and this property will return null. + + + + + + Maps a field type as included in the .proto file to a FieldType. + + + + + Returns true if this field is a repeated field; false otherwise. + + + + + Returns true if this field is a map field; false otherwise. + + + + + Returns true if this field is a packed, repeated field; false otherwise. + + + + + Returns the type of the field. + + + + + Returns the field number declared in the proto file. + + + + + Compares this descriptor with another one, ordering in "canonical" order + which simply means ascending order by field number. + must be a field of the same type, i.e. the of + both fields must be the same. + + + + + For enum fields, returns the field's type. + + + + + For embedded message and group fields, returns the field's type. + + + + + Look up and cross-link all field types etc. + + + + + Accessor for repeated fields. + + + + + Reflection access for a oneof, allowing clear and "get case" actions. + + + + + Gets the descriptor for this oneof. + + + The descriptor of the oneof. + + + + + Clears the oneof in the specified message. + + + + + Indicates which field in the oneof is set for specified message + + + + + Descriptor for an enum type in a .proto file. + + + + + The brief name of the descriptor's target. 
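+ The oneof accessor described a little earlier can be exercised along these lines (a sketch; the GetCaseFieldDescriptor name follows the "get case" action mentioned above and is an assumption here):
+
+     using Google.Protobuf;
+     using Google.Protobuf.Reflection;
+
+     static void ClearFirstOneof(IMessage message)
+     {
+         OneofDescriptor oneof = message.Descriptor.Oneofs[0];  // assumes at least one oneof
+         FieldDescriptor setField = oneof.Accessor.GetCaseFieldDescriptor(message);
+         if (setField != null)
+         {
+             oneof.Accessor.Clear(message);  // clears whichever field was set
+         }
+     }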
+ + + + + The CLR type for this enum. For generated code, this will be a CLR enum type. + + + + + If this is a nested type, get the outer descriptor, otherwise null. + + + + + An unmodifiable list of defined value descriptors for this enum. + + + + + Finds an enum value by number. If multiple enum values have the + same number, this returns the first defined value with that number. + If there is no value for the given number, this returns null. + + + + + Finds an enum value by name. + + The unqualified name of the value (e.g. "FOO"). + The value's descriptor, or null if not found. + + + + Contains lookup tables containing all the descriptors defined in a particular file. + + + + + Finds a symbol of the given name within the pool. + + The type of symbol to look for + Fully-qualified name to look up + The symbol with the given name and type, + or null if the symbol doesn't exist or has the wrong type + + + + Adds a package to the symbol tables. If a package by the same name + already exists, that is fine, but if some other kind of symbol + exists under the same name, an exception is thrown. If the package + has multiple components, this also adds the parent package(s). + + + + + Adds a symbol to the symbol table. + + The symbol already existed + in the symbol table. + + + + Verifies that the descriptor's name is valid (i.e. it contains + only letters, digits and underscores, and does not start with a digit). + + + + + + Returns the field with the given number in the given descriptor, + or null if it can't be found. + + + + + Adds a field to the fieldsByNumber table. + + A field with the same + containing type and number already exists. + + + + Adds an enum value to the enumValuesByNumber table. If an enum value + with the same type and number already exists, this method does nothing. + (This is allowed; the first value defined with the number takes precedence.) + + + + + Looks up a descriptor by name, relative to some other descriptor. + The name may be fully-qualified (with a leading '.'), partially-qualified, + or unqualified. C++-like name lookup semantics are used to search for the + matching descriptor. + + + This isn't heavily optimized, but it's only used during cross linking anyway. + If it starts being used more widely, we should look at performance more carefully. + + + + + Struct used to hold the keys for the fieldByNumber table. + + + + + Descriptor for a single enum value within an enum in a .proto file. + + + + + Returns the name of the enum value described by this object. + + + + + Returns the number associated with this enum value. + + + + + Returns the enum descriptor that this value is part of. + + + + + Base class for field accessors. + + + + + In order to run on iOS (no JIT) we had to use Invoke which results in a bit + of a performance cost. The original description is as follows: + The methods in this class are somewhat evil, and should not be tampered with lightly. + Basically they allow the creation of relatively weakly typed delegates from MethodInfos + which are more strongly typed. They do this by creating an appropriate strongly typed + delegate from the MethodInfo, and then calling that within an anonymous method. + Mind-bending stuff (at least to your humble narrator) but the resulting delegates are + very fast compared with calling Invoke later on. + + + + + Empty Type[] used when calling GetProperty to force property instead of indexer fetching. 
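+ A sketch of the enum value lookups documented above, using the NullValue enum from struct.proto (the StructReflection holder name is an assumption based on the C# codegen conventions):
+
+     using Google.Protobuf.Reflection;
+     using Google.Protobuf.WellKnownTypes;
+
+     static void LookUpNullValue()
+     {
+         EnumDescriptor nullValue = StructReflection.Descriptor.EnumTypes[0];  // NullValue
+         EnumValueDescriptor byName = nullValue.FindValueByName("NULL_VALUE");
+         EnumValueDescriptor byNumber = nullValue.FindValueByNumber(0);
+         // Both lookups return null rather than throwing when nothing matches.
+     }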
+ + + + + Creates a delegate which will cast the argument to the appropriate method target type, + call the method on it, then convert the result to object. + + + + + Creates a delegate which will cast the argument to the appropriate method target type, + call the method on it, then convert the result to the specified type. + + + + + Creates a delegate which will execute the given method after casting the first argument to + the target type of the method, and the second argument to the first parameter type of the method. + + + + + Creates a delegate which will execute the given method after casting the first argument to + the target type of the method. + + + + + Describes a .proto file, including everything defined within. + IDescriptor is implemented such that the File property returns this descriptor, + and the FullName is the same as the Name. + + + + + Computes the full name of a descriptor within this file, with an optional parent message. + + + + + Extracts public dependencies from direct dependencies. This is a static method despite its + first parameter, as the value we're in the middle of constructing is only used for exceptions. + + + + + The descriptor in its protocol message representation. + + + + + The file name. + + + + + The package as declared in the .proto file. This may or may not + be equivalent to the .NET namespace of the generated classes. + + + + + Unmodifiable list of top-level message types declared in this file. + + + + + Unmodifiable list of top-level enum types declared in this file. + + + + + Unmodifiable list of top-level services declared in this file. + + + + + Unmodifiable list of this file's dependencies (imports). + + + + + Unmodifiable list of this file's public dependencies (public imports). + + + + + The original serialized binary form of this descriptor. + + + + + Implementation of IDescriptor.FullName - just returns the same as Name. + + + + + Implementation of IDescriptor.File - just returns this descriptor. + + + + + Pool containing symbol descriptors. + + + + + Finds a type (message, enum, service or extension) in the file by name. Does not find nested types. + + The unqualified type name to look for. + The type of descriptor to look for + The type's descriptor, or null if not found. + + + + Builds a FileDescriptor from its protocol buffer representation. + + The original serialized descriptor data. + We have only limited proto2 support, so serializing FileDescriptorProto + would not necessarily give us this. + The protocol message form of the FileDescriptor. + FileDescriptors corresponding to all of the + file's dependencies, in the exact order listed in the .proto file. May be null, + in which case it is treated as an empty array. + Whether unknown dependencies are ignored (true) or cause an exception to be thrown (false). + Details about generated code, for the purposes of reflection. + If is not + a valid descriptor. This can occur for a number of reasons, such as a field + having an undefined type or because two messages were defined with the same name. + + + + Creates a descriptor for generated code. + + + This method is only designed to be used by the results of generating code with protoc, + which creates the appropriate dependencies etc. It has to be public because the generated + code is "external", but should not be called directly by end users. + + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Returns the file descriptor for descriptor.proto. 
+ + + This is used for protos which take a direct dependency on descriptor.proto, typically for + annotations. While descriptor.proto is a proto2 file, it is built into the Google.Protobuf + runtime for reflection purposes. The messages are internal to the runtime as they would require + proto2 semantics for full support, but the file descriptor is available via this property. The + C# codegen in protoc automatically uses this property when it detects a dependency on descriptor.proto. + + + The file descriptor for descriptor.proto. + + + + + Extra information provided by generated code when initializing a message or file descriptor. + These are constructed as required, and are not long-lived. Hand-written code should + never need to use this type. + + + + + Irrelevant for file descriptors; the CLR type for the message for message descriptors. + + + + + Irrelevant for file descriptors; the parser for message descriptors. + + + + + Irrelevant for file descriptors; the CLR property names (in message descriptor field order) + for fields in the message for message descriptors. + + + + + Irrelevant for file descriptors; the CLR property "base" names (in message descriptor oneof order) + for oneofs in the message for message descriptors. It is expected that for a oneof name of "Foo", + there will be a "FooCase" property and a "ClearFoo" method. + + + + + The reflection information for types within this file/message descriptor. Elements may be null + if there is no corresponding generated type, e.g. for map entry types. + + + + + The CLR types for enums within this file/message descriptor. + + + + + Creates a GeneratedClrTypeInfo for a message descriptor, with nested types, nested enums, the CLR type, property names and oneof names. + Each array parameter may be null, to indicate a lack of values. + The parameter order is designed to make it feasible to format the generated code readably. + + + + + Creates a GeneratedClrTypeInfo for a file descriptor, with only types and enums. + + + + + Describes a "oneof" field collection in a message type: a set of + fields of which at most one can be set in any particular message. + + + + + The brief name of the descriptor's target. + + + + + Gets the message type containing this oneof. + + + The message type containing this oneof. + + + + + Gets the fields within this oneof, in declaration order. + + + The fields within this oneof, in declaration order. + + + + + Gets an accessor for reflective access to the values associated with the oneof + in a particular message. + + + The accessor used for reflective access. + + + + + Internal class containing utility methods when working with descriptors. + + + + + Equivalent to Func[TInput, int, TOutput] but usable in .NET 2.0. Only used to convert + arrays. + + + + + Converts the given array into a read-only list, applying the specified conversion to + each input element. + + + + + Allows fields to be reflectively accessed. + + + + + Returns the descriptor associated with this field. + + + + + Clears the field in the specified message. (For repeated fields, + this clears the list.) + + + + + Fetches the field value. For repeated values, this will be an + implementation. For map values, this will be an + implementation. + + + + + Mutator for single "simple" fields only. + + + Repeated fields are mutated by fetching the value and manipulating it as a list. + Map fields are mutated by fetching the value and manipulating it as a dictionary. + + The field is not a "simple" field. 
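+ Tying the accessor members above together, a minimal sketch of reflective get/set on a simple (non-repeated, non-map) field; the field name "name" is hypothetical:
+
+     using System;
+     using Google.Protobuf;
+     using Google.Protobuf.Reflection;
+
+     static void RewriteNameField(IMessage message, string newValue)
+     {
+         FieldDescriptor field = message.Descriptor.FindFieldByName("name");
+         Console.WriteLine(field.Accessor.GetValue(message));  // current value
+         field.Accessor.SetValue(message, newValue);           // throws for repeated/map fields
+     }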
+ + + + Enumeration of all the possible field types. + + + + + The double field type. + + + + + The float field type. + + + + + The int64 field type. + + + + + The uint64 field type. + + + + + The int32 field type. + + + + + The fixed64 field type. + + + + + The fixed32 field type. + + + + + The bool field type. + + + + + The string field type. + + + + + The field type used for groups (not supported in this implementation). + + + + + The field type used for message fields. + + + + + The bytes field type. + + + + + The uint32 field type. + + + + + The sfixed32 field type. + + + + + The sfixed64 field type. + + + + + The sint32 field type. + + + + + The sint64 field type. + + + + + The field type used for enum fields. + + + + + Accessor for single fields. + + + + + Describes a message type. + + + + + The brief name of the descriptor's target. + + + + + The CLR type used to represent message instances from this descriptor. + + + + The value returned by this property will be non-null for all regular fields. However, + if a message containing a map field is introspected, the list of nested messages will include + an auto-generated nested key/value pair message for the field. This is not represented in any + generated type, so this property will return null in such cases. + + + For wrapper types (StringValue and the like), the type returned here + will be the generated message type, not the native type used by reflection for fields of those types. Code + using reflection should call IsWrapperType to determine whether a message descriptor represents + a wrapper type, and handle the result appropriately. + + + + + + A parser for this message type. + + + + As MessageParser is not generic, this cannot be statically + typed to the relevant type, but it should produce objects of a type compatible with ClrType. + + + The value returned by this property will be non-null for all regular fields. However, + if a message containing a map field is introspected, the list of nested messages will include + an auto-generated nested key/value pair message for the field. No message parser object is created for + such messages, so this property will return null in such cases. + + + For wrapper types (StringValue and the like), the parser returned here + will be the generated message type, not the native type used by reflection for fields of those types. Code + using reflection should call IsWrapperType to determine whether a message descriptor represents + a wrapper type, and handle the result appropriately. + + + + + + Returns whether this message is one of the "well known types" which may have runtime/protoc support. + + + + + Returns whether this message is one of the "wrapper types" used for fields which represent primitive values + with the addition of presence. + + + + + If this is a nested type, get the outer descriptor, otherwise null. + + + + + A collection of fields, which can be retrieved by name or field number. + + + + + An unmodifiable list of this message type's nested types. + + + + + An unmodifiable list of this message type's enum types. + + + + + An unmodifiable list of the "oneof" field collections in this message type. + + + + + Finds a field by field name. + + The unqualified name of the field (e.g. "foo"). + The field's descriptor, or null if not found. + + + + Finds a field by field number. + + The field number within this message type. + The field's descriptor, or null if not found. + + + + Finds a nested descriptor by name. The name is valid for fields, nested + message types, oneofs and enums. + + The unqualified name of the descriptor, e.g.
"Foo" + The descriptor, or null if not found. + + + + Looks up and cross-links all fields and nested types. + + + + + A collection to simplify retrieving the field accessor for a particular field. + + + + + Returns the fields in the message as an immutable list, in the order in which they + are declared in the source .proto file. + + + + + Returns the fields in the message as an immutable list, in ascending field number + order. Field numbers need not be contiguous, so there is no direct mapping from the + index in the list to the field number; to retrieve a field by field number, it is better + to use the indexer. + + + + + Returns a read-only dictionary mapping the field names in this message as they're available + in the JSON representation to the field descriptors. For example, a field foo_bar + in the message would result two entries, one with a key fooBar and one with a key + foo_bar, both referring to the same field. + + + + + Retrieves the descriptor for the field with the given number. + + Number of the field to retrieve the descriptor for + The accessor for the given field + The message descriptor does not contain a field + with the given number + + + + Retrieves the descriptor for the field with the given name. + + Name of the field to retrieve the descriptor for + The descriptor for the given field + The message descriptor does not contain a field + with the given name + + + + Base class for nearly all descriptors, providing common functionality. + + + + + The index of this descriptor within its parent descriptor. + + + This returns the index of this descriptor within its parent, for + this descriptor's type. (There can be duplicate values for different + types, e.g. one enum type with index 0 and one message type with index 0.) + + + + + Returns the name of the entity (field, message etc) being described. + + + + + The fully qualified name of the descriptor's target. + + + + + The file this descriptor was declared in. + + + + + Specifies the original name (in the .proto file) of a named element, + such as an enum value. + + + + + The name of the element in the .proto file. + + + + + Constructs a new attribute instance for the given name. + + The name of the element in the .proto file. + + + + Represents a package in the symbol table. We use PackageDescriptors + just as placeholders so that someone cannot define, say, a message type + that has the same name as an existing package. + + + + + Extension methods for , effectively providing + the familiar members from previous desktop framework versions while + targeting the newer releases, .NET Core etc. + + + + + Returns the public getter of a property, or null if there is no such getter + (either because it's read-only, or the getter isn't public). + + + + + Returns the public setter of a property, or null if there is no such setter + (either because it's write-only, or the setter isn't public). + + + + + The contents of a repeated field: essentially, a collection with some extra + restrictions (no null values) and capabilities (deep cloning). + + + This implementation does not generally prohibit the use of types which are not + supported by Protocol Buffers but nor does it guarantee that all operations will work in such cases. + + The element type of the repeated field. + + + + Creates a deep clone of this repeated field. 
+ + + If the field type is + a message type, each element is also cloned; otherwise, it is + assumed that the field type is primitive (including string and + bytes, both of which are immutable) and so a simple copy is + equivalent to a deep clone. + + A deep clone of this repeated field. + + + + Adds the entries from the given input stream, decoding them with the specified codec. + + The input stream to read from. + The codec to use in order to read each entry. + + + + Calculates the size of this collection based on the given codec. + + The codec to use when encoding each field. + The number of bytes that would be written to a by , + using the same codec. + + + + Writes the contents of this collection to the given , + encoding each value using the specified codec. + + The output stream to write to. + The codec to use when encoding each value. + + + + Adds the specified item to the collection. + + The item to add. + + + + Removes all items from the collection. + + + + + Determines whether this collection contains the given item. + + The item to find. + true if this collection contains the given item; false otherwise. + + + + Copies this collection to the given array. + + The array to copy to. + The first index of the array to copy to. + + + + Removes the specified item from the collection + + The item to remove. + true if the item was found and removed; false otherwise. + + + + Gets the number of elements contained in the collection. + + + + + Gets a value indicating whether the collection is read-only. + + + + + Adds all of the specified values into this collection. + + The values to add to this collection. + + + + Adds all of the specified values into this collection. This method is present to + allow repeated fields to be constructed from queries within collection initializers. + Within non-collection-initializer code, consider using the equivalent + method instead for clarity. + + The values to add to this collection. + + + + Returns an enumerator that iterates through the collection. + + + An enumerator that can be used to iterate through the collection. + + + + + Determines whether the specified , is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns an enumerator that iterates through a collection. + + + An object that can be used to iterate through the collection. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Compares this repeated field with another for equality. + + The repeated field to compare this with. + true if refers to an equal repeated field; false otherwise. + + + + Returns the index of the given item within the collection, or -1 if the item is not + present. + + The item to find in the collection. + The zero-based index of the item, or -1 if it is not found. + + + + Inserts the given item at the specified index. + + The index at which to insert the item. + The item to insert. + + + + Removes the item at the given index. + + The zero-based index of the item to remove. + + + + Returns a string representation of this repeated field, in the same + way as it would be represented by the default JSON formatter. + + + + + Gets or sets the item at the specified index. + + + The element at the specified index. + + The zero-based index of the element to get or set. + The item at the specified index. 
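+ A usage sketch of the repeated-field collection above, together with the map field type documented next:
+
+     using Google.Protobuf.Collections;
+
+     static void CollectionsSketch()
+     {
+         var tags = new RepeatedField<string> { "alpha", "beta" };  // collection initializer calls Add
+         tags.AddRange(new[] { "gamma" });
+         RepeatedField<string> copy = tags.Clone();  // deep clone; strings are immutable, so copied as-is
+
+         var counts = new MapField<string, int>();
+         counts["alpha"] = 1;     // indexer adds or replaces
+         counts.Add("beta", 2);   // Add throws if the key already exists
+     }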
+ + + + Representation of a map field in a Protocol Buffer message. + + Key type in the map. Must be a type supported by Protocol Buffer map keys. + Value type in the map. Must be a type supported by Protocol Buffers. + + + For string keys, the equality comparison is provided by . + + + Null values are not permitted in the map, either for wrapper types or regular messages. + If a map is deserialized from a data stream and the value is missing from an entry, a default value + is created instead. For primitive types, that is the regular default value (0, the empty string and so + on); for message types, an empty instance of the message is created, as if the map entry contained a 0-length + encoded value for the field. + + + This implementation does not generally prohibit the use of key/value types which are not + supported by Protocol Buffers (e.g. using a key type of byte) but nor does it guarantee + that all operations will work in such cases. + + + The order in which entries are returned when iterating over this object is undefined, and may change + in future versions. + + + + + + Creates a deep clone of this object. + + + A deep clone of this object. + + + + + Adds the specified key/value pair to the map. + + + This operation fails if the key already exists in the map. To replace an existing entry, use the indexer. + + The key to add + The value to add. + The given key already exists in map. + + + + Determines whether the specified key is present in the map. + + The key to check. + true if the map contains the given key; false otherwise. + + + + Removes the entry identified by the given key from the map. + + The key indicating the entry to remove from the map. + true if the map contained the given key before the entry was removed; false otherwise. + + + + Gets the value associated with the specified key. + + The key whose value to get. + When this method returns, the value associated with the specified key, if the key is found; + otherwise, the default value for the type of the parameter. + This parameter is passed uninitialized. + true if the map contains an element with the specified key; otherwise, false. + + + + Gets or sets the value associated with the specified key. + + The key of the value to get or set. + The property is retrieved and key does not exist in the collection. + The value associated with the specified key. If the specified key is not found, + a get operation throws a , and a set operation creates a new element with the specified key. + + + + Gets a collection containing the keys in the map. + + + + + Gets a collection containing the values in the map. + + + + + Adds the specified entries to the map. The keys and values are not automatically cloned. + + The entries to add to the map. + + + + Returns an enumerator that iterates through the collection. + + + An enumerator that can be used to iterate through the collection. + + + + + Returns an enumerator that iterates through a collection. + + + An object that can be used to iterate through the collection. + + + + + Adds the specified item to the map. + + The item to add to the map. + + + + Removes all items from the map. + + + + + Determines whether map contains an entry equivalent to the given key/value pair. + + The key/value pair to find. + + + + + Copies the key/value pairs in this map to an array. + + The array to copy the entries into. + The index of the array at which to start copying values. + + + + Removes the specified key/value pair from the map. 
+ + Both the key and the value must be found for the entry to be removed. + The key/value pair to remove. + true if the key/value pair was found and removed; false otherwise. + + + + Gets the number of elements contained in the map. + + + + + Gets a value indicating whether the map is read-only. + + + + + Determines whether the specified , is equal to this instance. + + The to compare with this instance. + + true if the specified is equal to this instance; otherwise, false. + + + + + Returns a hash code for this instance. + + + A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + + + + + Compares this map with another for equality. + + + The order of the key/value pairs in the maps is not deemed significant in this comparison. + + The map to compare this with. + true if refers to an equal map; false otherwise. + + + + Adds entries to the map from the given stream. + + + It is assumed that the stream is initially positioned after the tag specified by the codec. + This method will continue reading entries from the stream until the end is reached, or + a different tag is encountered. + + Stream to read from + Codec describing how the key/value pairs are encoded + + + + Writes the contents of this map to the given coded output stream, using the specified codec + to encode each entry. + + The output stream to write to. + The codec to use for each entry. + + + + Calculates the size of this map based on the given entry codec. + + The codec to use to encode each entry. + + + + + Returns a string representation of this repeated field, in the same + way as it would be represented by the default JSON formatter. + + + + + A codec for a specific map field. This contains all the information required to encode and + decode the nested messages. + + + + + Creates a new entry codec based on a separate key codec and value codec, + and the tag to use for each map entry. + + The key codec. + The value codec. + The map tag to use to introduce each map entry. + + + + The tag used in the enclosing message to indicate map entries. + + + + + A mutable message class, used for parsing and serializing. This + delegates the work to a codec, but implements the interface + for interop with and . + This is nested inside Codec as it's tightly coupled to the associated codec, + and it's simpler if it has direct access to all its fields. + + + + + Read-only wrapper around another dictionary. + + + + Holder for reflection information generated from google/protobuf/timestamp.proto + + + File descriptor for google/protobuf/timestamp.proto + + + + A Timestamp represents a point in time independent of any time zone + or calendar, represented as seconds and fractions of seconds at + nanosecond resolution in UTC Epoch time. It is encoded using the + Proleptic Gregorian Calendar which extends the Gregorian calendar + backwards to year one. It is encoded assuming all minutes are 60 + seconds long, i.e. leap seconds are "smeared" so that no leap second + table is needed for interpretation. Range is from + 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. + By restricting to that range, we ensure that we can convert to + and from RFC 3339 date strings. + See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). + + Example 1: Compute Timestamp from POSIX `time()`. 
+ + Timestamp timestamp; + timestamp.set_seconds(time(NULL)); + timestamp.set_nanos(0); + + Example 2: Compute Timestamp from POSIX `gettimeofday()`. + + struct timeval tv; + gettimeofday(&tv, NULL); + + Timestamp timestamp; + timestamp.set_seconds(tv.tv_sec); + timestamp.set_nanos(tv.tv_usec * 1000); + + Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. + + FILETIME ft; + GetSystemTimeAsFileTime(&ft); + UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + + // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z + // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. + Timestamp timestamp; + timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); + timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); + + Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. + + long millis = System.currentTimeMillis(); + + Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) + .setNanos((int) ((millis % 1000) * 1000000)).build(); + + Example 5: Compute Timestamp from current time in Python. + + now = time.time() + seconds = int(now) + nanos = int((now - seconds) * 10**9) + timestamp = Timestamp(seconds=seconds, nanos=nanos) + + + + Field number for the "seconds" field. + + + + Represents seconds of UTC time since Unix epoch + 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + 9999-12-31T23:59:59Z inclusive. + + + + Field number for the "nanos" field. + + + + Non-negative fractions of a second at nanosecond resolution. Negative + second values with fractions must still have non-negative nanos values + that count forward in time. Must be from 0 to 999,999,999 + inclusive. + + + + + Returns the difference between one Timestamp and another, as a Duration. + + The timestamp to subtract from. Must not be null. + The timestamp to subtract. Must not be null. + The difference between the two specified timestamps. + + + + Adds a Duration to a Timestamp, to obtain another Timestamp. + + The timestamp to add the duration to. Must not be null. + The duration to add. Must not be null. + The result of adding the duration to the timestamp. + + + + Subtracts a Duration from a Timestamp, to obtain another Timestamp. + + The timestamp to subtract the duration from. Must not be null. + The duration to subtract. + The result of subtracting the duration from the timestamp. + + + + Converts this timestamp into a DateTime. + + + The resulting DateTime will always have a Kind of Utc. + If the timestamp is not a precise number of ticks, it will be truncated towards the start + of time. For example, a timestamp with a nanos value of 99 will result in a + DateTime value precisely on a second. + + This timestamp as a DateTime. + The timestamp contains invalid values; either it is + incorrectly normalized or is outside the valid range. + + + + Converts this timestamp into a DateTimeOffset. + + + The resulting DateTimeOffset will always have an Offset of zero. + If the timestamp is not a precise number of ticks, it will be truncated towards the start + of time. For example, a timestamp with a nanos value of 99 will result in a + DateTimeOffset value precisely on a second. + + This timestamp as a DateTimeOffset. + The timestamp contains invalid values; either it is + incorrectly normalized or is outside the valid range. + + + + Converts the specified DateTime to a Timestamp. + + + The Kind of the DateTime is not DateTimeKind.Utc. + The converted timestamp.
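+ A C# sketch of the conversions and operators documented above:
+
+     using System;
+     using Google.Protobuf.WellKnownTypes;
+
+     static void TimestampSketch()
+     {
+         Timestamp start = Timestamp.FromDateTime(new DateTime(2016, 1, 1, 0, 0, 0, DateTimeKind.Utc));
+         Timestamp now = Timestamp.FromDateTime(DateTime.UtcNow);  // input must be DateTimeKind.Utc
+         Duration elapsed = now - start;           // Timestamp - Timestamp => Duration
+         Timestamp later = now + elapsed;          // Timestamp + Duration  => Timestamp
+         DateTime roundTrip = later.ToDateTime();  // always has Kind == DateTimeKind.Utc
+     }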
+ + + + Converts the given DateTimeOffset to a Timestamp. + + The offset is taken into consideration when converting the value (so the same instant in time + is represented) but is not a separate part of the resulting value. In other words, there is no + roundtrip operation to retrieve the original DateTimeOffset. + The date and time (with UTC offset) to convert to a timestamp. + The converted timestamp. + + + + Converts a timestamp specified in seconds/nanoseconds to a string. + + + If the value is a normalized timestamp in the range described in timestamp.proto, + the diagnosticOnly parameter is ignored. Otherwise, if the parameter is true, + a JSON object with a warning is returned; if it is false, an InvalidOperationException is thrown. + + Seconds portion of the timestamp. + Nanoseconds portion of the timestamp. + Determines the handling of non-normalized values. + The represented timestamp is invalid, and diagnosticOnly is false. + + + + Returns a string representation of this timestamp for diagnostic purposes. + + + Normally the returned value will be a JSON string value (including leading and trailing quotes) but + when the value is non-normalized or out of range, a JSON object representation will be returned + instead, including a warning. This is to avoid exceptions being thrown when trying to + diagnose problems - the regular JSON formatter will still throw an exception for non-normalized + values. + + A string representation of this value. + + + Holder for reflection information generated from google/protobuf/struct.proto + + + File descriptor for google/protobuf/struct.proto + + + + `NullValue` is a singleton enumeration to represent the null value for the + `Value` type union. + + The JSON representation for `NullValue` is JSON `null`. + + + + + Null value. + + + + + `Struct` represents a structured data value, consisting of fields + which map to dynamically typed values. In some languages, `Struct` + might be supported by a native representation. For example, in + scripting languages like JS a struct is represented as an + object. The details of that representation are described together + with the proto support for the language. + + The JSON representation for `Struct` is JSON object. + + + + Field number for the "fields" field. + + + + Unordered map of dynamically typed values. + + + + + `Value` represents a dynamically typed value which can be either + null, a number, a string, a boolean, a recursive struct value, or a + list of values. A producer of value is expected to set one of these + variants; absence of any variant indicates an error. + + The JSON representation for `Value` is JSON value. + + + + Field number for the "null_value" field. + + + + Represents a null value. + + + + Field number for the "number_value" field. + + + + Represents a double value. + + + + Field number for the "string_value" field. + + + + Represents a string value. + + + + Field number for the "bool_value" field. + + + + Represents a boolean value. + + + + Field number for the "struct_value" field. + + + + Represents a structured value. + + + + Field number for the "list_value" field. + + + + Represents a repeated `Value`. + + + + Enum of possible cases for the "kind" oneof. + + + + Convenience method to create a Value message with a string value. + + Value to set for the StringValue property. + A newly-created Value message with the given value. + + + + Convenience method to create a Value message with a number value. + + Value to set for the NumberValue property. + A newly-created Value message with the given value. + + + + Convenience method to create a Value message with a Boolean value.
+ + Value to set for the BoolValue property. + A newly-created Value message with the given value. + + + + Convenience method to create a Value message with a null initial value. + + A newly-created Value message a null initial value. + + + + Convenience method to create a Value message with an initial list of values. + + The values provided are not cloned; the references are copied directly. + A newly-created Value message an initial list value. + + + + Convenience method to create a Value message with an initial struct value + + The value provided is not cloned; the reference is copied directly. + A newly-created Value message an initial struct value. + + + + `ListValue` is a wrapper around a repeated field of values. + + The JSON representation for `ListValue` is JSON array. + + + + Field number for the "values" field. + + + + Repeated field of dynamically typed values. + + + + Holder for reflection information generated from google/protobuf/wrappers.proto + + + + Field number for the single "value" field in all wrapper types. + + + + File descriptor for google/protobuf/wrappers.proto + + + Holder for reflection information generated from google/protobuf/type.proto + + + File descriptor for google/protobuf/type.proto + + + + The syntax in which a protocol buffer element is defined. + + + + + Syntax `proto2`. + + + + + Syntax `proto3`. + + + + + A protocol buffer message type. + + + + Field number for the "name" field. + + + + The fully qualified message name. + + + + Field number for the "fields" field. + + + + The list of fields. + + + + Field number for the "oneofs" field. + + + + The list of types appearing in `oneof` definitions in this type. + + + + Field number for the "options" field. + + + + The protocol buffer options. + + + + Field number for the "source_context" field. + + + + The source context. + + + + Field number for the "syntax" field. + + + + The source syntax. + + + + + A single field of a message type. + + + + Field number for the "kind" field. + + + + The field type. + + + + Field number for the "cardinality" field. + + + + The field cardinality. + + + + Field number for the "number" field. + + + + The field number. + + + + Field number for the "name" field. + + + + The field name. + + + + Field number for the "type_url" field. + + + + The field type URL, without the scheme, for message or enumeration + types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + + + + Field number for the "oneof_index" field. + + + + The index of the field type in `Type.oneofs`, for message or enumeration + types. The first type has index 1; zero means the type is not in the list. + + + + Field number for the "packed" field. + + + + Whether to use alternative packed wire representation. + + + + Field number for the "options" field. + + + + The protocol buffer options. + + + + Field number for the "json_name" field. + + + + The field JSON name. + + + + Field number for the "default_value" field. + + + + The string value of the default value of this field. Proto2 syntax only. + + + + Container for nested types declared in the Field message type. + + + + Basic field types. + + + + + Field type unknown. + + + + + Field type double. + + + + + Field type float. + + + + + Field type int64. + + + + + Field type uint64. + + + + + Field type int32. + + + + + Field type fixed64. + + + + + Field type fixed32. + + + + + Field type bool. + + + + + Field type string. + + + + + Field type group. Proto2 syntax only, and deprecated. + + + + + Field type message. + + + + + Field type bytes. 
+ + + + Holder for reflection information generated from google/protobuf/wrappers.proto + + + + Field number for the single "value" field in all wrapper types. + + + + File descriptor for google/protobuf/wrappers.proto + + + Holder for reflection information generated from google/protobuf/type.proto + + + File descriptor for google/protobuf/type.proto + + + + The syntax in which a protocol buffer element is defined. + + + + + Syntax `proto2`. + + + + + Syntax `proto3`. + + + + + A protocol buffer message type. + + + + Field number for the "name" field. + + + + The fully qualified message name. + + + + Field number for the "fields" field. + + + + The list of fields. + + + + Field number for the "oneofs" field. + + + + The list of types appearing in `oneof` definitions in this type. + + + + Field number for the "options" field. + + + + The protocol buffer options. + + + + Field number for the "source_context" field. + + + + The source context. + + + + Field number for the "syntax" field. + + + + The source syntax. + + + + + A single field of a message type. + + + + Field number for the "kind" field. + + + + The field type. + + + + Field number for the "cardinality" field. + + + + The field cardinality. + + + + Field number for the "number" field. + + + + The field number. + + + + Field number for the "name" field. + + + + The field name. + + + + Field number for the "type_url" field. + + + + The field type URL, without the scheme, for message or enumeration + types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + + + + Field number for the "oneof_index" field. + + + + The index of the field type in `Type.oneofs`, for message or enumeration + types. The first type has index 1; zero means the type is not in the list. + + + + Field number for the "packed" field. + + + + Whether to use alternative packed wire representation. + + + + Field number for the "options" field. + + + + The protocol buffer options. + + + + Field number for the "json_name" field. + + + + The field JSON name. + + + + Field number for the "default_value" field. + + + + The string value of the default value of this field. Proto2 syntax only. + + + + Container for nested types declared in the Field message type. + + + + Basic field types. + + + + + Field type unknown. + + + + + Field type double. + + + + + Field type float. + + + + + Field type int64. + + + + + Field type uint64. + + + + + Field type int32. + + + + + Field type fixed64. + + + + + Field type fixed32. + + + + + Field type bool. + + + + + Field type string. + + + + + Field type group. Proto2 syntax only, and deprecated. + + + + + Field type message. + + + + + Field type bytes. + + + + + Field type uint32. + + + + + Field type enum. + + + + + Field type sfixed32. + + + + + Field type sfixed64. + + + + + Field type sint32. + + + + + Field type sint64. + + + + + Whether a field is optional, required, or repeated. + + + + + For fields with unknown cardinality. + + + + + For optional fields. + + + + + For required fields. Proto2 syntax only. + + + + + For repeated fields. + + + + + Enum type definition. + + + + Field number for the "name" field. + + + + Enum type name. + + + + Field number for the "enumvalue" field. + + + + Enum value definitions. + + + + Field number for the "options" field. + + + + Protocol buffer options. + + + + Field number for the "source_context" field. + + + + The source context. + + + + Field number for the "syntax" field. + + + + The source syntax. + + + + + Enum value definition. + + + + Field number for the "name" field. + + + + Enum value name. + + + + Field number for the "number" field. + + + + Enum value number. + + + + Field number for the "options" field. + + + + Protocol buffer options. + + + + + A protocol buffer option, which can be attached to a message, field, + enumeration, etc. + + + + Field number for the "name" field. + + + + The option's name. For example, `"java_package"`. + + + + Field number for the "value" field. + + + + The option's value. For example, `"com.google.protobuf"`. + + + + Holder for reflection information generated from google/protobuf/field_mask.proto + + + File descriptor for google/protobuf/field_mask.proto + + + + `FieldMask` represents a set of symbolic field paths, for example: + + paths: "f.a" + paths: "f.b.d" + + Here `f` represents a field in some root message, `a` and `b` + fields in the message found in `f`, and `d` a field found in the + message in `f.b`. + + Field masks are used to specify a subset of fields that should be + returned by a get operation or modified by an update operation. + Field masks also have a custom JSON encoding (see below). + + # Field Masks in Projections + + When used in the context of a projection, a response message or + sub-message is filtered by the API to only contain those fields as + specified in the mask. For example, if the mask in the previous + example is applied to a response message as follows: + + f { + a : 22 + b { + d : 1 + x : 2 + } + y : 13 + } + z: 8 + + The result will not contain specific values for fields x, y and z + (their value will be set to the default, and omitted in proto text + output): + + f { + a : 22 + b { + d : 1 + } + } + + A repeated field is not allowed except at the last position of a + field mask. + + If a FieldMask object is not present in a get operation, the + operation applies to all fields (as if a FieldMask of all fields + had been specified). + + Note that a field mask does not necessarily apply to the + top-level response message. In case of a REST get operation, the + field mask applies directly to the response, but in case of a REST + list operation, the mask instead applies to each individual message + in the returned resource list. In case of a REST custom method, + other definitions may be used. Where the mask applies will be + clearly documented together with its declaration in the API. In + any case, the effect on the returned resource/resources is required + behavior for APIs. + + # Field Masks in Update Operations + + A field mask in update operations specifies which fields of the + targeted resource are going to be updated.
The API is required + to only change the values of the fields as specified in the mask + and leave the others untouched. If a resource is passed in to + describe the updated values, the API ignores the values of all + fields not covered by the mask. + + If a repeated field is specified for an update operation, the existing + repeated values in the target resource will be overwritten by the new values. + Note that a repeated field is only allowed in the last position of a field + mask. + + If a sub-message is specified in the last position of the field mask for an + update operation, then the existing sub-message in the target resource is + overwritten. Given the target message: + + f { + b { + d : 1 + x : 2 + } + c : 1 + } + + And an update message: + + f { + b { + d : 10 + } + } + + then if the field mask is: + + paths: "f.b" + + then the result will be: + + f { + b { + d : 10 + } + c : 1 + } + + However, if the update mask was: + + paths: "f.b.d" + + then the result would be: + + f { + b { + d : 10 + x : 2 + } + c : 1 + } + + In order to reset a field's value to the default, the field must + be in the mask and set to the default value in the provided resource. + Hence, in order to reset all fields of a resource, provide a default + instance of the resource and set all fields in the mask, or do + not provide a mask as described below. + + If a field mask is not present on update, the operation applies to + all fields (as if a field mask of all fields has been specified). + Note that in the presence of schema evolution, this may mean that + fields the client does not know and has therefore not filled into + the request will be reset to their default. If this is unwanted + behavior, a specific service may require a client to always specify + a field mask, producing an error if not. + + As with get operations, the location of the resource which + describes the updated values in the request message depends on the + operation kind. In any case, the effect of the field mask is + required to be honored by the API. + + ## Considerations for HTTP REST + + The HTTP kind of an update operation which uses a field mask must + be set to PATCH instead of PUT in order to satisfy HTTP semantics + (PUT must only be used for full updates). + + # JSON Encoding of Field Masks + + In JSON, a field mask is encoded as a single string where paths are + separated by a comma. Field names in each path are converted + to/from lower-camel naming conventions. + + As an example, consider the following message declarations: + + message Profile { + User user = 1; + Photo photo = 2; + } + message User { + string display_name = 1; + string address = 2; + } + + In proto a field mask for `Profile` may look as follows: + + mask { + paths: "user.display_name" + paths: "photo" + } + + In JSON, the same mask is represented as below: + + { + mask: "user.displayName,photo" + } + + # Field Masks and Oneof Fields + + Field masks treat fields in oneofs just as regular fields. Consider the + following message: + + message SampleMessage { + oneof test_oneof { + string name = 4; + SubMessage sub_message = 9; + } + } + + The field mask can be: + + mask { + paths: "name" + } + + Or: + + mask { + paths: "sub_message" + } + + Note that oneof type names ("test_oneof" in this case) cannot be used in + paths. + + + + Field number for the "paths" field. + + + + The set of field mask paths.
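A minimal C# sketch of the JSON encoding rules above (the Profile/User messages from the example are proto illustrations, not real types, so only the FieldMask itself is used here):

using Google.Protobuf.WellKnownTypes;

// Paths are written with the original snake_case proto field names...
var mask = new FieldMask
{
    Paths = { "user.display_name", "photo" }
};

// ...but the JSON form is a single comma-separated string with each
// path converted to lowerCamelCase. For a normalized mask, the
// diagnostic ToString() is that JSON value, including the quotes:
// "user.displayName,photo"
string json = mask.ToString();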
+ + + + + Converts a field mask specified by paths to a string. + + + If the value is a normalized field mask as described in field_mask.proto, + diagnosticOnly is ignored. Otherwise, if the parameter is true, + a JSON object with a warning is returned; if it is false, an InvalidOperationException is thrown. + + Paths in the field mask + Determines the handling of non-normalized values + The represented field mask is invalid, and diagnosticOnly is false. + + + + Camel-case converter with added strictness for field mask formatting. + + The field mask is invalid for JSON representation + + + + Returns a string representation of this FieldMask for diagnostic purposes. + + + Normally the returned value will be a JSON string value (including leading and trailing quotes) but + when the value is non-normalized or out of range, a JSON object representation will be returned + instead, including a warning. This is to avoid exceptions being thrown when trying to + diagnose problems - the regular JSON formatter will still throw an exception for non-normalized + values. + + A string representation of this value. + + + Holder for reflection information generated from google/protobuf/source_context.proto + + + File descriptor for google/protobuf/source_context.proto + + + + `SourceContext` represents information about the source of a + protobuf element, like the file in which it is defined. + + + + Field number for the "file_name" field. + + + + The path-qualified name of the .proto file that contained the associated + protobuf element. For example: `"google/protobuf/source_context.proto"`. + + + + + Extension methods on BCL time-related types, converting to protobuf types. + + + + + Converts the given DateTime to a Timestamp. + + The date and time to convert to a timestamp. + The value has a Kind other than Utc. + The converted timestamp. + + + + Converts the given DateTimeOffset to a Timestamp. + + The offset is taken into consideration when converting the value (so the same instant in time + is represented) but is not a separate part of the resulting value. In other words, there is no + roundtrip operation to retrieve the original DateTimeOffset. + The date and time (with UTC offset) to convert to a timestamp. + The converted timestamp. + + + + Converts the given TimeSpan to a Duration. + + The time span to convert. + The converted duration.
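A short sketch of the extension methods just described, assuming the Google.Protobuf.WellKnownTypes namespace:

using System;
using Google.Protobuf.WellKnownTypes;

// The DateTime must have Kind == DateTimeKind.Utc, otherwise ToTimestamp() throws.
Timestamp now = DateTime.UtcNow.ToTimestamp();

// For DateTimeOffset the offset is applied and then discarded: the
// resulting Timestamp stores only the UTC instant (here 2020-01-01T00:00:00Z).
var utc8 = new DateTimeOffset(2020, 1, 1, 8, 0, 0, TimeSpan.FromHours(8));
Timestamp fromOffset = utc8.ToTimestamp();

// A TimeSpan converts to a Duration covering the same span of time.
Duration halfMinute = TimeSpan.FromSeconds(30).ToDuration();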
+ + + Holder for reflection information generated from google/protobuf/any.proto + + + File descriptor for google/protobuf/any.proto + + + + `Any` contains an arbitrary serialized protocol buffer message along with a + URL that describes the type of the serialized message. + + Protobuf library provides support to pack/unpack Any values in the form + of utility functions or additional generated methods of the Any type. + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + The pack methods provided by protobuf library will by default use + 'type.googleapis.com/full.type.name' as the type URL and the unpack + methods only use the fully qualified type name after the last '/' + in the type URL, for example "foo.bar.com/x/y.z" will yield type + name "y.z". + + JSON + ==== + The JSON representation of an `Any` value uses the regular + representation of the deserialized, embedded message, with an + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": <string>, + "lastName": <string> + } + + If the embedded message type is well-known and has a custom JSON + representation, that representation will be embedded adding a field + `value` which holds the custom JSON in addition to the `@type` + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + + + + Field number for the "type_url" field. + + + + A URL/resource name whose content describes the type of the + serialized protocol buffer message. + + For URLs which use the scheme `http`, `https`, or no scheme, the + following restrictions and interpretations apply: + + * If no scheme is provided, `https` is assumed. + * The last segment of the URL's path must represent the fully + qualified name of the type (as in `path/google.protobuf.Duration`). + The name should be in a canonical form (e.g., leading "." is + not accepted). + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Schemes other than `http`, `https` (or the empty scheme) might be + used with implementation specific semantics. + + + + Field number for the "value" field. + + + + Must be a valid serialized protocol buffer of the above specified type. + + + + + Retrieves the type name for a type URL. This is always just the last part of the URL, + after the trailing slash. No validation of anything before the trailing slash is performed. + If the type URL does not include a slash, an empty string is returned rather than an exception + being thrown; this won't match any types, and the calling code is probably in a better position + to give a meaningful error. + There is no handling of fragments or queries at the moment. + + The URL to extract the type name from + The type name + + + + Unpacks the content of this Any message into the target message type, + which must match the type URL within this Any message. + + The type of message to unpack the content into. + The unpacked message. + The target message type doesn't match the type URL in this message + + + + Packs the specified message into an Any message using a type URL prefix of "type.googleapis.com". + + The message to pack. + An Any message with the content and type URL of the given message. + + + + Packs the specified message into an Any message using the specified type URL prefix. + + The message to pack. + The prefix for the type URL. + An Any message with the content and type URL of the given message.
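The C# equivalents of the pack/unpack examples above, as a minimal sketch (Duration is used as the payload so the snippet stays self-contained):

using Google.Protobuf;
using Google.Protobuf.WellKnownTypes;

// Pack uses the default "type.googleapis.com" prefix, so TypeUrl becomes
// "type.googleapis.com/google.protobuf.Duration".
var payload = new Duration { Seconds = 1, Nanos = 212000000 };
Any any = Any.Pack(payload);

// Unpack<T> verifies the type URL against T and throws
// InvalidProtocolBufferException on a mismatch.
Duration roundTripped = any.Unpack<Duration>();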
+ + + Holder for reflection information generated from google/protobuf/api.proto + + + File descriptor for google/protobuf/api.proto + + + + Api is a light-weight descriptor for a protocol buffer service. + + + + Field number for the "name" field. + + + + The fully qualified name of this api, including package name + followed by the api's simple name. + + + + Field number for the "methods" field. + + + + The methods of this api, in unspecified order. + + + + Field number for the "options" field. + + + + Any metadata attached to the API. + + + + Field number for the "version" field. + + + + A version string for this api. If specified, must have the form + `major-version.minor-version`, as in `1.10`. If the minor version + is omitted, it defaults to zero. If the entire version field is + empty, the major version is derived from the package name, as + outlined below. If the field is not empty, the version in the + package name will be verified to be consistent with what is + provided here. + + The versioning schema uses [semantic + versioning](http://semver.org) where the major version number + indicates a breaking change and the minor version an additive, + non-breaking change. Both version numbers are signals to users + what to expect from different versions, and should be carefully + chosen based on the product plan. + + The major version is also reflected in the package name of the + API, which must end in `v<major-version>`, as in + `google.feature.v1`. For major versions 0 and 1, the suffix can + be omitted. Zero major versions must only be used for + experimental, non-GA APIs. + + + + Field number for the "source_context" field. + + + + Source context for the protocol buffer service represented by this + message. + + + + Field number for the "mixins" field. + + + + Included APIs. See [Mixin][]. + + + + Field number for the "syntax" field. + + + + The source syntax of the service. + + + + + Method represents a method of an API. + + + + Field number for the "name" field. + + + + The simple name of this method. + + + + Field number for the "request_type_url" field. + + + + A URL of the input message type. + + + + Field number for the "request_streaming" field. + + + + If true, the request is streamed. + + + + Field number for the "response_type_url" field. + + + + The URL of the output message type. + + + + Field number for the "response_streaming" field. + + + + If true, the response is streamed. + + + + Field number for the "options" field. + + + + Any metadata attached to the method. + + + + Field number for the "syntax" field. + + + + The source syntax of this method. + + + + + Declares an API to be included in this API. The including API must + redeclare all the methods from the included API, but documentation + and options are inherited as follows: + + - If after comment and whitespace stripping, the documentation + string of the redeclared method is empty, it will be inherited + from the original method. + + - Each annotation belonging to the service config (http, + visibility) which is not set in the redeclared method will be + inherited. + + - If an http annotation is inherited, the path pattern will be + modified as follows. Any version prefix will be replaced by the + version of the including API plus the [root][] path if specified. + + Example of a simple mixin: + + package google.acl.v1; + service AccessControl { + // Get the underlying ACL object. + rpc GetAcl(GetAclRequest) returns (Acl) { + option (google.api.http).get = "/v1/{resource=**}:getAcl"; + } + } + + package google.storage.v2; + service Storage { + rpc GetAcl(GetAclRequest) returns (Acl); + + // Get a data record.
+ rpc GetData(GetDataRequest) returns (Data) { + option (google.api.http).get = "/v2/{resource=**}"; + } + } + + Example of a mixin configuration: + + apis: + - name: google.storage.v2.Storage + mixins: + - name: google.acl.v1.AccessControl + + The mixin construct implies that all methods in `AccessControl` are + also declared with the same name and request/response types in + `Storage`. A documentation generator or annotation processor will + see the effective `Storage.GetAcl` method after inheriting + documentation and annotations as follows: + + service Storage { + // Get the underlying ACL object. + rpc GetAcl(GetAclRequest) returns (Acl) { + option (google.api.http).get = "/v2/{resource=**}:getAcl"; + } + ... + } + + Note how the version in the path pattern changed from `v1` to `v2`. + + If the `root` field in the mixin is specified, it should be a + relative path under which inherited HTTP paths are placed. Example: + + apis: + - name: google.storage.v2.Storage + mixins: + - name: google.acl.v1.AccessControl + root: acls + + This implies the following inherited HTTP annotation: + + service Storage { + // Get the underlying ACL object. + rpc GetAcl(GetAclRequest) returns (Acl) { + option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; + } + ... + } + + + + Field number for the "name" field. + + + + The fully qualified name of the API which is included. + + + + Field number for the "root" field. + + + + If non-empty, specifies a path under which inherited HTTP paths + are rooted. + + + + Holder for reflection information generated from google/protobuf/empty.proto + + + File descriptor for google/protobuf/empty.proto + + + + A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to use it as the request + or the response type of an API method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for `Empty` is an empty JSON object `{}`. + + + + + A Duration represents a signed, fixed-length span of time represented + as a count of seconds and fractions of seconds at nanosecond + resolution. It is independent of any calendar and concepts like "day" + or "month". It is related to Timestamp in that the difference between + two Timestamp values is a Duration and it can be added or subtracted + from a Timestamp. Range is approximately +-10,000 years. + + Example 1: Compute Duration from two Timestamps in pseudo code. + + Timestamp start = ...; + Timestamp end = ...; + Duration duration = ...; + + duration.seconds = end.seconds - start.seconds; + duration.nanos = end.nanos - start.nanos; + + if (duration.seconds < 0 && duration.nanos > 0) { + duration.seconds += 1; + duration.nanos -= 1000000000; + } else if (duration.seconds > 0 && duration.nanos < 0) { + duration.seconds -= 1; + duration.nanos += 1000000000; + } + + Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. + + Timestamp start = ...; + Duration duration = ...; + Timestamp end = ...; + + end.seconds = start.seconds + duration.seconds; + end.nanos = start.nanos + duration.nanos; + + if (end.nanos < 0) { + end.seconds -= 1; + end.nanos += 1000000000; + } else if (end.nanos >= 1000000000) { + end.seconds += 1; + end.nanos -= 1000000000; + }
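In the C# API documented here, the normalization in the pseudo code above is handled by the Timestamp and Duration operators, so callers never adjust seconds/nanos by hand. A minimal sketch:

using Google.Protobuf.WellKnownTypes;

var start = new Timestamp { Seconds = 100, Nanos = 900000000 };
var end = new Timestamp { Seconds = 101, Nanos = 100000000 };

// Timestamp - Timestamp yields a normalized Duration (0.2s here);
// the carry/borrow from Example 1 happens internally.
Duration elapsed = end - start;

// Example 2 corresponds to Timestamp + Duration; unary minus negates.
Timestamp later = end + elapsed;
Duration backwards = -elapsed;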
+ + + + + The number of nanoseconds in a second. + + + + + The number of nanoseconds in a BCL tick (as used by TimeSpan and DateTime). + + + + + The maximum permitted number of seconds. + + + + + The minimum permitted number of seconds. + + + + + Converts this Duration to a TimeSpan. + + If the duration is not a precise number of ticks, it is truncated towards 0. + The value of this duration, as a TimeSpan. + This value isn't a valid normalized duration, as + described in the documentation. + + + + Converts the given TimeSpan to a Duration. + + The TimeSpan to convert. + The value of the given TimeSpan, as a Duration. + + + + Returns the result of negating the duration. For example, the negation of 5 minutes is -5 minutes. + + The duration to negate. Must not be null. + The negated value of this duration. + + + + Adds the two specified Duration values together. + + The first value to add. Must not be null. + The second value to add. Must not be null. + + + + + Subtracts one Duration from another. + + The duration to subtract from. Must not be null. + The duration to subtract. Must not be null. + The difference between the two specified durations. + + + + Creates a duration with the normalized values from the given number of seconds and + nanoseconds, conforming with the description in the proto file. + + + + + Converts a duration specified in seconds/nanoseconds to a string. + + + If the value is a normalized duration in the range described in duration.proto, + diagnosticOnly is ignored. Otherwise, if the parameter is true, + a JSON object with a warning is returned; if it is false, an InvalidOperationException is thrown. + + Seconds portion of the duration. + Nanoseconds portion of the duration. + Determines the handling of non-normalized values + The represented duration is invalid, and diagnosticOnly is false. + + + + Returns a string representation of this Duration for diagnostic purposes. + + + Normally the returned value will be a JSON string value (including leading and trailing quotes) but + when the value is non-normalized or out of range, a JSON object representation will be returned + instead, including a warning. This is to avoid exceptions being thrown when trying to + diagnose problems - the regular JSON formatter will still throw an exception for non-normalized + values. + + A string representation of this value. + + + + Appends a number of nanoseconds to a StringBuilder. Either 0 digits are added (in which + case no "." is appended), or 3, 6 or 9 digits. This is internal for use in Timestamp as well + as Duration. + + + + Field number for the "seconds" field. + + + + Signed seconds of the span of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. + + + + Field number for the "nanos" field. + + + + Signed fractions of a second at nanosecond resolution of the span + of time. Durations less than one second are represented with a 0 + `seconds` field and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` field must be + of the same sign as the `seconds` field. Must be from -999,999,999 + to +999,999,999 inclusive. + + + + + Wrapper message for `double`. + + The JSON representation for `DoubleValue` is JSON number. + + + + Field number for the "value" field. + + + + The double value. + + + + + Wrapper message for `float`. + + The JSON representation for `FloatValue` is JSON number. + + + + Field number for the "value" field. + + + + The float value. + + + + + Wrapper message for `int64`. + + The JSON representation for `Int64Value` is JSON string. + + + + Field number for the "value" field. + + + + The int64 value. + + + + + Wrapper message for `uint64`. + + The JSON representation for `UInt64Value` is JSON string. + + + + Field number for the "value" field. + + + + The uint64 value. + + + + + Wrapper message for `int32`.
+ + The JSON representation for `Int32Value` is JSON number. + + + + Field number for the "value" field. + + + + The int32 value. + + + + + Wrapper message for `uint32`. + + The JSON representation for `UInt32Value` is JSON number. + + + + Field number for the "value" field. + + + + The uint32 value. + + + + + Wrapper message for `bool`. + + The JSON representation for `BoolValue` is JSON `true` and `false`. + + + + Field number for the "value" field. + + + + The bool value. + + + + + Wrapper message for `string`. + + The JSON representation for `StringValue` is JSON string. + + + + Field number for the "value" field. + + + + The string value. + + + + + Wrapper message for `bytes`. + + The JSON representation for `BytesValue` is JSON string. + + + + Field number for the "value" field. + + + + The bytes value. + + + + Holder for reflection information generated from google/protobuf/duration.proto + + + File descriptor for google/protobuf/duration.proto + +
+
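A closing sketch for the wrapper types documented above. In generated C# code a message field of a wrapper type surfaces as a nullable property, which is what distinguishes "absent" from "default value"; a standalone wrapper message formats to JSON as the bare wrapped value (Int64Value as a string, BoolValue as true/false):

using System;
using Google.Protobuf;
using Google.Protobuf.WellKnownTypes;

var i64 = new Int64Value { Value = 42 };
var flag = new BoolValue { Value = true };

Console.WriteLine(JsonFormatter.Default.Format(i64));  // "42" (JSON string)
Console.WriteLine(JsonFormatter.Default.Format(flag)); // true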
diff --git a/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.xml.meta b/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.xml.meta new file mode 100644 index 0000000..2127947 --- /dev/null +++ b/FirClient/Assets/Plugins/protobuff-net/Google.Protobuf.xml.meta @@ -0,0 +1,7 @@ +fileFormatVersion: 2 +guid: eebdc52bfe543f646902f5ae0feda531 +TextScriptImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/tolua.bundle/Contents/MacOS/tolua b/FirClient/Assets/Plugins/tolua.bundle/Contents/MacOS/tolua deleted file mode 100644 index fc91970..0000000 Binary files a/FirClient/Assets/Plugins/tolua.bundle/Contents/MacOS/tolua and /dev/null differ diff --git a/FirClient/Assets/Plugins/tolua.meta b/FirClient/Assets/Plugins/tolua.meta new file mode 100644 index 0000000..e71f490 --- /dev/null +++ b/FirClient/Assets/Plugins/tolua.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: ff4c8ad30684e5d4d909668372ecec3f +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/Android.meta b/FirClient/Assets/Plugins/tolua/Android.meta similarity index 100% rename from FirClient/Assets/Plugins/Android.meta rename to FirClient/Assets/Plugins/tolua/Android.meta diff --git a/FirClient/Assets/Plugins/Android/libs.meta b/FirClient/Assets/Plugins/tolua/Android/libs.meta similarity index 100% rename from FirClient/Assets/Plugins/Android/libs.meta rename to FirClient/Assets/Plugins/tolua/Android/libs.meta diff --git a/FirClient/Assets/Plugins/tolua/Android/libs/arm64-v8a.meta b/FirClient/Assets/Plugins/tolua/Android/libs/arm64-v8a.meta new file mode 100644 index 0000000..e6e37ab --- /dev/null +++ b/FirClient/Assets/Plugins/tolua/Android/libs/arm64-v8a.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: a20d0af3979c12e4a8bc660a68276474 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/tolua/Android/libs/arm64-v8a/libtolua.so b/FirClient/Assets/Plugins/tolua/Android/libs/arm64-v8a/libtolua.so new file mode 100644 index 0000000..a159e0f Binary files /dev/null and b/FirClient/Assets/Plugins/tolua/Android/libs/arm64-v8a/libtolua.so differ diff --git a/FirClient/Assets/Plugins/tolua/Android/libs/arm64-v8a/libtolua.so.meta b/FirClient/Assets/Plugins/tolua/Android/libs/arm64-v8a/libtolua.so.meta new file mode 100644 index 0000000..c4223ec --- /dev/null +++ b/FirClient/Assets/Plugins/tolua/Android/libs/arm64-v8a/libtolua.so.meta @@ -0,0 +1,32 @@ +fileFormatVersion: 2 +guid: c0fdc89dabc68b643bd13d5de599f89d +PluginImporter: + externalObjects: {} + serializedVersion: 2 + iconMap: {} + executionOrder: {} + defineConstraints: [] + isPreloaded: 0 + isOverridable: 0 + isExplicitlyReferenced: 0 + platformData: + - first: + Android: Android + second: + enabled: 1 + settings: + CPU: ARM64 + - first: + Any: + second: + enabled: 0 + settings: {} + - first: + Editor: Editor + second: + enabled: 0 + settings: + DefaultValueInitialized: true + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/Android/libs/armeabi-v7a.meta b/FirClient/Assets/Plugins/tolua/Android/libs/armeabi-v7a.meta similarity index 100% rename from FirClient/Assets/Plugins/Android/libs/armeabi-v7a.meta rename to FirClient/Assets/Plugins/tolua/Android/libs/armeabi-v7a.meta diff --git a/FirClient/Assets/Plugins/tolua/Android/libs/armeabi-v7a/libtolua.so 
b/FirClient/Assets/Plugins/tolua/Android/libs/armeabi-v7a/libtolua.so new file mode 100644 index 0000000..47d6628 Binary files /dev/null and b/FirClient/Assets/Plugins/tolua/Android/libs/armeabi-v7a/libtolua.so differ diff --git a/FirClient/Assets/Plugins/Android/libs/armeabi-v7a/libtolua.so.meta b/FirClient/Assets/Plugins/tolua/Android/libs/armeabi-v7a/libtolua.so.meta similarity index 100% rename from FirClient/Assets/Plugins/Android/libs/armeabi-v7a/libtolua.so.meta rename to FirClient/Assets/Plugins/tolua/Android/libs/armeabi-v7a/libtolua.so.meta diff --git a/FirClient/Assets/Plugins/Android/libs/x86.meta b/FirClient/Assets/Plugins/tolua/Android/libs/x86.meta similarity index 100% rename from FirClient/Assets/Plugins/Android/libs/x86.meta rename to FirClient/Assets/Plugins/tolua/Android/libs/x86.meta diff --git a/FirClient/Assets/Plugins/iOS.meta b/FirClient/Assets/Plugins/tolua/iOS.meta similarity index 100% rename from FirClient/Assets/Plugins/iOS.meta rename to FirClient/Assets/Plugins/tolua/iOS.meta diff --git a/FirClient/Assets/Plugins/tolua/iOS/libtolua.a b/FirClient/Assets/Plugins/tolua/iOS/libtolua.a new file mode 100644 index 0000000..78ac498 Binary files /dev/null and b/FirClient/Assets/Plugins/tolua/iOS/libtolua.a differ diff --git a/FirClient/Assets/Plugins/iOS/libtolua.a.meta b/FirClient/Assets/Plugins/tolua/iOS/libtolua.a.meta similarity index 100% rename from FirClient/Assets/Plugins/iOS/libtolua.a.meta rename to FirClient/Assets/Plugins/tolua/iOS/libtolua.a.meta diff --git a/FirClient/Assets/Plugins/tolua.bundle.meta b/FirClient/Assets/Plugins/tolua/tolua.bundle.meta similarity index 100% rename from FirClient/Assets/Plugins/tolua.bundle.meta rename to FirClient/Assets/Plugins/tolua/tolua.bundle.meta diff --git a/FirClient/Assets/Plugins/tolua.bundle/Contents.meta b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents.meta similarity index 100% rename from FirClient/Assets/Plugins/tolua.bundle/Contents.meta rename to FirClient/Assets/Plugins/tolua/tolua.bundle/Contents.meta diff --git a/FirClient/Assets/Plugins/tolua.bundle/Contents/Info.plist b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/Info.plist similarity index 81% rename from FirClient/Assets/Plugins/tolua.bundle/Contents/Info.plist rename to FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/Info.plist index 87308b4..fdbb3be 100644 --- a/FirClient/Assets/Plugins/tolua.bundle/Contents/Info.plist +++ b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/Info.plist @@ -3,7 +3,7 @@ <plist version="1.0"> <dict> <key>BuildMachineOSBuild</key> - <string>16F73</string> + <string>19G2021</string> <key>CFBundleDevelopmentRegion</key> <string>English</string> <key>CFBundleExecutable</key> @@ -47,18 +47,22 @@ <key>DTCompiler</key> <string>com.apple.compilers.llvm.clang.1_0</string> <key>DTPlatformBuild</key> - <string>8E3004b</string> + <string>12A7300</string> + <key>DTPlatformName</key> + <string>macosx</string> <key>DTPlatformVersion</key> - <string>GM</string> + <string>10.15.6</string> <key>DTSDKBuild</key> - <string>16E185</string> + <string>19G68</string> <key>DTSDKName</key> - <string>macosx10.12</string> + <string>macosx10.15</string> <key>DTXcode</key> - <string>0833</string> + <string>1201</string> <key>DTXcodeBuild</key> - <string>8E3004b</string> + <string>12A7300</string> + <key>LSMinimumSystemVersion</key> + <string>10.8</string> <key>NSHumanReadableCopyright</key> - <string>Copyright © 2013 xlcw games. All rights reserved.</string> + <string>Copyright © 2013 topameng. All rights reserved.</string> </dict> </plist>
diff --git a/FirClient/Assets/Plugins/tolua.bundle/Contents/Info.plist.meta b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/Info.plist.meta similarity index 100% rename from FirClient/Assets/Plugins/tolua.bundle/Contents/Info.plist.meta rename to FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/Info.plist.meta diff --git a/FirClient/Assets/Plugins/tolua.bundle/Contents/MacOS.meta b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/MacOS.meta similarity index 100% rename from FirClient/Assets/Plugins/tolua.bundle/Contents/MacOS.meta rename to FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/MacOS.meta diff --git a/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/MacOS/tolua b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/MacOS/tolua new file mode 100644 index 0000000..94e932e Binary files /dev/null and b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/MacOS/tolua differ diff --git a/FirClient/Assets/Plugins/tolua.bundle/Contents/MacOS/tolua.meta b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/MacOS/tolua.meta similarity index 100% rename from FirClient/Assets/Plugins/tolua.bundle/Contents/MacOS/tolua.meta rename to FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/MacOS/tolua.meta diff --git a/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/_CodeSignature.meta b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/_CodeSignature.meta new file mode 100644 index 0000000..93a9576 --- /dev/null +++ b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/_CodeSignature.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: abc0f4219f93da54cac4f46664bc35e9 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/_CodeSignature/CodeResources b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/_CodeSignature/CodeResources new file mode 100644 index 0000000..d5d0fd7 --- /dev/null +++ b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/_CodeSignature/CodeResources @@ -0,0 +1,115 @@ + + + + + files + + files2 + + rules + + ^Resources/ + + ^Resources/.*\.lproj/ + + optional + + weight + 1000 + + ^Resources/.*\.lproj/locversion.plist$ + + omit + + weight + 1100 + + ^Resources/Base\.lproj/ + + weight + 1010 + + ^version.plist$ + + + rules2 + + .*\.dSYM($|/) + + weight + 11 + + ^(.*/)?\.DS_Store$ + + omit + + weight + 2000 + + ^(Frameworks|SharedFrameworks|PlugIns|Plug-ins|XPCServices|Helpers|MacOS|Library/(Automator|Spotlight|LoginItems))/ + + nested + + weight + 10 + + ^.* + + ^Info\.plist$ + + omit + + weight + 20 + + ^PkgInfo$ + + omit + + weight + 20 + + ^Resources/ + + weight + 20 + + ^Resources/.*\.lproj/ + + optional + + weight + 1000 + + ^Resources/.*\.lproj/locversion.plist$ + + omit + + weight + 1100 + + ^Resources/Base\.lproj/ + + weight + 1010 + + ^[^/]+$ + + nested + + weight + 10 + + ^embedded\.provisionprofile$ + + weight + 20 + + ^version\.plist$ + + weight + 20 + + + + diff --git a/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/_CodeSignature/CodeResources.meta b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/_CodeSignature/CodeResources.meta new file mode 100644 index 0000000..b4e930d --- /dev/null +++ b/FirClient/Assets/Plugins/tolua/tolua.bundle/Contents/_CodeSignature/CodeResources.meta @@ -0,0 +1,7 @@ +fileFormatVersion: 2 +guid: dd4305f26fe7f91428bca44b4974a325 +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/tolua/x86.meta 
b/FirClient/Assets/Plugins/tolua/x86.meta new file mode 100644 index 0000000..c78df8b --- /dev/null +++ b/FirClient/Assets/Plugins/tolua/x86.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: d831f5eb258c6ca4ea9e71c66b43c1d7 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/tolua/x86/tolua.dll b/FirClient/Assets/Plugins/tolua/x86/tolua.dll new file mode 100644 index 0000000..d83465d Binary files /dev/null and b/FirClient/Assets/Plugins/tolua/x86/tolua.dll differ diff --git a/FirClient/Assets/Plugins/tolua/x86/tolua.dll.meta b/FirClient/Assets/Plugins/tolua/x86/tolua.dll.meta new file mode 100644 index 0000000..09cfc41 --- /dev/null +++ b/FirClient/Assets/Plugins/tolua/x86/tolua.dll.meta @@ -0,0 +1,52 @@ +fileFormatVersion: 2 +guid: 41af05cb133e3fc40b484df27d64415a +PluginImporter: + externalObjects: {} + serializedVersion: 2 + iconMap: {} + executionOrder: {} + defineConstraints: [] + isPreloaded: 0 + isOverridable: 0 + isExplicitlyReferenced: 0 + validateReferences: 1 + platformData: + - first: + Any: + second: + enabled: 1 + settings: {} + - first: + Editor: Editor + second: + enabled: 0 + settings: + CPU: x86 + DefaultValueInitialized: true + - first: + Standalone: Linux64 + second: + enabled: 0 + settings: + CPU: None + - first: + Standalone: OSXUniversal + second: + enabled: 0 + settings: + CPU: x86 + - first: + Standalone: Win + second: + enabled: 1 + settings: + CPU: AnyCPU + - first: + Standalone: Win64 + second: + enabled: 0 + settings: + CPU: None + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Plugins/x86_64.meta b/FirClient/Assets/Plugins/tolua/x86_64.meta similarity index 100% rename from FirClient/Assets/Plugins/x86_64.meta rename to FirClient/Assets/Plugins/tolua/x86_64.meta diff --git a/FirClient/Assets/Plugins/tolua/x86_64/tolua.dll b/FirClient/Assets/Plugins/tolua/x86_64/tolua.dll new file mode 100644 index 0000000..037405a Binary files /dev/null and b/FirClient/Assets/Plugins/tolua/x86_64/tolua.dll differ diff --git a/FirClient/Assets/Plugins/x86_64/tolua.dll.meta b/FirClient/Assets/Plugins/tolua/x86_64/tolua.dll.meta similarity index 100% rename from FirClient/Assets/Plugins/x86_64/tolua.dll.meta rename to FirClient/Assets/Plugins/tolua/x86_64/tolua.dll.meta diff --git a/FirClient/Assets/Plugins/x86_64/tolua.dll b/FirClient/Assets/Plugins/x86_64/tolua.dll deleted file mode 100644 index 5d332f3..0000000 Binary files a/FirClient/Assets/Plugins/x86_64/tolua.dll and /dev/null differ diff --git a/FirClient/Assets/Resources.meta b/FirClient/Assets/Resources.meta new file mode 100644 index 0000000..9a3b743 --- /dev/null +++ b/FirClient/Assets/Resources.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 9aa53e588ad3f114583e8abd3632c737 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Resources/AssetSyncSettings.asset b/FirClient/Assets/Resources/AssetSyncSettings.asset new file mode 100644 index 0000000..252f847 --- /dev/null +++ b/FirClient/Assets/Resources/AssetSyncSettings.asset @@ -0,0 +1,43 @@ +%YAML 1.1 +%TAG !u! 
tag:unity3d.com,2011: +--- !u!114 &11400000 +MonoBehaviour: + m_ObjectHideFlags: 0 + m_CorrespondingSourceObject: {fileID: 0} + m_PrefabInstance: {fileID: 0} + m_PrefabAsset: {fileID: 0} + m_GameObject: {fileID: 0} + m_Enabled: 1 + m_EditorHideFlags: 0 + m_Script: {fileID: 11500000, guid: 1868ac97a459f3b418b8dbe31f8d5a8f, type: 3} + m_Name: AssetSyncSettings + m_EditorClassIdentifier: + serializationData: + SerializedFormat: 2 + SerializedBytes: + ReferencedUnityObjects: [] + SerializedBytesString: + Prefab: {fileID: 0} + PrefabModificationsReferencedUnityObjects: [] + PrefabModifications: [] + SerializationNodes: + - Name: AssetSyncDictionary + Entry: 7 + Data: 0|System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.String, + mscorlib]], mscorlib + - Name: comparer + Entry: 7 + Data: 1|System.Collections.Generic.GenericEqualityComparer`1[[System.String, + mscorlib]], mscorlib + - Name: + Entry: 8 + Data: + - Name: + Entry: 12 + Data: 0 + - Name: + Entry: 13 + Data: + - Name: + Entry: 8 + Data: diff --git a/FirClient/Assets/Resources/AssetSyncSettings.asset.meta b/FirClient/Assets/Resources/AssetSyncSettings.asset.meta new file mode 100644 index 0000000..56b3da7 --- /dev/null +++ b/FirClient/Assets/Resources/AssetSyncSettings.asset.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: f280015fe86be3f4a80451b6fee0e89b +NativeFormatImporter: + externalObjects: {} + mainObjectFileID: 0 + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/res/Resources/DOTweenSettings.asset b/FirClient/Assets/Resources/DOTweenSettings.asset similarity index 100% rename from FirClient/Assets/res/Resources/DOTweenSettings.asset rename to FirClient/Assets/Resources/DOTweenSettings.asset diff --git a/FirClient/Assets/res/Resources/DOTweenSettings.asset.meta b/FirClient/Assets/Resources/DOTweenSettings.asset.meta similarity index 100% rename from FirClient/Assets/res/Resources/DOTweenSettings.asset.meta rename to FirClient/Assets/Resources/DOTweenSettings.asset.meta diff --git a/FirClient/Assets/res/Resources/GameSettings.asset b/FirClient/Assets/Resources/GameSettings.asset similarity index 96% rename from FirClient/Assets/res/Resources/GameSettings.asset rename to FirClient/Assets/Resources/GameSettings.asset index c5a0c6a..4498946 100644 --- a/FirClient/Assets/res/Resources/GameSettings.asset +++ b/FirClient/Assets/Resources/GameSettings.asset @@ -14,6 +14,7 @@ MonoBehaviour: m_EditorClassIdentifier: debugMode: 1 logMode: 1 + updateMode: 0 luaByteMode: 0 aStarDebugMode: 0 uiAtlasPath: Assets/res/Atlas/UI @@ -62,7 +63,7 @@ MonoBehaviour: datasBundlePackSetting: - dataPath: res/Datas packType: 0 - fileExtName: '*.xml|*.dat' + fileExtName: '*.xml|*.dat|*.pb' - dataPath: res/Tables packType: 0 fileExtName: '*.bytes' diff --git a/FirClient/Assets/res/Resources/GameSettings.asset.meta b/FirClient/Assets/Resources/GameSettings.asset.meta similarity index 100% rename from FirClient/Assets/res/Resources/GameSettings.asset.meta rename to FirClient/Assets/Resources/GameSettings.asset.meta diff --git a/FirClient/Assets/Scripts/Common/AssetSyncSettings.cs b/FirClient/Assets/Scripts/Common/AssetSyncSettings.cs new file mode 100644 index 0000000..b4ebca7 --- /dev/null +++ b/FirClient/Assets/Scripts/Common/AssetSyncSettings.cs @@ -0,0 +1,53 @@ +using System.Collections.Generic; +using UnityEngine; +using Sirenix.OdinInspector; +using System.IO; + +[CreateAssetMenu(fileName = "AssetSyncSettings", menuName = "My Game/AssetSyncSettings")] +public class AssetSyncSettings : 
SerializedScriptableObject +{ + [Title("Assets only")] + [SerializeField] + [DictionaryDrawerSettings(DisplayMode = DictionaryDisplayOptions.CollapsedFoldout)] + public Dictionary<string, string> AssetSyncDictionary = new Dictionary<string, string>(); + + [Button(ButtonSizes.Large), GUIColor(0.4f, 0.8f, 1)] + private void StartAssetSync() + { + foreach(var de in AssetSyncDictionary) + { + if (string.IsNullOrEmpty(de.Key) || string.IsNullOrEmpty(de.Value)) + { + continue; + } + CopyFile(de.Key, de.Value); + } + Debug.Log("Assets Sync Completed!!!!"); + } + + private void CopyFile(string src, string dest) + { + if (!src.StartsWith("Assets/")) + { + Debug.LogError("Error Src Path!!!!"); + return; + } + var srcPath = GetFullPath(src); + var destPath = GetFullPath(dest); + File.Copy(srcPath, destPath, true); + } + + private string GetFullPath(string path) + { + var dataPath = Application.dataPath; + if (path.StartsWith("Assets")) + { + return dataPath + path.Replace("Assets", string.Empty); + } + else if (path.StartsWith("../")) + { + return dataPath + "/" + path; + } + return path; + } +} diff --git a/FirClient/Assets/Scripts/Common/AssetSyncSettings.cs.meta b/FirClient/Assets/Scripts/Common/AssetSyncSettings.cs.meta new file mode 100644 index 0000000..997c9d1 --- /dev/null +++ b/FirClient/Assets/Scripts/Common/AssetSyncSettings.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 1868ac97a459f3b418b8dbe31f8d5a8f +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/FirClient/Assets/Scripts/Common/BaseObject.cs b/FirClient/Assets/Scripts/Common/BaseObject.cs index 4adccb3..d982217 100644 --- a/FirClient/Assets/Scripts/Common/BaseObject.cs +++ b/FirClient/Assets/Scripts/Common/BaseObject.cs @@ -1,4 +1,4 @@ -using FirClient.Utility; +using FirClient.Utility; public abstract class BaseObject { diff --git a/FirClient/Assets/Scripts/Common/Behaviour/BaseBehaviour.cs b/FirClient/Assets/Scripts/Common/Behaviour/BaseBehaviour.cs index 3159049..eaf1cc1 100644 --- a/FirClient/Assets/Scripts/Common/Behaviour/BaseBehaviour.cs +++ b/FirClient/Assets/Scripts/Common/Behaviour/BaseBehaviour.cs @@ -1,8 +1,10 @@ -using System.Collections; +using System.Collections; using System.Collections.Generic; using UnityEngine; using FirClient.Manager; using FirClient.Component; +using FirClient.Utility; +using LuaInterface; public abstract class BaseBehaviour { @@ -77,13 +79,32 @@ public Coroutine StartCoroutine(IEnumerator routine) return ManagementCenter.main.StartCoroutine(routine); } + [NoToLua] public static void Initialize() { + InitGameSettings(); InitManager(); InitExtManager(); InitComponent(); } + /// <summary> + /// Initialize game settings + /// </summary> + private static void InitGameSettings() + { + var settings = Util.LoadGameSettings(); + if (settings != null) + { + AppConst.LogMode = settings.logMode; + AppConst.DebugMode = settings.debugMode; + AppConst.GameFrameRate = settings.GameFrameRate; + AppConst.UpdateMode = settings.updateMode; + AppConst.LuaByteMode = settings.luaByteMode; + AppConst.ShowFps = settings.showFps; + } + } + /// <summary> /// Initialize components /// </summary> @@ -93,6 +114,10 @@ private static void InitComponent() if (mainGame != null) { mainGame.AddComponent(); + if (AppConst.ShowFps) + { + mainGame.AddComponent<CFPSDisplay>(); + } } }
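Util.LoadGameSettings, referenced by the InitGameSettings hunk above, is not part of this diff. A plausible minimal implementation (a hypothetical sketch, not the project's actual helper) would load the ScriptableObject that the GameSettings.asset rename above places under Assets/Resources:

using UnityEngine;

namespace FirClient.Utility
{
    public static class Util
    {
        // Hypothetical sketch: Resources.Load resolves paths relative to any
        // Resources folder, so "GameSettings" finds Assets/Resources/GameSettings.asset.
        public static GameSettings LoadGameSettings()
        {
            return Resources.Load<GameSettings>("GameSettings");
        }
    }
}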
diff --git a/FirClient/Assets/Scripts/Common/Behaviour/GameBehaviour.cs b/FirClient/Assets/Scripts/Common/Behaviour/GameBehaviour.cs index 3475416..0f6a4b8 100644 --- a/FirClient/Assets/Scripts/Common/Behaviour/GameBehaviour.cs +++ b/FirClient/Assets/Scripts/Common/Behaviour/GameBehaviour.cs @@ -1,4 +1,4 @@ -using UnityEngine; +using UnityEngine; public class GameBehaviour : MonoBehaviour { diff --git a/FirClient/Assets/Scripts/Common/Behaviour/LuaBehaviour.cs b/FirClient/Assets/Scripts/Common/Behaviour/LuaBehaviour.cs index f77a43e..48af278 100644 --- a/FirClient/Assets/Scripts/Common/Behaviour/LuaBehaviour.cs +++ b/FirClient/Assets/Scripts/Common/Behaviour/LuaBehaviour.cs @@ -1,7 +1,8 @@ -using LuaInterface; +using LuaInterface; using System.Collections.Generic; using UnityEngine.UI; using FirClient.Utility; +using TMPro; namespace FirClient.Behaviour { @@ -107,6 +108,38 @@ public void RemoveToggleClick(Toggle toggle) toggle.onValueChanged.RemoveAllListeners(); } + /// <summary> + /// Add an end-edit (Enter key) event listener + /// </summary> + public void AddEndEdit(TMP_InputField input, LuaTable self, LuaFunction luaFunc) + { + if (input != null && luaFunc != null) + { + RemoveEndEdit(input); + + luaEvents.Add(input, new LuaEventData(self, luaFunc)); + input.onEndEdit.AddListener(delegate + { + luaFunc.Call(self, input); + }); + } + } + + public void RemoveEndEdit(TMP_InputField input) + { + if (input == null) return; + LuaEventData evdata = null; + if (luaEvents.TryGetValue(input, out evdata)) + { + if (evdata != null) + { + evdata.Dispose(); + } + luaEvents.Remove(input); + } + input.onEndEdit.RemoveAllListeners(); + } + /// <summary> /// Clear click events /// </summary> diff --git a/FirClient/Assets/Scripts/Common/GameSettings.cs b/FirClient/Assets/Scripts/Common/GameSettings.cs index 9b7b587..b5a36dc 100644 --- a/FirClient/Assets/Scripts/Common/GameSettings.cs +++ b/FirClient/Assets/Scripts/Common/GameSettings.cs @@ -1,6 +1,8 @@ -using System; +using System; using System.Collections.Generic; +#if UNITY_EDITOR using UnityEditor; +#endif using UnityEngine; public enum TextureSize @@ -14,8 +16,10 @@ public enum TextureSize public class TextureCompressInfo { public string assetPath; +#if UNITY_EDITOR public TextureImporterFormat iosFormat; public TextureImporterFormat androidFormat; +#endif public TextureSize textureSize = TextureSize.MAX_1024; public bool isDynamic = false; } @@ -48,20 +52,32 @@ public class DataBundlePackInfo public string fileExtName; } +[CreateAssetMenu(fileName = "GameSettings", menuName = "My Game/GameSettings")] public class GameSettings : ScriptableObject { + [Header("General Settings")] [Tooltip("Game debug mode")] public bool debugMode; [Tooltip("Game log mode")] public bool logMode; + [Tooltip("Game update mode")] + public bool updateMode; + [Tooltip("Lua bytecode mode")] public bool luaByteMode; + [Tooltip("Show FPS stats")] + public bool showFps; + + [Tooltip("Game frame rate, default is 30")] + public int GameFrameRate; // game frame rate + [Tooltip("AStar debug mode")] public bool aStarDebugMode; + [Header("Atlas Settings")] [Tooltip("UI atlas path")] public string uiAtlasPath; @@ -71,9 +87,6 @@ public class GameSettings : ScriptableObject [Tooltip("TexturePacker exe install path")] public string texturePackerPath; - [Tooltip("Game frame rate, default is 30")] - public int GameFrameRate; // game frame rate - [SerializeField][HideInInspector] private int selectedAtlasIndex = -1; [SerializeField][HideInInspector] diff --git a/FirClient/Assets/Scripts/Common/LuaLoader.cs b/FirClient/Assets/Scripts/Common/LuaLoader.cs index 2c1fffd..fee7918 100644 --- a/FirClient/Assets/Scripts/Common/LuaLoader.cs +++ b/FirClient/Assets/Scripts/Common/LuaLoader.cs @@ -1,4 +1,4 @@ -using UnityEngine; +using UnityEngine; using System.Collections; using System.IO; using LuaInterface; diff --git
a/FirClient/Assets/Scripts/Common/ManagementCenter.cs b/FirClient/Assets/Scripts/Common/ManagementCenter.cs index 1c33c1b..667eed2 100644 --- a/FirClient/Assets/Scripts/Common/ManagementCenter.cs +++ b/FirClient/Assets/Scripts/Common/ManagementCenter.cs @@ -1,4 +1,4 @@ -using UnityEngine; +using UnityEngine; using FirClient.Manager; /// diff --git a/FirClient/Assets/Scripts/Component/AStar/AStar.cs b/FirClient/Assets/Scripts/Component/AStar/AStar.cs index b505779..81e3ff4 100644 --- a/FirClient/Assets/Scripts/Component/AStar/AStar.cs +++ b/FirClient/Assets/Scripts/Component/AStar/AStar.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Linq; using UnityEngine; diff --git a/FirClient/Assets/Scripts/Component/AStar/AStarCtrl.cs b/FirClient/Assets/Scripts/Component/AStar/AStarCtrl.cs index 1a90d57..00222ad 100644 --- a/FirClient/Assets/Scripts/Component/AStar/AStarCtrl.cs +++ b/FirClient/Assets/Scripts/Component/AStar/AStarCtrl.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using UnityEngine; using UnityEngine.Tilemaps; diff --git a/FirClient/Assets/Scripts/Component/AStar/AStarDebugger.cs b/FirClient/Assets/Scripts/Component/AStar/AStarDebugger.cs index 6869744..8e9178d 100644 --- a/FirClient/Assets/Scripts/Component/AStar/AStarDebugger.cs +++ b/FirClient/Assets/Scripts/Component/AStar/AStarDebugger.cs @@ -1,4 +1,4 @@ -using FirClient.Component; +using FirClient.Component; using FirClient.Extensions; using System.Collections.Generic; using TMPro; diff --git a/FirClient/Assets/Scripts/Component/AStar/IAStar.cs b/FirClient/Assets/Scripts/Component/AStar/IAStar.cs index 424e37b..4557305 100644 --- a/FirClient/Assets/Scripts/Component/AStar/IAStar.cs +++ b/FirClient/Assets/Scripts/Component/AStar/IAStar.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using UnityEngine; namespace FirClient.Component diff --git a/FirClient/Assets/Scripts/Component/AStar/Node.cs b/FirClient/Assets/Scripts/Component/AStar/Node.cs index a49fede..5e27b44 100644 --- a/FirClient/Assets/Scripts/Component/AStar/Node.cs +++ b/FirClient/Assets/Scripts/Component/AStar/Node.cs @@ -1,4 +1,4 @@ -using UnityEngine; +using UnityEngine; namespace FirClient.Component { diff --git a/FirClient/Assets/Scripts/Component/Animation/TweenAnimation.cs b/FirClient/Assets/Scripts/Component/Animation/TweenAnimation.cs index 1f8c634..66efd1a 100644 --- a/FirClient/Assets/Scripts/Component/Animation/TweenAnimation.cs +++ b/FirClient/Assets/Scripts/Component/Animation/TweenAnimation.cs @@ -1,4 +1,4 @@ -using UnityEngine; +using UnityEngine; using UnityEngine.UI; using System.Collections; using DG.Tweening; diff --git a/FirClient/Assets/Scripts/Component/Animation/UIFrameAnimation.cs b/FirClient/Assets/Scripts/Component/Animation/UIFrameAnimation.cs index e4ade12..3d9d847 100644 --- a/FirClient/Assets/Scripts/Component/Animation/UIFrameAnimation.cs +++ b/FirClient/Assets/Scripts/Component/Animation/UIFrameAnimation.cs @@ -1,4 +1,4 @@ -using UnityEngine; +using UnityEngine; using System.Collections; using UnityEngine.UI; diff --git a/FirClient/Assets/Scripts/Component/CButton.cs b/FirClient/Assets/Scripts/Component/CButton.cs index 2b37369..b04d826 100644 --- a/FirClient/Assets/Scripts/Component/CButton.cs +++ b/FirClient/Assets/Scripts/Component/CButton.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections; using System.Collections.Generic; using FirClient.Manager; diff --git 
a/FirClient/Assets/Scripts/Component/CCanvasAlpha.cs b/FirClient/Assets/Scripts/Component/CCanvasAlpha.cs index b268713..d5335e5 100644 --- a/FirClient/Assets/Scripts/Component/CCanvasAlpha.cs +++ b/FirClient/Assets/Scripts/Component/CCanvasAlpha.cs @@ -1,4 +1,4 @@ -using UnityEngine; +using UnityEngine; using UnityEngine.UI; public class CCanvasAlpha : MonoBehaviour diff --git a/FirClient/Assets/Scripts/Component/CEventObject.cs b/FirClient/Assets/Scripts/Component/CEventObject.cs index 8b38389..6c3d5ec 100644 --- a/FirClient/Assets/Scripts/Component/CEventObject.cs +++ b/FirClient/Assets/Scripts/Component/CEventObject.cs @@ -1,4 +1,4 @@ -using FirClient.Data; +using FirClient.Data; using System.Collections.Generic; using UnityEngine; diff --git a/FirClient/Assets/Scripts/Component/CFPSDisplay.cs b/FirClient/Assets/Scripts/Component/CFPSDisplay.cs index e0c92dc..7893cd5 100644 --- a/FirClient/Assets/Scripts/Component/CFPSDisplay.cs +++ b/FirClient/Assets/Scripts/Component/CFPSDisplay.cs @@ -1,4 +1,4 @@ -using UnityEngine; +using UnityEngine; using System.Collections; namespace FirClient.Component diff --git a/FirClient/Assets/Scripts/Component/CLuaAnimator.cs b/FirClient/Assets/Scripts/Component/CLuaAnimator.cs index aae65a1..a0499b5 100644 --- a/FirClient/Assets/Scripts/Component/CLuaAnimator.cs +++ b/FirClient/Assets/Scripts/Component/CLuaAnimator.cs @@ -1,4 +1,4 @@ -using LuaInterface; +using LuaInterface; using UnityEngine; namespace FirClient.Component diff --git a/FirClient/Assets/Scripts/Component/CLuaComponent.cs b/FirClient/Assets/Scripts/Component/CLuaComponent.cs index 7732575..2cdee0c 100644 --- a/FirClient/Assets/Scripts/Component/CLuaComponent.cs +++ b/FirClient/Assets/Scripts/Component/CLuaComponent.cs @@ -1,4 +1,4 @@ -using FirClient.Utility; +using FirClient.Utility; using UnityEngine; namespace FirClient.Component diff --git a/FirClient/Assets/Scripts/Component/CMultiProgressBar.cs b/FirClient/Assets/Scripts/Component/CMultiProgressBar.cs index 57a29b2..869eac6 100644 --- a/FirClient/Assets/Scripts/Component/CMultiProgressBar.cs +++ b/FirClient/Assets/Scripts/Component/CMultiProgressBar.cs @@ -1,4 +1,4 @@ -using UnityEngine; +using UnityEngine; using UnityEngine.UI; using DG.Tweening; using FirClient.Extensions; diff --git a/FirClient/Assets/Scripts/Component/CObjectFollow.cs b/FirClient/Assets/Scripts/Component/CObjectFollow.cs index 25b2743..e39aa04 100644 --- a/FirClient/Assets/Scripts/Component/CObjectFollow.cs +++ b/FirClient/Assets/Scripts/Component/CObjectFollow.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using UnityEngine; diff --git a/FirClient/Assets/Scripts/Component/CParticleScale.cs b/FirClient/Assets/Scripts/Component/CParticleScale.cs index 19f99b5..37111df 100644 --- a/FirClient/Assets/Scripts/Component/CParticleScale.cs +++ b/FirClient/Assets/Scripts/Component/CParticleScale.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using UnityEngine; namespace FirClient.Component diff --git a/FirClient/Assets/Scripts/Component/CPrefabVar.cs b/FirClient/Assets/Scripts/Component/CPrefabVar.cs index be98c07..46d19ff 100644 --- a/FirClient/Assets/Scripts/Component/CPrefabVar.cs +++ b/FirClient/Assets/Scripts/Component/CPrefabVar.cs @@ -1,8 +1,10 @@ -using System; +using LuaInterface; +using System; using System.Collections.Generic; using TMPro; using UnityEngine; using UnityEngine.UI; +using Object = UnityEngine.Object; namespace FirClient.Component { @@ -34,10 +36,151 @@ public class 
VarData public Toggle toggleValue; public Slider sliderValue; public CMultiProgressBar multiProgreValue; + + [NoToLua] + public Object GetValue() + { + switch (type) + { + case VarType.GameObject: return objValue; + case VarType.Transform: return tranValue; + case VarType.Text: return txtValue; + case VarType.Image: return imgValue; + case VarType.Button: return btnValue; + case VarType.TMP_InputField: return inputValue; + case VarType.Toggle: return toggleValue; + case VarType.Slider: return sliderValue; + case VarType.CMultiProgressBar: return multiProgreValue; + default: return null; + } + } + + [NoToLua] + public void Set(VarData newData) + { + //Clear + switch (type) + { + case VarType.GameObject: + objValue = null; break; + case VarType.Transform: + tranValue = null; break; + case VarType.Text: + txtValue = null; break; + case VarType.Image: + imgValue = null; break; + case VarType.Button: + btnValue = null; break; + case VarType.TMP_InputField: + inputValue = null; break; + case VarType.Toggle: + toggleValue = null; break; + case VarType.Slider: + sliderValue = null; break; + case VarType.CMultiProgressBar: + multiProgreValue = null; break; + } + //Set + name = newData.name; + lastType = newData.type; + type = newData.type; + switch (type) + { + case VarType.GameObject: + objValue = newData.objValue; break; + case VarType.Transform: + tranValue = newData.tranValue; break; + case VarType.Text: + txtValue = newData.txtValue; break; + case VarType.Image: + imgValue = newData.imgValue; break; + case VarType.Button: + btnValue = newData.btnValue; break; + case VarType.TMP_InputField: + inputValue = newData.inputValue; break; + case VarType.Toggle: + toggleValue = newData.toggleValue; break; + case VarType.Slider: + sliderValue = newData.sliderValue; break; + case VarType.CMultiProgressBar: + multiProgreValue = newData.multiProgreValue; break; + } + } } public class CPrefabVar : MonoBehaviour { + #region AutoBindDict + [NoToLua] public static readonly Dictionary<string, Func<GameObject, VarData>> AutoBindDict = new Dictionary<string, Func<GameObject, VarData>> + { + {"obj", go => new VarData {lastType = VarType.GameObject, type = VarType.GameObject, objValue = go}}, + {"tran", go => new VarData {lastType = VarType.Transform, type = VarType.Transform, tranValue = go.transform}}, + { + "img", go => + { + var value = go.GetComponent<Image>(); + if (value) + return new VarData {lastType = VarType.Image, type = VarType.Image, imgValue = value}; + return null; + } + }, + { + "txt", go => + { + var value = go.GetComponent<Text>(); + if (value) + return new VarData {lastType = VarType.Text, type = VarType.Text, txtValue = value}; + return null; + } + }, + { + "btn", go => + { + var value = go.GetComponent