diff --git a/manifests/g/ggml/llamacpp/b7285/ggml.llamacpp.installer.yaml b/manifests/g/ggml/llamacpp/b7285/ggml.llamacpp.installer.yaml
new file mode 100644
index 000000000000..5b6aa9436068
--- /dev/null
+++ b/manifests/g/ggml/llamacpp/b7285/ggml.llamacpp.installer.yaml
@@ -0,0 +1,31 @@
+# Created with komac v2.11.2
+# yaml-language-server: $schema=https://aka.ms/winget-manifest.installer.1.9.0.schema.json
+
+PackageIdentifier: ggml.llamacpp
+PackageVersion: b7285
+InstallerType: zip
+NestedInstallerType: portable
+NestedInstallerFiles:
+- RelativeFilePath: llama-batched-bench.exe
+- RelativeFilePath: llama-bench.exe
+- RelativeFilePath: llama-cli.exe
+- RelativeFilePath: llama-gguf-split.exe
+- RelativeFilePath: llama-imatrix.exe
+- RelativeFilePath: llama-mtmd-cli.exe
+- RelativeFilePath: llama-perplexity.exe
+- RelativeFilePath: llama-quantize.exe
+- RelativeFilePath: llama-run.exe
+- RelativeFilePath: llama-server.exe
+- RelativeFilePath: llama-tokenize.exe
+- RelativeFilePath: llama-tts.exe
+Dependencies:
+  PackageDependencies:
+  - PackageIdentifier: Microsoft.VCRedist.2015+.x64
+ReleaseDate: 2025-12-05
+ArchiveBinariesDependOnPath: true
+Installers:
+- Architecture: x64
+  InstallerUrl: https://github.com/ggml-org/llama.cpp/releases/download/b7285/llama-b7285-bin-win-vulkan-x64.zip
+  InstallerSha256: 13D647012DFD10CC17359301BDB900AF3076FA70F3236DF0757A2985C11B4928
+ManifestType: installer
+ManifestVersion: 1.9.0
diff --git a/manifests/g/ggml/llamacpp/b7285/ggml.llamacpp.locale.en-US.yaml b/manifests/g/ggml/llamacpp/b7285/ggml.llamacpp.locale.en-US.yaml
new file mode 100644
index 000000000000..786a6ea6d00e
--- /dev/null
+++ b/manifests/g/ggml/llamacpp/b7285/ggml.llamacpp.locale.en-US.yaml
@@ -0,0 +1,42 @@
+# Created with komac v2.11.2
+# yaml-language-server: $schema=https://aka.ms/winget-manifest.defaultLocale.1.9.0.schema.json
+
+PackageIdentifier: ggml.llamacpp
+PackageVersion: b7285
+PackageLocale: en-US
+Publisher: ggml
+PublisherUrl: https://github.com/ggml-org
+PublisherSupportUrl: https://github.com/ggml-org/llama.cpp/issues
+PackageName: llama.cpp
+PackageUrl: https://github.com/ggml-org/llama.cpp
+License: MIT
+LicenseUrl: https://github.com/ggml-org/llama.cpp/blob/HEAD/LICENSE
+ShortDescription: LLM inference in C/C++
+Tags:
+- ggml
+- llama
+ReleaseNotes: |-
+  Warning
+  Release Format Update: Linux releases will soon use .tar.gz archives instead of .zip. Please make the necessary changes to your deployment scripts.
+  HIP : fix RDNA4 build (#17792)
+  macOS/iOS:
+  - macOS Apple Silicon (arm64)
+  - macOS Intel (x64)
+  - iOS XCFramework
+  Linux:
+  - Ubuntu x64 (CPU)
+  - Ubuntu x64 (Vulkan)
+  - Ubuntu s390x (CPU)
+  Windows:
+  - Windows x64 (CPU)
+  - Windows arm64 (CPU)
+  - Windows x64 (CUDA)
+  - Windows x64 (Vulkan)
+  - Windows x64 (SYCL)
+  - Windows x64 (HIP)
+ReleaseNotesUrl: https://github.com/ggml-org/llama.cpp/releases/tag/b7285
+Documentations:
+- DocumentLabel: Wiki
+  DocumentUrl: https://github.com/ggml-org/llama.cpp/wiki
+ManifestType: defaultLocale
+ManifestVersion: 1.9.0
diff --git a/manifests/g/ggml/llamacpp/b7285/ggml.llamacpp.yaml b/manifests/g/ggml/llamacpp/b7285/ggml.llamacpp.yaml
new file mode 100644
index 000000000000..1c90033f9aac
--- /dev/null
+++ b/manifests/g/ggml/llamacpp/b7285/ggml.llamacpp.yaml
@@ -0,0 +1,8 @@
+# Created with komac v2.11.2
+# yaml-language-server: $schema=https://aka.ms/winget-manifest.version.1.9.0.schema.json
+
+PackageIdentifier: ggml.llamacpp
+PackageVersion: b7285
+DefaultLocale: en-US
+ManifestType: version
+ManifestVersion: 1.9.0