diff --git a/.github/workflows/ci3.yml b/.github/workflows/ci3.yml index ec1819545b6a..34828fdb19cb 100644 --- a/.github/workflows/ci3.yml +++ b/.github/workflows/ci3.yml @@ -260,3 +260,48 @@ jobs: AWS_SHUTDOWN_TIME: 180 run: | ./.github/ci3.sh network-tests-kind + + # iOS cross-compilation build for barretenberg. + # Runs in parallel with the main CI job on every PR (~9 min per target). + # This builds the bb-external static library for iOS devices and simulators. + # Non-blocking (continue-on-error) to avoid rugging releases - alerts #honk-team on failure. + build-bb-ios: + name: Build iOS static libraries + runs-on: macos-14 + if: github.event.pull_request.head.repo.fork != true && github.event.pull_request.draft == false + continue-on-error: true + strategy: + fail-fast: false + matrix: + include: + - preset: ios-arm64 + label: arm64-ios + - preset: ios-sim-arm64 + label: arm64-ios-sim + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ github.event.pull_request.head.sha || github.sha }} + + - name: Create Build Environment + run: | + brew install cmake ninja + + - name: Compile bb-external for iOS + working-directory: barretenberg/cpp + run: | + cmake --preset ${{ matrix.preset }} + cmake --build --preset ${{ matrix.preset }} --target bb-external + + - name: Notify Slack on failure + if: failure() + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + run: | + if [ -n "${SLACK_BOT_TOKEN}" ]; then + curl -X POST https://slack.com/api/chat.postMessage \ + -H "Authorization: Bearer $SLACK_BOT_TOKEN" \ + -H "Content-type: application/json" \ + --data "{\"channel\": \"#honk-team\", \"text\": \"iOS build (${{ matrix.preset }}) FAILED: \"}" + fi diff --git a/.github/workflows/publish-bb-mac.yml b/.github/workflows/publish-bb-mac.yml index eea9b8f765f0..df7935d4d4a3 100644 --- a/.github/workflows/publish-bb-mac.yml +++ b/.github/workflows/publish-bb-mac.yml @@ -1,10 +1,9 @@ name: Publish Barretenberg Mac on: - # We run this manually to patch v2. Otherwise this is obsolete as we cross-compile mac builds. 
- #push: - # tags: - # - "v*" + push: + tags: + - "v*" workflow_dispatch: # Allow pushing a manual nightly release inputs: @@ -18,9 +17,6 @@ on: description: Whether to publish the build artifacts type: boolean default: false - #schedule: - # # Run a nightly release at 2 AM UTC - # - cron: "0 2 * * *" permissions: # Necessary to upload new release artifacts @@ -28,78 +24,53 @@ permissions: issues: write jobs: - build-mac: - name: Build on Mac (matrix) - runs-on: ${{ matrix.runner }} + build-ios: + name: Build iOS static libraries + runs-on: macos-14 strategy: matrix: include: - - label: amd64-darwin - runner: macos-13 - cmake_flags: "" - optional: false - - label: arm64-darwin - runner: macos-14 - cmake_flags: "" - optional: false - - label: amd64-darwin-starknet - runner: macos-13 - cmake_flags: '-DCMAKE_CXX_FLAGS="-DSTARKNET_GARAGA_FLAVORS=1"' - optional: true - - label: arm64-darwin-starknet - runner: macos-14 - cmake_flags: '-DCMAKE_CXX_FLAGS="-DSTARKNET_GARAGA_FLAVORS=1"' - optional: true + - preset: ios-arm64 + label: arm64-ios + - preset: ios-sim-arm64 + label: arm64-ios-sim steps: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 with: ref: ${{ inputs.tag || github.sha }} - - name: Create Mac Build Environment + - name: Create Build Environment run: | - brew install cmake ninja llvm@20 - echo "BREW_PREFIX=$(brew --prefix)" >> $GITHUB_ENV + brew install cmake ninja - - name: Replace version string in main.cpp + - name: Compile bb-external for iOS working-directory: barretenberg/cpp run: | - version_string=${{ inputs.ref_name || inputs.tag || github.ref_name }} - version_string=$(echo $version_string | sed "s/\//_/") - sed -i.bak "s/00000000\.00000000\.00000000/$version_string/g" src/barretenberg/bb/cli.cpp + cmake --preset ${{ matrix.preset }} + cmake --build --preset ${{ matrix.preset }} --target bb-external - - name: Compile Barretenberg + - name: Package static library (${{ matrix.label }}) working-directory: barretenberg/cpp - continue-on-error: ${{ matrix.optional }} run: | - cmake --preset homebrew ${{ matrix.cmake_flags }} - cmake --build --preset homebrew --target bb - - - name: Package barretenberg artifact (${{ matrix.label }}) - working-directory: barretenberg/cpp/build/bin - continue-on-error: ${{ matrix.optional }} - run: | - mkdir dist - cp ./bb ./dist/bb - 7z a -ttar -so -an ./dist/* | 7z a -si ./barretenberg-${{ matrix.label }}.tar.gz + tar -czf barretenberg-static-${{ matrix.label }}.tar.gz -C build-${{ matrix.preset }}/lib libbb-external.a - name: Upload artifact (${{ matrix.label }}) uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 - continue-on-error: ${{ matrix.optional }} with: - name: barretenberg-${{ matrix.label }}.tar.gz - path: ./barretenberg/cpp/build/bin/barretenberg-${{ matrix.label }}.tar.gz + name: barretenberg-static-${{ matrix.label }}.tar.gz + path: ./barretenberg/cpp/barretenberg-static-${{ matrix.label }}.tar.gz retention-days: 3 build-check: name: Check builds are successful - needs: build-mac + needs: build-ios if: ${{ always() }} runs-on: ubuntu-latest steps: - name: Report overall success env: - FAIL: ${{ contains(needs.build-mac.*.result, 'failure') }} + FAIL: ${{ contains(needs.build-ios.result, 'failure') }} run: | if [[ $FAIL == true ]]; then echo "At least one job failed, release is unsuccessful." 
@@ -131,27 +102,15 @@ jobs: needs: build-check runs-on: ubuntu-latest steps: - - name: Download artifact (amd64-darwin) - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - with: - name: barretenberg-amd64-darwin.tar.gz - - - name: Download artifact (arm64-darwin) - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - with: - name: barretenberg-arm64-darwin.tar.gz - - - name: Download artifact (amd64-darwin-starknet) + - name: Download artifact (arm64-ios) uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - continue-on-error: true with: - name: barretenberg-amd64-darwin-starknet.tar.gz + name: barretenberg-static-arm64-ios.tar.gz - - name: Download artifact (arm64-darwin-starknet) + - name: Download artifact (arm64-ios-sim) uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - continue-on-error: true with: - name: barretenberg-arm64-darwin-starknet.tar.gz + name: barretenberg-static-arm64-ios-sim.tar.gz - name: Determine if prerelease id: check-prerelease @@ -171,16 +130,5 @@ jobs: tag_name: ${{ inputs.ref_name || inputs.tag || github.ref_name }} prerelease: ${{ steps.check-prerelease.outputs.is_prerelease == 'true' }} files: | - barretenberg-amd64-darwin.tar.gz - barretenberg-arm64-darwin.tar.gz - - - name: Publish to GitHub (starknet variants) - uses: softprops/action-gh-release@26994186c0ac3ef5cae75ac16aa32e8153525f77 - if: ${{ github.event_name != 'workflow_dispatch' || inputs.publish }} - continue-on-error: true - with: - tag_name: ${{ inputs.ref_name || inputs.tag || github.ref_name }} - prerelease: ${{ steps.check-prerelease.outputs.is_prerelease == 'true' }} - files: | - barretenberg-amd64-darwin-starknet.tar.gz - barretenberg-arm64-darwin-starknet.tar.gz + barretenberg-static-arm64-ios.tar.gz + barretenberg-static-arm64-ios-sim.tar.gz diff --git a/barretenberg/cpp/CMakeLists.txt b/barretenberg/cpp/CMakeLists.txt index 9e9aa7a8e03e..d27d8d4e210b 100644 --- a/barretenberg/cpp/CMakeLists.txt +++ b/barretenberg/cpp/CMakeLists.txt @@ -38,6 +38,7 @@ option(ENABLE_PIC "Builds with position independent code" OFF) option(SYNTAX_ONLY "only check syntax (-fsyntax-only)" OFF) option(ENABLE_WASM_BENCH "Enable BB_BENCH benchmarking support in WASM builds (dev only, not for releases)" OFF) option(AVM "enable building of vm2 module and bb-avm" ON) +option(MOBILE "Build for mobile (excludes lmdb, world_state, vm2, nodejs_module)" OFF) if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm64") message(STATUS "Compiling for ARM.") @@ -154,7 +155,7 @@ include(cmake/nlohmann_json.cmake) include(cmake/httplib.cmake) include(cmake/libdeflate.cmake) -if (NOT WASM) +if (NOT WASM AND NOT MOBILE) include(cmake/lmdb.cmake) endif() diff --git a/barretenberg/cpp/CMakePresets.json b/barretenberg/cpp/CMakePresets.json index ffa1cbe51fac..6cca44a63d5e 100644 --- a/barretenberg/cpp/CMakePresets.json +++ b/barretenberg/cpp/CMakePresets.json @@ -530,6 +530,40 @@ "AVM_TRANSPILER_LIB": "${sourceDir}/../../avm-transpiler/target/aarch64-apple-darwin/release/libavm_transpiler.a", "HAVE_STD_REGEX": "ON" } + }, + { + "name": "ios-arm64", + "displayName": "Build for iOS arm64 (device)", + "description": "Build for iOS arm64 devices using ios.toolchain.cmake", + "binaryDir": "build-ios-arm64", + "generator": "Ninja", + "toolchainFile": "cmake/toolchains/ios.toolchain.cmake", + "cacheVariables": { + "PLATFORM": "OS64", + "DEPLOYMENT_TARGET": "16.3", + "CMAKE_BUILD_TYPE": "Release", + "ENABLE_PIC": "ON", + 
"ENABLE_ARC": "OFF", + "HAVE_STD_REGEX": "ON", + "MOBILE": "ON" + } + }, + { + "name": "ios-sim-arm64", + "displayName": "Build for iOS Simulator arm64", + "description": "Build for iOS Simulator on Apple Silicon using ios.toolchain.cmake", + "binaryDir": "build-ios-sim-arm64", + "generator": "Ninja", + "toolchainFile": "cmake/toolchains/ios.toolchain.cmake", + "cacheVariables": { + "PLATFORM": "SIMULATORARM64", + "DEPLOYMENT_TARGET": "16.3", + "CMAKE_BUILD_TYPE": "Release", + "ENABLE_PIC": "ON", + "ENABLE_ARC": "OFF", + "HAVE_STD_REGEX": "ON", + "MOBILE": "ON" + } } ], "buildPresets": [ @@ -717,6 +751,16 @@ "name": "zig-arm64-macos", "configurePreset": "zig-arm64-macos", "inheritConfigureEnvironment": true + }, + { + "name": "ios-arm64", + "configurePreset": "ios-arm64", + "inheritConfigureEnvironment": true + }, + { + "name": "ios-sim-arm64", + "configurePreset": "ios-sim-arm64", + "inheritConfigureEnvironment": true } ], "testPresets": [ diff --git a/barretenberg/cpp/bootstrap.sh b/barretenberg/cpp/bootstrap.sh index d30cfedba71b..b4514dd33059 100755 --- a/barretenberg/cpp/bootstrap.sh +++ b/barretenberg/cpp/bootstrap.sh @@ -107,6 +107,17 @@ function build_cross { fi } +# Build for iOS (must run on macOS with Xcode installed) +# Arg is preset name: ios-arm64 or ios-sim-arm64 +function build_ios { + set -eu + preset=$1 + if ! cache_download barretenberg-$preset-$hash.zst; then + build_preset $preset --target bb-external + cache_upload barretenberg-$preset-$hash.zst build-$preset/lib + fi +} + # Selectively build components with address sanitizer (with optimizations) function build_asan_fast { set -eu @@ -230,9 +241,23 @@ function build_release_dir { if [ -f build-zig-arm64-linux/lib/libbb-external.a ]; then tar -czf build-release/barretenberg-static-arm64-linux.tar.gz -C build-zig-arm64-linux/lib libbb-external.a fi + if [ -f build-zig-amd64-macos/lib/libbb-external.a ]; then + tar -czf build-release/barretenberg-static-amd64-darwin.tar.gz -C build-zig-amd64-macos/lib libbb-external.a + fi + if [ -f build-zig-arm64-macos/lib/libbb-external.a ]; then + tar -czf build-release/barretenberg-static-arm64-darwin.tar.gz -C build-zig-arm64-macos/lib libbb-external.a + fi + + # Package iOS static libraries (built on macOS runners) + if [ -f build-ios-arm64/lib/libbb-external.a ]; then + tar -czf build-release/barretenberg-static-arm64-ios.tar.gz -C build-ios-arm64/lib libbb-external.a + fi + if [ -f build-ios-sim-arm64/lib/libbb-external.a ]; then + tar -czf build-release/barretenberg-static-arm64-ios-sim.tar.gz -C build-ios-sim-arm64/lib libbb-external.a + fi } -export -f build_preset build_native_objects build_cross_objects build_native build_cross build_asan_fast build_wasm build_wasm_threads build_gcc_syntax_check_only build_fuzzing_syntax_check_only build_smt_verification inject_version +export -f build_preset build_native_objects build_cross_objects build_native build_cross build_ios build_asan_fast build_wasm build_wasm_threads build_gcc_syntax_check_only build_fuzzing_syntax_check_only build_smt_verification inject_version function build { echo_header "bb cpp build" diff --git a/barretenberg/cpp/cmake/module.cmake b/barretenberg/cpp/cmake/module.cmake index 9a759d2e0a2e..e8affebfe7fc 100644 --- a/barretenberg/cpp/cmake/module.cmake +++ b/barretenberg/cpp/cmake/module.cmake @@ -105,7 +105,7 @@ function(barretenberg_module_with_sources MODULE_NAME) add_dependencies(${MODULE_NAME}_objects msgpack-c) # enable lmdb downloading via dependency (solves race condition) - if(NOT 
CMAKE_SYSTEM_PROCESSOR MATCHES "wasm32") + if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "wasm32" AND NOT MOBILE) add_dependencies(${MODULE_NAME} lmdb_repo) add_dependencies(${MODULE_NAME}_objects lmdb_repo) endif() @@ -192,7 +192,7 @@ function(barretenberg_module_with_sources MODULE_NAME) add_dependencies(${MODULE_NAME}_tests msgpack-c) # enable lmdb downloading via dependency (solves race condition) - if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "wasm32") + if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "wasm32" AND NOT MOBILE) add_dependencies(${MODULE_NAME}_test_objects lmdb_repo) add_dependencies(${MODULE_NAME}_tests lmdb_repo) endif() @@ -298,7 +298,7 @@ function(barretenberg_module_with_sources MODULE_NAME) add_dependencies(${BENCHMARK_NAME}_bench msgpack-c) # enable lmdb downloading via dependency (solves race condition) - if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "wasm32") + if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "wasm32" AND NOT MOBILE) add_dependencies(${BENCHMARK_NAME}_bench_objects lmdb_repo) add_dependencies(${BENCHMARK_NAME}_bench lmdb_repo) endif() diff --git a/barretenberg/cpp/cmake/toolchains/ios.toolchain.cmake b/barretenberg/cpp/cmake/toolchains/ios.toolchain.cmake new file mode 100644 index 000000000000..57fbc3403830 --- /dev/null +++ b/barretenberg/cpp/cmake/toolchains/ios.toolchain.cmake @@ -0,0 +1,1135 @@ +# This file is part of the ios-cmake project. It was retrieved from +# https://github.com/leetal/ios-cmake.git, which is a fork of +# https://github.com/gerstrong/ios-cmake.git, which is a fork of +# https://github.com/cristeab/ios-cmake.git, which is a fork of +# https://code.google.com/p/ios-cmake/. Which in turn is based off of +# the Platform/Darwin.cmake and Platform/UnixPaths.cmake files which +# are included with CMake 2.8.4 +# +# The ios-cmake project is licensed under the new BSD license. +# +# Copyright (c) 2014, Bogdan Cristea and LTE Engineering Software, +# Kitware, Inc., Insight Software Consortium. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# This file is based on the Platform/Darwin.cmake and +# Platform/UnixPaths.cmake files which are included with CMake 2.8.4 +# It has been altered for iOS development. +# +# Updated by Alex Stewart (alexs.mac@gmail.com) +# +# ***************************************************************************** +# Now maintained by Alexander Widerberg (widerbergaren [at] gmail.com) +# under the BSD-3-Clause license +# https://github.com/leetal/ios-cmake +# ***************************************************************************** +# +# INFORMATION / HELP +# +############################################################################### +# OPTIONS # +############################################################################### +# +# PLATFORM: (default "OS64") +# OS = Build for iPhoneOS. +# OS64 = Build for arm64 iphoneOS. +# OS64COMBINED = Build for arm64 x86_64 iphoneOS + iphoneOS Simulator. Combined into FAT STATIC lib (only supported on 3.14+ of CMake with "-G Xcode" argument in combination with the "cmake --install" CMake build step) +# SIMULATOR = Build for x86 i386 iphoneOS Simulator. +# SIMULATOR64 = Build for x86_64 iphoneOS Simulator. +# SIMULATORARM64 = Build for arm64 iphoneOS Simulator. +# SIMULATOR64COMBINED = Build for arm64 x86_64 iphoneOS Simulator. Combined into FAT STATIC lib (supported on 3.14+ of CMakewith "-G Xcode" argument ONLY) +# TVOS = Build for arm64 tvOS. +# TVOSCOMBINED = Build for arm64 x86_64 tvOS + tvOS Simulator. Combined into FAT STATIC lib (only supported on 3.14+ of CMake with "-G Xcode" argument in combination with the "cmake --install" CMake build step) +# SIMULATOR_TVOS = Build for x86_64 tvOS Simulator. +# SIMULATORARM64_TVOS = Build for arm64 tvOS Simulator. +# VISIONOSCOMBINED = Build for arm64 visionOS + visionOS Simulator. Combined into FAT STATIC lib (only supported on 3.14+ of CMake with "-G Xcode" argument in combination with the "cmake --install" CMake build step) +# VISIONOS = Build for arm64 visionOS. +# SIMULATOR_VISIONOS = Build for arm64 visionOS Simulator. +# WATCHOS = Build for armv7k arm64_32 for watchOS. +# WATCHOSCOMBINED = Build for armv7k arm64_32 x86_64 watchOS + watchOS Simulator. Combined into FAT STATIC lib (only supported on 3.14+ of CMake with "-G Xcode" argument in combination with the "cmake --install" CMake build step) +# SIMULATOR_WATCHOS = Build for x86_64 for watchOS Simulator. +# SIMULATORARM64_WATCHOS = Build for arm64 for watchOS Simulator. +# MAC = Build for x86_64 macOS. +# MAC_ARM64 = Build for Apple Silicon macOS. +# MAC_UNIVERSAL = Combined build for x86_64 and Apple Silicon on macOS. +# MAC_CATALYST = Build for x86_64 macOS with Catalyst support (iOS toolchain on macOS). +# Note: The build argument "MACOSX_DEPLOYMENT_TARGET" can be used to control min-version of macOS +# MAC_CATALYST_ARM64 = Build for Apple Silicon macOS with Catalyst support (iOS toolchain on macOS). +# Note: The build argument "MACOSX_DEPLOYMENT_TARGET" can be used to control min-version of macOS +# +# CMAKE_OSX_SYSROOT: Path to the SDK to use. By default this is +# automatically determined from PLATFORM and xcodebuild, but +# can also be manually specified (although this should not be required). +# +# CMAKE_DEVELOPER_ROOT: Path to the Developer directory for the platform +# being compiled for. By default, this is automatically determined from +# CMAKE_OSX_SYSROOT, but can also be manually specified (although this should +# not be required). +# +# DEPLOYMENT_TARGET: Minimum SDK version to target. 
Default 6.0 on watchOS, 13.0 on tvOS+iOS/iPadOS, 11.0 on macOS, 1.0 on visionOS +# +# NAMED_LANGUAGE_SUPPORT: +# ON (default) = Will require "enable_language(OBJC) and/or enable_language(OBJCXX)" for full OBJC|OBJCXX support +# OFF = Will embed the OBJC and OBJCXX flags into the CMAKE_C_FLAGS and CMAKE_CXX_FLAGS (legacy behavior, CMake version < 3.16) +# +# ENABLE_BITCODE: (ON|OFF) Enables or disables bitcode support. Default OFF +# +# ENABLE_ARC: (ON|OFF) Enables or disables ARC support. Default ON (ARC enabled by default) +# +# ENABLE_VISIBILITY: (ON|OFF) Enables or disables symbol visibility support. Default OFF (visibility hidden by default) +# +# ENABLE_STRICT_TRY_COMPILE: (ON|OFF) Enables or disables strict try_compile() on all Check* directives (will run linker +# to actually check if linking is possible). Default OFF (will set CMAKE_TRY_COMPILE_TARGET_TYPE to STATIC_LIBRARY) +# +# ARCHS: (armv7 armv7s armv7k arm64 arm64_32 i386 x86_64) If specified, will override the default architectures for the given PLATFORM +# OS = armv7 armv7s arm64 (if applicable) +# OS64 = arm64 (if applicable) +# SIMULATOR = i386 +# SIMULATOR64 = x86_64 +# SIMULATORARM64 = arm64 +# TVOS = arm64 +# SIMULATOR_TVOS = x86_64 (i386 has since long been deprecated) +# SIMULATORARM64_TVOS = arm64 +# WATCHOS = armv7k arm64_32 (if applicable) +# SIMULATOR_WATCHOS = x86_64 (i386 has since long been deprecated) +# SIMULATORARM64_WATCHOS = arm64 +# MAC = x86_64 +# MAC_ARM64 = arm64 +# MAC_UNIVERSAL = x86_64 arm64 +# MAC_CATALYST = x86_64 +# MAC_CATALYST_ARM64 = arm64 +# +# NOTE: When manually specifying ARCHS, put a semi-colon between the entries. E.g., -DARCHS="armv7;arm64" +# +############################################################################### +# END OPTIONS # +############################################################################### +# +# This toolchain defines the following properties (available via get_property()) for use externally: +# +# PLATFORM: The currently targeted platform. +# XCODE_VERSION: Version number (not including Build version) of Xcode detected. +# SDK_VERSION: Version of SDK being used. +# OSX_ARCHITECTURES: Architectures being compiled for (generated from PLATFORM). +# APPLE_TARGET_TRIPLE: Used by autoconf build systems. NOTE: If "ARCHS" is overridden, this will *NOT* be set! +# +# This toolchain defines the following macros for use externally: +# +# set_xcode_property (TARGET XCODE_PROPERTY XCODE_VALUE XCODE_VARIANT) +# A convenience macro for setting xcode specific properties on targets. +# Available variants are: All, Release, RelWithDebInfo, Debug, MinSizeRel +# example: set_xcode_property (myioslib IPHONEOS_DEPLOYMENT_TARGET "3.1" "all"). +# +# find_host_package (PROGRAM ARGS) +# A macro used to find executable programs on the host system, not within the +# environment. Thanks to the android-cmake project for providing the +# command. +# + +cmake_minimum_required(VERSION 3.8.0) + +# CMake invokes the toolchain file twice during the first build, but only once during subsequent rebuilds. 
+if(DEFINED ENV{_IOS_TOOLCHAIN_HAS_RUN}) + return() +endif() +set(ENV{_IOS_TOOLCHAIN_HAS_RUN} true) + +# List of supported platform values +list(APPEND _supported_platforms + "OS" "OS64" "OS64COMBINED" "SIMULATOR" "SIMULATOR64" "SIMULATORARM64" "SIMULATOR64COMBINED" + "TVOS" "TVOSCOMBINED" "SIMULATOR_TVOS" "SIMULATORARM64_TVOS" + "WATCHOS" "WATCHOSCOMBINED" "SIMULATOR_WATCHOS" "SIMULATORARM64_WATCHOS" + "MAC" "MAC_ARM64" "MAC_UNIVERSAL" + "VISIONOS" "SIMULATOR_VISIONOS" "VISIONOSCOMBINED" + "MAC_CATALYST" "MAC_CATALYST_ARM64") + +# Cache what generator is used +set(USED_CMAKE_GENERATOR "${CMAKE_GENERATOR}") + +# Check if using a CMake version capable of building combined FAT builds (simulator and target slices combined in one static lib) +if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.14") + set(MODERN_CMAKE YES) +endif() + +# Get the Xcode version being used. +# Problem: CMake runs toolchain files multiple times, but can't read cache variables on some runs. +# Workaround: On the first run (in which cache variables are always accessible), set an intermediary environment variable. +# +# NOTE: This pattern is used in many places in this toolchain to speed up checks of all sorts +if(DEFINED XCODE_VERSION_INT) + # Environment variables are always preserved. + set(ENV{_XCODE_VERSION_INT} "${XCODE_VERSION_INT}") +elseif(DEFINED ENV{_XCODE_VERSION_INT}) + set(XCODE_VERSION_INT "$ENV{_XCODE_VERSION_INT}") +elseif(NOT DEFINED XCODE_VERSION_INT) + find_program(XCODEBUILD_EXECUTABLE xcodebuild) + if(NOT XCODEBUILD_EXECUTABLE) + message(FATAL_ERROR "xcodebuild not found. Please install either the standalone commandline tools or Xcode.") + endif() + execute_process(COMMAND ${XCODEBUILD_EXECUTABLE} -version + OUTPUT_VARIABLE XCODE_VERSION_INT + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE) + string(REGEX MATCH "Xcode [0-9\\.]+" XCODE_VERSION_INT "${XCODE_VERSION_INT}") + string(REGEX REPLACE "Xcode ([0-9\\.]+)" "\\1" XCODE_VERSION_INT "${XCODE_VERSION_INT}") + set(XCODE_VERSION_INT "${XCODE_VERSION_INT}" CACHE INTERNAL "") +endif() + +# Assuming that xcode 12.0 is installed you most probably have ios sdk 14.0 or later installed (tested on Big Sur) +# if you don't set a deployment target it will be set the way you only get 64-bit builds +#if(NOT DEFINED DEPLOYMENT_TARGET AND XCODE_VERSION_INT VERSION_GREATER 12.0) +# Temporarily fix the arm64 issues in CMake install-combined by excluding arm64 for simulator builds (needed for Apple Silicon...) +# set(CMAKE_XCODE_ATTRIBUTE_EXCLUDED_ARCHS[sdk=iphonesimulator*] "arm64") +#endif() + +# Check if the platform variable is set +if(DEFINED PLATFORM) + # Environment variables are always preserved. + set(ENV{_PLATFORM} "${PLATFORM}") +elseif(DEFINED ENV{_PLATFORM}) + set(PLATFORM "$ENV{_PLATFORM}") +elseif(NOT DEFINED PLATFORM) + message(FATAL_ERROR "PLATFORM argument not set. Bailing configure since I don't know what target you want to build for!") +endif () + +if(PLATFORM MATCHES ".*COMBINED" AND NOT CMAKE_GENERATOR MATCHES "Xcode") + message(FATAL_ERROR "The combined builds support requires Xcode to be used as a generator via '-G Xcode' command-line argument in CMake") +endif() + +# Safeguard that the platform value is set and is one of the supported values +list(FIND _supported_platforms ${PLATFORM} contains_PLATFORM) +if("${contains_PLATFORM}" EQUAL "-1") + string(REPLACE ";" "\n * " _supported_platforms_formatted "${_supported_platforms}") + message(FATAL_ERROR " Invalid PLATFORM specified! 
Current value: ${PLATFORM}.\n" + " Supported PLATFORM values: \n * ${_supported_platforms_formatted}") +endif() + +# Check if Apple Silicon is supported +if(PLATFORM MATCHES "^(MAC_ARM64)$|^(MAC_CATALYST_ARM64)$|^(MAC_UNIVERSAL)$" AND ${CMAKE_VERSION} VERSION_LESS "3.19.5") + message(FATAL_ERROR "Apple Silicon builds requires a minimum of CMake 3.19.5") +endif() + +# Touch the toolchain variable to suppress the "unused variable" warning. +# This happens if CMake is invoked with the same command line the second time. +if(CMAKE_TOOLCHAIN_FILE) +endif() + +# Fix for PThread library not in path +set(CMAKE_THREAD_LIBS_INIT "-lpthread") +set(CMAKE_HAVE_THREADS_LIBRARY 1) +set(CMAKE_USE_WIN32_THREADS_INIT 0) +set(CMAKE_USE_PTHREADS_INIT 1) + +# Specify named language support defaults. +if(NOT DEFINED NAMED_LANGUAGE_SUPPORT AND ${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.16") + set(NAMED_LANGUAGE_SUPPORT ON) + message(STATUS "[DEFAULTS] Using explicit named language support! E.g., enable_language(CXX) is needed in the project files.") +elseif(NOT DEFINED NAMED_LANGUAGE_SUPPORT AND ${CMAKE_VERSION} VERSION_LESS "3.16") + set(NAMED_LANGUAGE_SUPPORT OFF) + message(STATUS "[DEFAULTS] Disabling explicit named language support. Falling back to legacy behavior.") +elseif(DEFINED NAMED_LANGUAGE_SUPPORT AND ${CMAKE_VERSION} VERSION_LESS "3.16") + message(FATAL_ERROR "CMake named language support for OBJC and OBJCXX was added in CMake 3.16.") +endif() +set(NAMED_LANGUAGE_SUPPORT_INT ${NAMED_LANGUAGE_SUPPORT} CACHE BOOL + "Whether or not to enable explicit named language support" FORCE) + +# Specify the minimum version of the deployment target. +if(NOT DEFINED DEPLOYMENT_TARGET) + if (PLATFORM MATCHES "WATCHOS") + # Unless specified, SDK version 4.0 is used by default as minimum target version (watchOS). + set(DEPLOYMENT_TARGET "6.0") + elseif(PLATFORM STREQUAL "MAC") + # Unless specified, SDK version 10.13 (High Sierra) is used by default as the minimum target version (macos). + set(DEPLOYMENT_TARGET "11.0") + elseif(PLATFORM STREQUAL "VISIONOS" OR PLATFORM STREQUAL "SIMULATOR_VISIONOS" OR PLATFORM STREQUAL "VISIONOSCOMBINED") + # Unless specified, SDK version 1.0 is used by default as minimum target version (visionOS). + set(DEPLOYMENT_TARGET "1.0") + elseif(PLATFORM STREQUAL "MAC_ARM64") + # Unless specified, SDK version 11.0 (Big Sur) is used by default as the minimum target version (macOS on arm). + set(DEPLOYMENT_TARGET "11.0") + elseif(PLATFORM STREQUAL "MAC_UNIVERSAL") + # Unless specified, SDK version 11.0 (Big Sur) is used by default as minimum target version for universal builds. + set(DEPLOYMENT_TARGET "11.0") + elseif(PLATFORM STREQUAL "MAC_CATALYST" OR PLATFORM STREQUAL "MAC_CATALYST_ARM64") + # Unless specified, SDK version 13.0 is used by default as the minimum target version (mac catalyst minimum requirement). + set(DEPLOYMENT_TARGET "13.1") + else() + # Unless specified, SDK version 11.0 is used by default as the minimum target version (iOS, tvOS). 
+ set(DEPLOYMENT_TARGET "13.0") + endif() + message(STATUS "[DEFAULTS] Using the default min-version since DEPLOYMENT_TARGET not provided!") +elseif(DEFINED DEPLOYMENT_TARGET AND PLATFORM MATCHES "^MAC_CATALYST" AND ${DEPLOYMENT_TARGET} VERSION_LESS "13.1") + message(FATAL_ERROR "Mac Catalyst builds requires a minimum deployment target of 13.1!") +endif() + +# Store the DEPLOYMENT_TARGET in the cache +set(DEPLOYMENT_TARGET "${DEPLOYMENT_TARGET}" CACHE INTERNAL "") + +# Handle the case where we are targeting iOS and a version above 10.3.4 (32-bit support dropped officially) +if(PLATFORM STREQUAL "OS" AND DEPLOYMENT_TARGET VERSION_GREATER_EQUAL 10.3.4) + set(PLATFORM "OS64") + message(STATUS "Targeting minimum SDK version ${DEPLOYMENT_TARGET}. Dropping 32-bit support.") +elseif(PLATFORM STREQUAL "SIMULATOR" AND DEPLOYMENT_TARGET VERSION_GREATER_EQUAL 10.3.4) + set(PLATFORM "SIMULATOR64") + message(STATUS "Targeting minimum SDK version ${DEPLOYMENT_TARGET}. Dropping 32-bit support.") +endif() + +set(PLATFORM_INT "${PLATFORM}") + +if(DEFINED ARCHS) + string(REPLACE ";" "-" ARCHS_SPLIT "${ARCHS}") +endif() + +# Determine the platform name and architectures for use in xcodebuild commands +# from the specified PLATFORM_INT name. +if(PLATFORM_INT STREQUAL "OS") + set(SDK_NAME iphoneos) + if(NOT ARCHS) + set(ARCHS armv7 armv7s arm64) + set(APPLE_TARGET_TRIPLE_INT arm-apple-ios${DEPLOYMENT_TARGET}) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-ios${DEPLOYMENT_TARGET}) + endif() +elseif(PLATFORM_INT STREQUAL "OS64") + set(SDK_NAME iphoneos) + if(NOT ARCHS) + if (XCODE_VERSION_INT VERSION_GREATER 10.0) + set(ARCHS arm64) # FIXME: Add arm64e when Apple has fixed the integration issues with it, libarclite_iphoneos.a is currently missing bitcode markers for example + else() + set(ARCHS arm64) + endif() + set(APPLE_TARGET_TRIPLE_INT arm64-apple-ios${DEPLOYMENT_TARGET}) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-ios${DEPLOYMENT_TARGET}) + endif() +elseif(PLATFORM_INT STREQUAL "OS64COMBINED") + set(SDK_NAME iphoneos) + if(MODERN_CMAKE) + if(NOT ARCHS) + if (XCODE_VERSION_INT VERSION_GREATER 12.0) + set(ARCHS arm64 x86_64) + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=iphoneos*] "arm64") + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=iphonesimulator*] "x86_64 arm64") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=iphoneos*] "arm64") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=iphonesimulator*] "x86_64 arm64") + else() + set(ARCHS arm64 x86_64) + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=iphoneos*] "arm64") + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=iphonesimulator*] "x86_64") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=iphoneos*] "arm64") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=iphonesimulator*] "x86_64") + endif() + set(APPLE_TARGET_TRIPLE_INT arm64-x86_64-apple-ios${DEPLOYMENT_TARGET}) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-ios${DEPLOYMENT_TARGET}) + endif() + else() + message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the OS64COMBINED setting work") + endif() +elseif(PLATFORM_INT STREQUAL "SIMULATOR64COMBINED") + set(SDK_NAME iphonesimulator) + if(MODERN_CMAKE) + if(NOT ARCHS) + if (XCODE_VERSION_INT VERSION_GREATER 12.0) + set(ARCHS arm64 x86_64) # FIXME: Add arm64e when Apple have fixed the integration issues with it, libarclite_iphoneos.a is currently missing bitcode markers for example + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=iphoneos*] "") + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=iphonesimulator*] "x86_64 arm64") + 
set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=iphoneos*] "") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=iphonesimulator*] "x86_64 arm64") + else() + set(ARCHS arm64 x86_64) + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=iphoneos*] "") + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=iphonesimulator*] "x86_64") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=iphoneos*] "") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=iphonesimulator*] "x86_64") + endif() + set(APPLE_TARGET_TRIPLE_INT aarch64-x86_64-apple-ios${DEPLOYMENT_TARGET}-simulator) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-ios${DEPLOYMENT_TARGET}-simulator) + endif() + else() + message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the SIMULATOR64COMBINED setting work") + endif() +elseif(PLATFORM_INT STREQUAL "SIMULATOR") + set(SDK_NAME iphonesimulator) + if(NOT ARCHS) + set(ARCHS i386) + set(APPLE_TARGET_TRIPLE_INT i386-apple-ios${DEPLOYMENT_TARGET}-simulator) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-ios${DEPLOYMENT_TARGET}-simulator) + endif() + message(DEPRECATION "SIMULATOR IS DEPRECATED. Consider using SIMULATOR64 instead.") +elseif(PLATFORM_INT STREQUAL "SIMULATOR64") + set(SDK_NAME iphonesimulator) + if(NOT ARCHS) + set(ARCHS x86_64) + set(APPLE_TARGET_TRIPLE_INT x86_64-apple-ios${DEPLOYMENT_TARGET}-simulator) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-ios${DEPLOYMENT_TARGET}-simulator) + endif() +elseif(PLATFORM_INT STREQUAL "SIMULATORARM64") + set(SDK_NAME iphonesimulator) + if(NOT ARCHS) + set(ARCHS arm64) + set(APPLE_TARGET_TRIPLE_INT arm64-apple-ios${DEPLOYMENT_TARGET}-simulator) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-ios${DEPLOYMENT_TARGET}-simulator) + endif() +elseif(PLATFORM_INT STREQUAL "TVOS") + set(SDK_NAME appletvos) + if(NOT ARCHS) + set(ARCHS arm64) + set(APPLE_TARGET_TRIPLE_INT arm64-apple-tvos${DEPLOYMENT_TARGET}) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-tvos${DEPLOYMENT_TARGET}) + endif() +elseif (PLATFORM_INT STREQUAL "TVOSCOMBINED") + set(SDK_NAME appletvos) + if(MODERN_CMAKE) + if(NOT ARCHS) + set(ARCHS arm64 x86_64) + set(APPLE_TARGET_TRIPLE_INT arm64-x86_64-apple-tvos${DEPLOYMENT_TARGET}) + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=appletvos*] "arm64") + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=appletvsimulator*] "x86_64 arm64") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=appletvos*] "arm64") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=appletvsimulator*] "x86_64 arm64") + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-tvos${DEPLOYMENT_TARGET}) + endif() + else() + message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the TVOSCOMBINED setting work") + endif() +elseif(PLATFORM_INT STREQUAL "SIMULATOR_TVOS") + set(SDK_NAME appletvsimulator) + if(NOT ARCHS) + set(ARCHS x86_64) + set(APPLE_TARGET_TRIPLE_INT x86_64-apple-tvos${DEPLOYMENT_TARGET}-simulator) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-tvos${DEPLOYMENT_TARGET}-simulator) + endif() +elseif(PLATFORM_INT STREQUAL "SIMULATORARM64_TVOS") + set(SDK_NAME appletvsimulator) + if(NOT ARCHS) + set(ARCHS arm64) + set(APPLE_TARGET_TRIPLE_INT arm64-apple-tvos${DEPLOYMENT_TARGET}-simulator) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-tvos${DEPLOYMENT_TARGET}-simulator) + endif() +elseif(PLATFORM_INT STREQUAL "WATCHOS") + set(SDK_NAME watchos) + if(NOT ARCHS) + if (XCODE_VERSION_INT VERSION_GREATER 10.0) + set(ARCHS armv7k arm64_32) + set(APPLE_TARGET_TRIPLE_INT arm64_32-apple-watchos${DEPLOYMENT_TARGET}) + else() + 
set(ARCHS armv7k) + set(APPLE_TARGET_TRIPLE_INT arm-apple-watchos${DEPLOYMENT_TARGET}) + endif() + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-watchos${DEPLOYMENT_TARGET}) + endif() +elseif(PLATFORM_INT STREQUAL "WATCHOSCOMBINED") + set(SDK_NAME watchos) + if(MODERN_CMAKE) + if(NOT ARCHS) + if (XCODE_VERSION_INT VERSION_GREATER 10.0) + set(ARCHS armv7k arm64_32 i386) + set(APPLE_TARGET_TRIPLE_INT arm64_32-i386-apple-watchos${DEPLOYMENT_TARGET}) + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=watchos*] "armv7k arm64_32") + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=watchsimulator*] "i386") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=watchos*] "armv7k arm64_32") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=watchsimulator*] "i386") + else() + set(ARCHS armv7k i386) + set(APPLE_TARGET_TRIPLE_INT arm-i386-apple-watchos${DEPLOYMENT_TARGET}) + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=watchos*] "armv7k") + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=watchsimulator*] "i386") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=watchos*] "armv7k") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=watchsimulator*] "i386") + endif() + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-watchos${DEPLOYMENT_TARGET}) + endif() + else() + message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the WATCHOSCOMBINED setting work") + endif() +elseif(PLATFORM_INT STREQUAL "SIMULATOR_WATCHOS") + set(SDK_NAME watchsimulator) + if(NOT ARCHS) + set(ARCHS i386) + set(APPLE_TARGET_TRIPLE_INT i386-apple-watchos${DEPLOYMENT_TARGET}-simulator) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-watchos${DEPLOYMENT_TARGET}-simulator) + endif() +elseif(PLATFORM_INT STREQUAL "SIMULATORARM64_WATCHOS") + set(SDK_NAME watchsimulator) + if(NOT ARCHS) + set(ARCHS arm64) + set(APPLE_TARGET_TRIPLE_INT arm64-apple-watchos${DEPLOYMENT_TARGET}-simulator) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-watchos${DEPLOYMENT_TARGET}-simulator) + endif() +elseif(PLATFORM_INT STREQUAL "SIMULATOR_VISIONOS") + set(SDK_NAME xrsimulator) + if(NOT ARCHS) + set(ARCHS arm64) + set(APPLE_TARGET_TRIPLE_INT arm64-apple-xros${DEPLOYMENT_TARGET}-simulator) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-xros${DEPLOYMENT_TARGET}-simulator) + endif() +elseif(PLATFORM_INT STREQUAL "VISIONOS") + set(SDK_NAME xros) + if(NOT ARCHS) + set(ARCHS arm64) + set(APPLE_TARGET_TRIPLE_INT arm64-apple-xros${DEPLOYMENT_TARGET}) + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-xros${DEPLOYMENT_TARGET}) + endif() +elseif(PLATFORM_INT STREQUAL "VISIONOSCOMBINED") + set(SDK_NAME xros) + if(MODERN_CMAKE) + if(NOT ARCHS) + set(ARCHS arm64) + set(APPLE_TARGET_TRIPLE_INT arm64-apple-xros${DEPLOYMENT_TARGET}) + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=xros*] "arm64") + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=xrsimulator*] "arm64") + else() + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-xros${DEPLOYMENT_TARGET}) + endif() + else() + message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the VISIONOSCOMBINED setting work") + endif() +elseif(PLATFORM_INT STREQUAL "MAC" OR PLATFORM_INT STREQUAL "MAC_CATALYST") + set(SDK_NAME macosx) + if(NOT ARCHS) + set(ARCHS x86_64) + endif() + string(REPLACE ";" "-" ARCHS_SPLIT "${ARCHS}") + if(PLATFORM_INT STREQUAL "MAC") + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-macosx${DEPLOYMENT_TARGET}) + elseif(PLATFORM_INT STREQUAL "MAC_CATALYST") + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-ios${DEPLOYMENT_TARGET}-macabi) + endif() +elseif(PLATFORM_INT MATCHES 
"^(MAC_ARM64)$|^(MAC_CATALYST_ARM64)$") + set(SDK_NAME macosx) + if(NOT ARCHS) + set(ARCHS arm64) + endif() + string(REPLACE ";" "-" ARCHS_SPLIT "${ARCHS}") + if(PLATFORM_INT STREQUAL "MAC_ARM64") + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-macosx${DEPLOYMENT_TARGET}) + elseif(PLATFORM_INT STREQUAL "MAC_CATALYST_ARM64") + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-ios${DEPLOYMENT_TARGET}-macabi) + endif() +elseif(PLATFORM_INT STREQUAL "MAC_UNIVERSAL") + set(SDK_NAME macosx) + if(NOT ARCHS) + set(ARCHS "x86_64;arm64") + endif() + string(REPLACE ";" "-" ARCHS_SPLIT "${ARCHS}") + set(APPLE_TARGET_TRIPLE_INT ${ARCHS_SPLIT}-apple-macosx${DEPLOYMENT_TARGET}) +else() + message(FATAL_ERROR "Invalid PLATFORM: ${PLATFORM_INT}") +endif() + +string(REPLACE ";" " " ARCHS_SPACED "${ARCHS}") + +if(MODERN_CMAKE AND PLATFORM_INT MATCHES ".*COMBINED" AND NOT CMAKE_GENERATOR MATCHES "Xcode") + message(FATAL_ERROR "The COMBINED options only work with Xcode generator, -G Xcode") +endif() + +if(CMAKE_GENERATOR MATCHES "Xcode" AND PLATFORM_INT MATCHES "^MAC_CATALYST") + set(CMAKE_XCODE_ATTRIBUTE_CLANG_CXX_LIBRARY "libc++") + set(CMAKE_XCODE_ATTRIBUTE_SUPPORTED_PLATFORMS "macosx") + set(CMAKE_XCODE_ATTRIBUTE_SUPPORTS_MACCATALYST "YES") + if(NOT DEFINED MACOSX_DEPLOYMENT_TARGET) + set(CMAKE_XCODE_ATTRIBUTE_MACOSX_DEPLOYMENT_TARGET "10.15") + else() + set(CMAKE_XCODE_ATTRIBUTE_MACOSX_DEPLOYMENT_TARGET "${MACOSX_DEPLOYMENT_TARGET}") + endif() +elseif(CMAKE_GENERATOR MATCHES "Xcode") + set(CMAKE_XCODE_ATTRIBUTE_CLANG_CXX_LIBRARY "libc++") + set(CMAKE_XCODE_ATTRIBUTE_IPHONEOS_DEPLOYMENT_TARGET "${DEPLOYMENT_TARGET}") + if(NOT PLATFORM_INT MATCHES ".*COMBINED") + set(CMAKE_XCODE_ATTRIBUTE_ARCHS[sdk=${SDK_NAME}*] "${ARCHS_SPACED}") + set(CMAKE_XCODE_ATTRIBUTE_VALID_ARCHS[sdk=${SDK_NAME}*] "${ARCHS_SPACED}") + endif() +endif() + +# If the user did not specify the SDK root to use, then query xcodebuild for it. +if(DEFINED CMAKE_OSX_SYSROOT_INT) + # Environment variables are always preserved. + set(ENV{_CMAKE_OSX_SYSROOT_INT} "${CMAKE_OSX_SYSROOT_INT}") +elseif(DEFINED ENV{_CMAKE_OSX_SYSROOT_INT}) + set(CMAKE_OSX_SYSROOT_INT "$ENV{_CMAKE_OSX_SYSROOT_INT}") +elseif(NOT DEFINED CMAKE_OSX_SYSROOT_INT) + execute_process(COMMAND ${XCODEBUILD_EXECUTABLE} -version -sdk ${SDK_NAME} Path + OUTPUT_VARIABLE CMAKE_OSX_SYSROOT_INT + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE) +endif() + +if (NOT DEFINED CMAKE_OSX_SYSROOT_INT AND NOT DEFINED CMAKE_OSX_SYSROOT) + message(SEND_ERROR "Please make sure that Xcode is installed and that the toolchain" + "is pointing to the correct path. Please run:" + "sudo xcode-select -s /Applications/Xcode.app/Contents/Developer" + "and see if that fixes the problem for you.") + message(FATAL_ERROR "Invalid CMAKE_OSX_SYSROOT: ${CMAKE_OSX_SYSROOT} " + "does not exist.") +elseif(DEFINED CMAKE_OSX_SYSROOT_INT) + set(CMAKE_OSX_SYSROOT_INT "${CMAKE_OSX_SYSROOT_INT}" CACHE INTERNAL "") + # Specify the location or name of the platform SDK to be used in CMAKE_OSX_SYSROOT. + set(CMAKE_OSX_SYSROOT "${CMAKE_OSX_SYSROOT_INT}" CACHE INTERNAL "") +endif() + +# Use bitcode or not +if(NOT DEFINED ENABLE_BITCODE) + message(STATUS "[DEFAULTS] Disabling bitcode support by default. 
ENABLE_BITCODE not provided for override!") + set(ENABLE_BITCODE OFF) +endif() +set(ENABLE_BITCODE_INT ${ENABLE_BITCODE} CACHE BOOL + "Whether or not to enable bitcode" FORCE) +# Use ARC or not +if(NOT DEFINED ENABLE_ARC) + # Unless specified, enable ARC support by default + set(ENABLE_ARC ON) + message(STATUS "[DEFAULTS] Enabling ARC support by default. ENABLE_ARC not provided!") +endif() +set(ENABLE_ARC_INT ${ENABLE_ARC} CACHE BOOL "Whether or not to enable ARC" FORCE) +# Use hidden visibility or not +if(NOT DEFINED ENABLE_VISIBILITY) + # Unless specified, disable symbols visibility by default + set(ENABLE_VISIBILITY OFF) + message(STATUS "[DEFAULTS] Hiding symbols visibility by default. ENABLE_VISIBILITY not provided!") +endif() +set(ENABLE_VISIBILITY_INT ${ENABLE_VISIBILITY} CACHE BOOL "Whether or not to hide symbols from the dynamic linker (-fvisibility=hidden)" FORCE) +# Set strict compiler checks or not +if(NOT DEFINED ENABLE_STRICT_TRY_COMPILE) + # Unless specified, disable strict try_compile() + set(ENABLE_STRICT_TRY_COMPILE OFF) + message(STATUS "[DEFAULTS] Using NON-strict compiler checks by default. ENABLE_STRICT_TRY_COMPILE not provided!") +endif() +set(ENABLE_STRICT_TRY_COMPILE_INT ${ENABLE_STRICT_TRY_COMPILE} CACHE BOOL + "Whether or not to use strict compiler checks" FORCE) + +# Get the SDK version information. +if(DEFINED SDK_VERSION) + # Environment variables are always preserved. + set(ENV{_SDK_VERSION} "${SDK_VERSION}") +elseif(DEFINED ENV{_SDK_VERSION}) + set(SDK_VERSION "$ENV{_SDK_VERSION}") +elseif(NOT DEFINED SDK_VERSION) + execute_process(COMMAND ${XCODEBUILD_EXECUTABLE} -sdk ${CMAKE_OSX_SYSROOT_INT} -version SDKVersion + OUTPUT_VARIABLE SDK_VERSION + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE) +endif() + +# Find the Developer root for the specific iOS platform being compiled for +# from CMAKE_OSX_SYSROOT. Should be ../../ from SDK specified in +# CMAKE_OSX_SYSROOT. There does not appear to be a direct way to obtain +# this information from xcrun or xcodebuild. +if (NOT DEFINED CMAKE_DEVELOPER_ROOT AND NOT CMAKE_GENERATOR MATCHES "Xcode") + get_filename_component(PLATFORM_SDK_DIR ${CMAKE_OSX_SYSROOT_INT} DIRECTORY) + get_filename_component(CMAKE_DEVELOPER_ROOT ${PLATFORM_SDK_DIR} DIRECTORY) + if (NOT EXISTS "${CMAKE_DEVELOPER_ROOT}") + message(FATAL_ERROR "Invalid CMAKE_DEVELOPER_ROOT: ${CMAKE_DEVELOPER_ROOT} does not exist.") + endif() +endif() + +# Find the C & C++ compilers for the specified SDK. +if(DEFINED CMAKE_C_COMPILER) + # Environment variables are always preserved. + set(ENV{_CMAKE_C_COMPILER} "${CMAKE_C_COMPILER}") +elseif(DEFINED ENV{_CMAKE_C_COMPILER}) + set(CMAKE_C_COMPILER "$ENV{_CMAKE_C_COMPILER}") + set(CMAKE_ASM_COMPILER ${CMAKE_C_COMPILER}) +elseif(NOT DEFINED CMAKE_C_COMPILER) + execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT_INT} -find clang + OUTPUT_VARIABLE CMAKE_C_COMPILER + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE) + set(CMAKE_ASM_COMPILER ${CMAKE_C_COMPILER}) +endif() +if(DEFINED CMAKE_CXX_COMPILER) + # Environment variables are always preserved. + set(ENV{_CMAKE_CXX_COMPILER} "${CMAKE_CXX_COMPILER}") +elseif(DEFINED ENV{_CMAKE_CXX_COMPILER}) + set(CMAKE_CXX_COMPILER "$ENV{_CMAKE_CXX_COMPILER}") +elseif(NOT DEFINED CMAKE_CXX_COMPILER) + execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT_INT} -find clang++ + OUTPUT_VARIABLE CMAKE_CXX_COMPILER + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE) +endif() +# Find (Apple's) libtool. +if(DEFINED BUILD_LIBTOOL) + # Environment variables are always preserved. 
+ set(ENV{_BUILD_LIBTOOL} "${BUILD_LIBTOOL}") +elseif(DEFINED ENV{_BUILD_LIBTOOL}) + set(BUILD_LIBTOOL "$ENV{_BUILD_LIBTOOL}") +elseif(NOT DEFINED BUILD_LIBTOOL) + execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT_INT} -find libtool + OUTPUT_VARIABLE BUILD_LIBTOOL + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE) +endif() +# Find the toolchain's provided install_name_tool if none is found on the host +if(DEFINED CMAKE_INSTALL_NAME_TOOL) + # Environment variables are always preserved. + set(ENV{_CMAKE_INSTALL_NAME_TOOL} "${CMAKE_INSTALL_NAME_TOOL}") +elseif(DEFINED ENV{_CMAKE_INSTALL_NAME_TOOL}) + set(CMAKE_INSTALL_NAME_TOOL "$ENV{_CMAKE_INSTALL_NAME_TOOL}") +elseif(NOT DEFINED CMAKE_INSTALL_NAME_TOOL) + execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT_INT} -find install_name_tool + OUTPUT_VARIABLE CMAKE_INSTALL_NAME_TOOL_INT + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE) + set(CMAKE_INSTALL_NAME_TOOL ${CMAKE_INSTALL_NAME_TOOL_INT} CACHE INTERNAL "") +endif() + +# Configure libtool to be used instead of ar + ranlib to build static libraries. +# This is required on Xcode 7+, but should also work on previous versions of +# Xcode. +get_property(languages GLOBAL PROPERTY ENABLED_LANGUAGES) +foreach(lang ${languages}) + set(CMAKE_${lang}_CREATE_STATIC_LIBRARY "${BUILD_LIBTOOL} -static -o " CACHE INTERNAL "") +endforeach() + +# CMake 3.14+ support building for iOS, watchOS, and tvOS out of the box. +if(MODERN_CMAKE) + if(SDK_NAME MATCHES "iphone") + set(CMAKE_SYSTEM_NAME iOS) + elseif(SDK_NAME MATCHES "xros") + set(CMAKE_SYSTEM_NAME visionOS) + elseif(SDK_NAME MATCHES "xrsimulator") + set(CMAKE_SYSTEM_NAME visionOS) + elseif(SDK_NAME MATCHES "macosx") + set(CMAKE_SYSTEM_NAME Darwin) + elseif(SDK_NAME MATCHES "appletv") + set(CMAKE_SYSTEM_NAME tvOS) + elseif(SDK_NAME MATCHES "watch") + set(CMAKE_SYSTEM_NAME watchOS) + endif() + # Provide flags for a combined FAT library build on newer CMake versions + if(PLATFORM_INT MATCHES ".*COMBINED") + set(CMAKE_IOS_INSTALL_COMBINED YES) + if(CMAKE_GENERATOR MATCHES "Xcode") + # Set the SDKROOT Xcode properties to a Xcode-friendly value (the SDK_NAME, E.g, iphoneos) + # This way, Xcode will automatically switch between the simulator and device SDK when building. + set(CMAKE_XCODE_ATTRIBUTE_SDKROOT "${SDK_NAME}") + # Force to not build just one ARCH, but all! + set(CMAKE_XCODE_ATTRIBUTE_ONLY_ACTIVE_ARCH "NO") + endif() + endif() +elseif(NOT DEFINED CMAKE_SYSTEM_NAME AND ${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.10") + # Legacy code path prior to CMake 3.14 or fallback if no CMAKE_SYSTEM_NAME specified + set(CMAKE_SYSTEM_NAME iOS) +elseif(NOT DEFINED CMAKE_SYSTEM_NAME) + # Legacy code path before CMake 3.14 or fallback if no CMAKE_SYSTEM_NAME specified + set(CMAKE_SYSTEM_NAME Darwin) +endif() +# Standard settings. +set(CMAKE_SYSTEM_VERSION ${SDK_VERSION} CACHE INTERNAL "") +set(UNIX ON CACHE BOOL "") +set(APPLE ON CACHE BOOL "") +if(PLATFORM STREQUAL "MAC" OR PLATFORM STREQUAL "MAC_ARM64" OR PLATFORM STREQUAL "MAC_UNIVERSAL") + set(IOS OFF CACHE BOOL "") + set(MACOS ON CACHE BOOL "") +elseif(PLATFORM STREQUAL "MAC_CATALYST" OR PLATFORM STREQUAL "MAC_CATALYST_ARM64") + set(IOS ON CACHE BOOL "") + set(MACOS ON CACHE BOOL "") +elseif(PLATFORM STREQUAL "VISIONOS" OR PLATFORM STREQUAL "SIMULATOR_VISIONOS" OR PLATFORM STREQUAL "VISIONOSCOMBINED") + set(IOS OFF CACHE BOOL "") + set(VISIONOS ON CACHE BOOL "") +else() + set(IOS ON CACHE BOOL "") +endif() +# Set the architectures for which to build. 
+set(CMAKE_OSX_ARCHITECTURES ${ARCHS} CACHE INTERNAL "") +# Change the type of target generated for try_compile() so it'll work when cross-compiling, weak compiler checks +if(NOT ENABLE_STRICT_TRY_COMPILE_INT) + set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) +endif() +# All iOS/Darwin specific settings - some may be redundant. +if (NOT DEFINED CMAKE_MACOSX_BUNDLE) + set(CMAKE_MACOSX_BUNDLE YES) +endif() +set(CMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED "NO") +set(CMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_ALLOWED "NO") +set(CMAKE_SHARED_LIBRARY_PREFIX "lib") +set(CMAKE_SHARED_LIBRARY_SUFFIX ".dylib") +set(CMAKE_EXTRA_SHARED_LIBRARY_SUFFIXES ".tbd" ".so") +set(CMAKE_SHARED_MODULE_PREFIX "lib") +set(CMAKE_SHARED_MODULE_SUFFIX ".so") +set(CMAKE_C_COMPILER_ABI ELF) +set(CMAKE_CXX_COMPILER_ABI ELF) +set(CMAKE_C_HAS_ISYSROOT 1) +set(CMAKE_CXX_HAS_ISYSROOT 1) +set(CMAKE_MODULE_EXISTS 1) +set(CMAKE_DL_LIBS "") +set(CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ") +set(CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ") +set(CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}") +set(CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}") + +if(ARCHS MATCHES "((^|;|, )(arm64|arm64e|x86_64))+") + set(CMAKE_C_SIZEOF_DATA_PTR 8) + set(CMAKE_CXX_SIZEOF_DATA_PTR 8) + if(ARCHS MATCHES "((^|;|, )(arm64|arm64e))+") + set(CMAKE_SYSTEM_PROCESSOR "aarch64") + else() + set(CMAKE_SYSTEM_PROCESSOR "x86_64") + endif() +else() + set(CMAKE_C_SIZEOF_DATA_PTR 4) + set(CMAKE_CXX_SIZEOF_DATA_PTR 4) + set(CMAKE_SYSTEM_PROCESSOR "arm") +endif() + +# Note that only Xcode 7+ supports the newer more specific: +# -m${SDK_NAME}-version-min flags, older versions of Xcode use: +# -m(ios/ios-simulator)-version-min instead. +if(${CMAKE_VERSION} VERSION_LESS "3.11") + if(PLATFORM_INT STREQUAL "OS" OR PLATFORM_INT STREQUAL "OS64") + if(XCODE_VERSION_INT VERSION_LESS 7.0) + set(SDK_NAME_VERSION_FLAGS + "-mios-version-min=${DEPLOYMENT_TARGET}") + else() + # Xcode 7.0+ uses flags we can build directly from SDK_NAME. + set(SDK_NAME_VERSION_FLAGS + "-m${SDK_NAME}-version-min=${DEPLOYMENT_TARGET}") + endif() + elseif(PLATFORM_INT STREQUAL "TVOS") + set(SDK_NAME_VERSION_FLAGS + "-mtvos-version-min=${DEPLOYMENT_TARGET}") + elseif(PLATFORM_INT STREQUAL "SIMULATOR_TVOS") + set(SDK_NAME_VERSION_FLAGS + "-mtvos-simulator-version-min=${DEPLOYMENT_TARGET}") +elseif(PLATFORM_INT STREQUAL "SIMULATORARM64_TVOS") + set(SDK_NAME_VERSION_FLAGS + "-mtvos-simulator-version-min=${DEPLOYMENT_TARGET}") + elseif(PLATFORM_INT STREQUAL "WATCHOS") + set(SDK_NAME_VERSION_FLAGS + "-mwatchos-version-min=${DEPLOYMENT_TARGET}") + elseif(PLATFORM_INT STREQUAL "SIMULATOR_WATCHOS") + set(SDK_NAME_VERSION_FLAGS + "-mwatchos-simulator-version-min=${DEPLOYMENT_TARGET}") + elseif(PLATFORM_INT STREQUAL "SIMULATORARM64_WATCHOS") + set(SDK_NAME_VERSION_FLAGS + "-mwatchos-simulator-version-min=${DEPLOYMENT_TARGET}") + elseif(PLATFORM_INT STREQUAL "MAC") + set(SDK_NAME_VERSION_FLAGS + "-mmacosx-version-min=${DEPLOYMENT_TARGET}") + else() + # SIMULATOR or SIMULATOR64 both use -mios-simulator-version-min. 
+ set(SDK_NAME_VERSION_FLAGS + "-mios-simulator-version-min=${DEPLOYMENT_TARGET}") + endif() +elseif(NOT PLATFORM_INT MATCHES "^MAC_CATALYST") + # Newer versions of CMake sets the version min flags correctly, skip this for Mac Catalyst targets + set(CMAKE_OSX_DEPLOYMENT_TARGET ${DEPLOYMENT_TARGET} CACHE INTERNAL "Minimum OS X deployment version") +endif() + +if(DEFINED APPLE_TARGET_TRIPLE_INT) + set(APPLE_TARGET_TRIPLE ${APPLE_TARGET_TRIPLE_INT} CACHE INTERNAL "") + set(CMAKE_C_COMPILER_TARGET ${APPLE_TARGET_TRIPLE}) + set(CMAKE_CXX_COMPILER_TARGET ${APPLE_TARGET_TRIPLE}) + set(CMAKE_ASM_COMPILER_TARGET ${APPLE_TARGET_TRIPLE}) +endif() + +if(PLATFORM_INT MATCHES "^MAC_CATALYST") + set(C_TARGET_FLAGS "-isystem ${CMAKE_OSX_SYSROOT_INT}/System/iOSSupport/usr/include -iframework ${CMAKE_OSX_SYSROOT_INT}/System/iOSSupport/System/Library/Frameworks") +endif() + +if(ENABLE_BITCODE_INT) + set(BITCODE "-fembed-bitcode") + set(CMAKE_XCODE_ATTRIBUTE_BITCODE_GENERATION_MODE "bitcode") + set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE "YES") +else() + set(BITCODE "") + set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE "NO") +endif() + +if(ENABLE_ARC_INT) + set(FOBJC_ARC "-fobjc-arc") + set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC "YES") +else() + set(FOBJC_ARC "-fno-objc-arc") + set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC "NO") +endif() + +if(NAMED_LANGUAGE_SUPPORT_INT) + set(OBJC_VARS "-fobjc-abi-version=2 -DOBJC_OLD_DISPATCH_PROTOTYPES=0") + set(OBJC_LEGACY_VARS "") +else() + set(OBJC_VARS "") + set(OBJC_LEGACY_VARS "-fobjc-abi-version=2 -DOBJC_OLD_DISPATCH_PROTOTYPES=0") +endif() + +if(NOT ENABLE_VISIBILITY_INT) + foreach(lang ${languages}) + set(CMAKE_${lang}_VISIBILITY_PRESET "hidden" CACHE INTERNAL "") + endforeach() + set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN "YES") + set(VISIBILITY "-fvisibility=hidden -fvisibility-inlines-hidden") +else() + foreach(lang ${languages}) + set(CMAKE_${lang}_VISIBILITY_PRESET "default" CACHE INTERNAL "") + endforeach() + set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN "NO") + set(VISIBILITY "-fvisibility=default") +endif() + +if(DEFINED APPLE_TARGET_TRIPLE) + set(APPLE_TARGET_TRIPLE_FLAG "-target ${APPLE_TARGET_TRIPLE}") +endif() + +#Check if Xcode generator is used since that will handle these flags automagically +if(CMAKE_GENERATOR MATCHES "Xcode") + message(STATUS "Not setting any manual command-line buildflags, since Xcode is selected as the generator. 
Modifying the Xcode build-settings directly instead.") +else() + set(CMAKE_C_FLAGS "${C_TARGET_FLAGS} ${APPLE_TARGET_TRIPLE_FLAG} ${SDK_NAME_VERSION_FLAGS} ${OBJC_LEGACY_VARS} ${BITCODE} ${VISIBILITY} ${CMAKE_C_FLAGS}" CACHE INTERNAL + "Flags used by the compiler during all C build types.") + set(CMAKE_C_FLAGS_DEBUG "-O0 -g ${CMAKE_C_FLAGS_DEBUG}") + set(CMAKE_C_FLAGS_MINSIZEREL "-DNDEBUG -Os ${CMAKE_C_FLAGS_MINSIZEREL}") + set(CMAKE_C_FLAGS_RELWITHDEBINFO "-DNDEBUG -O2 -g ${CMAKE_C_FLAGS_RELWITHDEBINFO}") + set(CMAKE_C_FLAGS_RELEASE "-DNDEBUG -O3 ${CMAKE_C_FLAGS_RELEASE}") + set(CMAKE_CXX_FLAGS "${C_TARGET_FLAGS} ${APPLE_TARGET_TRIPLE_FLAG} ${SDK_NAME_VERSION_FLAGS} ${OBJC_LEGACY_VARS} ${BITCODE} ${VISIBILITY} ${CMAKE_CXX_FLAGS}" CACHE INTERNAL + "Flags used by the compiler during all CXX build types.") + set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g ${CMAKE_CXX_FLAGS_DEBUG}") + set(CMAKE_CXX_FLAGS_MINSIZEREL "-DNDEBUG -Os ${CMAKE_CXX_FLAGS_MINSIZEREL}") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-DNDEBUG -O2 -g ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") + set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG -O3 ${CMAKE_CXX_FLAGS_RELEASE}") + if(NAMED_LANGUAGE_SUPPORT_INT) + set(CMAKE_OBJC_FLAGS "${C_TARGET_FLAGS} ${APPLE_TARGET_TRIPLE_FLAG} ${SDK_NAME_VERSION_FLAGS} ${BITCODE} ${VISIBILITY} ${FOBJC_ARC} ${OBJC_VARS} ${CMAKE_OBJC_FLAGS}" CACHE INTERNAL + "Flags used by the compiler during all OBJC build types.") + set(CMAKE_OBJC_FLAGS_DEBUG "-O0 -g ${CMAKE_OBJC_FLAGS_DEBUG}") + set(CMAKE_OBJC_FLAGS_MINSIZEREL "-DNDEBUG -Os ${CMAKE_OBJC_FLAGS_MINSIZEREL}") + set(CMAKE_OBJC_FLAGS_RELWITHDEBINFO "-DNDEBUG -O2 -g ${CMAKE_OBJC_FLAGS_RELWITHDEBINFO}") + set(CMAKE_OBJC_FLAGS_RELEASE "-DNDEBUG -O3 ${CMAKE_OBJC_FLAGS_RELEASE}") + set(CMAKE_OBJCXX_FLAGS "${C_TARGET_FLAGS} ${APPLE_TARGET_TRIPLE_FLAG} ${SDK_NAME_VERSION_FLAGS} ${BITCODE} ${VISIBILITY} ${FOBJC_ARC} ${OBJC_VARS} ${CMAKE_OBJCXX_FLAGS}" CACHE INTERNAL + "Flags used by the compiler during all OBJCXX build types.") + set(CMAKE_OBJCXX_FLAGS_DEBUG "-O0 -g ${CMAKE_OBJCXX_FLAGS_DEBUG}") + set(CMAKE_OBJCXX_FLAGS_MINSIZEREL "-DNDEBUG -Os ${CMAKE_OBJCXX_FLAGS_MINSIZEREL}") + set(CMAKE_OBJCXX_FLAGS_RELWITHDEBINFO "-DNDEBUG -O2 -g ${CMAKE_OBJCXX_FLAGS_RELWITHDEBINFO}") + set(CMAKE_OBJCXX_FLAGS_RELEASE "-DNDEBUG -O3 ${CMAKE_OBJCXX_FLAGS_RELEASE}") + endif() + set(CMAKE_C_LINK_FLAGS "${C_TARGET_FLAGS} ${SDK_NAME_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_C_LINK_FLAGS}" CACHE INTERNAL + "Flags used by the compiler for all C link types.") + set(CMAKE_CXX_LINK_FLAGS "${C_TARGET_FLAGS} ${SDK_NAME_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_CXX_LINK_FLAGS}" CACHE INTERNAL + "Flags used by the compiler for all CXX link types.") + if(NAMED_LANGUAGE_SUPPORT_INT) + set(CMAKE_OBJC_LINK_FLAGS "${C_TARGET_FLAGS} ${SDK_NAME_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_OBJC_LINK_FLAGS}" CACHE INTERNAL + "Flags used by the compiler for all OBJC link types.") + set(CMAKE_OBJCXX_LINK_FLAGS "${C_TARGET_FLAGS} ${SDK_NAME_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_OBJCXX_LINK_FLAGS}" CACHE INTERNAL + "Flags used by the compiler for all OBJCXX link types.") + endif() + set(CMAKE_ASM_FLAGS "${CMAKE_C_FLAGS} -x assembler-with-cpp -arch ${CMAKE_OSX_ARCHITECTURES} ${APPLE_TARGET_TRIPLE_FLAG}" CACHE INTERNAL + "Flags used by the compiler for all ASM build types.") +endif() + +## Print status messages to inform of the current state +message(STATUS "Configuring ${SDK_NAME} build for platform: ${PLATFORM_INT}, architecture(s): ${ARCHS}") +message(STATUS "Using SDK: ${CMAKE_OSX_SYSROOT_INT}") 
+message(STATUS "Using C compiler: ${CMAKE_C_COMPILER}") +message(STATUS "Using CXX compiler: ${CMAKE_CXX_COMPILER}") +message(STATUS "Using libtool: ${BUILD_LIBTOOL}") +message(STATUS "Using install name tool: ${CMAKE_INSTALL_NAME_TOOL}") +if(DEFINED APPLE_TARGET_TRIPLE) + message(STATUS "Autoconf target triple: ${APPLE_TARGET_TRIPLE}") +endif() +message(STATUS "Using minimum deployment version: ${DEPLOYMENT_TARGET}" + " (SDK version: ${SDK_VERSION})") +if(MODERN_CMAKE) + message(STATUS "Merging integrated CMake 3.14+ iOS,tvOS,watchOS,macOS toolchain(s) with this toolchain!") + if(PLATFORM_INT MATCHES ".*COMBINED") + message(STATUS "Will combine built (static) artifacts into FAT lib...") + endif() +endif() +if(CMAKE_GENERATOR MATCHES "Xcode") + message(STATUS "Using Xcode version: ${XCODE_VERSION_INT}") +endif() +message(STATUS "CMake version: ${CMAKE_VERSION}") +if(DEFINED SDK_NAME_VERSION_FLAGS) + message(STATUS "Using version flags: ${SDK_NAME_VERSION_FLAGS}") +endif() +message(STATUS "Using a data_ptr size of: ${CMAKE_CXX_SIZEOF_DATA_PTR}") +if(ENABLE_BITCODE_INT) + message(STATUS "Bitcode: Enabled") +else() + message(STATUS "Bitcode: Disabled") +endif() + +if(ENABLE_ARC_INT) + message(STATUS "ARC: Enabled") +else() + message(STATUS "ARC: Disabled") +endif() + +if(ENABLE_VISIBILITY_INT) + message(STATUS "Hiding symbols: Disabled") +else() + message(STATUS "Hiding symbols: Enabled") +endif() + +# Set global properties +set_property(GLOBAL PROPERTY PLATFORM "${PLATFORM}") +set_property(GLOBAL PROPERTY APPLE_TARGET_TRIPLE "${APPLE_TARGET_TRIPLE_INT}") +set_property(GLOBAL PROPERTY SDK_VERSION "${SDK_VERSION}") +set_property(GLOBAL PROPERTY XCODE_VERSION "${XCODE_VERSION_INT}") +set_property(GLOBAL PROPERTY OSX_ARCHITECTURES "${CMAKE_OSX_ARCHITECTURES}") + +# Export configurable variables for the try_compile() command. +set(CMAKE_TRY_COMPILE_PLATFORM_VARIABLES + PLATFORM + XCODE_VERSION_INT + SDK_VERSION + NAMED_LANGUAGE_SUPPORT + DEPLOYMENT_TARGET + CMAKE_DEVELOPER_ROOT + CMAKE_OSX_SYSROOT_INT + ENABLE_BITCODE + ENABLE_ARC + CMAKE_ASM_COMPILER + CMAKE_C_COMPILER + CMAKE_C_COMPILER_TARGET + CMAKE_CXX_COMPILER + CMAKE_CXX_COMPILER_TARGET + BUILD_LIBTOOL + CMAKE_INSTALL_NAME_TOOL + CMAKE_C_FLAGS + CMAKE_C_DEBUG + CMAKE_C_MINSIZEREL + CMAKE_C_RELWITHDEBINFO + CMAKE_C_RELEASE + CMAKE_CXX_FLAGS + CMAKE_CXX_FLAGS_DEBUG + CMAKE_CXX_FLAGS_MINSIZEREL + CMAKE_CXX_FLAGS_RELWITHDEBINFO + CMAKE_CXX_FLAGS_RELEASE + CMAKE_C_LINK_FLAGS + CMAKE_CXX_LINK_FLAGS + CMAKE_ASM_FLAGS +) + +if(NAMED_LANGUAGE_SUPPORT_INT) + list(APPEND CMAKE_TRY_COMPILE_PLATFORM_VARIABLES + CMAKE_OBJC_FLAGS + CMAKE_OBJC_DEBUG + CMAKE_OBJC_MINSIZEREL + CMAKE_OBJC_RELWITHDEBINFO + CMAKE_OBJC_RELEASE + CMAKE_OBJCXX_FLAGS + CMAKE_OBJCXX_DEBUG + CMAKE_OBJCXX_MINSIZEREL + CMAKE_OBJCXX_RELWITHDEBINFO + CMAKE_OBJCXX_RELEASE + CMAKE_OBJC_LINK_FLAGS + CMAKE_OBJCXX_LINK_FLAGS + ) +endif() + +set(CMAKE_PLATFORM_HAS_INSTALLNAME 1) +set(CMAKE_SHARED_LINKER_FLAGS "-rpath @executable_path/Frameworks -rpath @loader_path/Frameworks") +set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib -Wl,-headerpad_max_install_names") +set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle -Wl,-headerpad_max_install_names") +set(CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,") +set(CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,") +set(CMAKE_FIND_LIBRARY_SUFFIXES ".tbd" ".dylib" ".so" ".a") +set(CMAKE_SHARED_LIBRARY_SONAME_C_FLAG "-install_name") + +# Set the find root to the SDK developer roots. 
+# Note: CMAKE_FIND_ROOT_PATH is only useful when cross-compiling. Thus, do not set on macOS builds. +if(NOT PLATFORM_INT MATCHES "^MAC.*$") + list(APPEND CMAKE_FIND_ROOT_PATH "${CMAKE_OSX_SYSROOT_INT}" CACHE INTERNAL "") + set(CMAKE_IGNORE_PATH "/System/Library/Frameworks;/usr/local/lib;/opt/homebrew" CACHE INTERNAL "") +endif() + +# Default to searching for frameworks first. +IF(NOT DEFINED CMAKE_FIND_FRAMEWORK) + set(CMAKE_FIND_FRAMEWORK FIRST) +ENDIF(NOT DEFINED CMAKE_FIND_FRAMEWORK) + +# Set up the default search directories for frameworks. +if(PLATFORM_INT MATCHES "^MAC_CATALYST") + set(CMAKE_FRAMEWORK_PATH + ${CMAKE_DEVELOPER_ROOT}/Library/PrivateFrameworks + ${CMAKE_OSX_SYSROOT_INT}/System/Library/Frameworks + ${CMAKE_OSX_SYSROOT_INT}/System/iOSSupport/System/Library/Frameworks + ${CMAKE_FRAMEWORK_PATH} CACHE INTERNAL "") +else() + set(CMAKE_FRAMEWORK_PATH + ${CMAKE_DEVELOPER_ROOT}/Library/PrivateFrameworks + ${CMAKE_OSX_SYSROOT_INT}/System/Library/Frameworks + ${CMAKE_FRAMEWORK_PATH} CACHE INTERNAL "") +endif() + +# By default, search both the specified iOS SDK and the remainder of the host filesystem. +if(NOT CMAKE_FIND_ROOT_PATH_MODE_PROGRAM) + set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM BOTH CACHE INTERNAL "") +endif() +if(NOT CMAKE_FIND_ROOT_PATH_MODE_LIBRARY) + set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH CACHE INTERNAL "") +endif() +if(NOT CMAKE_FIND_ROOT_PATH_MODE_INCLUDE) + set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH CACHE INTERNAL "") +endif() +if(NOT CMAKE_FIND_ROOT_PATH_MODE_PACKAGE) + set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE BOTH CACHE INTERNAL "") +endif() + +# +# Some helper-macros below to simplify and beautify the CMakeFile +# + +# This little macro lets you set any Xcode specific property. +macro(set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE XCODE_RELVERSION) + set(XCODE_RELVERSION_I "${XCODE_RELVERSION}") + if(XCODE_RELVERSION_I STREQUAL "All") + set_property(TARGET ${TARGET} PROPERTY XCODE_ATTRIBUTE_${XCODE_PROPERTY} "${XCODE_VALUE}") + else() + set_property(TARGET ${TARGET} PROPERTY XCODE_ATTRIBUTE_${XCODE_PROPERTY}[variant=${XCODE_RELVERSION_I}] "${XCODE_VALUE}") + endif() +endmacro(set_xcode_property) + +# This macro lets you find executable programs on the host system. 
+macro(find_host_package) + set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) + set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER) + set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER) + set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE NEVER) + set(_TOOLCHAIN_IOS ${IOS}) + set(IOS OFF) + find_package(${ARGN}) + set(IOS ${_TOOLCHAIN_IOS}) + set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM BOTH) + set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH) + set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH) + set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE BOTH) +endmacro(find_host_package) \ No newline at end of file diff --git a/barretenberg/cpp/src/CMakeLists.txt b/barretenberg/cpp/src/CMakeLists.txt index 82dde61ac8f7..6f124c5e60b8 100644 --- a/barretenberg/cpp/src/CMakeLists.txt +++ b/barretenberg/cpp/src/CMakeLists.txt @@ -36,6 +36,11 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") # CLI11.hpp add_compile_options(-Wno-deprecated-declarations) endif() + if(MOBILE) + # iOS builds use Apple Clang which has stricter defaults but doesn't support -Wno-vla-cxx-extension + add_compile_options(-Wno-missing-field-initializers) + add_compile_options(-Wno-deprecated-declarations) + endif() endif() if(CMAKE_CXX_COMPILER_ID MATCHES "GNU") @@ -114,10 +119,14 @@ add_subdirectory(barretenberg/translator_vm) add_subdirectory(barretenberg/ultra_honk) add_subdirectory(barretenberg/vm2_stub) add_subdirectory(barretenberg/wasi) -add_subdirectory(barretenberg/lmdblib) -if(NOT FUZZING AND NOT WASM) +if(NOT MOBILE) + add_subdirectory(barretenberg/lmdblib) +endif() + +if(NOT FUZZING AND NOT WASM AND NOT MOBILE) # Fuzzing preset cannot be built with world_state as world_state cannot compile with MULTITHREADING=OFF + # Mobile builds exclude these modules that require LMDB or aren't needed on mobile add_subdirectory(barretenberg/world_state) # NOTE: Do not conditionally base this on the AVM flag as it defines a necessary vm2_sim library. 
add_subdirectory(barretenberg/vm2) @@ -196,8 +205,8 @@ set(BARRETENBERG_TARGET_OBJECTS $ $) -if(NOT WASM AND NOT FUZZING) - # enable merkle trees and lmdb +if(NOT WASM AND NOT FUZZING AND NOT MOBILE) + # enable merkle trees and lmdb (not for mobile builds) list(APPEND BARRETENBERG_TARGET_OBJECTS $) list(APPEND BARRETENBERG_TARGET_OBJECTS $) list(APPEND BARRETENBERG_TARGET_OBJECTS $) diff --git a/barretenberg/cpp/src/barretenberg/acir_formal_proofs/acir_loader.cpp b/barretenberg/cpp/src/barretenberg/acir_formal_proofs/acir_loader.cpp index 7663a9d068d3..f8247e26956a 100644 --- a/barretenberg/cpp/src/barretenberg/acir_formal_proofs/acir_loader.cpp +++ b/barretenberg/cpp/src/barretenberg/acir_formal_proofs/acir_loader.cpp @@ -35,7 +35,7 @@ AcirToSmtLoader::AcirToSmtLoader(std::string filename) bb::UltraCircuitBuilder AcirToSmtLoader::get_circuit_builder() { - acir_format::AcirProgram program{ .constraints = this->constraint_system }; + acir_format::AcirProgram program{ .constraints = this->constraint_system, .witness = {} }; auto builder = acir_format::create_circuit(program); builder.set_variable_name(0, "a"); builder.set_variable_name(1, "b"); diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_chonk.cpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_chonk.cpp index a34cd9f436e1..1af3d5bc6c82 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_chonk.cpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_chonk.cpp @@ -214,7 +214,7 @@ ChonkStats::Response ChonkStats::execute([[maybe_unused]] BBApiRequest& request) Response response; const auto constraint_system = acir_format::circuit_buf_to_acir_format(std::move(circuit.bytecode)); - acir_format::AcirProgram program{ constraint_system }; + acir_format::AcirProgram program{ constraint_system, {} }; // Get IVC constraints if any const auto& ivc_constraints = constraint_system.hn_recursion_constraints; diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.cpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.cpp index aa649f50980d..2df128742fa5 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.cpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ultra_honk.cpp @@ -21,7 +21,7 @@ template && bytecode, std::vector&& witness) { const acir_format::ProgramMetadata metadata = _create_program_metadata(); - acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format(std::move(bytecode)) }; + acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format(std::move(bytecode)), {} }; if (!witness.empty()) { program.witness = acir_format::witness_buf_to_witness_vector(std::move(witness)); @@ -172,7 +172,7 @@ CircuitStats::Response _stats(std::vector&& bytecode, bool include_gate CircuitStats::Response response; response.num_acir_opcodes = static_cast(constraint_system.num_acir_opcodes); - acir_format::AcirProgram program{ std::move(constraint_system) }; + acir_format::AcirProgram program{ std::move(constraint_system), {} }; auto builder = acir_format::create_circuit(program, metadata); builder.finalize_circuit(/*ensure_nonzero=*/true); diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk.hpp b/barretenberg/cpp/src/barretenberg/chonk/chonk.hpp index 9b473c796f10..994fbd8fbc7f 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk.hpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk.hpp @@ -119,6 +119,17 @@ class Chonk : public IVCBase { std::shared_ptr honk_vk_and_hash; QUEUE_TYPE type; bool is_kernel = false; + + // Explicit constructor needed for older 
libc++ (iOS SDK) compatibility with std::deque::emplace_back + StdlibVerifierInputs(StdlibProof proof_, + std::shared_ptr honk_vk_and_hash_, + QUEUE_TYPE type_, + bool is_kernel_) + : proof(std::move(proof_)) + , honk_vk_and_hash(std::move(honk_vk_and_hash_)) + , type(type_) + , is_kernel(is_kernel_) + {} }; using StdlibVerificationQueue = std::deque; diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp index 7f27a4338a9e..de215f12155d 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp @@ -409,10 +409,13 @@ template class ShpleminiVerifier_ { scalars.emplace_back(constant_term_accumulator); BatchOpeningClaim batch_opening_claim{ commitments, scalars, shplonk_evaluation_challenge }; - ShpleminiVerifierOutput output{ batch_opening_claim }; - if constexpr (HasZK) { - output.consistency_checked = consistency_checked; - } + ShpleminiVerifierOutput output = [&]() { + if constexpr (HasZK) { + return ShpleminiVerifierOutput{ batch_opening_claim, consistency_checked }; + } else { + return ShpleminiVerifierOutput{ batch_opening_claim }; + } + }(); return output; }; diff --git a/barretenberg/cpp/src/barretenberg/crypto/keccak/keccak.cpp b/barretenberg/cpp/src/barretenberg/crypto/keccak/keccak.cpp index b8448d98076f..9d04016f3449 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/keccak/keccak.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/keccak/keccak.cpp @@ -113,7 +113,7 @@ struct keccak256 ethash_keccak256(const uint8_t* data, size_t size) NOEXCEPT struct keccak256 hash_field_elements(const uint64_t* limbs, size_t num_elements) { - uint8_t input_buffer[num_elements * KECCAK256_OUTPUT_BYTES]; + std::vector input_buffer(num_elements * KECCAK256_OUTPUT_BYTES); for (size_t i = 0; i < num_elements; ++i) { for (size_t j = 0; j < KECCAK256_OUTPUT_WORDS; ++j) { @@ -130,7 +130,7 @@ struct keccak256 hash_field_elements(const uint64_t* limbs, size_t num_elements) } } - return ethash_keccak256(input_buffer, num_elements * KECCAK256_OUTPUT_BYTES); + return ethash_keccak256(input_buffer.data(), num_elements * KECCAK256_OUTPUT_BYTES); } struct keccak256 hash_field_element(const uint64_t* limb) diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp index 1700b509ec0c..76041aa18622 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp @@ -22,8 +22,9 @@ WASM_EXPORT void acir_get_circuit_sizes(uint8_t const* acir_vec, uint32_t* subgroup) { const acir_format::ProgramMetadata metadata{ .has_ipa_claim = *has_ipa_claim }; - acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( - from_buffer>(acir_vec)) }; + acir_format::AcirProgram program{ + acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)), {} + }; auto builder = acir_format::create_circuit(program, metadata); builder.finalize_circuit(/*ensure_nonzero=*/true); *total = htonl((uint32_t)builder.get_finalized_total_circuit_size()); @@ -310,8 +311,9 @@ WASM_EXPORT void acir_write_vk_ultra_honk(uint8_t const* acir_vec, uint8_t** out using VerificationKey = UltraFlavor::VerificationKey; // lambda to free the builder ProverInstance prover_instance = [&] { - acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( - 
from_buffer>(acir_vec)) }; + acir_format::AcirProgram program{ + acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)), {} + }; auto builder = acir_format::create_circuit(program); return ProverInstance(builder); }(); @@ -327,8 +329,9 @@ WASM_EXPORT void acir_write_vk_ultra_keccak_honk(uint8_t const* acir_vec, uint8_ // lambda to free the builder ProverInstance prover_instance = [&] { - acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( - from_buffer>(acir_vec)) }; + acir_format::AcirProgram program{ + acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)), {} + }; auto builder = acir_format::create_circuit(program); return ProverInstance(builder); }(); @@ -344,8 +347,9 @@ WASM_EXPORT void acir_write_vk_ultra_keccak_zk_honk(uint8_t const* acir_vec, uin // lambda to free the builder ProverInstance prover_instance = [&] { - acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( - from_buffer>(acir_vec)) }; + acir_format::AcirProgram program{ + acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)), {} + }; auto builder = acir_format::create_circuit(program); return ProverInstance(builder); }(); @@ -363,8 +367,9 @@ WASM_EXPORT void acir_write_vk_ultra_starknet_honk([[maybe_unused]] uint8_t cons // lambda to free the builder ProverInstance prover_instance = [&] { - acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( - from_buffer>(acir_vec)) }; + acir_format::AcirProgram program{ + acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)), {} + }; auto builder = acir_format::create_circuit(program); return ProverInstance(builder); }(); @@ -385,8 +390,9 @@ WASM_EXPORT void acir_write_vk_ultra_starknet_zk_honk([[maybe_unused]] uint8_t c // lambda to free the builder ProverInstance prover_instance = [&] { - acir_format::AcirProgram program{ acir_format::circuit_buf_to_acir_format( - from_buffer>(acir_vec)) }; + acir_format::AcirProgram program{ + acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)), {} + }; auto builder = acir_format::create_circuit(program); return ProverInstance(builder); }(); @@ -449,7 +455,7 @@ WASM_EXPORT void acir_gates_aztec_client(uint8_t const* ivc_inputs_buf, uint8_t* acir_format::circuit_buf_to_acir_format(std::move(bytecode_vec)); // Create an acir program from the constraint system - acir_format::AcirProgram program{ constraint_system }; + acir_format::AcirProgram program{ constraint_system, {} }; auto builder = acir_format::create_circuit(program); builder.finalize_circuit(/*ensure_nonzero=*/true); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_contract.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_contract.hpp index 95e97f4e5024..b72eabf2d7dd 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_contract.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_contract.hpp @@ -68,7 +68,7 @@ library FrLib { mstore(add(free, 0x20), 0x20) mstore(add(free, 0x40), 0x20) mstore(add(free, 0x60), v) - mstore(add(free, 0x80), sub(MODULUS, 2)) + mstore(add(free, 0x80), sub(MODULUS, 2)) mstore(add(free, 0xa0), MODULUS) let success := staticcall(gas(), 0x05, free, 0xc0, 0x00, 0x20) if iszero(success) { @@ -92,7 +92,7 @@ library FrLib { mstore(add(free, 0x20), 0x20) mstore(add(free, 0x40), 0x20) mstore(add(free, 0x60), b) - mstore(add(free, 0x80), v) + mstore(add(free, 0x80), v) mstore(add(free, 0xa0), MODULUS) let success := staticcall(gas(), 0x05, free, 0xc0, 0x00, 0x20) if iszero(success) { @@ -769,42 
+769,42 @@ library RelationsLib { Fr[NUMBER_OF_SUBRELATIONS] memory evals, Fr domainSep ) internal pure { - Fr write_term; - Fr read_term; + Fr table_term; + Fr lookup_term; // Calculate the write term (the table accumulation) - // write_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ + // table_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ { Fr beta_sqr = rp.beta * rp.beta; - write_term = wire(p, WIRE.TABLE_1) + rp.gamma + (wire(p, WIRE.TABLE_2) * rp.beta) + table_term = wire(p, WIRE.TABLE_1) + rp.gamma + (wire(p, WIRE.TABLE_2) * rp.beta) + (wire(p, WIRE.TABLE_3) * beta_sqr) + (wire(p, WIRE.TABLE_4) * beta_sqr * rp.beta); } // Calculate the read term - // read_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index * β³ + // lookup_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index * β³ { Fr beta_sqr = rp.beta * rp.beta; Fr derived_entry_1 = wire(p, WIRE.W_L) + rp.gamma + (wire(p, WIRE.Q_R) * wire(p, WIRE.W_L_SHIFT)); Fr derived_entry_2 = wire(p, WIRE.W_R) + wire(p, WIRE.Q_M) * wire(p, WIRE.W_R_SHIFT); Fr derived_entry_3 = wire(p, WIRE.W_O) + wire(p, WIRE.Q_C) * wire(p, WIRE.W_O_SHIFT); - read_term = derived_entry_1 + (derived_entry_2 * rp.beta) + (derived_entry_3 * beta_sqr) + lookup_term = derived_entry_1 + (derived_entry_2 * rp.beta) + (derived_entry_3 * beta_sqr) + (wire(p, WIRE.Q_O) * beta_sqr * rp.beta); } - Fr read_inverse = wire(p, WIRE.LOOKUP_INVERSES) * write_term; - Fr write_inverse = wire(p, WIRE.LOOKUP_INVERSES) * read_term; + Fr lookup_inverse = wire(p, WIRE.LOOKUP_INVERSES) * table_term; + Fr table_inverse = wire(p, WIRE.LOOKUP_INVERSES) * lookup_term; Fr inverse_exists_xor = wire(p, WIRE.LOOKUP_READ_TAGS) + wire(p, WIRE.Q_LOOKUP) - (wire(p, WIRE.LOOKUP_READ_TAGS) * wire(p, WIRE.Q_LOOKUP)); // Inverse calculated correctly relation - Fr accumulatorNone = read_term * write_term * wire(p, WIRE.LOOKUP_INVERSES) - inverse_exists_xor; + Fr accumulatorNone = lookup_term * table_term * wire(p, WIRE.LOOKUP_INVERSES) - inverse_exists_xor; accumulatorNone = accumulatorNone * domainSep; // Inverse - Fr accumulatorOne = wire(p, WIRE.Q_LOOKUP) * read_inverse - wire(p, WIRE.LOOKUP_READ_COUNTS) * write_inverse; + Fr accumulatorOne = wire(p, WIRE.Q_LOOKUP) * lookup_inverse - wire(p, WIRE.LOOKUP_READ_COUNTS) * table_inverse; Fr read_tag = wire(p, WIRE.LOOKUP_READ_TAGS); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp index ec12af6caf9d..fa2d06c8f312 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp @@ -1575,7 +1575,7 @@ contract HonkVerifier is IVerifier { let beta_sqr := mulmod(beta, beta, p) let beta_cube := mulmod(beta_sqr, beta, p) - // write_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ + // table_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ let t0 := addmod(addmod(mload(TABLE1_EVAL_LOC), gamma, p), mulmod(mload(TABLE2_EVAL_LOC), beta, p), p) let t1 := @@ -1584,9 +1584,9 @@ contract HonkVerifier is IVerifier { mulmod(mload(TABLE4_EVAL_LOC), beta_cube, p), p ) - let write_term := addmod(t0, t1, p) + let table_term := addmod(t0, t1, p) - // read_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index * β³ + // lookup_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index 
* β³ t0 := addmod( addmod(mload(W1_EVAL_LOC), gamma, p), mulmod(mload(QR_EVAL_LOC), mload(W1_SHIFT_EVAL_LOC), p), @@ -1595,12 +1595,12 @@ contract HonkVerifier is IVerifier { t1 := addmod(mload(W2_EVAL_LOC), mulmod(mload(QM_EVAL_LOC), mload(W2_SHIFT_EVAL_LOC), p), p) let t2 := addmod(mload(W3_EVAL_LOC), mulmod(mload(QC_EVAL_LOC), mload(W3_SHIFT_EVAL_LOC), p), p) - let read_term := addmod(t0, mulmod(t1, beta, p), p) - read_term := addmod(read_term, mulmod(t2, beta_sqr, p), p) - read_term := addmod(read_term, mulmod(mload(QO_EVAL_LOC), beta_cube, p), p) + let lookup_term := addmod(t0, mulmod(t1, beta, p), p) + lookup_term := addmod(lookup_term, mulmod(t2, beta_sqr, p), p) + lookup_term := addmod(lookup_term, mulmod(mload(QO_EVAL_LOC), beta_cube, p), p) - let read_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), write_term, p) - let write_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), read_term, p) + let lookup_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), table_term, p) + let table_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), lookup_term, p) let inverse_exists_xor := addmod(mload(LOOKUP_READ_TAGS_EVAL_LOC), mload(QLOOKUP_EVAL_LOC), p) inverse_exists_xor := addmod( @@ -1609,14 +1609,14 @@ contract HonkVerifier is IVerifier { p ) - let accumulator_none := mulmod(mulmod(read_term, write_term, p), mload(LOOKUP_INVERSES_EVAL_LOC), p) + let accumulator_none := mulmod(mulmod(lookup_term, table_term, p), mload(LOOKUP_INVERSES_EVAL_LOC), p) accumulator_none := addmod(accumulator_none, sub(p, inverse_exists_xor), p) accumulator_none := mulmod(accumulator_none, mload(POW_PARTIAL_EVALUATION_LOC), p) - let accumulator_one := mulmod(mload(QLOOKUP_EVAL_LOC), read_inverse, p) + let accumulator_one := mulmod(mload(QLOOKUP_EVAL_LOC), lookup_inverse, p) accumulator_one := addmod( accumulator_one, - sub(p, mulmod(mload(LOOKUP_READ_COUNTS_EVAL_LOC), write_inverse, p)), + sub(p, mulmod(mload(LOOKUP_READ_COUNTS_EVAL_LOC), table_inverse, p)), p ) diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_zk_contract.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_zk_contract.hpp index 397e9ebbd864..475d5168ee74 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_zk_contract.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/honk_zk_contract.hpp @@ -68,7 +68,7 @@ library FrLib { mstore(add(free, 0x20), 0x20) mstore(add(free, 0x40), 0x20) mstore(add(free, 0x60), v) - mstore(add(free, 0x80), sub(MODULUS, 2)) + mstore(add(free, 0x80), sub(MODULUS, 2)) mstore(add(free, 0xa0), MODULUS) let success := staticcall(gas(), 0x05, free, 0xc0, 0x00, 0x20) if iszero(success) { @@ -92,7 +92,7 @@ library FrLib { mstore(add(free, 0x20), 0x20) mstore(add(free, 0x40), 0x20) mstore(add(free, 0x60), b) - mstore(add(free, 0x80), v) + mstore(add(free, 0x80), v) mstore(add(free, 0xa0), MODULUS) let success := staticcall(gas(), 0x05, free, 0xc0, 0x00, 0x20) if iszero(success) { @@ -825,42 +825,42 @@ library RelationsLib { Fr[NUMBER_OF_SUBRELATIONS] memory evals, Fr domainSep ) internal pure { - Fr write_term; - Fr read_term; + Fr table_term; + Fr lookup_term; // Calculate the write term (the table accumulation) - // write_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ + // table_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ { Fr beta_sqr = rp.beta * rp.beta; - write_term = wire(p, WIRE.TABLE_1) + rp.gamma + (wire(p, WIRE.TABLE_2) * rp.beta) + table_term = wire(p, WIRE.TABLE_1) + rp.gamma + (wire(p, WIRE.TABLE_2) * rp.beta) + (wire(p, 
WIRE.TABLE_3) * beta_sqr) + (wire(p, WIRE.TABLE_4) * beta_sqr * rp.beta); } // Calculate the read term - // read_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index * β³ + // lookup_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index * β³ { Fr beta_sqr = rp.beta * rp.beta; Fr derived_entry_1 = wire(p, WIRE.W_L) + rp.gamma + (wire(p, WIRE.Q_R) * wire(p, WIRE.W_L_SHIFT)); Fr derived_entry_2 = wire(p, WIRE.W_R) + wire(p, WIRE.Q_M) * wire(p, WIRE.W_R_SHIFT); Fr derived_entry_3 = wire(p, WIRE.W_O) + wire(p, WIRE.Q_C) * wire(p, WIRE.W_O_SHIFT); - read_term = derived_entry_1 + (derived_entry_2 * rp.beta) + (derived_entry_3 * beta_sqr) + lookup_term = derived_entry_1 + (derived_entry_2 * rp.beta) + (derived_entry_3 * beta_sqr) + (wire(p, WIRE.Q_O) * beta_sqr * rp.beta); } - Fr read_inverse = wire(p, WIRE.LOOKUP_INVERSES) * write_term; - Fr write_inverse = wire(p, WIRE.LOOKUP_INVERSES) * read_term; + Fr lookup_inverse = wire(p, WIRE.LOOKUP_INVERSES) * table_term; + Fr table_inverse = wire(p, WIRE.LOOKUP_INVERSES) * lookup_term; Fr inverse_exists_xor = wire(p, WIRE.LOOKUP_READ_TAGS) + wire(p, WIRE.Q_LOOKUP) - (wire(p, WIRE.LOOKUP_READ_TAGS) * wire(p, WIRE.Q_LOOKUP)); // Inverse calculated correctly relation - Fr accumulatorNone = read_term * write_term * wire(p, WIRE.LOOKUP_INVERSES) - inverse_exists_xor; + Fr accumulatorNone = lookup_term * table_term * wire(p, WIRE.LOOKUP_INVERSES) - inverse_exists_xor; accumulatorNone = accumulatorNone * domainSep; // Inverse - Fr accumulatorOne = wire(p, WIRE.Q_LOOKUP) * read_inverse - wire(p, WIRE.LOOKUP_READ_COUNTS) * write_inverse; + Fr accumulatorOne = wire(p, WIRE.Q_LOOKUP) * lookup_inverse - wire(p, WIRE.LOOKUP_READ_COUNTS) * table_inverse; Fr read_tag = wire(p, WIRE.LOOKUP_READ_TAGS); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm.test.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm.test.cpp index f23a255b6f1b..3694cea3591c 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm.test.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm.test.cpp @@ -144,7 +144,7 @@ void complete_proving_key_for_test(bb::RelationParameters& relation_paramete const size_t unmasked_witness_size = pk->circuit_size - NUM_DISABLED_ROWS_IN_SUMCHECK; // Compute z_perm and inverse polynomial for our logarithmic-derivative lookup method - compute_logderivative_inverse( + compute_logderivative_inverse( pk->polynomials, relation_parameters, unmasked_witness_size); compute_grand_products(pk->polynomials, relation_parameters, unmasked_witness_size); diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/logderivative_library.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/logderivative_library.hpp index c6c6a8aca0b9..1d6e18984444 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/logderivative_library.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/logderivative_library.hpp @@ -1,11 +1,11 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Completed, auditors: [Federico], commit: } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== // Generic log-derivative utilities for lookups and permutations. 
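For orientation before the hunks below: the rewritten helpers in logderivative_library.hpp recover every individual term inverse from a single committed product inverse using running products, so only one field inversion is needed per active row. The following is a minimal standalone sketch of that trick, not the production code — the small prime modulus, the term values and the term count are made up for illustration and stand in for bb::fr and the relation's lookup/table terms.

```cpp
// Standalone sketch of the single-inversion trick used by the log-derivative
// utilities: given terms t_0..t_{m-1} and one value inv = 1/(t_0 * ... * t_{m-1}),
// recover every 1/t_i with running products and no further inversions.
// NOTE: bb::fr is replaced by arithmetic modulo a small toy prime purely for
// illustration; the prime, the terms and the term count are assumptions.
#include <array>
#include <cassert>
#include <cstdint>
#include <iostream>

constexpr uint64_t P = 101; // toy prime modulus (not the real field modulus)

uint64_t mul(uint64_t a, uint64_t b) { return (a * b) % P; }

// Modular inverse via Fermat's little theorem: a^(P-2) mod P.
uint64_t inverse(uint64_t a)
{
    uint64_t result = 1, base = a % P;
    for (uint64_t e = P - 2; e > 0; e >>= 1) {
        if (e & 1) { result = mul(result, base); }
        base = mul(base, base);
    }
    return result;
}

int main()
{
    // Toy "lookup" and "table" terms; in the real relation each is of the
    // form value + idx * beta + gamma evaluated on a row.
    constexpr size_t NUM_TOTAL_TERMS = 4;
    std::array<uint64_t, NUM_TOTAL_TERMS> terms = { 7, 12, 35, 50 };

    // 1) Successive (prefix) products: acc[i] = t_0 * ... * t_i.
    std::array<uint64_t, NUM_TOTAL_TERMS> acc = terms;
    for (size_t i = 0; i + 1 < NUM_TOTAL_TERMS; ++i) {
        acc[i + 1] = mul(acc[i + 1], acc[i]);
    }

    // 2) One inversion of the full product (the committed inverse value).
    uint64_t inverse_accumulator = inverse(acc[NUM_TOTAL_TERMS - 1]);
    assert(mul(acc[NUM_TOTAL_TERMS - 1], inverse_accumulator) == 1);

    // 3) Walk backwards: 1/t_i = (t_0*...*t_{i-1}) * inverse_accumulator,
    //    then fold t_i into the accumulator to peel it off for the next step.
    std::array<uint64_t, NUM_TOTAL_TERMS> inverses{};
    for (size_t i = NUM_TOTAL_TERMS - 1; i > 0; --i) {
        inverses[i] = mul(acc[i - 1], inverse_accumulator);
        inverse_accumulator = mul(inverse_accumulator, terms[i]);
    }
    inverses[0] = inverse_accumulator;

    for (size_t i = 0; i < NUM_TOTAL_TERMS; ++i) {
        assert(mul(terms[i], inverses[i]) == 1);
    }
    std::cout << "recovered all individual inverses from a single inversion\n";
    return 0;
}
```

The backward walk mirrors the rewritten accumulation routine further down in this file (steps 1–3 in its comments): the cumulative product up to the previous index times the running inverse yields the current term's inverse, and multiplying the running inverse by the current term removes it before the next step.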
-// For the mathematical background, see relations/LOGDERIV_LOOKUP_RELATION_README.md +// For the mathematical background, see relations/GENERIC_LOGUP_README.md #pragma once @@ -20,31 +20,21 @@ namespace bb { /** * @brief Compute the inverse polynomial I(X) required for logderivative lookups - * * - * details - * Inverse may be defined in terms of its values on X_i = 0,1,...,n-1 as Z_perm[0] = 1 and for i = 1:n-1 - * 1 1 - * Inverse[i] = ∏ -------------------------- * ∏' -------------------------- - * relation::read_term(j) relation::write_term(j) * - * where ∏ := ∏_{j=0:relation::NUM_READ_TERMS-1} and ∏' := ∏'_{j=0:relation::NUM_WRITE_TERMS-1} + * @details For \f$x \in H_N\f$, where \f$H_N\f$ is the hypercube of size N, we define the inverse polynomial + * \f[ + * I(x) = \prod_{j} \frac{1}{\text{lookup_term}_{j}(x)} \cdot \prod_{k} \frac{1}{\text{table_term}_{k}(x)} + * \f] * - * If row [i] does not contain a lookup read gate or a write gate, Inverse[i] = 0 - * N.B. by "write gate" we mean; do the lookup table polynomials contain nonzero values at this row? - * (in the ECCVM, the lookup table is not precomputed, so we have a concept of a "write gate", unlike when precomputed - * lookup tables are used) + * If a given row does not contain a lookup gate, the inverse polynomial is set to zero. * - * The specific algebraic relations that define read terms and write terms are defined in Flavor::LookupRelation - * - * Note: tparam UseMultithreading exists because the AVM calls this fn in a multithreaded loop (no nested multithreading - * allowed) but the ECCVM benefits from multithreading this fn */ template void compute_logderivative_inverse(Polynomials& polynomials, auto& relation_parameters, const size_t circuit_size) { using Accumulator = typename Relation::ValueAccumulator0; - constexpr size_t READ_TERMS = Relation::READ_TERMS; - constexpr size_t WRITE_TERMS = Relation::WRITE_TERMS; + constexpr size_t NUM_LOOKUP_TERMS = Relation::NUM_LOOKUP_TERMS; + constexpr size_t NUM_TABLE_TERMS = Relation::NUM_TABLE_TERMS; auto& inverse_polynomial = Relation::get_inverse_polynomial(polynomials); const size_t offset = inverse_polynomial.start_index(); @@ -57,14 +47,14 @@ void compute_logderivative_inverse(Polynomials& polynomials, auto& relation_para continue; } FF denominator = 1; - bb::constexpr_for<0, READ_TERMS, 1>([&] { + bb::constexpr_for<0, NUM_LOOKUP_TERMS, 1>([&] { auto denominator_term = - Relation::template compute_read_term(row, relation_parameters); + Relation::template compute_lookup_term(row, relation_parameters); denominator *= denominator_term; }); - bb::constexpr_for<0, WRITE_TERMS, 1>([&] { + bb::constexpr_for<0, NUM_TABLE_TERMS, 1>([&] { auto denominator_term = - Relation::template compute_write_term(row, relation_parameters); + Relation::template compute_table_term(row, relation_parameters); denominator *= denominator_term; }); inverse_polynomial.at(i) = denominator; @@ -90,189 +80,94 @@ void compute_logderivative_inverse(Polynomials& polynomials, auto& relation_para } /** - * @brief Compute generic log-derivative lookup subrelation accumulation - * @details The generic log-derivative lookup relation consistes of two subrelations. The first demonstrates that the - * inverse polynomial I, defined via I_i = 1/[(read_term_i) * (write_term_i)], has been computed correctly. The second - * establishes the correctness of the lookups themselves based on the log-derivative lookup argument. 
Note that the - * latter subrelation is "linearly dependent" in the sense that it establishes that a sum across all rows of the - * exectution trace is zero, rather than that some expression holds independently at each row. Accordingly, this - * subrelation is not multiplied by a scaling factor at each accumulation step. The subrelation expressions are - * respectively: - * - * I_i * (read_term_i) * (write_term_i) - 1 = 0 - * - * \sum_{i=0}^{n-1} [q_{logderiv_lookup} * I_i * write_term_i + read_count_i * I_i * read_term_i] = 0 - * - * The explicit expressions for read_term and write_term are dependent upon the particular structure of the lookup being - * performed and methods for computing them must be defined in the corresponding relation class. + * @brief Unified implementation of log-derivative subrelation accumulation * * @tparam FF * @tparam Relation * @tparam ContainerOverSubrelations * @tparam AllEntities * @tparam Parameters + * @tparam IsPermutation If true, then the read counts used in the second subrelation are hard-coded to 1. * @param accumulator * @param in * @param params * @param scaling_factor */ -template -void accumulate_logderivative_lookup_subrelation_contributions(ContainerOverSubrelations& accumulator, - const AllEntities& in, - const Parameters& params, - const FF& scaling_factor) +template +void _accumulate_logderivative_subrelation_contributions(ContainerOverSubrelations& accumulator, + const AllEntities& in, + const Parameters& params, + const FF& scaling_factor) { - constexpr size_t READ_TERMS = Relation::READ_TERMS; - constexpr size_t WRITE_TERMS = Relation::WRITE_TERMS; + constexpr size_t NUM_LOOKUP_TERMS = Relation::NUM_LOOKUP_TERMS; + constexpr size_t NUM_TABLE_TERMS = Relation::NUM_TABLE_TERMS; using Accumulator = typename std::tuple_element_t<0, ContainerOverSubrelations>; using View = typename Accumulator::View; auto lookup_inverses = View(Relation::get_inverse_polynomial(in)); - constexpr size_t NUM_TOTAL_TERMS = READ_TERMS + WRITE_TERMS; + constexpr size_t NUM_TOTAL_TERMS = NUM_LOOKUP_TERMS + NUM_TABLE_TERMS; std::array lookup_terms; std::array denominator_accumulator; - // The lookup relation = \sum_j (1 / read_term[j]) - \sum_k (read_counts[k] / write_term[k]) - // To get the inverses (1 / read_term[i]), (1 / write_term[i]), we have a commitment to the product of all inverses - // i.e. lookup_inverse = \prod_j (1 / read_term[j]) * \prod_k (1 / write_term[k]) - // The purpose of this next section is to derive individual inverse terms using `lookup_inverses` - // i.e. (1 / read_term[i]) = lookup_inverse * \prod_{j /ne i} (read_term[j]) * \prod_k (write_term[k]) - // (1 / write_term[i]) = lookup_inverse * \prod_j (read_term[j]) * \prod_{k ne i} (write_term[k]) - bb::constexpr_for<0, READ_TERMS, 1>( - [&]() { lookup_terms[i] = Relation::template compute_read_term(in, params); }); - bb::constexpr_for<0, WRITE_TERMS, 1>([&]() { - lookup_terms[i + READ_TERMS] = Relation::template compute_write_term(in, params); + // The inverse polynomial gives us the product of all the inverses, i.e. + // lookup_inverse = \prod_j (1 / lookup_term[j]) * \prod_k (1 / table_term[k]) + // To obtain the inverses 1 / lookup_term[i], 1 / table_term[i], we multiply lookup_inverse by the product of all + // terms except the one we want to invert. 
We perform this calculation via the following algorithm: + // 1) Compute the successive products of all lookup terms and table terms + // 2) Check that lookup_inverse is the inverse of the full product + // 3) Iteratively compute the inverses as follows: + // (lookup_term_1 * .. * lookup_term_(i-1)) * lookup_inverse * (lookup_term_(i+1) * .. * table_term_m) + + // Collect all the terms in a single vector, first the lookup terms, then the table terms + bb::constexpr_for<0, NUM_LOOKUP_TERMS, 1>( + [&]() { lookup_terms[i] = Relation::template compute_lookup_term(in, params); }); + bb::constexpr_for<0, NUM_TABLE_TERMS, 1>([&]() { + lookup_terms[i + NUM_LOOKUP_TERMS] = Relation::template compute_table_term(in, params); }); + // 1) Construct the successive products bb::constexpr_for<0, NUM_TOTAL_TERMS, 1>([&]() { denominator_accumulator[i] = lookup_terms[i]; }); - bb::constexpr_for<0, NUM_TOTAL_TERMS - 1, 1>( [&]() { denominator_accumulator[i + 1] *= denominator_accumulator[i]; }); - auto inverse_accumulator = Accumulator(lookup_inverses); // denominator_accumulator[NUM_TOTAL_TERMS - 1]; - + // 2) First subrelation: check that lookup_inverse is the inverse of the cumulative product if inverse_exists = 1 + auto inverse_accumulator = Accumulator(lookup_inverses); const auto inverse_exists = Relation::template compute_inverse_exists(in); - // Note: the lookup_inverses are computed so that the value is 0 if !inverse_exists std::get<0>(accumulator) += (denominator_accumulator[NUM_TOTAL_TERMS - 1] * lookup_inverses - inverse_exists) * scaling_factor; - // After this algo, total degree of denominator_accumulator = NUM_TOTAL_TERMS - for (size_t i = 0; i < NUM_TOTAL_TERMS - 1; ++i) { - denominator_accumulator[NUM_TOTAL_TERMS - 1 - i] = - denominator_accumulator[NUM_TOTAL_TERMS - 2 - i] * inverse_accumulator; - inverse_accumulator = inverse_accumulator * lookup_terms[NUM_TOTAL_TERMS - 1 - i]; + // 3) Iteratively compute the single inverses + for (size_t i = NUM_TOTAL_TERMS - 1; i > 0; --i) { + // Take the cumulative product up to the previous index and multiply by the current inverse accumulator + denominator_accumulator[i] = denominator_accumulator[i - 1] * inverse_accumulator; + // Multiply the inverse accumulator by the current term to remove it from the product of the inverses + inverse_accumulator = inverse_accumulator * lookup_terms[i]; } + // Inverse accumulator is now equal to the inverse of the first lookup term denominator_accumulator[0] = inverse_accumulator; - // each predicate is degree-1 - // degree of relation at this point = NUM_TOTAL_TERMS + 1 - bb::constexpr_for<0, READ_TERMS, 1>([&]() { + // Second subrelation + bb::constexpr_for<0, NUM_LOOKUP_TERMS, 1>([&]() { std::get<1>(accumulator) += - Relation::template compute_read_term_predicate(in) * denominator_accumulator[i]; + Relation::template get_lookup_term_predicate(in) * denominator_accumulator[i]; }); - // each predicate is degree-1, `lookup_read_counts` is degree-1 - // degree of relation = NUM_TOTAL_TERMS + 2 - bb::constexpr_for<0, WRITE_TERMS, 1>([&]() { - const auto p = Relation::template compute_write_term_predicate(in); - const auto lookup_read_count = Relation::template lookup_read_counts(in); - std::get<1>(accumulator) -= p * (denominator_accumulator[i + READ_TERMS] * lookup_read_count); + bb::constexpr_for<0, NUM_TABLE_TERMS, 1>([&]() { + auto to_subtract = Relation::template get_table_term_predicate(in) * + denominator_accumulator[i + NUM_LOOKUP_TERMS]; + if constexpr (!IsPermutation) { + // If not a permutation, multiply by 
the read count + to_subtract *= Relation::template lookup_read_counts(in); + } + std::get<1>(accumulator) -= to_subtract; }); } - -/** - * @brief Compute generic log-derivative set permutation subrelation accumulation - * @details The generic log-derivative lookup relation consistes of two subrelations. The first demonstrates that the - * inverse polynomial I, defined via I = 1/[(read_term) * (write_term)], has been computed correctly. The second - * establishes the correctness of the permutation itself based on the log-derivative argument. Note that the - * latter subrelation is "linearly dependent" in the sense that it establishes that a sum across all rows of the - * execution trace is zero, rather than that some expression holds independently at each row. Accordingly, this - * subrelation is not multiplied by a scaling factor at each accumulation step. The subrelation expressions are - * respectively: - * - * I * (read_term) * (write_term) - q_{permutation_enabler} = 0 - * - * \sum_{i=0}^{n-1} [q_{write_enabler} * I * write_term + q_{read_enabler} * I * read_term] = 0 - * - * The explicit expressions for read_term and write_term are dependent upon the particular structure of the permutation - * being performed and methods for computing them must be defined in the corresponding relation class. The entities - * which are used to determine the use of permutation (is it enabled, is the first "read" set enabled, is the second - * "write" set enabled) must be defined in the relation class. - * - * @tparam FF - * @tparam Relation - * @tparam ContainerOverSubrelations - * @tparam AllEntities - * @tparam Parameters - * @param accumulator - * @param in - * @param params - * @param scaling_factor - */ -template -void accumulate_logderivative_permutation_subrelation_contributions(ContainerOverSubrelations& accumulator, - const AllEntities& in, - const Parameters& params, - const FF& scaling_factor) -{ - constexpr size_t READ_TERMS = Relation::READ_TERMS; - constexpr size_t WRITE_TERMS = Relation::WRITE_TERMS; - - // For now we only do simple permutations over tuples with 1 read and 1 write term - static_assert(READ_TERMS == 1); - static_assert(WRITE_TERMS == 1); - - using Accumulator = typename std::tuple_element_t<0, ContainerOverSubrelations>; - using View = typename Accumulator::View; - - auto permutation_inverses = View(Relation::get_inverse_polynomial(in)); - - constexpr size_t NUM_TOTAL_TERMS = 2; - std::array permutation_terms; - std::array denominator_accumulator; - - // The permutation relation = 1 / read_term - 1 / write_term - // To get the inverses (1 / read_term), (1 / write_term), we have a commitment to the product ofinver ses - // i.e. permutation_inverses = (1 / read_term) * (1 / write_term) - // The purpose of this next section is to derive individual inverse terms using `permutation_inverses` - // i.e. 
(1 / read_term) = permutation_inverses * write_term - // (1 / write_term) = permutation_inverses * read_term - permutation_terms[0] = Relation::template compute_read_term(in, params); - permutation_terms[1] = Relation::template compute_write_term(in, params); - - bb::constexpr_for<0, NUM_TOTAL_TERMS, 1>([&]() { denominator_accumulator[i] = permutation_terms[i]; }); - - bb::constexpr_for<0, NUM_TOTAL_TERMS - 1, 1>( - [&]() { denominator_accumulator[i + 1] *= denominator_accumulator[i]; }); - - auto inverse_accumulator = Accumulator(permutation_inverses); // denominator_accumulator[NUM_TOTAL_TERMS - 1]; - - const auto inverse_exists = Relation::template compute_inverse_exists(in); - - // Note: the lookup_inverses are computed so that the value is 0 if !inverse_exists - std::get<0>(accumulator) += - (denominator_accumulator[NUM_TOTAL_TERMS - 1] * permutation_inverses - inverse_exists) * scaling_factor; - - // After this algo, total degree of denominator_accumulator = NUM_TOTAL_TERMS - for (size_t i = 0; i < NUM_TOTAL_TERMS - 1; ++i) { - denominator_accumulator[NUM_TOTAL_TERMS - 1 - i] = - denominator_accumulator[NUM_TOTAL_TERMS - 2 - i] * inverse_accumulator; - inverse_accumulator = inverse_accumulator * permutation_terms[NUM_TOTAL_TERMS - 1 - i]; - } - denominator_accumulator[0] = inverse_accumulator; - - // each predicate is degree-1 - // degree of relation at this point = NUM_TOTAL_TERMS + 1 - std::get<1>(accumulator) += - Relation::template compute_read_term_predicate(in) * denominator_accumulator[0]; - - // each predicate is degree-1 - // degree of relation = NUM_TOTAL_TERMS + 1 - std::get<1>(accumulator) -= - Relation::template compute_write_term_predicate(in) * denominator_accumulator[1]; -} - } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/numeric/random/engine.cpp b/barretenberg/cpp/src/barretenberg/numeric/random/engine.cpp index 4c3aecfea173..02f55b42ff8c 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/random/engine.cpp +++ b/barretenberg/cpp/src/barretenberg/numeric/random/engine.cpp @@ -10,7 +10,17 @@ #include #include #include +#if defined(__APPLE__) +#include +#if TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR +#include +extern "C" int getentropy(void* buffer, size_t length); // getentropy on iOS +#else +#include // getentropy on macOS +#endif +#else #include +#endif namespace bb::numeric { diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.cpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.cpp index cb3c9b17b704..fd9fbddbd77d 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.cpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.cpp @@ -690,11 +690,11 @@ void compute_efficient_interpolation(const Fr* src, Fr* dest, const Fr* evaluati algorithm used in Kate commitment scheme, as the coefficients of N(X)/X are given by numerator_polynomial[j] for j=1,...,n. 
*/ - Fr numerator_polynomial[n + 1]; - polynomial_arithmetic::compute_linear_polynomial_product(evaluation_points, numerator_polynomial, n); + std::vector numerator_polynomial(n + 1); + polynomial_arithmetic::compute_linear_polynomial_product(evaluation_points, numerator_polynomial.data(), n); // First half contains roots, second half contains denominators (to be inverted) - Fr roots_and_denominators[2 * n]; - Fr temp_src[n]; + std::vector roots_and_denominators(2 * n); + std::vector temp_src(n); for (size_t i = 0; i < n; ++i) { roots_and_denominators[i] = -evaluation_points[i]; temp_src[i] = src[i]; @@ -710,10 +710,10 @@ void compute_efficient_interpolation(const Fr* src, Fr* dest, const Fr* evaluati } // at this point roots_and_denominators is populated as follows // (x_0,\ldots, x_{n-1}, d_0, \ldots, d_{n-1}) - Fr::batch_invert(roots_and_denominators, 2 * n); + Fr::batch_invert(roots_and_denominators.data(), 2 * n); Fr z, multiplier; - Fr temp_dest[n]; + std::vector temp_dest(n); size_t idx_zero = 0; bool interpolation_domain_contains_zero = false; // if the constant term of the numerator polynomial N(X) is 0, then the interpolation domain contains 0 diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/LOGUP_README.md b/barretenberg/cpp/src/barretenberg/relations/LOGUP_README.md similarity index 87% rename from barretenberg/cpp/src/barretenberg/honk/proof_system/LOGUP_README.md rename to barretenberg/cpp/src/barretenberg/relations/LOGUP_README.md index bf1a3887fa95..995796ed4fe7 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/LOGUP_README.md +++ b/barretenberg/cpp/src/barretenberg/relations/LOGUP_README.md @@ -6,9 +6,9 @@ Central reference for the log-derivative lookup argument in `logderiv_lookup_rel The log-derivative approach replaces grand-product-based lookups (like plookup) with a sum of inverses. Instead of computing a running product, we prove: -$$\sum_i \left[ q_i \cdot \frac{1}{W_i} - c_i \cdot \frac{1}{R_i} \right] = 0$$ +$$\sum_i \left[ q_i \cdot \frac{1}{L_i} - c_i \cdot \frac{1}{T_i} \right] = 0$$ -where $q$ is the lookup selector, $W$ is the **write** (table) term, $R$ is the **read** (wire) term, and $c$ is the read **count**. +where $q$ is the lookup selector, $L$ is the lookup term, $T$ is the table term, and $c$ is the read **count**. This is motivated by taking the derivative of the log of the grand product identity. @@ -33,7 +33,7 @@ See [Haböck Section 3.4](https://eprint.iacr.org/2022/1530.pdf) for the full an To avoid rational functions in the constraint system, we introduce witness polynomial $I$: -$$I_i = \frac{1}{R_i \cdot W_i}$$ +$$I_i = \frac{1}{L_i \cdot T_i}$$ For efficiency, $I$ is only computed at rows where the lookup is active: - A lookup gate exists ($q = 1$), OR @@ -49,13 +49,13 @@ See [`logderiv_lookup_relation.hpp`](./logderiv_lookup_relation.hpp) for the imp | # | Relation | Purpose | |---|----------|---------| -| 0 | $I \cdot R \cdot W - $ `inverse_exists` $= 0$ | Inverse correctness | -| 1 | $\sum[q \cdot I \cdot W - c \cdot I \cdot R] = 0$ | Lookup identity | +| 0 | $I \cdot L \cdot T - $ `inverse_exists` $= 0$ | Inverse correctness | +| 1 | $\sum[q \cdot I \cdot T - c \cdot I \cdot L] = 0$ | Lookup identity | | 2 | read_tag$^2 - $ read_tag $= 0$ | Boolean check | **Subrelation 1 is "linearly dependent"**: it constrains a sum across the entire trace rather than holding independently at each row, so it's not multiplied by a scaling factor during accumulation. 
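As a sanity check on the identity documented above, the following self-contained sketch verifies it on a toy table. It treats every looked-up value as a row with lookup selector q = 1, works over a small made-up prime field rather than bb::fr, and omits β since the toy entries are plain values rather than (index, value) pairs — all of these are assumptions for illustration only.

```cpp
// Toy check of the log-derivative lookup identity:
//   sum_i q_i / L_i  -  sum_i c_i / T_i  =  0
// where L_i = looked-up value + gamma, T_i = table value + gamma, and c_i counts
// how often each table entry is read. Table, lookups, gamma and the modulus are
// made up; arithmetic is modulo a small toy prime instead of the real field.
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

constexpr uint64_t P = 10007; // toy prime modulus (assumption)

uint64_t mul(uint64_t a, uint64_t b) { return (a * b) % P; }
uint64_t add(uint64_t a, uint64_t b) { return (a + b) % P; }
uint64_t sub(uint64_t a, uint64_t b) { return (a + P - b % P) % P; }

uint64_t inverse(uint64_t a)
{
    uint64_t result = 1, base = a % P;
    for (uint64_t e = P - 2; e > 0; e >>= 1) {
        if (e & 1) { result = mul(result, base); }
        base = mul(base, base);
    }
    return result;
}

int main()
{
    const std::vector<uint64_t> table = { 3, 5, 8, 13 };   // toy table entries
    const std::vector<uint64_t> lookups = { 5, 13, 5, 3 }; // every lookup is in the table
    const uint64_t gamma = 41;                             // toy challenge

    // Read counts c_i: how many times each table entry is looked up.
    std::map<uint64_t, uint64_t> counts;
    for (uint64_t l : lookups) {
        counts[l]++;
    }

    uint64_t lhs = 0; // sum over lookup rows of 1 / (l + gamma)
    for (uint64_t l : lookups) {
        lhs = add(lhs, inverse(add(l, gamma)));
    }

    uint64_t rhs = 0; // sum over table rows of c / (t + gamma)
    for (uint64_t t : table) {
        rhs = add(rhs, mul(counts[t], inverse(add(t, gamma))));
    }

    std::cout << "identity holds: " << (sub(lhs, rhs) == 0 ? "yes" : "no") << "\n";
    return 0;
}
```

Replacing any lookup with a value absent from the table (or tampering with a count) makes the two sums differ, which is exactly what the inverse-correctness and lookup subrelations enforce in-circuit.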
-**Why the boolean check?** If read_tag isn't boolean, inverse_exists becomes linear in read_tag, allowing a malicious prover to set it to 0 even when $q = 1$, bypassing the inverse correctness check. +**Why the boolean check?** If read_tag isn't boolean, inverse_exists becomes linear in read_tag, allowing a malicious prover to set it to whatever they want when $q = 0$. **Security note:** Tampering with read_tags or read_counts only hurts the prover—it reduces the effective table size, making it harder to satisfy the lookup. @@ -83,7 +83,7 @@ $\beta$ and $\gamma$ are generated by splitting one hash output into two 127-bit | File | Contents | |------|----------| -| `logderiv_lookup_relation.hpp` | Subrelations, `compute_read/write_term`, inverse computation | +| `logderiv_lookup_relation.hpp` | Subrelations, `compute_lookup/table_term`, inverse computation | | `relation_parameters.hpp` | Challenge storage ($\beta$, $\gamma$, $\beta^2$, $\beta^3$) | | `oink_prover.cpp` | Challenge generation, inverse polynomial computation | | `sol/src/honk/Relations.sol` | Solidity verifier | diff --git a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp index 3efd6a84a166..c866d05b5228 100644 --- a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp @@ -17,47 +17,62 @@ namespace bb { /** * @brief Log-derivative lookup argument relation for establishing DataBus reads + * * @details Each column of the databus can be thought of as a table from which we can look up values. The log-derivative * lookup argument seeks to prove lookups from a column by establishing the following sum: * - * \sum_{i=0}^{n-1} q_{logderiv_lookup}_i * (1 / write_term_i) + read_count_i * (1 / read_term_i) = 0 + * \f[ + * \sum_{i=0}^{n-1} q_{\text{logderiv_lookup},i} \cdot \frac{1}{\text{lookup_term}_i} - + * \text{read_count}_i \cdot \frac{1}{\text{table_term}_i} = 0 + * \f] + * + * where the lookup and table terms are both of the form \f$\text{value}_i + \text{idx}_i \cdot \beta + \gamma\f$. + * This expression is motivated by taking the derivative of the log of a more conventional grand product style set + * equivalence argument (see e.g. https://eprint.iacr.org/2022/1530.pdf for details). For the table term, the + * (idx, value) pair comes from the "table" (bus column), and for the lookup term the (idx, value) pair comes from + * wires 1 and 2 which should contain a valid entry in the table. * - * where the read and write terms are both of the form value_i + idx_i*\beta + \gamma. This expression is motivated by - * taking the derivative of the log of a more conventional grand product style set equivalence argument (see e.g. - * https://eprint.iacr.org/2022/1530.pdf for details). For the write term, the (idx, value) pair comes from the "table" - * (bus column), and for the read term the (idx, value) pair comes from wires 1 and 2 which should contain a valid entry - * in the table. (Note: the meaning of "read" here is clear: the inputs are an (index, value) pair that we want to read - * from the table. Here "write" refers to data that is present in the "table", i.e. the bus column. There is no gate - * associated with a write, the data is simply populated in the corresponding column and committed to when constructing - * a proof). 
+ * In practice, we must rephrase this expression in terms of polynomials, one of which is a polynomial \f$I\f$ + * containing (indirectly) the rational functions in the above expression: + * \f$I_i = 1/[(\text{lookup_term}_i) \cdot (\text{table_term}_i)]\f$. This leads to two subrelations. The first + * demonstrates that the inverse polynomial \f$I\f$ is correctly formed. The second is the primary lookup identity, + * where the rational functions are replaced by the use of the inverse polynomial \f$I\f$. These two subrelations can + * be expressed as follows: * - * In practice, we must rephrase this expression in terms of polynomials, one of which is a polynomial I containing - * (indirectly) the rational functions in the above expression: I_i = 1/[(read_term_i) * (write_term_i)]. This leads to - * two subrelations. The first demonstrates that the inverse polynomial I is correctly formed. The second is the primary - * lookup identity, where the rational functions are replaced by the use of the inverse polynomial I. These two - * subrelations can be expressed as follows: + * Subrelation 1 (Inverse correctness): + * \f[ + * I_i \cdot (\text{lookup_term}_i) \cdot (\text{table_term}_i) - 1 = 0 + * \f] * - * (1) I_i * (read_term_i) * (write_term_i) - 1 = 0 - * In reality this relation is I_i * (read_term_i) * (write_term_i) - inverse_exists = 0, i.e. it is only checked for - * active gates (more explanation below). + * In reality this relation is \f$I_i \cdot (\text{lookup_term}_i) \cdot (\text{table_term}_i) - + * \text{inverse_exists} = 0\f$, i.e. it is only checked for active gates (more explanation below). * - * (2) \sum_{i=0}^{n-1} [q_{logderiv_lookup} * I_i * write_term_i + read_count_i * I_i * read_term_i] = 0 + * Subrelation 2 (Lookup identity): + * \f[ + * \sum_{i=0}^{n-1} [q_{\text{logderiv_lookup}} \cdot I_i \cdot \text{table_term}_i - + * \text{read_count}_i \cdot I_i \cdot \text{lookup_term}_i] = 0 + * \f] * * Each column of the DataBus requires its own pair of subrelations. The column being read is selected via a unique - * product, i.e. a lookup from bus column j is selected via q_busread * q_j (j = 1,2,...). + * product, i.e. a lookup from bus column \f$j\f$ is selected via \f$q_{\text{busread}} \cdot q_j\f$ (j = 1,2,...). * - * To not compute the inverse terms packed in I_i for indices not included in the sum we introduce a - * witness called inverse_exists, which is zero when either read_count_i is nonzero (a boolean called read_tag) or we - * have a read gate. This is represented by setting inverse_exists = 1- (1- read_tag)*(1- is_read_gate). Since read_gate - * is only dependent on selector values, we can assume that the verifier can check that it is boolean. However, if - * read_tag (which is a derived witness), is not constrained to be boolean, one can set the inverse_exists to 0, even - * when is_read_gate is 1, because inverse_exists is a linear function of read_tag then. Thus we have a third - * subrelation, that ensures that read_tag is a boolean value. - * (3) read_tag * read_tag - read_tag = 0 - * Note: that subrelation (2) is "linearly dependent" in the sense that it establishes that a sum - * across all rows of the exectution trace is zero, rather than that some expression holds independently at each row. - * Accordingly, this subrelation is not multiplied by a scaling factor at each accumulation step. 
+ * To not compute the inverse terms packed in \f$I_i\f$ for indices not included in the sum we introduce a + * witness called inverse_exists, which is zero when either \f$\text{read_count}_i\f$ is nonzero (a boolean called + * read_tag) or we have a read gate. This is represented by setting \f$\text{inverse_exists} = 1 - (1 - + * \text{read_tag}) \cdot (1 - \text{is_read_gate})\f$. Since read_gate is only dependent on selector values, we can + * assume that the verifier can check that it is boolean. However, if read_tag (which is a derived witness), is not + * constrained to be boolean, one can set the inverse_exists to any value when is_read_gate = 0, because + * inverse_exists is a linear function of read_tag then. Thus we have a third subrelation, that ensures that read_tag + * is a boolean value. * + * Subrelation 3 (Boolean check): + * \f[ + * \text{read_tag} \cdot \text{read_tag} - \text{read_tag} = 0 + * \f] + * + * @note Subrelation (2) is "linearly dependent" in the sense that it establishes that a sum across all rows of the + * execution trace is zero, rather than that some expression holds independently at each row. Accordingly, this + * subrelation is not multiplied by a scaling factor at each accumulation step. */ template class DatabusLookupRelationImpl { public: @@ -157,11 +172,13 @@ template class DatabusLookupRelationImpl { /** * @brief Compute the Accumulator whose values indicate whether the inverse is computed or not + * * @details This is needed for efficiency since we don't need to compute the inverse unless the log derivative - * lookup relation is active at a given row. - * We skip the inverse computation for all the rows that read_count_i == 0 AND read_selector is 0 - * @note read_tag is constructed such that read_tag_i = 1 or 0. We add a subrelation to check that read_tag is a - * boolean value + * lookup relation is active at a given row. We skip the inverse computation for all the rows that + * \f$\text{read_count}_i = 0\f$ AND \f$\text{read_selector}$ is 0. + * + * @note \f$\text{read_tag}\f$ is constructed such that \f$\text{read_tag}_i \in \{0, 1\}\f$. We add a subrelation + * to check that \f$\text{read_tag}$ is a boolean value. * */ template @@ -184,7 +201,9 @@ template class DatabusLookupRelationImpl { /** * @brief Compute scalar for read term in log derivative lookup argument - * @details The selector indicating read from bus column j is given by q_busread * q_j, j = 1,2,3 + * + * @details The selector indicating read from bus column \f$j\f$ is given by + * \f$q_{\text{busread}} \cdot q_j\f$, where \f$j \in \{1, 2, 3\}\f$. 
* */ template @@ -204,7 +223,7 @@ template class DatabusLookupRelationImpl { * */ template - static Accumulator compute_write_term(const AllEntities& in, const Parameters& params) + static Accumulator compute_table_term(const AllEntities& in, const Parameters& params) { using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; using ParameterCoefficientAccumulator = typename Parameters::DataType::CoefficientAccumulator; @@ -225,7 +244,7 @@ template class DatabusLookupRelationImpl { * */ template - static Accumulator compute_read_term(const AllEntities& in, const Parameters& params) + static Accumulator compute_lookup_term(const AllEntities& in, const Parameters& params) { using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; using ParameterCoefficientAccumulator = typename Parameters::DataType::CoefficientAccumulator; @@ -241,11 +260,14 @@ template class DatabusLookupRelationImpl { } /** - * @brief Construct the polynomial I whose components are the inverse of the product of the read and write terms - * @details If the denominators of log derivative lookup relation are read_term and write_term, then I_i = - * (read_term_i*write_term_i)^{-1}. - * @note Importantly, I_i = 0 for rows i at which there is no read or write, so the cost of this method is - * proportional to the actual databus usage. + * @brief Construct the polynomial \f$I\f$ whose components are the inverse of the product of the read and write + * terms + * + * @details If the denominators of log derivative lookup relation are lookup_term and table_term, then + * \f$I_i = (\text{lookup_term}_i \cdot \text{table_term}_i)^{-1}\f$. + * + * @note Importantly, \f$I_i = 0\f$ for rows \f$i\f$ at which there is no read or write, so the cost of this method + * is proportional to the actual databus usage. * */ template @@ -281,8 +303,8 @@ template class DatabusLookupRelationImpl { if (is_read || nonzero_read_count) { // TODO(https://github.com/AztecProtocol/barretenberg/issues/940): avoid get_row if possible. auto row = polynomials.get_row(i); // Note: this is a copy. use sparingly! - auto value = compute_read_term(row, relation_parameters) * - compute_write_term(row, relation_parameters); + auto value = compute_lookup_term(row, relation_parameters) * + compute_table_term(row, relation_parameters); inverse_polynomial.at(i) = value; } } @@ -319,8 +341,8 @@ template class DatabusLookupRelationImpl { const auto inverses_m = CoefficientAccumulator(BusData::inverses(in)); // Degree 1 Accumulator inverses(inverses_m); const auto read_counts_m = CoefficientAccumulator(BusData::read_counts(in)); // Degree 1 - const auto read_term = compute_read_term(in, params); // Degree 1 - const auto write_term = compute_write_term(in, params); // Degree 1 + const auto lookup_term = compute_lookup_term(in, params); // Degree 1 + const auto table_term = compute_table_term(in, params); // Degree 1 const auto inverse_exists = compute_inverse_exists(in); // Degree 3 const auto read_selector = get_read_selector(in); // Degree 2 @@ -335,15 +357,15 @@ template class DatabusLookupRelationImpl { // Establish the correctness of the polynomial of inverses I. Note: inverses is computed so that the value // is 0 if !inverse_exists. Degree 3 // degrees 3 = 1 1 1 3 - std::get(accumulator) += (read_term * write_term * inverses - inverse_exists) * scaling_factor; + std::get(accumulator) += (lookup_term * table_term * inverses - inverse_exists) * scaling_factor; // Establish validity of the read. 
Note: no scaling factor here since this constraint is enforced across the // entire trace, not on a per-row basis. // degree 3 = 2 1 - Accumulator tmp = read_selector * write_term; + Accumulator tmp = read_selector * table_term; // degree 2 = 1 1 - tmp -= Accumulator(read_counts_m) * read_term; + tmp -= Accumulator(read_counts_m) * lookup_term; // degree 1 tmp *= inverses; std::get(accumulator) += tmp; // Deg 4 (4) diff --git a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation_consistency.test.cpp b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation_consistency.test.cpp index c2cc00bb0718..402ffab873cf 100644 --- a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation_consistency.test.cpp +++ b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation_consistency.test.cpp @@ -5,8 +5,8 @@ * implementation produces the same results as a simpler, more readable reference implementation. * * The DatabusLookupRelation implements a log-derivative lookup argument with 3 subrelations per bus column: - * 1. Inverse correctness: I * read_term * write_term - inverse_exists = 0 - * 2. Log-derivative lookup: sum of (read_selector * write_term - read_count * read_term) * I = 0 + * 1. Inverse correctness: I * lookup_term * table_term - inverse_exists = 0 + * 2. Log-derivative lookup: sum of (read_selector * table_term - read_count * lookup_term) * I = 0 * 3. Read tag boolean check: read_tag * read_tag - read_tag = 0 */ #include "barretenberg/ecc/curves/bn254/fr.hpp" @@ -140,20 +140,20 @@ static std::array compute_expected_values(const DatabusInputElements& in, std::fill(expected_values.begin(), expected_values.end(), FF(0)); // Read term (same for all columns): value + index * beta + gamma - auto read_term = in.w_l + in.w_r * beta + gamma; + auto lookup_term = in.w_l + in.w_r * beta + gamma; // Lambda to compute subrelations for a given bus column auto compute_column_subrelations = [&](size_t bus_idx, FF column_selector, FF bus_value, FF read_counts, FF read_tags, FF inverses) { auto is_read_gate = in.q_busread * column_selector; auto inverse_exists = is_read_gate + read_tags - is_read_gate * read_tags; - auto write_term = bus_value + in.databus_id * beta + gamma; + auto table_term = bus_value + in.databus_id * beta + gamma; // Subrelation 1: Inverse correctness - expected_values[bus_idx * 3] = read_term * write_term * inverses - inverse_exists; + expected_values[bus_idx * 3] = lookup_term * table_term * inverses - inverse_exists; // Subrelation 2: Log-derivative lookup (no scaling factor since linearly dependent) - expected_values[bus_idx * 3 + 1] = (is_read_gate * write_term - read_counts * read_term) * inverses; + expected_values[bus_idx * 3 + 1] = (is_read_gate * table_term - read_counts * lookup_term) * inverses; // Subrelation 3: Read tag boolean check expected_values[bus_idx * 3 + 2] = read_tags * read_tags - read_tags; @@ -316,7 +316,7 @@ TEST_F(DatabusLookupRelationConsistency, InactiveGates) /** * @brief Test a valid read gate scenario where inverse is correctly computed - * @details When the inverse is set correctly: I = 1/(read_term * write_term), + * @details When the inverse is set correctly: I = 1/(lookup_term * table_term), * the inverse correctness subrelation should be zero (satisfied) */ TEST_F(DatabusLookupRelationConsistency, ValidInverseComputation) @@ -342,9 +342,9 @@ TEST_F(DatabusLookupRelationConsistency, ValidInverseComputation) in.calldata = value; // value in bus matches // Compute the correct inverse - auto read_term 
= value + index * beta + gamma; - auto write_term = value + index * beta + gamma; // same since value and index match - auto inverse = (read_term * write_term).invert(); + auto lookup_term = value + index * beta + gamma; + auto table_term = value + index * beta + gamma; // same since value and index match + auto inverse = (lookup_term * table_term).invert(); in.calldata_inverses = inverse; // Read tag = 1 (this row is being read) @@ -364,8 +364,8 @@ TEST_F(DatabusLookupRelationConsistency, ValidInverseComputation) Relation::accumulate(accumulator, in, parameters, FF(1)); // Inverse correctness subrelation should be 0 (satisfied) - // I * read_term * write_term - inverse_exists = 0 - // inverse * read_term * write_term = 1 + // I * lookup_term * table_term - inverse_exists = 0 + // inverse * lookup_term * table_term = 1 // inverse_exists = is_read_gate + read_tag - is_read_gate * read_tag = 1 + 1 - 1 = 1 EXPECT_EQ(accumulator[0], FF(0)); @@ -380,7 +380,7 @@ TEST_F(DatabusLookupRelationConsistency, ValidInverseComputation) } /** - * @brief Test that when read_term != write_term, inverse correctness fails with wrong inverse + * @brief Test that when lookup_term != table_term, inverse correctness fails with wrong inverse * @details If the value in bus doesn't match what's being read, the relation should fail */ TEST_F(DatabusLookupRelationConsistency, MismatchedReadWriteTerms) @@ -407,11 +407,11 @@ TEST_F(DatabusLookupRelationConsistency, MismatchedReadWriteTerms) in.databus_id = index; in.calldata = bus_value; - // Compute inverse based on read_term (which uses w_l, w_r) and write_term (which uses calldata) - auto read_term = read_value + index * beta + gamma; - auto write_term = bus_value + index * beta + gamma; + // Compute inverse based on lookup_term (which uses w_l, w_r) and table_term (which uses calldata) + auto lookup_term = read_value + index * beta + gamma; + auto table_term = bus_value + index * beta + gamma; // Even with "correct" inverse, the lookup will fail because terms don't match - auto inverse = (read_term * write_term).invert(); + auto inverse = (lookup_term * table_term).invert(); in.calldata_inverses = inverse; in.calldata_read_tags = FF(1); @@ -430,12 +430,12 @@ TEST_F(DatabusLookupRelationConsistency, MismatchedReadWriteTerms) // Inverse correctness subrelation should still be 0 (inverse is computed correctly for these terms) EXPECT_EQ(accumulator[0], FF(0)); - // But the lookup subrelation (index 1) will be non-zero because read_term != write_term + // But the lookup subrelation (index 1) will be non-zero because lookup_term != table_term // This is where the soundness comes in - the sum across the trace won't be zero - // For a single row: (read_selector * write_term - read_count * read_term) * inverse - // = (1 * write_term - 1 * read_term) * inverse - // = (write_term - read_term) * inverse != 0 when read_term != write_term - FF expected_lookup = (write_term - read_term) * inverse; + // For a single row: (read_selector * table_term - read_count * lookup_term) * inverse + // = (1 * table_term - 1 * lookup_term) * inverse + // = (table_term - lookup_term) * inverse != 0 when lookup_term != table_term + FF expected_lookup = (table_term - lookup_term) * inverse; EXPECT_EQ(accumulator[1], expected_lookup); EXPECT_NE(accumulator[1], FF(0)); // Confirm it's non-zero } diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp index ffb3d95fd57b..b3d0001c1276 100644 
--- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp @@ -17,10 +17,10 @@ namespace bb { template class ECCVMLookupRelationImpl { public: using FF = FF_; - static constexpr size_t READ_TERMS = 4; - static constexpr size_t WRITE_TERMS = 2; + static constexpr size_t NUM_LOOKUP_TERMS = 4; + static constexpr size_t NUM_TABLE_TERMS = 2; // 1 + polynomial degree of this relation - static constexpr size_t LENGTH = READ_TERMS + WRITE_TERMS + 3; // 9 + static constexpr size_t LENGTH = NUM_LOOKUP_TERMS + NUM_TABLE_TERMS + 3; // 9 static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ LENGTH, // grand product construction sub-relation @@ -68,36 +68,36 @@ template class ECCVMLookupRelationImpl { return Accumulator(1); } - template - static Accumulator compute_read_term_predicate(const AllEntities& in) + template + static Accumulator get_lookup_term_predicate(const AllEntities& in) { using View = typename Accumulator::View; - if constexpr (read_index == 0) { + if constexpr (lookup_index == 0) { return Accumulator(View(in.msm_add1)); } - if constexpr (read_index == 1) { + if constexpr (lookup_index == 1) { return Accumulator(View(in.msm_add2)); } - if constexpr (read_index == 2) { + if constexpr (lookup_index == 2) { return Accumulator(View(in.msm_add3)); } - if constexpr (read_index == 3) { + if constexpr (lookup_index == 3) { return Accumulator(View(in.msm_add4)); } return Accumulator(1); } - template - static Accumulator compute_write_term_predicate(const AllEntities& in) + template + static Accumulator get_table_term_predicate(const AllEntities& in) { using View = typename Accumulator::View; - if constexpr (write_index == 0) { + if constexpr (table_index == 0) { return Accumulator(View(in.precompute_select)); } - if constexpr (write_index == 1) { + if constexpr (table_index == 1) { // TODO(https://github.com/AztecProtocol/barretenberg/issues/750) Is this a bug? return Accumulator(View(in.precompute_select)); } @@ -107,14 +107,14 @@ template class ECCVMLookupRelationImpl { * @brief Returns the fingerprint of `(precompute_pc, compressed_slice, (2 * compressed_slice - 15)[P])`, where [P] * is the point corresponding to `precompute_pc` and `compressed_slice`∈{0, ..., 15}. */ - template - static Accumulator compute_write_term(const AllEntities& in, const Parameters& params) + template + static Accumulator compute_table_term(const AllEntities& in, const Parameters& params) { using View = typename Accumulator::View; - static_assert(write_index < WRITE_TERMS); - // write_index == 0 means our wNAF digit is positive (i.e., ∈{1, 3..., 15}). - // write_index == 1 means our wNAF digit is negative (i.e., ∈{-15, -13..., -1}) + static_assert(table_index < NUM_TABLE_TERMS); + // table_index == 0 means our wNAF digit is positive (i.e., ∈{1, 3..., 15}). + // table_index == 1 means our wNAF digit is negative (i.e., ∈{-15, -13..., -1}) // round starts at 0 and increments to 7 // point starts at 15[P] and decrements to [P] @@ -122,8 +122,8 @@ template class ECCVMLookupRelationImpl { // we have computed `(15 - 2 * round)[P] =: (precompute_tx, precompute_ty)`. 
// `round`∈{0, 1..., 7} - // if write_index == 0, we want to write (pc, 15 - 2 * round, precompute_tx, precompute_ty) - // if write_index == 1, we want to write (pc, round, precompute_tx, -precompute_ty) + // if table_index == 0, we want to write (pc, 15 - 2 * round, precompute_tx, precompute_ty) + // if table_index == 1, we want to write (pc, round, precompute_tx, -precompute_ty) // to sum up, both: // (pc, round, precompute_tx, -precompute_ty) _and_ // (pc, 15 - 2 * round, precompute_tx, precompute_ty) @@ -156,27 +156,27 @@ template class ECCVMLookupRelationImpl { const auto& beta_sqr = params.beta_sqr; const auto& beta_cube = params.beta_cube; - if constexpr (write_index == 0) { + if constexpr (table_index == 0) { const auto positive_slice_value = -(precompute_round) + 15; const auto positive_term = precompute_pc + gamma + positive_slice_value * beta + tx * beta_sqr + ty * beta_cube; return positive_term; // degree 1 } - if constexpr (write_index == 1) { + if constexpr (table_index == 1) { const auto negative_term = precompute_pc + gamma + precompute_round * beta + tx * beta_sqr - ty * beta_cube; return negative_term; // degree 1 } return Accumulator(1); } - template - static Accumulator compute_read_term(const AllEntities& in, const Parameters& params) + template + static Accumulator compute_lookup_term(const AllEntities& in, const Parameters& params) { using View = typename Accumulator::View; // read term: (pc, compressed_slice, (2 * compressed_slice - 15)[P]) // (the latter term is of course represented via an x and y coordinate.) - static_assert(read_index < READ_TERMS); + static_assert(lookup_index < NUM_LOOKUP_TERMS); const auto& gamma = params.gamma; const auto& beta = params.beta; const auto& beta_sqr = params.beta_sqr; @@ -204,24 +204,24 @@ template class ECCVMLookupRelationImpl { // performed in the current row. 
const auto current_pc = msm_pc - msm_count; - if constexpr (read_index == 0) { - const auto read_term1 = (current_pc) + gamma + msm_slice1 * beta + msm_x1 * beta_sqr + msm_y1 * beta_cube; - return read_term1; // degree 1 + if constexpr (lookup_index == 0) { + const auto lookup_term1 = (current_pc) + gamma + msm_slice1 * beta + msm_x1 * beta_sqr + msm_y1 * beta_cube; + return lookup_term1; // degree 1 } - if constexpr (read_index == 1) { - const auto read_term2 = + if constexpr (lookup_index == 1) { + const auto lookup_term2 = (current_pc - 1) + gamma + msm_slice2 * beta + msm_x2 * beta_sqr + msm_y2 * beta_cube; - return read_term2; // degree 1 + return lookup_term2; // degree 1 } - if constexpr (read_index == 2) { - const auto read_term3 = + if constexpr (lookup_index == 2) { + const auto lookup_term3 = (current_pc - 2) + gamma + msm_slice3 * beta + msm_x3 * beta_sqr + msm_y3 * beta_cube; - return read_term3; // degree 1 + return lookup_term3; // degree 1 } - if constexpr (read_index == 3) { - const auto read_term4 = + if constexpr (lookup_index == 3) { + const auto lookup_term4 = (current_pc - 3) + gamma + msm_slice4 * beta + msm_x4 * beta_sqr + msm_y4 * beta_cube; - return read_term4; // degree 1 + return lookup_term4; // degree 1 } return Accumulator(1); } diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation_impl.hpp index 34c5605bdad6..be82d88d3ef1 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation_impl.hpp @@ -32,7 +32,11 @@ void ECCVMLookupRelationImpl::accumulate(ContainerOverSubrelations& accumula const Parameters& params, const FF& scaling_factor) { - accumulate_logderivative_lookup_subrelation_contributions>( - accumulator, in, params, scaling_factor); + _accumulate_logderivative_subrelation_contributions, + ContainerOverSubrelations, + AllEntities, + Parameters, + false>(accumulator, in, params, scaling_factor); } } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/generic_lookup/GENERIC_LOGUP_README.md b/barretenberg/cpp/src/barretenberg/relations/generic_lookup/GENERIC_LOGUP_README.md new file mode 100644 index 000000000000..a38908e52a53 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/relations/generic_lookup/GENERIC_LOGUP_README.md @@ -0,0 +1,60 @@ +# Generic Log-Derivative Lookup + +## Overview + +The file `generic_lookup_relation.hpp` implements a generic log-derivative lookup argument. The relation is similar to the one described in `LOGUP_README.md`, with the difference that `GenericLookupRelationImpl` has been designed to allow the implementor to customize the lookup argument. In this readme we reuse some of the notation introduced in `LOGUP_README.md`. + +The argument is still based on a log-derivative expression, but instead of using the expression + +$$\sum_i \left[ q_i \cdot \frac{1}{L_i} - c_i \cdot \frac{1}{T_i} \right] = 0$$ + +we use the following expression: + +$$\sum_i \left[ q_{L,i} \cdot \frac{1}{L_i} - c_i \cdot q_{T,i} \cdot \frac{1}{T_i} \right] = 0$$ + +where $L_i$ is the lookup term (the thing we look up), $T_i$ is the table term (the thing in the table from which we look up), $c_i$ is the read count (how many times $T_i$ has been looked up), and $q_{L,i}$, $q_{T,i}$ are two boolean values used to decide which elements should/can be looked up. 
We call them the *lookup term predicate* and the *table term predicate*, respectively.
+
+## Types of lookups
+
+Let $f_1, \dots, f_n$ be the columns whose values we wish to look up (the lookup columns), and $t_1, \dots, t_m$ the columns whose values determine what we can look up (the table columns). `GenericLookupRelationImpl` implements two types of lookups:
+- `BASIC_LOOKUP/BASIC_TABLE`: $n = m$ and we look up the rows of the ordered tuple $(f_1, \dots, f_n)$ from the rows of the ordered tuple $(t_1, \dots, t_n)$. Instead of looking up the tuple directly, we use a random challenge $\beta$ to batch its elements. Thus, we look up rows of $\sum_i \beta^i f_i$ from the rows of $\sum_i \beta^i t_i$. The length of the tuple is denoted in the code by `LOOKUP_TUPLE_SIZE` ($= n = m$).
+- `CUSTOMIZED_LOOKUP/CUSTOMIZED_TABLE`: $n$ and $m$ can be arbitrary and we look up functions of the rows of $(f_1, \dots, f_n)$ from functions of the rows of $(t_1, \dots, t_m)$. The implementor is free to choose how these values are generated from the columns.
+
+The implementor can choose:
+- how many lookups are performed in each row, by setting the variable `NUM_LOOKUP_TERMS`
+- how many table terms should be generated in each row, by setting the variable `NUM_TABLE_TERMS`
+
+Each lookup can be either a basic lookup or a customized lookup (see the settings sketch after the Predicates section below).
+
+**Note:** We assume that all basic lookups have the same size `LOOKUP_TUPLE_SIZE`.
+
+## Predicates
+
+The lookup term and table term predicates are used to decide which elements should/can be looked up. In our codebase these values are either:
+- both *selectors* (in the AVM)
+- both *witnesses* (in the ECCVM)
+
+The case in which both terms are selectors is standard: the relation is hard-coded in the circuit and the only freedom the prover has is to set the witnesses for the lookup columns/table columns (depending on whether the table is precomputed or not).
+
+The case in which both terms are witnesses is slightly less standard. We provide an example of its usage which is close to the usage in our codebase. Say we want to perform a scalar multiplication $a \cdot P$, where $a \in \mathbb{F}$ and $P \in \mathbb{G}$. Then, the prover precomputes some multiples $k \cdot P$, $0 \leq k \leq 2^i - 1$, and performs a windowed double-and-add to compute $a \cdot P$. The prover sets the lookup term predicates to $1$ where they need to look up one of the multiples $k \cdot P$, while they set the table term predicates to $1$ where they compute the multiples $k \cdot P$. Clearly, the predicates must be consistent with the double-and-add algorithm; this consistency is constrained by a separate relation.
+
+**Note:** When the predicates are selectors they do not need to be constrained to be booleans, because the verifier can check this condition themselves. When they are witnesses, they must be explicitly constrained.
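+
+To make the configuration above concrete, the following is a minimal sketch of what a settings struct for a single basic lookup over a 3-column tuple (say, an XOR lookup) might look like. The constant names follow the requirements documented in `generic_lookup_relation.hpp`; the struct name, the XOR use case and the exact element types of the arrays are only illustrative, and the templated methods the relation also expects (`compute_lookup_term`, `compute_table_term`, `compute_inverse_exists`, `get_const_entities`, `get_nonconst_entities`, `inverse_polynomial_is_computed_at_row`) are omitted.
+
+```cpp
+#include <array>
+#include <cstddef>
+#include <cstdint>
+
+// Mirrors the LOOKUP_TYPE / TABLE_TYPE enums defined in generic_lookup_relation.hpp.
+enum LOOKUP_TYPE : uint8_t { BASIC_LOOKUP, CUSTOMIZED_LOOKUP };
+enum TABLE_TYPE : uint8_t { BASIC_TABLE, CUSTOMIZED_TABLE };
+
+// Hypothetical settings: one basic lookup of a 3-column tuple from one basic table term per row.
+struct ExampleXorLookupSettings {
+    static constexpr size_t NUM_LOOKUP_TERMS = 1; // one lookup performed per row
+    static constexpr size_t NUM_TABLE_TERMS = 1;  // one table term generated per row
+
+    // Both the lookup and the table are basic: their columns are batched with powers of beta.
+    static constexpr std::array<LOOKUP_TYPE, NUM_LOOKUP_TERMS> LOOKUP_TYPES = { BASIC_LOOKUP };
+    static constexpr std::array<TABLE_TYPE, NUM_TABLE_TERMS> TABLE_TYPES = { BASIC_TABLE };
+
+    // Term degrees; only consulted for customized terms (basic terms always have degree 1).
+    static constexpr std::array<size_t, NUM_LOOKUP_TERMS> LOOKUP_TERM_DEGREES = { 1 };
+    static constexpr std::array<size_t, NUM_TABLE_TERMS> TABLE_TERM_DEGREES = { 1 };
+
+    // Three columns batched per entry, e.g. (a, b, a XOR b).
+    static constexpr size_t LOOKUP_TUPLE_SIZE = 3;
+
+    // Degree of the expression deciding whether the inverse exists at a row; for instance,
+    // 1 - (1 - lookup_predicate) * (1 - table_predicate) would have degree 2.
+    static constexpr size_t INVERSE_EXISTS_POLYNOMIAL_DEGREE = 2;
+};
+```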
+
+## Relations rolled out
+
+The relation is composed of two subrelations: one that validates the calculation of the inverse polynomial, and one that enforces the sum of the inverses to be zero. Here $L_1, \dots, L_n$ and $T_1, \dots, T_m$ denote the lookup terms and table terms at a row (so $n =$ `NUM_LOOKUP_TERMS` and $m =$ `NUM_TABLE_TERMS`), $q_{L,i}$, $q_{T,i}$ their predicates, and $c_i$ the read counts. The expressions for the two subrelations are:
+$$
+ I(x) \cdot \prod_{i=1}^{n} L_i(x) \cdot \prod_{i=1}^{m} T_i(x) - \text{inverse\_exists}(x) = 0
+$$
+and
+$$
+ \sum_{i=1}^{n} q_{L,i}(x) \cdot \frac{1}{L_i(x)} - \sum_{i=1}^{m} q_{T,i}(x) \cdot c_i(x) \cdot \frac{1}{T_i(x)}
+$$
+
+The degrees of the above relations are:
+1. The degree of the first relation is $\max(1 + \sum_i \deg(L_i) + \sum_i \deg(T_i), \deg(\text{inverse\_exists}))$.
+2. The degree of the second relation is $3 + M$, where
+$$ M = \max_{\text{term}}\left(\sum_i \deg(L_i) + \sum_i \deg(T_i) - \deg(\text{term})\right)$$
+and $\text{term}$ ranges over all lookup and table terms. This is because we compute the inverses as:
+$$
+ \frac{1}{T_i(x)} = I(x) \cdot \prod_{j \neq i} T_j(x) \cdot \prod_{j} L_j(x)
+$$
diff --git a/barretenberg/cpp/src/barretenberg/relations/generic_lookup/generic_lookup_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/generic_lookup/generic_lookup_relation.hpp
index da9441c4684b..392671e814ff 100644
--- a/barretenberg/cpp/src/barretenberg/relations/generic_lookup/generic_lookup_relation.hpp
+++ b/barretenberg/cpp/src/barretenberg/relations/generic_lookup/generic_lookup_relation.hpp
@@ -1,25 +1,9 @@
 // === AUDIT STATUS ===
-// internal: { status: Planned, auditors: [], commit: }
+// internal: { status: Completed, auditors: [Federico], commit: }
 // external_1: { status: not started, auditors: [], commit: }
 // external_2: { status: not started, auditors: [], commit: }
 // =====================
-/**
- * @file generic_lookup_relation.hpp
- * @author Rumata888
- * @brief This file contains the template for the generic lookup that can be specialized to enforce various
- * lookups (for explanation on how to define them, see "relation_definer.hpp")
- *
- * @details Lookup is a mechanism to ensure that a particular value or tuple of values (these can be values of
- * witnesses, selectors or a function of these) is contained within a particular set. It is a relative of set
- * permutation, but has a one-to-many relationship beween elements that are being looked up and the table of values they
- * are being looked up from. In this relation template we use the following terminology:
- * + READ - the action of looking up the value in the table
- * + WRITE - the action of adding the value to the lookup table
- *
- * TODO(@Rumata888): Talk to Zac why "lookup_read_count" refers to the count of the looked up element in the multiset.
- * (The value is applied to the write predicate, so it is confusing).
- */
 #pragma once
 #include
 #include
@@ -30,93 +14,266 @@
 #include "barretenberg/relations/relation_types.hpp"
 namespace bb {
+
 /**
- * @brief Specifies positions of elements in the tuple of entities received from methods in the Settings class
+ * Write f_1, .., f_n for the values at a row i of the columns we wish to look up, and t_1, .., t_m for table
+ * columns. We allow two types of lookups:
+ * - BASIC_LOOKUP/BASIC_TABLE: Looking up a subset S_f \subset {f_1, .., f_n} from a subset S_t \subset {t_1, ..,
+ * t_m}
+ * - CUSTOMIZED_LOOKUP/CUSTOMIZED_TABLE: Looking up values that are computed arbitrarily from {f_1, .., f_n} from
+ * values that are computed arbitrarily (and possibly in a different way)
+ * from {t_1, .., t_m}
 *
 */
+enum LOOKUP_TYPE : uint8_t { BASIC_LOOKUP, CUSTOMIZED_LOOKUP };
+enum TABLE_TYPE : uint8_t { BASIC_TABLE, CUSTOMIZED_TABLE };
-template class GenericLookupRelationImpl {
- public:
- using FF = FF_;
+/**
+ * @brief Polynomial structure required for the lookup argument
+ *
+ * @details The implementor must provide methods get_const_entities and get_nonconst_entities via Settings
+ * that return the polynomials required for the lookup argument. These polynomials have a structure that is in
+ * part fixed and in part variable:
+ *
+ * Fixed Part:
+ * 1. The first polynomial is the inverse polynomial
+ * 2. Next we have NUM_TABLE_TERMS polynomials representing the lookup read counts, i.e., how many times each
+ * table term has been read
+ * 3.
Next we have NUM_LOOKUP_TERMS polynomials representing the lookup term predicates, which toggle + * whether a lookup term can be looked up in this row or not + * 4. Next we have NUM_TABLE_TERMS polynomials representing the table term predicates, which toggle whether a + * table term can be looked up in this row or not + * + * Variable Part: + * 5. For each lookup term, we have a variable number of polynomials depending on the type of lookup: + * - BASIC_LOOKUP: LOOKUP_TUPLE_SIZE polynomials representing the columns being looked up (and that will be + * batched) + * - CUSTOMIZED_LOOKUP: No additional polynomials are required, as the logic is fully specified in Settings + * 6. For each table term, we have a variable number of polynomials depending on the type of table: + * - BASIC_TABLE: LOOKUP_TUPLE_SIZE polynomials representing the table columns (and that will be batched) + * - CUSTOMIZED_TABLE: No additional polynomials are required, as the logic is fully specified in Settings + */ +template class LookupPolynomialStructure { + private: + static constexpr size_t NUM_LOOKUP_TERMS = Settings_::NUM_LOOKUP_TERMS; + static constexpr size_t NUM_TABLE_TERMS = Settings_::NUM_TABLE_TERMS; - // Read terms specified how many maximum lookups can be performed in 1 row - static constexpr size_t READ_TERMS = Settings::READ_TERMS; + static constexpr size_t INVERSE_POLYNOMIAL_INDEX = 0; + static constexpr size_t LOOKUP_READ_COUNT_START_POLYNOMIAL_INDEX = 1; + static constexpr size_t LOOKUP_TERM_PREDICATE_START_POLYNOMIAL_INDEX = + LOOKUP_READ_COUNT_START_POLYNOMIAL_INDEX + NUM_TABLE_TERMS; + static constexpr size_t TABLE_TERM_PREDICATE_START_POLYNOMIAL_INDEX = + LOOKUP_TERM_PREDICATE_START_POLYNOMIAL_INDEX + NUM_LOOKUP_TERMS; + static constexpr size_t LOOKUP_TERM_START_POLYNOMIAL_INDEX = + TABLE_TERM_PREDICATE_START_POLYNOMIAL_INDEX + NUM_TABLE_TERMS; - // Looked up entries can be a basic tuple, a scaled tuple or completely arbitrary - enum READ_TERM_TYPES { READ_BASIC_TUPLE = 0, READ_SCALED_TUPLE, READ_ARBITRARY }; + public: + static constexpr size_t get_inverse_polynomial_index() { return INVERSE_POLYNOMIAL_INDEX; } - // Write terms specifies how many insertions into the lookup table can be performed in 1 row - static constexpr size_t WRITE_TERMS = Settings::WRITE_TERMS; + static constexpr size_t get_read_count_polynomial_index(const size_t index) + { + return LOOKUP_READ_COUNT_START_POLYNOMIAL_INDEX + index; + } - // Entries put into the table are ever defined as a tuple or constructed arbitrarily - enum WRITE_TERM_TYPES { WRITE_BASIC_TUPLE = 0, WRITE_ARBITRARY }; + static constexpr size_t get_lookup_term_predicate_index(const size_t lookup_index) + { + return LOOKUP_TERM_PREDICATE_START_POLYNOMIAL_INDEX + lookup_index; + } - // Lookup tuple size specifies how many values are bundled together to represent a single entry in the lookup table. 
- // For example, it would be 1 for a range constraint lookup, or 3 for XOR lookup - static constexpr size_t LOOKUP_TUPLE_SIZE = Settings::LOOKUP_TUPLE_SIZE; + static constexpr size_t get_table_term_predicate_index(const size_t table_index) + { + return TABLE_TERM_PREDICATE_START_POLYNOMIAL_INDEX + table_index; + } /** - * @brief Compute the maximum degree of read terms + * @brief Compute where the polynomials defining a particular lookup term are located * - * @details We need this to evaluate the length of the subrelations correctly - * @return constexpr size_t + * @param lookup_index Index of the lookup term + * @return Offset in the polynomial array where this lookup term's polynomials begin */ - static constexpr size_t compute_maximum_read_term_degree() + static constexpr size_t compute_lookup_term_polynomial_offset(size_t lookup_index) { - size_t maximum_degree = 0; - for (size_t i = 0; i < READ_TERMS; i++) { - size_t current_degree = 0; - if (Settings::READ_TERM_TYPES[i] == READ_BASIC_TUPLE) { - current_degree = 1; - } else if (Settings::READ_TERM_TYPES[i] == READ_SCALED_TUPLE) { - current_degree = 2; - } else { - current_degree = Settings::READ_TERM_DEGREE; - } - maximum_degree = std::max(current_degree, maximum_degree); + // If it's the starting index, then there is nothing to compute, just get the starting index + if (lookup_index == 0) { + return LOOKUP_TERM_START_POLYNOMIAL_INDEX; + } + + switch (Settings_::LOOKUP_TYPES[lookup_index - 1]) { + case BASIC_LOOKUP: + // If the previous lookup was a basic lookup, add lookup tuple size (it was using just a linear combination + // of polynomials) + return compute_lookup_term_polynomial_offset(lookup_index - 1) + Settings_::LOOKUP_TUPLE_SIZE; + case CUSTOMIZED_LOOKUP: + // In case of customized lookup, no polynomials from the tuple are being used + return compute_lookup_term_polynomial_offset(lookup_index - 1); + default: + bb::assert_failure("Invalid lookup type"); + return SIZE_MAX; } - return maximum_degree; } /** - * @brief Compute the maximum degree of write terms + * @brief Compute where the polynomials defining a particular table term are located * - * @details We need this to evaluate the length of the subrelations correctly - * @return constexpr size_t + * @param table_index Index of the table term + * @return Offset in the polynomial array where this table term's polynomials begin */ - static constexpr size_t compute_maximum_write_term_degree() + static constexpr size_t compute_table_term_polynomial_offset(size_t table_index) { - size_t maximum_degree = 0; - for (size_t i = 0; i < WRITE_TERMS; i++) { - size_t current_degree = 0; - if (Settings::WRITE_TERM_TYPES[i] == WRITE_BASIC_TUPLE) { - current_degree = 1; - } else { - current_degree = Settings::WRITE_TERM_DEGREE; - } - maximum_degree = std::max(current_degree, maximum_degree); + // If it's the starting index, then we need to find out how many polynomials were taken by lookup terms + if (table_index == 0) { + return compute_lookup_term_polynomial_offset(NUM_LOOKUP_TERMS); + } + + switch (Settings_::TABLE_TYPES[table_index - 1]) { + case BASIC_TABLE: + // If the previous lookup was a basic table, add lookup tuple size (it was using just a linear combination + // of polynomials) + return compute_table_term_polynomial_offset(table_index - 1) + Settings_::LOOKUP_TUPLE_SIZE; + case CUSTOMIZED_TABLE: + // In case of customized table, no polynomials from the tuple are being used + return compute_table_term_polynomial_offset(table_index - 1); + default: + 
bb::assert_failure("Invalid lookup type"); + return SIZE_MAX; } - return maximum_degree; } +}; + +// clang-format off +/** + * @brief Concept defining the requirements for the Settings struct used to configure the GenericLookupRelationImpl + * + * @details This is the concept that should be satisfied by lookup settings. As the AVM instantiates many lookup relations + * (+200), enforcing this concept hurts compilation times. Thus, we only use this concept for documentation purposes. + */ +template +concept GenericLookupSettings = requires { + // We allow looking up multiple items per row from a variable number of table columns. These values are not + // bound to the real number of columns the lookup operates on. We allow looking up virtual columns (i.e., + // combinations of columns) from virtual table columns (i.e., combinations of table columns). + requires std::is_same_v; + requires std::is_same_v; + + // An array defining the types of the lookups performed. They can be BASIC_LOOKUP or CUSTOMIZED_LOOKUP + requires std::is_same_v>; + // An array defining the types of the tables used. They can be BASIC_TABLE or CUSTOMIZED_TABLE + requires std::is_same_v>; + // An array specifying the degree of the lookup terms + requires std::is_same_v>; + // An array specifying the degree of the table terms + requires std::is_same_v>; + + requires std::is_same_v; // Number of columns to batch for basic lookups + + // Degree of the polynomial expression indicating whether the inverse polynomial exists at a given row + requires std::is_same_v; + + // Settings also require the following methods, but some of them are templated, so we can't check them here. + // 1) Settings::inverse_polynomial_is_computed_at_row(const AllValues& row), method to compute whether the inverse polynomial should be computed at a given row + // 2) Settings::compute_inverse_exists(const AllEntities& in), method to compute the value of the inverse_exists polynomial at a given row + // 3) Settings::template compute_lookup_term(const AllEntities&, const Parameters&), method to compute the lookup term at a given index + // 4) Settings::template compute_table_term(const AllEntities&, const Parameters&), method to compute the table term at a given index + // 5) Settings::get_nonconst_entities(AllEntities&), method to extract non constant references to the columns used in the relation + // 6) Settings::get_const_entities(const AllEntities&), method to extract constant references to the columns used in the relation +}; + +/** + * @brief Generic implementation of a log-derivative based lookup relation + * + * @details The following is a generic implementation of a log-derivative based lookup relation that allows the + * implementor to highly customize the lookup operations performed. For ease of use, the struct implements a + * default lookup argument with column batching, see below for more details. + * + * The implementor is expected to provide two template parameters: + * - FF_: the base field over which the relation is defined + * - Settings: a struct that defines parameters and methods that allow the customization of the lookup relation. + * + * Write \f$f_1, \ldots, f_n\f$ for the columns to be looked up, and \f$t_1, \ldots, t_m\f$ for the table columns. 
+ * The relation implements the log-derivative lookup argument for two cases:
+ * - BASIC_LOOKUP/BASIC_TABLE: LOOKUP_TUPLE_SIZE := n = m and we wish to look up the multiset
+ * \f$\{(f_1(x), \ldots, f_n(x)) : x \in H_N\}\f$, where \f$H_N\f$ is the hypercube of size N, from the table
+ * \f$\{(t_1(y), \ldots, t_n(y)) : y \in H_N\}\f$. In this case, we perform the lookup by batching together
+ * the \f$f_i\f$'s and the \f$t_i\f$'s: we define \f$f(x, Y) = \sum_i f_i(x) \cdot Y^i\f$,
+ * \f$t(x, Y) = \sum_i t_i(x) \cdot Y^i\f$, and we check the existence of a function
+ * \f$\text{counts} : H_N \rightarrow F\f$ such that
+ * \f[
+ * \sum_{x \in H_N} \frac{1}{\gamma - f(x, \beta)} = \sum_{x \in H_N} \frac{\text{counts}(x)}{\gamma - t(x, \beta)}
+ * \f]
+ * - CUSTOMIZED_LOOKUP/CUSTOMIZED_TABLE: We allow looking up values that are computed arbitrarily from
+ * \f$\{f_1, \ldots, f_n\}\f$ from values that are computed arbitrarily (and possibly in a different way)
+ * from \f$\{t_1, \ldots, t_m\}\f$.
+ *
+ * In both cases, we rephrase the equation check in terms of two relations:
+ * 1. \f[
+ * I(x) \cdot \prod_{i=1}^{\text{NUM_LOOKUP_TERMS}} \text{lookup_entry}_i(x) \cdot
+ * \prod_{i=1}^{\text{NUM_TABLE_TERMS}} \text{table_entry}_i(x) - \text{inverse_exists}(x) = 0
+ * \f]
+ * 2. \f[
+ * \sum_{i=1}^{\text{NUM_LOOKUP_TERMS}} \text{lookup_entry_predicate}_i(x) \cdot \frac{1}{\text{lookup_entry}_i(x)}
+ * - \sum_{i=1}^{\text{NUM_TABLE_TERMS}} \text{table_entry_predicate}_i(x) \cdot
+ * \text{lookup_read_count}_i(x) \cdot \frac{1}{\text{table_entry}_i(x)}
+ * \f]
+ *
+ * The first relation ensures that the polynomial \f$I\f$ represents the inverse of the product of the entries to be
+ * looked up and the table entries. As this polynomial doesn't need to be defined everywhere, we set the result
+ * of the multiplication to be equal to the value of another polynomial: inverse_exists, which is set to 1 only
+ * if the inverse must be computed. Note that relation 1) is *independent*: it must be satisfied at every row
+ * in the trace.
+ *
+ * The second relation is a *dependent* relation: it is satisfied only when its values are summed over the entire trace.
+ * The result of the sum is the log-derivative expression that bears witness to the validity of the lookup. Note
+ * that the lookup and table entries are multiplied by predicates that enable specifying which lookup/table
+ * entries the prover is allowed to use at any given row.
+ *
+ * The degrees of the above relations are:
+ * 1. The degree of relation 1) is \f$\max(1 + \sum \deg(\text{lookup_entries}) + \sum \deg(\text{table_entries}), \deg(\text{inverse_exists}))\f$
+ * 2. The degree of relation 2) is \f$3 + M\f$, where \f$M = \max_i(\sum \deg(\text{lookup_entries}) + \sum \deg(\text{table_entries}) - \deg(\text{term}_i))\f$
+ * and \f$\text{term}_i\f$ ranges over all lookup and table terms. This is because we compute the inverses as:
+ * \f[
+ * \frac{1}{\text{table_entry}_i(x)} = I(x) \cdot \prod_{j \neq i} \text{table_entry}_j(x) \cdot
+ * \prod_{j} \text{lookup_entry}_j(x)
+ * \f]
+ *
+ * @note The predicates involved in relation 2) are assumed to have been constrained to be boolean outside this relation.
+ * +*/ +// clang-format on +template class GenericLookupRelationImpl { + public: + using FF = FF_; + using PolynomialStructure = LookupPolynomialStructure; + + static constexpr size_t NUM_LOOKUP_TERMS = Settings::NUM_LOOKUP_TERMS; + static constexpr size_t NUM_TABLE_TERMS = Settings::NUM_TABLE_TERMS; /** - * @brief Compute the degree of of the product of read terms + * When performing a basic lookup, we batch columns for efficiency. This constant represents the number of columns + * to be batched together. For example, it would be 1 for a range constraint lookup, 3 for a XOR lookup. * - * @details The degree of the inverse polynomial check subrelation is dependent on this value + * @note For simplicity of implementation, we assume that all basic lookups use the same tuple size. + */ + static constexpr size_t LOOKUP_TUPLE_SIZE = Settings::LOOKUP_TUPLE_SIZE; + + /** + * @brief Compute the degree of the product of lookup terms * - * @return constexpr size_t + * @return Accumulated degree of all lookup terms */ - static constexpr size_t compute_read_term_product_degree() + static constexpr size_t compute_lookup_term_product_degree() { size_t accumulated_degree = 0; - for (size_t i = 0; i < READ_TERMS; i++) { + for (size_t i = 0; i < NUM_LOOKUP_TERMS; i++) { size_t current_degree = 0; - if (Settings::READ_TERM_TYPES[i] == READ_BASIC_TUPLE) { + switch (Settings::LOOKUP_TYPES[i]) { + case BASIC_LOOKUP: current_degree = 1; - } else if (Settings::READ_TERM_TYPES[i] == READ_SCALED_TUPLE) { - current_degree = 2; - } else { - current_degree = Settings::READ_TERM_DEGREE; + break; + case CUSTOMIZED_LOOKUP: + current_degree = Settings::LOOKUP_TERM_DEGREES[i]; + break; + default: + bb::assert_failure("Invalid lookup type"); } accumulated_degree += current_degree; } @@ -124,341 +281,305 @@ template class GenericLookupRelationImpl { } /** - * @brief Compute the degree of of the product of write terms + * @brief Compute the degree of the product of table terms * - * @details The degree of the inverse polynomial check subrelation is dependent on this value - * - * @return constexpr size_t + * @return Accumulated degree of all table terms */ - static constexpr size_t compute_write_term_product_degree() + static constexpr size_t compute_table_term_product_degree() { size_t accumulated_degree = 0; - for (size_t i = 0; i < WRITE_TERMS; i++) { + for (size_t i = 0; i < NUM_TABLE_TERMS; i++) { size_t current_degree = 0; - if (Settings::WRITE_TERM_TYPES[i] == WRITE_BASIC_TUPLE) { + switch (Settings::TABLE_TYPES[i]) { + case BASIC_TABLE: current_degree = 1; - } else { - current_degree = Settings::WRITE_TERM_DEGREE; + break; + case CUSTOMIZED_TABLE: + current_degree = Settings::TABLE_TERM_DEGREES[i]; + break; + default: + bb::assert_failure("Invalid table type"); + break; } accumulated_degree += current_degree; } return accumulated_degree; } - // Read term degree is dependent on what type of read term we use - static constexpr size_t READ_TERM_DEGREE = compute_maximum_read_term_degree(); - static_assert(READ_TERM_DEGREE != 0); - - // Write term degree is dependent on what type of write term we use - static constexpr size_t WRITE_TERM_DEGREE = compute_maximum_write_term_degree(); - - static_assert(WRITE_TERM_DEGREE != 0); - - // Compute the length of the inverse polynomial correctness sub-relation MAX(product of terms * inverse, inverse - // exists polynomial) + 1; - static constexpr size_t FIRST_SUBRELATION_LENGTH = - std::max((compute_read_term_product_degree() + compute_write_term_product_degree() + 1), - 
Settings::INVERSE_EXISTS_POLYNOMIAL_DEGREE) + - 1; - - // Compute the length of the log-derived term subrelation MAX(read term * enable read, write term * write count * - // enable write) - static constexpr size_t SECOND_SUBRELATION_LENGTH = std::max(READ_TERM_DEGREE + 1, WRITE_TERM_DEGREE + 2); - // 1 + polynomial degree of this relation - static constexpr size_t LENGTH = std::max(FIRST_SUBRELATION_LENGTH, SECOND_SUBRELATION_LENGTH); - - // The structure of polynomial tuple returned from Settings' functions get_const_entities and get_nonconst_entities - // is the following: - // 1) 1 Polynomial used to contain the inverse product from which we reconstruct individual inverses - // used in the sum - // 2) WRITE_TERMS number of polynomials representing how much each write term has been read - // 3) READ_TERMS number of polynomials enabling the addition of a particular read term in this row (should we lookup - // or not) - // 4) WRITE_TERMS number of polynomials enabling a particular write term in this row (should we add it to - // the lookup table or not) - // 5) For each read term depending on its type (READ_BASIC_TUPLE, READ_SCALED_TUPLE or READ_ARBITRARY): - // 1. In case of basic tuple LOOKUP_TUPLE_SIZE polynomials the combination of whose values in a row is supposed to - // represent the looked up entry - // 2. In case of scaled tuple there are LOOKUP_TUPLE_SIZE previous accumulator polynomials, LOOKUP_TUPLE_SIZE - // scaling polynomials and LOOKUP_TUPLE_SIZE current accumulator polynomials. The tuple is comprised of values - // (current_accumulator-scale*previous_accumulator) - // 3. In the arbitrary case the are no additional - // polynomials, because the logic is completely decided in the settings - // 6) For each write term depending on its type (READ_BASIC_TUPLE or READ_ARBITRARY): - // 1. In case of basic tuple LOOKUP_TUPLE_SIZE polynomials the combination of whose values in a row is supposed to - // represent the entry written into the lookup table - // 2. In the arbitrary case the are no additional write term polynomials, - // because the logic is completely decided in the settings - static constexpr size_t INVERSE_POLYNOMIAL_INDEX = 0; - static constexpr size_t LOOKUP_READ_COUNT_START_POLYNOMIAL_INDEX = 1; - static constexpr size_t LOOKUP_READ_TERM_PREDICATE_START_POLYNOMIAL_INDEX = - LOOKUP_READ_COUNT_START_POLYNOMIAL_INDEX + WRITE_TERMS; - static constexpr size_t LOOKUP_WRITE_TERM_PREDICATE_START_POLYNOMIAL_INDEX = - LOOKUP_READ_TERM_PREDICATE_START_POLYNOMIAL_INDEX + READ_TERMS; - static constexpr size_t LOOKUP_READ_PREDICATE_START_POLYNOMIAL_INDEX = - LOOKUP_WRITE_TERM_PREDICATE_START_POLYNOMIAL_INDEX + WRITE_TERMS; - - static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ - LENGTH, // inverse polynomial correctness sub-relation - LENGTH // log-derived terms subrelation - }; /** - * @brief We apply the power polynomial only to the first subrelation + * @brief Compute the degree of the second subrelation * - *@details The first subrelation establishes correspondence between the inverse polynomial elements and the terms. - *The second relation computes the inverses of individual terms, which are then summed up with sumcheck + * @details Iterate over all terms and compute the maximum of the sum of the degree of all terms minus the degree of + * the term we are currently looking at. The degree of the subrelation is the maximum plus 3 to account for the + * inverse polynomial, the read count, and the table term predicate. 
* */ + static constexpr size_t compute_second_subrelation_degree() + { + // Account for inverse polynomial, read count, and table term predicate + constexpr size_t ADDITIONAL_DEGREE = 3; + constexpr size_t TOTAL_TERM_PRODUCT_DEGREE = + compute_lookup_term_product_degree() + compute_table_term_product_degree(); + + size_t max_degree = 0; + for (size_t i = 0; i < NUM_LOOKUP_TERMS; i++) { + size_t current_degree = 0; + switch (Settings::LOOKUP_TYPES[i]) { + case BASIC_LOOKUP: + current_degree = 1; + break; + case CUSTOMIZED_LOOKUP: + current_degree = Settings::LOOKUP_TERM_DEGREES[i]; + break; + default: + bb::assert_failure("Invalid lookup type"); + } + size_t adjusted_degree = TOTAL_TERM_PRODUCT_DEGREE - current_degree; + max_degree = std::max(max_degree, adjusted_degree); + } + for (size_t i = 0; i < NUM_TABLE_TERMS; i++) { + size_t current_degree = 0; + switch (Settings::TABLE_TYPES[i]) { + case BASIC_TABLE: + current_degree = 1; + break; + case CUSTOMIZED_TABLE: + current_degree = Settings::TABLE_TERM_DEGREES[i]; + break; + default: + bb::assert_failure("Invalid table type"); + break; + } + size_t adjusted_degree = TOTAL_TERM_PRODUCT_DEGREE - current_degree; + max_degree = std::max(max_degree, adjusted_degree); + } + return max_degree + ADDITIONAL_DEGREE; + } + + // (Sub)relation lengths: equal to 1 + relation degree + static constexpr size_t LOOKUP_TERM_ACCUMULATED_DEGREE = compute_lookup_term_product_degree(); + static constexpr size_t TABLE_TERM_ACCUMULATED_DEGREE = compute_table_term_product_degree(); + static_assert(LOOKUP_TERM_ACCUMULATED_DEGREE > 0); + static_assert(TABLE_TERM_ACCUMULATED_DEGREE > 0); + + static constexpr size_t FIRST_RELATION_PARTIAL_LENGTH = + std::max(LOOKUP_TERM_ACCUMULATED_DEGREE + TABLE_TERM_ACCUMULATED_DEGREE + 1, + Settings::INVERSE_EXISTS_POLYNOMIAL_DEGREE) + + 1; // inverse polynomial correctness sub-relation + static constexpr size_t SECOND_RELATION_PARTIAL_LENGTH = + compute_second_subrelation_degree() + 1; // log-derived terms sub-relation + static constexpr size_t LENGTH = std::max(FIRST_RELATION_PARTIAL_LENGTH, SECOND_RELATION_PARTIAL_LENGTH); + + // We use the max of the subrelation lengths because the inverses of lookup/table terms must be used in both + // subrelations + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ LENGTH, LENGTH }; + + // The first subrelation must be satisfied at every row. 
+ // The second subrelation must be satisfied when summed across the entire trace static constexpr std::array SUBRELATION_LINEARLY_INDEPENDENT = { true, false }; /** * @brief Check if we need to compute the inverse polynomial element value for this row - * @details This proxies to a method in the Settings class * + * @tparam AllValues Type containing all polynomial values at a given row * @param row All values at row + * @return true if the inverse polynomial should be computed at this row, false otherwise */ template static bool operation_exists_at_row(const AllValues& row) - { return Settings::inverse_polynomial_is_computed_at_row(row); } /** - * @brief Get the inverse permutation polynomial (needed to compute its value) + * @brief Get the inverse permutation polynomial * + * @details This method needs to return a non-const reference because it's used to compute the value of the inverse + * polynomial + * + * @tparam AllEntities Type containing all polynomial entities + * @param in All entities + * @return Non-const reference to the inverse polynomial */ template static auto& get_inverse_polynomial(AllEntities& in) { - // WIRE containing the inverse of the product of terms at this row. Used to reconstruct individual inversed - // terms - return std::get(Settings::get_nonconst_entities(in)); + return std::get(Settings::get_nonconst_entities(in)); } /** - * @brief Get selector/wire switching on(1) or off(0) inverse computation + * @brief Get selector/wire switching on (1) or off (0) inverse computation * + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam AllEntities Type containing all polynomial entities + * @param in All entities + * @return Accumulator value indicating whether inverse should be computed (1) or not (0) */ template static Accumulator compute_inverse_exists(const AllEntities& in) { - // A lookup could be enabled by one of several selectors or witnesses, so we want to give as much freedom as // possible to the implementor return Settings::template compute_inverse_exists(in); } /** - * @brief Returns the number of times a particular value is written (how many times it is being looked up) + * @brief Get the number of times a particular table value has been looked up * - * @details Lookup read counts should be independent columns, so there is no need to call a separate function + * @details We assume lookup read counts are independent columns and therefore do not allow customization of this + * method to the implementor. 
* - * @tparam Accumulator - * @tparam index The index of the write predicate to which this count belongs - * @tparam AllEntities - * @param in - * @return Accumulator + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam index Index of the table term (must be less than NUM_TABLE_TERMS) + * @tparam AllEntities Type containing all polynomial entities + * @param in All entities + * @return Accumulator containing the read count for the specified table term */ template static Accumulator lookup_read_counts(const AllEntities& in) { - static_assert(index < WRITE_TERMS); + static_assert(index < NUM_TABLE_TERMS); using View = typename Accumulator::View; - return Accumulator( - View(std::get(Settings::get_const_entities(in)))); + return Accumulator(View( + std::get(Settings::get_const_entities(in)))); } + /** - * @brief Compute if the value from the first set exists in this row + * @brief Extract predicate enabling looking up a given lookup term at this row * - * @tparam read_index Kept for compatibility with lookups, behavior doesn't change + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam lookup_index Index of the lookup term (must be less than NUM_LOOKUP_TERMS) + * @tparam AllEntities Type containing all polynomial entities + * @param in All entities + * @return Accumulator containing the predicate for the specified lookup term */ - template - static Accumulator compute_read_term_predicate(const AllEntities& in) + template + static Accumulator get_lookup_term_predicate(const AllEntities& in) { - static_assert(read_index < READ_TERMS); + static_assert(lookup_index < NUM_LOOKUP_TERMS); using View = typename Accumulator::View; - // The selector/wire value that determines that an element from the first set needs to be included. Can be - // different from the wire used in the write part. - return Accumulator(View(std::get( + return Accumulator(View(std::get( Settings::get_const_entities(in)))); } /** - * @brief Compute if the value from the second set exists in this row + * @brief Extract predicate enabling looking up a given table term at this row * - * @tparam write_index Kept for compatibility with lookups, behavior doesn't change + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam table_index Index of the table term (must be less than NUM_TABLE_TERMS) + * @tparam AllEntities Type containing all polynomial entities + * @param in All entities + * @return Accumulator containing the predicate for the specified table term */ - template - static Accumulator compute_write_term_predicate(const AllEntities& in) + template + static Accumulator get_table_term_predicate(const AllEntities& in) { - static_assert(write_index < WRITE_TERMS); + static_assert(table_index < NUM_TABLE_TERMS); using View = typename Accumulator::View; - // The selector/wire value that determines that an element from the first set needs to be included. Can be - // different from the wire used in the write part. - return Accumulator(View(std::get( + return Accumulator(View(std::get( Settings::get_const_entities(in)))); } /** - * @brief Compute where the polynomials defining a particular read term are located - * - * @details We pass polynomials involved in read an write terms from settings as a tuple of references. However, - * depending on the type of read term different number of polynomials can be used to compute it. 
So we need to - * compute the offset in the tuple iteratively - * - * @param read_index Index of the read term - * @return constexpr size_t - */ - static constexpr size_t compute_read_term_polynomial_offset(size_t read_index) - { - // If it's the starting index, then there is nothing to compute, just get the starting index - if (read_index == 0) { - return LOOKUP_READ_PREDICATE_START_POLYNOMIAL_INDEX; - } - - // If the previous term used basic tuple lookup, add lookup tuple size (it was using just a linear combination - // of polynomials) - if (Settings::READ_TERM_TYPES[read_index - 1] == READ_BASIC_TUPLE) { - return compute_read_term_polynomial_offset(read_index - 1) + LOOKUP_TUPLE_SIZE; - } - - // If the previous term used scaled tuple lookup, add lookup tuple size x 3 (it was using just a linear - // combination of differences (current - previous⋅scale)) - - if (Settings::READ_TERM_TYPES[read_index - 1] == READ_SCALED_TUPLE) { - return compute_read_term_polynomial_offset(read_index - 1) + 3 * LOOKUP_TUPLE_SIZE; - } - // In case of arbitrary read term, no polynomials from the tuple are being used - if (Settings::READ_TERM_TYPES[read_index - 1] == READ_ARBITRARY) { - return compute_read_term_polynomial_offset(read_index - 1); - } - return SIZE_MAX; - } - - /** - * @brief Compute where the polynomials defining a particular write term are located - * - * @details We pass polynomials involved in read an write terms from settings as a tuple of references. However, - * depending on the type of term different number of polynomials can be used to compute it. So we need to - * compute the offset in the tuple iteratively - * - * @param write_index Index of the write term - * @return constexpr size_t - */ - static constexpr size_t compute_write_term_polynomial_offset(size_t write_index) - { - // If it's the starting index, then we need to find out how many polynomials were taken by read terms - if (write_index == 0) { - return compute_read_term_polynomial_offset(READ_TERMS); - } - - // If the previous term used basic tuple lookup, add lookup tuple size (it was using just a linear combination - // of polynomials) - if (Settings::WRITE_TERM_TYPES[write_index - 1] == WRITE_BASIC_TUPLE) { - return compute_write_term_polynomial_offset(write_index - 1) + LOOKUP_TUPLE_SIZE; - } - - // In case of arbitrary write term, no polynomials from the tuple are being used - if (Settings::WRITE_TERM_TYPES[write_index - 1] == WRITE_ARBITRARY) { - return compute_write_term_polynomial_offset(write_index - 1); - } - return SIZE_MAX; - } - - /** - * @brief Compute the value of a single item in the set + * @brief Compute the value of the lookup term at a given index * - * @details Computes the polynomial \gamma + \sum_{i=0}^{num_columns}(column_i*\beta^i), so the tuple of columns is - * in the first set - * - * @tparam read_index The chosen polynomial relation - * - * @param params Used for beta and gamma + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam lookup_index Index of the lookup term to compute + * @tparam AllEntities Type containing all polynomial entities + * @tparam Parameters Type containing relation parameters (beta, gamma) + * @param in All entities + * @param params Relation parameters + * @return Accumulator containing the computed lookup term value */ - template - static Accumulator compute_read_term(const AllEntities& in, const Parameters& params) + template + static Accumulator compute_lookup_term(const AllEntities& in, const Parameters& params) { using View = typename 
Accumulator::View; - static_assert(read_index < READ_TERMS); - constexpr size_t start_polynomial_index = compute_read_term_polynomial_offset(read_index); - if constexpr (Settings::READ_TERM_TYPES[read_index] == READ_BASIC_TUPLE) { - // Retrieve all polynomials used - const auto all_polynomials = Settings::get_const_entities(in); + static_assert(lookup_index < NUM_LOOKUP_TERMS); + constexpr size_t start_polynomial_index = + PolynomialStructure::compute_lookup_term_polynomial_offset(lookup_index); + const FF beta = params.beta; + const FF gamma = params.gamma; - auto result = Accumulator(0); + if constexpr (Settings::LOOKUP_TYPES[lookup_index] == BASIC_LOOKUP) { + // In this case we batch all the lookup columns pertaining to this lookup term using the randomness beta + Accumulator result = Accumulator(0); - // Iterate over tuple and sum as a polynomial over beta - bb::constexpr_for( - [&]() { result = (result * params.beta) + View(std::get(all_polynomials)); }); - const auto& gamma = params.gamma; - return result + gamma; - } else if constexpr (Settings::READ_TERM_TYPES[read_index] == READ_SCALED_TUPLE) { - // Retrieve all polynomials used const auto all_polynomials = Settings::get_const_entities(in); + bb::constexpr_for( + [&]() { result = (result * beta) + View(std::get(all_polynomials)); }); - auto result = Accumulator(0); - // Iterate over tuple and sum as a polynomial over beta - bb::constexpr_for([&]() { - result = (result * params.beta) + View(std::get(all_polynomials)) - - View(std::get(all_polynomials)) * View(std::get(all_polynomials)); - }); - const auto& gamma = params.gamma; return result + gamma; + } else if constexpr (Settings::LOOKUP_TYPES[lookup_index] == CUSTOMIZED_LOOKUP) { + return Settings::template compute_lookup_term(in, params); } else { - - return Settings::template compute_read_term(in, params); + bb::assert_failure("Invalid lookup type"); + return Accumulator(0); } } /** - * @brief Compute the value of a single item in the set - * - * @details Computes the polynomial \gamma + \sum_{i=0}^{num_columns}(column_i*\beta^i), so the tuple of columns is - * in the second set + * @brief Compute the value of a table term at a given index * - * @tparam write_index Kept for compatibility with lookups, behavior doesn't change - * - * @param params Used for beta and gamma + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam table_index Index of the table term to compute + * @tparam AllEntities Type containing all polynomial entities + * @tparam Parameters Type containing relation parameters (beta, gamma) + * @param in All entities + * @param params Relation parameters + * @return Accumulator containing the computed table term value */ - template - static Accumulator compute_write_term(const AllEntities& in, const Parameters& params) + template + static Accumulator compute_table_term(const AllEntities& in, const Parameters& params) { + using View = typename Accumulator::View; - static_assert(write_index < WRITE_TERMS); + static_assert(table_index < NUM_TABLE_TERMS); + constexpr size_t start_polynomial_index = + PolynomialStructure::compute_table_term_polynomial_offset(table_index); + const FF beta = params.beta; + const FF gamma = params.gamma; - using View = typename Accumulator::View; - constexpr size_t start_polynomial_index = compute_write_term_polynomial_offset(write_index); + if constexpr (Settings::TABLE_TYPES[table_index] == BASIC_TABLE) { + // In this case we batch all the lookup columns pertaining to this lookup term using the randomness beta 
+ Accumulator result = Accumulator(0); - if constexpr (Settings::WRITE_TERM_TYPES[write_index] == WRITE_BASIC_TUPLE) { - // Retrieve all polynomials used const auto all_polynomials = Settings::get_const_entities(in); - auto result = Accumulator(0); - - // Iterate over tuple and sum as a polynomial over beta bb::constexpr_for( - [&]() { result = (result * params.beta) + View(std::get(all_polynomials)); }); - const auto& gamma = params.gamma; + [&]() { result = (result * beta) + View(std::get(all_polynomials)); }); + return result + gamma; + } else if constexpr (Settings::TABLE_TYPES[table_index] == CUSTOMIZED_TABLE) { + return Settings::template compute_table_term(in, params); } else { - // Sometimes we construct lookup tables on the fly from intermediate - - return Settings::template compute_write_term(in, params); + bb::assert_failure("Invalid table type"); + return Accumulator(0); } } /** - * @brief Expression for generic log-derivative-based set permutation. - * @param accumulator transformed to `evals + C(in(X)...)*scaling_factor` - * @param in an std::array containing the fully extended Accumulator edges. - * @param relation_params contains beta, gamma, and public_input_delta, .... - * @param scaling_factor optional term to scale the evaluation before adding to evals. + * @brief Compute generic log-derivative lookup subrelation accumulation + * @details The generic log-derivative lookup relation consists of two subrelations. The first demonstrates that the + * inverse polynomial I has been computed correctly. The second establishes the correctness of the lookups + * themselves based on the log-derivative lookup argument. Note that the latter subrelation is "linearly dependent" + * in the sense that it establishes that a sum across all rows of the execution trace is zero, rather than that + * some expression holds independently at each row. Accordingly, this subrelation is not multiplied by a scaling + * factor at each accumulation step. See the documentation for GenericLookupRelationImpl for the definition of the + * subrelations.
+ * + * @tparam ContainerOverSubrelations Container type for accumulating subrelation contributions + * @tparam AllEntities Type containing all polynomial entities + * @tparam Parameters Type containing relation parameters + * @param accumulator Transformed to `evals + C(in(X)...)*scaling_factor` + * @param in An std::array containing the fully extended Accumulator edges + * @param params Contains beta, gamma relation parameters + * @param scaling_factor Optional term to scale the evaluation before adding to evals */ template static void accumulate(ContainerOverSubrelations& accumulator, @@ -466,14 +587,16 @@ template class GenericLookupRelationImpl { const Parameters& params, const FF& scaling_factor) { - accumulate_logderivative_lookup_subrelation_contributions>( - accumulator, in, params, scaling_factor); + _accumulate_logderivative_subrelation_contributions, + ContainerOverSubrelations, + AllEntities, + Parameters, + false>(accumulator, in, params, scaling_factor); } }; template using GenericLookupRelation = Relation>; -template using GenericLookup = GenericLookupRelationImpl; - } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/generic_permutation/GENERIC_PERMUTATION_README.md b/barretenberg/cpp/src/barretenberg/relations/generic_permutation/GENERIC_PERMUTATION_README.md new file mode 100644 index 000000000000..f0ca284e7b79 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/relations/generic_permutation/GENERIC_PERMUTATION_README.md @@ -0,0 +1,13 @@ +# Generic Log-Derivative Permutation + +The file `generic_permutation_relation.hpp` implements a generic log-derivative permutation argument. The relation `GenericPermutationRelationImpl` is the specialization of `GenericLookupRelationImpl` to the case in which all read counts are equal to $1$. + +More precisely, the general log-derivative expression used by `GenericLookupRelationImpl` + +$$\sum_i \left[ q_{L,i} \cdot \frac{1}{L_i} - c_i \cdot q_{T,i} \cdot \frac{1}{T_i} \right] = 0$$ + +is specialized to: + +$$\sum_i \left[ q_{L,i} \cdot \frac{1}{L_i} - q_{T,i} \cdot \frac{1}{T_i} \right] = 0$$ + +If the relation is satisfied it means that all the table terms have been looked up exactly once, i.e. $\{ L_i \} = \{ T_i \}$ lookup and table terms are permutations of each other. 
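To make the specialized identity above concrete, the following is a minimal standalone sketch (not barretenberg code; it assumes a toy prime field, a fixed challenge gamma, and predicates equal to 1 on every row) showing that the sum vanishes exactly when the lookup terms and table terms form the same multiset:

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

// Toy prime field F_p used only for this illustration.
static constexpr uint64_t P = 101;
uint64_t add_p(uint64_t a, uint64_t b) { return (a + b) % P; }
uint64_t mul_p(uint64_t a, uint64_t b) { return (a * b) % P; }
uint64_t pow_p(uint64_t b, uint64_t e) { uint64_t r = 1; while (e) { if (e & 1) { r = mul_p(r, b); } b = mul_p(b, b); e >>= 1; } return r; }
uint64_t inv_p(uint64_t a) { return pow_p(a, P - 2); } // Fermat inverse, requires a != 0 mod P

int main()
{
    // Two columns we claim are permutations of one another, plus a challenge gamma.
    std::array<uint64_t, 4> lookup_terms = { 3, 7, 12, 5 };
    std::array<uint64_t, 4> table_terms = { 12, 5, 3, 7 }; // same multiset, different order
    uint64_t gamma = 42;

    // Specialized log-derivative sum: sum_i 1/(gamma + L_i) - 1/(gamma + T_i).
    uint64_t sum = 0;
    for (std::size_t i = 0; i < lookup_terms.size(); ++i) {
        sum = add_p(sum, inv_p(add_p(gamma, lookup_terms[i])));
        sum = add_p(sum, P - inv_p(add_p(gamma, table_terms[i])));
    }
    // Vanishes because every 1/(gamma + T_i) is cancelled by exactly one 1/(gamma + L_j).
    std::cout << "sum = " << sum << " (0 means the two sets are permutations)\n";
    return 0;
}
```

Replacing any single entry of `table_terms` makes the sum nonzero for almost all choices of gamma, which is the soundness intuition the relation relies on.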
diff --git a/barretenberg/cpp/src/barretenberg/relations/generic_permutation/generic_permutation_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/generic_permutation/generic_permutation_relation.hpp index f9c92664f2fb..7dd9ee1d1360 100644 --- a/barretenberg/cpp/src/barretenberg/relations/generic_permutation/generic_permutation_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/generic_permutation/generic_permutation_relation.hpp @@ -1,16 +1,9 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Completed, auditors: [Federico], commit: } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== -/** - * @file generic_permutation_relation.hpp - * @author Rumata888 - * @brief This file contains the template for the generic permutation that can be specialized to enforce various - * permutations (for explanation on how to define them, see "relation_definer.hpp") - * - */ #pragma once #include #include @@ -21,31 +14,79 @@ #include "barretenberg/relations/relation_types.hpp" namespace bb { + /** - * @brief Specifies positions of elements in the tuple of entities received from methods in the Settings class + * @brief Specialization of the polynomial structure required for the lookup argument to the case of the permutation + * argument * + * @details This class works exactly as LookupPolynomialStructure, but with fixed parameters for the permutation + * argument: NUM_LOOKUP_TERMS = NUM_TABLE_TERMS = 1 and all terms are of type BASIC (and therefore they have degree 1) */ -enum GenericPermutationSettingIndices { - INVERSE_POLYNOMIAL_INDEX, /* The index of the inverse polynomial*/ - FIRST_PERMUTATION_SET_ENABLE_POLYNOMIAL_INDEX, /* The index of the polynomial that adds an element from the first - set to the sum*/ - SECOND_PERMUTATION_SET_ENABLE_POLYNOMIAL_INDEX, /* The index of the polynomial that adds an element from the second - set to the sum*/ - - PERMUTATION_SETS_START_POLYNOMIAL_INDEX, /* The starting index of the polynomials that are used in the permutation - sets*/ +template class PermutationPolynomialStructure { + private: + static constexpr size_t NUM_LOOKUP_TERMS = 1; + static constexpr size_t NUM_TABLE_TERMS = 1; + + static constexpr size_t INVERSE_POLYNOMIAL_INDEX = 0; + static constexpr size_t LOOKUP_TERM_PREDICATE_START_POLYNOMIAL_INDEX = INVERSE_POLYNOMIAL_INDEX + NUM_TABLE_TERMS; + static constexpr size_t TABLE_TERM_PREDICATE_START_POLYNOMIAL_INDEX = + LOOKUP_TERM_PREDICATE_START_POLYNOMIAL_INDEX + NUM_LOOKUP_TERMS; + static constexpr size_t LOOKUP_TERM_START_POLYNOMIAL_INDEX = + TABLE_TERM_PREDICATE_START_POLYNOMIAL_INDEX + NUM_TABLE_TERMS; + static constexpr size_t TABLE_TERM_START_POLYNOMIAL_INDEX = + LOOKUP_TERM_START_POLYNOMIAL_INDEX + Settings::COLUMNS_PER_SET; + + public: + static constexpr size_t get_inverse_polynomial_index() { return INVERSE_POLYNOMIAL_INDEX; } + + static constexpr size_t get_lookup_term_predicate_index() { return LOOKUP_TERM_PREDICATE_START_POLYNOMIAL_INDEX; } + static constexpr size_t get_table_term_predicate_index() { return TABLE_TERM_PREDICATE_START_POLYNOMIAL_INDEX; } + + static constexpr size_t compute_lookup_term_polynomial_offset() { return LOOKUP_TERM_START_POLYNOMIAL_INDEX; } + + static constexpr size_t compute_table_term_polynomial_offset() { return TABLE_TERM_START_POLYNOMIAL_INDEX; } }; +/** + * @brief Implementation of a generic permutation relation + * + * @details 
Implementation of a generic permutation relation that uses a log-derivative argument to prove that elements + * in two columns of the execution trace are equal. The strategy is to use the lookup log-derivate argument with read + * counts (i.e., number of times the lookup terms are looked up) equal to 1. This enforces the sets we are comparing to + * be permutations of one another. The relation is composed of two subrelations, the first is equal to the first + * subrelation in GenericLookupRelationImpl. The second one is the modification of the second subrelation of the generic + * lookup in which we hard-code the read counts to 1: + * \f[ + * \sum_{i=0}^{\text{NUM_LOOKUP_TERMS}} \text{lookup_entry_predicate}_i(x) \cdot \frac{1}{\text{lookup_entry}_i(x)} + * - \sum_{i=0}^{\text{NUM_TABLE_TERMS}} \text{table_entry_predicate}_i(x) \cdot \frac{1}{\text{table_entry}_i(x)} + * \f] + * + * @note The predicates involved in the second subrelation are assumed to have been constrained to be boolean outside + * this relation. + * + */ template class GenericPermutationRelationImpl { public: using FF = FF_; - // Read and write terms counts should stay set to 1 unless we want to permute several columns at once as accumulated - // sets (not as tuples). - static constexpr size_t READ_TERMS = 1; - static constexpr size_t WRITE_TERMS = 1; - // 1 + polynomial degree of this relation - static constexpr size_t LENGTH = READ_TERMS + WRITE_TERMS + 3; // 5 + using PolynomialStructure = PermutationPolynomialStructure; + // The term counts should stay set to 1 unless we want to permute several columns at once as accumulated + // sets (not as tuples). + static constexpr size_t NUM_LOOKUP_TERMS = 1; + static constexpr size_t NUM_TABLE_TERMS = 1; + + // Specialization of the calculation of the length for the generic lookup relation to the permutation relation; note + // that the second subrelation degree is one lower than the one in the lookup argument because there is not read + // count polynomial + static constexpr size_t FIRST_RELATION_PARTIAL_LENGTH = std::max(NUM_LOOKUP_TERMS + NUM_TABLE_TERMS + 1, + Settings::INVERSE_EXISTS_POLYNOMIAL_DEGREE) + + 1; // inverse polynomial correctness sub-relation + static constexpr size_t SECOND_RELATION_PARTIAL_LENGTH = + NUM_LOOKUP_TERMS + NUM_TABLE_TERMS + 2; // log-derived terms sub-relation + static constexpr size_t LENGTH = std::max(FIRST_RELATION_PARTIAL_LENGTH, SECOND_RELATION_PARTIAL_LENGTH); + + // We use the max of the subrelation lengths because the inverses of lookup/table terms must be used in both + // subrelations static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ LENGTH, // inverse polynomial correctness sub-relation LENGTH // log-derived terms subrelation @@ -54,17 +95,18 @@ template class GenericPermutationRelationImpl /** * @brief We apply the power polynomial only to the first subrelation * - *@details The first subrelation establishes correspondence between the inverse polynomial elements and the terms. - *The second relation computes the inverses of individual terms, which are then summed up with sumcheck + * @details The first subrelation establishes correspondence between the inverse polynomial elements and the terms. 
+ * The second relation computes the inverses of individual terms, which are then summed up with sumcheck * */ static constexpr std::array SUBRELATION_LINEARLY_INDEPENDENT = { true, false }; /** * @brief Check if we need to compute the inverse polynomial element value for this row - * @details This proxies to a method in the Settings class * + * @tparam AllValues Type containing all polynomial values at a given row * @param row All values at row + * @return true if the inverse polynomial should be computed at this row, false otherwise */ template static bool operation_exists_at_row(const AllValues& row) @@ -78,131 +120,152 @@ template class GenericPermutationRelationImpl */ template static auto& get_inverse_polynomial(AllEntities& in) { - // WIRE containing the inverse of the product of terms at this row. Used to reconstruct individual inversed - // terms - return std::get(Settings::get_nonconst_entities(in)); + return std::get(Settings::get_nonconst_entities(in)); } /** - * @brief Get selector/wire switching on(1) or off(0) inverse computation - * We turn it on if either of the permutation contribution selectors are active + * @brief Get selector/wire switching on (1) or off (0) inverse computation * + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam AllEntities Type containing all polynomial entities + * @param in All entities + * @return Accumulator value indicating whether inverse should be computed (1) or not (0) */ template static Accumulator compute_inverse_exists(const AllEntities& in) { using View = typename Accumulator::View; - // WIRE/SELECTOR enabling the permutation used in the sumcheck computation. This affects the first - // subrelation Accumulator const& first_set_enabled = Accumulator( - View(std::get(Settings::get_const_entities(in)))); + View(std::get(Settings::get_const_entities(in)))); Accumulator const& second_set_enabled = Accumulator( - View(std::get(Settings::get_const_entities(in)))); + View(std::get(Settings::get_const_entities(in)))); - // This has the truth table of a logical OR + // The following expression (assuming the values are boolean) is the algebraic representation of a logical OR return (first_set_enabled + second_set_enabled - (first_set_enabled * second_set_enabled)); } /** - * @brief Compute if the value from the first set exists in this row + * @brief Extract predicate enabling looking up a given lookup term at this row * - * @tparam read_index Kept for compatibility with lookups, behavior doesn't change + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam lookup_index Index of the lookup term (kept for compatibility with lookups, always 0) + * @tparam AllEntities Type containing all polynomial entities + * @param in All entities + * @return Accumulator containing the predicate for the specified lookup term */ - template - static Accumulator compute_read_term_predicate(const AllEntities& in) + template + static Accumulator get_lookup_term_predicate(const AllEntities& in) { - static_assert(read_index < READ_TERMS); + static_assert(lookup_index < NUM_LOOKUP_TERMS); using View = typename Accumulator::View; // The selector/wire value that determines that an element from the first set needs to be included. Can be // different from the wire used in the write part. 
return Accumulator( - View(std::get(Settings::get_const_entities(in)))); + View(std::get(Settings::get_const_entities(in)))); } /** - * @brief Compute if the value from the second set exists in this row + * @brief Extract predicate enabling looking up a given table term at this row * - * @tparam write_index Kept for compatibility with lookups, behavior doesn't change + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam table_index Index of the table term (kept for compatibility with lookups, always 0) + * @tparam AllEntities Type containing all polynomial entities + * @param in All entities + * @return Accumulator containing the predicate for the specified table term */ - template - static Accumulator compute_write_term_predicate(const AllEntities& in) + template + static Accumulator get_table_term_predicate(const AllEntities& in) { - static_assert(write_index < WRITE_TERMS); + static_assert(table_index < NUM_TABLE_TERMS); using View = typename Accumulator::View; - // The selector/wire value that determines that an element from the second set needs to be included. Can be - // different from the wire used in the read part. return Accumulator( - View(std::get(Settings::get_const_entities(in)))); + View(std::get(Settings::get_const_entities(in)))); } /** - * @brief Compute the value of a single item in the set + * @brief Compute the value of the lookup term at a given index * - * @details Computes the polynomial \gamma + \sum_{i=0}^{num_columns}(column_i*\beta^i), so the tuple of columns is - * in the first set - * - * @tparam read_index Kept for compatibility with lookups, behavior doesn't change - * - * @param params Used for beta and gamma + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam lookup_index Index of the lookup term to compute (kept for compatibility with lookups, always 0) + * @tparam AllEntities Type containing all polynomial entities + * @tparam Parameters Type containing relation parameters (beta, gamma) + * @param in All entities + * @param params Relation parameters + * @return Accumulator containing the computed lookup term value */ - template - static Accumulator compute_read_term(const AllEntities& in, const Parameters& params) + template + static Accumulator compute_lookup_term(const AllEntities& in, const Parameters& params) { using View = typename Accumulator::View; - static_assert(read_index < READ_TERMS); + static_assert(lookup_index < NUM_LOOKUP_TERMS); + constexpr size_t start_polynomial_index = PolynomialStructure::compute_lookup_term_polynomial_offset(); + const FF beta = params.beta; + const FF gamma = params.gamma; + + auto result = Accumulator(0); // Retrieve all polynomials used const auto all_polynomials = Settings::get_const_entities(in); - auto result = Accumulator(0); - // Iterate over tuple and sum as a polynomial over beta - bb::constexpr_for([&]() { result = result * params.beta + View(std::get(all_polynomials)); }); + bb::constexpr_for( + [&]() { result = result * beta + View(std::get(all_polynomials)); }); - const auto& gamma = params.gamma; return result + gamma; } /** - * @brief Compute the value of a single item in the set - * - * @details Computes the polynomial \gamma + \sum_{i=0}^{num_columns}(column_i*\beta^i), so the tuple of columns is - * in the second set - * - * @tparam write_index Kept for compatibility with lookups, behavior doesn't change + * @brief Compute the value of a table term at a given index * - * @param params Used for beta and gamma + * @tparam Accumulator Accumulator 
type for polynomial evaluations + * @tparam table_index Index of the table term to compute (kept for compatibility with lookups, always 0) + * @tparam AllEntities Type containing all polynomial entities + * @tparam Parameters Type containing relation parameters (beta, gamma) + * @param in All entities + * @param params Relation parameters + * @return Accumulator containing the computed table term value */ - template - static Accumulator compute_write_term(const AllEntities& in, const Parameters& params) + template + static Accumulator compute_table_term(const AllEntities& in, const Parameters& params) { using View = typename Accumulator::View; - static_assert(write_index < WRITE_TERMS); - - // Get all used entities - const auto& used_entities = Settings::get_const_entities(in); + static_assert(table_index < NUM_TABLE_TERMS); + constexpr size_t start_polynomial_index = PolynomialStructure::compute_table_term_polynomial_offset(); + const FF beta = params.beta; + const FF gamma = params.gamma; auto result = Accumulator(0); + + // Retrieve all polynomials used + const auto all_polynomials = Settings::get_const_entities(in); + // Iterate over tuple and sum as a polynomial over beta - bb::constexpr_for([&]() { result = result * params.beta + View(std::get(used_entities)); }); + bb::constexpr_for( + [&]() { result = result * beta + View(std::get(all_polynomials)); }); - const auto& gamma = params.gamma; return result + gamma; } /** - * @brief Expression for generic log-derivative-based set permutation. + * @brief Compute generic log-derivative set permutation subrelation accumulation + * @details The generic log-derivative lookup relation consists of two subrelations. The first demonstrates that the + * inverse polynomial I has been computed correctly. The second establishes the correctness of the lookups + * themselves based on the log-derivative lookup argument. Note that the latter subrelation is "linearly dependent" + * in the sense that it establishes that a sum across all rows of the execution trace is zero, rather than that + * some expression holds independently at each row. Accordingly, this subrelation is not multiplied by a scaling + * factor at each accumulation step. See the documentation for GenericPermutationRelationImpl for the definition of + * the subrelations. + * + * @tparam ContainerOverSubrelations Container type for accumulating subrelation contributions + * @tparam AllEntities Type containing all polynomial entities + * @tparam Parameters Type containing relation parameters * @param accumulator transformed to `evals + C(in(X)...)*scaling_factor` * @param in an std::array containing the fully extended Accumulator edges. * @param relation_params contains beta, gamma, and public_input_delta, ....
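As an orientation aid for the accumulation hunk that follows, here is a schematic sketch of the two subrelation contributions in the one-lookup-term, one-table-term case handled by this relation. The function name and signature are assumptions made for illustration; the real code routes through `_accumulate_logderivative_subrelation_contributions` with the entity tuple and settings described above.

```cpp
#include <array>

// Schematic sketch (assumed names, not the real barretenberg signatures) of the two
// subrelation contributions for the 1 lookup term x 1 table term case.
template <typename FF>
void accumulate_one_by_one_case(std::array<FF, 2>& accumulator,
                                const FF& lookup_term,      // gamma + sum_j beta^j * column_j (first set)
                                const FF& table_term,       // gamma + sum_j beta^j * column_j (second set)
                                const FF& inverse,          // I = 1 / (lookup_term * table_term) where enabled, else 0
                                const FF& inverse_exists,   // logical OR of the two enable predicates
                                const FF& lookup_predicate, // enables the lookup-side term at this row
                                const FF& table_predicate,  // enables the table-side term at this row
                                const FF& scaling_factor)
{
    // Subrelation 1: the inverse polynomial is correct wherever it is supposed to exist.
    accumulator[0] += (lookup_term * table_term * inverse - inverse_exists) * scaling_factor;

    // Subrelation 2 (linearly dependent, so no scaling factor): the log-derivative sum.
    // On active rows, I * table_term = 1 / lookup_term and I * lookup_term = 1 / table_term.
    accumulator[1] += lookup_predicate * (inverse * table_term) - table_predicate * (inverse * lookup_term);
}
```

The second contribution receives no scaling factor because, as noted above, it is linearly dependent: only its sum over the whole trace is constrained to be zero.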
@@ -214,15 +277,16 @@ template class GenericPermutationRelationImpl const Parameters& params, const FF& scaling_factor) { - accumulate_logderivative_permutation_subrelation_contributions>( - accumulator, in, params, scaling_factor); + _accumulate_logderivative_subrelation_contributions, + ContainerOverSubrelations, + AllEntities, + Parameters, + true>(accumulator, in, params, scaling_factor); } }; template using GenericPermutationRelation = Relation>; -template using GenericPermutation = GenericPermutationRelationImpl; - } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/logderiv_lookup_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/logderiv_lookup_relation.hpp index 8493223fc061..5810e458cab5 100644 --- a/barretenberg/cpp/src/barretenberg/relations/logderiv_lookup_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/logderiv_lookup_relation.hpp @@ -17,8 +17,77 @@ namespace bb { /** - * @brief Log-derivative lookup relation (LogUp) for tables with up to 3 columns. - * @details See LOGDERIV_LOOKUP_RELATION_README.md for full documentation. + * @brief Log-derivative lookup argument relation for establishing lookup reads from tables with 3 or fewer columns + * + * @details The lookup argument seeks to prove lookups from a column by establishing the following sum: + * \f[ + * \sum_{i=0}^{n-1} q_{\text{logderiv_lookup},i} \cdot \frac{1}{\text{lookup_term}_i} + * - \text{read_count}_i \cdot \frac{1}{\text{table_term}_i} = 0 + * \f] + * where + * \f[ + * \text{table_term} = \text{table_col}_1 + \gamma + \text{table_col}_2 \cdot \beta + * + \text{table_col}_3 \cdot \beta^2 + \text{table_index} \cdot \beta^3 + * \f] + * and + * \f[ + * \text{lookup_term} = \text{derived_table_entry}_1 + \gamma + \text{derived_table_entry}_2 \cdot \beta + * + \text{derived_table_entry}_3 \cdot \beta^2 + \text{table_index} \cdot \beta^3 + * \f] + * with + * \f[ + * \text{derived_table_entry}_i = w_i - \text{col_step_size}_i \cdot w_{i,\text{shift}} + * \f] + * (read note for explanation). + * + * This expression is motivated by taking the derivative of the log of a more conventional grand product style set + * equivalence argument (see e.g. https://eprint.iacr.org/2022/1530.pdf for details). + * + * In practice, we must rephrase this expression in terms of polynomials, one of which is a polynomial \f$I\f$ + * containing (indirectly) the rational functions in the above expression: + * \f$I_i = 1/[(\text{lookup_term}_i) \cdot (\text{table_term}_i)]\f$. This leads to two subrelations. The first + * demonstrates that the inverse polynomial \f$I\f$ is correctly formed. The second is the primary lookup identity, + * where the rational functions are replaced by the use of the inverse polynomial \f$I\f$. These two subrelations can + * be expressed as follows: + * + * Subrelation 1 (Inverse correctness): + * \f[ + * I_i \cdot (\text{lookup_term}_i) \cdot (\text{table_term}_i) - 1 = 0 + * \f] + * + * Subrelation 2 (Lookup identity): + * \f[ + * \sum_{i=0}^{n-1} [q_{\text{logderiv_lookup}} \cdot I_i \cdot \text{table_term}_i + * - \text{read_count}_i \cdot I_i \cdot \text{lookup_term}_i] = 0 + * \f] + * + * To not compute the inverse terms packed in \f$I_i\f$ for indices not included in the sum we introduce a + * witness called inverse_exists, which is zero when either read_count\f$_i\f$ is nonzero (a boolean called + * read_tag) or we have a read gate. This is represented by setting \f$\text{inverse_exists} = 1 - (1 - + * \text{read_tag}) \cdot (1 - \text{is_read_gate})\f$. 
Since is_read_gate is only dependent on selector values, + * we can assume that the verifier can check that it is boolean. However, if read_tag (which is a derived witness), + * is not constrained to be boolean, one can set the inverse_exists to any value when is_read_gate = 0, because + * inverse_exists is a linear function of read_tag then. Thus we have a third subrelation that ensures read_tag is + * a boolean value. + * + * Subrelation 3 (Boolean check): + * \f[ + * \text{read_tag} \cdot \text{read_tag} - \text{read_tag} = 0 + * \f] + * + * Further constraining of read_tags and read_counts is not required, since by tampering read_tags a malicious + * prover can only skip a table_term. This is disadvantageous for the cheating prover as it reduces the size of the + * lookup table. Hence, a malicious prover cannot abuse this to prove an incorrect lookup. + * + * @note Subrelation (2) is "linearly dependent" in the sense that it establishes that a sum across all rows of the + * execution trace is zero, rather than that some expression holds independently at each row. Accordingly, this + * subrelation is not multiplied by a scaling factor at each accumulation step. + * + * @note The "real" table entries must be 'derived' from wire values since instead of storing actual values in wires + * we store successive accumulators, the differences of which are equal to entries in a table. This is an efficiency + * trick for the case where entries of the "real" table correspond to limbs of a value too large to be supported by + * the lookup table. This way we avoid using additional gates to reconstruct full size values from the limbs contained + * in tables. See the documentation in method bb::plookup::get_lookup_accumulators(). * * IMPORTANT: γ and β must be independent challenges for soundness. */ @@ -26,7 +95,7 @@ namespace bb { template class LogDerivLookupRelationImpl { public: using FF = FF_; - static constexpr size_t WRITE_TERMS = 1; // the number of write terms in the lookup relation + static constexpr size_t TABLE_TERMS = 1; // the number of table terms in the lookup relation // 1 + polynomial degree of this relation static constexpr size_t INVERSE_SUBRELATION_LENGTH = 5; // both subrelations are degree 4 static constexpr size_t LOOKUP_SUBRELATION_LENGTH = 5; // both subrelations are degree 4 @@ -51,13 +120,14 @@ template class LogDerivLookupRelationImpl { } /** - * @brief Does the provided row contain data relevant to table lookups; Used to determine whether the polynomial of - * inverses must be computed at a given row - * @details In order to avoid unnecessary computation, the polynomial of inverses I is only computed for rows at - * which the lookup relation is "active". It is active if either (1) the present row contains a lookup gate (i.e. - * q_lookup == 1), or (2) the present row contains table data that has been looked up in this circuit - * (lookup_read_tags == 1, or equivalently, if the row in consideration has index i, the data in polynomials table_i - * has been utlized in the circuit). + * @brief Does the provided row contain data relevant to table lookups + * + * @details Used to determine whether the polynomial of inverses must be computed at a given row. In order to avoid + * unnecessary computation, the polynomial of inverses \f$I\f$ is only computed for rows at which the lookup + * relation is "active". It is active if either (1) the present row contains a lookup gate (i.e. 
+ * \f$q_{\text{lookup}} = 1\f$), or (2) the present row contains table data that has been looked up in this circuit + * (lookup_read_tags \f$= 1\f$, or equivalently, if the row in consideration has index \f$i\f$, the data in + * polynomials table\f$_i\f$ has been utilized in the circuit). * */ template static bool operation_exists_at_row(const AllValues& row) @@ -71,12 +141,18 @@ template class LogDerivLookupRelationImpl { /** * @brief Compute the Accumulator whose values indicate whether the inverse is computed or not + * * @details This is needed for efficiency since we don't need to compute the inverse unless the log derivative - * lookup relation is active at a given row. - * We skip the inverse computation for all the rows that read_count_i == 0 AND read_selector is 0 - * @note read_tag is constructed such that read_tag_i = 1 or 0. We add a subrelation to check that read_tag is a - * boolean value + * lookup relation is active at a given row. We skip the inverse computation for all the rows that + * \f$\text{read_count}_i = 0\f$ AND read_selector is 0. + * + * @note read_tag is constructed such that \f$\text{read_tag}_i \in \{0, 1\}\f$. We add a subrelation to check + * that read_tag is a boolean value. * + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam AllEntities Type containing all polynomial entities + * @param in All entities + * @return Accumulator indicating whether inverse should be computed */ template static Accumulator compute_inverse_exists(const AllEntities& in) @@ -95,11 +171,25 @@ template class LogDerivLookupRelationImpl { return Accumulator(-(row_has_write * row_has_read) + row_has_write + row_has_read); } - // Compute table_1 + gamma + table_2 * β + table_3 * β² + table_4 * β³ + /** + * @brief Compute the table term + * + * @details Computes \f$\text{table}_1 + \gamma + \text{table}_2 \cdot \beta + \text{table}_3 \cdot \beta^2 + + * \text{table}_4 \cdot \beta^3\f$, where table\f$_{1,2,3}\f$ correspond to the (maximum) three columns of the + * lookup table and table\f$_4\f$ is the unique identifier of the lookup table (table_index). 
+ * + * @tparam Accumulator Accumulator type for polynomial evaluations + * @tparam AllEntities Type containing all polynomial entities + * @tparam Parameters Type containing relation parameters + * @param in All entities + * @param params Relation parameters (gamma, eta, eta_two, eta_three) + * @return Accumulator containing the computed table term + */ + // Compute table_1 + gamma + table_2 * eta + table_3 * eta_2 + table_4 * eta_3 // table_1,2,3 correspond to the (maximum) three columns of the lookup table and table_4 is the unique identifier // of the lookup table table_index template - static Accumulator compute_write_term(const AllEntities& in, const Parameters& params) + static Accumulator compute_table_term(const AllEntities& in, const Parameters& params) { using ParameterCoefficientAccumulator = typename Parameters::DataType::CoefficientAccumulator; using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; @@ -122,7 +212,7 @@ template class LogDerivLookupRelationImpl { } template - static Accumulator compute_read_term(const AllEntities& in, const Parameters& params) + static Accumulator compute_lookup_term(const AllEntities& in, const Parameters& params) { using ParameterCoefficientAccumulator = typename Parameters::DataType::CoefficientAccumulator; using CoefficientAccumulator = typename Accumulator::CoefficientAccumulator; @@ -166,11 +256,14 @@ template class LogDerivLookupRelationImpl { } /** - * @brief Construct the polynomial I whose components are the inverse of the product of the read and write terms - * @details If the denominators of log derivative lookup relation are read_term and write_term, then I_i = - * (read_term_i*write_term_i)^{-1}. - * @note Importantly, I_i = 0 for rows i at which there is no read or write, so the cost of this method is - * proportional to the actual number of lookups. + * @brief Construct the polynomial \f$I\f$ whose components are the inverse of the product of the read and write + * terms + * + * @details If the denominators of log derivative lookup relation are lookup_term and table_term, then + * \f$I_i = (\text{lookup_term}_i \cdot \text{table_term}_i)^{-1}\f$. + * + * @note Importantly, \f$I_i = 0\f$ for rows \f$i\f$ at which there is no read or write, so the cost of this method + * is proportional to the actual number of lookups. * */ template @@ -190,8 +283,8 @@ template class LogDerivLookupRelationImpl { if (polynomials.q_lookup.get(i) == 1 || polynomials.lookup_read_tags.get(i) == 1) { // TODO(https://github.com/AztecProtocol/barretenberg/issues/940): avoid get_row if possible. auto row = polynomials.get_row(i); // Note: this is a copy. use sparingly! 
- auto value = compute_read_term(row, relation_parameters) * - compute_write_term(row, relation_parameters); + auto value = compute_lookup_term(row, relation_parameters) * + compute_table_term(row, relation_parameters); inverse_polynomial.at(i) = value; } } @@ -234,21 +327,21 @@ template class LogDerivLookupRelationImpl { const auto read_counts_m = CoefficientAccumulator(in.lookup_read_counts); // Degree 1 const auto read_selector_m = CoefficientAccumulator(in.q_lookup); // Degree 1 - const auto inverse_exists = compute_inverse_exists(in); // Degree 2 - const auto read_term = compute_read_term(in, params); // Degree 2 - const auto write_term = compute_write_term(in, params); // Degree 1 + const auto inverse_exists = compute_inverse_exists(in); // Degree 2 + const auto lookup_term = compute_lookup_term(in, params); // Degree 2 + const auto table_term = compute_table_term(in, params); // Degree 1 // Establish the correctness of the polynomial of inverses I. Note: inverses is computed so that the value is 0 // if !inverse_exists. // Degrees: 5 2 1 1 0 - const Accumulator logderiv_first_term = (read_term * write_term * inverses - inverse_exists) * scaling_factor; + const Accumulator logderiv_first_term = (lookup_term * table_term * inverses - inverse_exists) * scaling_factor; std::get<0>(accumulator) += ShortView(logderiv_first_term); // Deg 5 // Establish validity of the read. Note: no scaling factor here since this constraint is 'linearly dependent, // i.e. enforced across the entire trace, not on a per-row basis. // Degrees: 1 2 = 3 - Accumulator tmp = Accumulator(read_selector_m) * write_term; - tmp -= (Accumulator(read_counts_m) * read_term); + Accumulator tmp = Accumulator(read_selector_m) * table_term; + tmp -= (Accumulator(read_counts_m) * lookup_term); tmp *= inverses; // degree 4(5) std::get<1>(accumulator) += tmp; // Deg 4 (5) diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/http_download.hpp b/barretenberg/cpp/src/barretenberg/srs/factories/http_download.hpp index 83269ff871d5..90e9796dd88b 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/http_download.hpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/http_download.hpp @@ -4,6 +4,8 @@ #ifdef __clang__ #pragma clang diagnostic push +// -Wdeprecated-literal-operator is only available in Clang 18+, ignore unknown warnings for Apple Clang +#pragma clang diagnostic ignored "-Wunknown-warning-option" #pragma clang diagnostic ignored "-Wdeprecated-literal-operator" #pragma clang diagnostic ignored "-Wunused-parameter" #endif diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/README.md b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/README.md new file mode 100644 index 000000000000..c8fc0933a8dc --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/README.md @@ -0,0 +1,40 @@ +# Blake2s + + +## Specification +- https://blake2.net + + +## Overview +This module provides a circuit-friendly implementation of unkeyed BLAKE2s (32-byte digest) over 32-bit words. + +BLAKE2s hashes an arbitrary-length byte string by iterating a compression function over 64-byte input blocks. The implementation maintains a `blake2s_state` with: +- `h[8]`: chaining value (8×32-bit), +- `t[2]`: byte counter (low/high 32-bit limbs), and +- `f[2]`: finalization flags. + +The IV is the standard 8×32-bit initialization vector. 
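Before the per-block walkthrough below, it may help to recall the reference (out-of-circuit) form of the BLAKE2s mixing step that `round_fn` and `g` constrain. This is the standard specification-level G function, not the circuit code:

```cpp
#include <cstdint>

// Plain (non-circuit) reference form of the BLAKE2s mixing step G.
// The in-circuit version replaces the XOR/rotate steps with lookup-table reads and
// normalizes the additions as described in the Implementation section below.
static inline uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

static void g(uint32_t v[16], int a, int b, int c, int d, uint32_t x, uint32_t y)
{
    v[a] = v[a] + v[b] + x;          // addition modulo 2^32
    v[d] = rotr32(v[d] ^ v[a], 16);  // XOR + rotate
    v[c] = v[c] + v[d];
    v[b] = rotr32(v[b] ^ v[c], 12);
    v[a] = v[a] + v[b] + y;
    v[d] = rotr32(v[d] ^ v[a], 8);
    v[c] = v[c] + v[d];
    v[b] = rotr32(v[b] ^ v[c], 7);
}
```

In the circuit version, each XOR-and-rotate line becomes a lookup-table read whose output is constrained to the intended 32-bit result, and the additions are normalized where the algorithm requires a true 32-bit word.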
+ +For each 64-byte block, the compression function does the following: +- parses a 64-byte block into 16 message words `m[16]` (16×32-bit), +- initializes a working state matrix `v[16]` from the current `h`, `IV` constants, counter `t`, and flags `f`, +- applies 10 rounds of the `round_fn` function (adds, XORs, rotates) using message words from `m[16]` according to the BLAKE2s message schedule, and +- finally updates the chaining value as `h[i] = h[i] XOR v[i] XOR v[i+8]`. + +After all full blocks are processed, the remaining bytes are handled as the final block with padding and the finalization flag set, and the 32-byte digest is produced from `h[0..7]`. + + +## Implementation +XORs and rotates are implemented using lookup tables, additions are performed using field arithmetic with explicit normalization wherever needed to satisfy lookup input bounds. The core `g` mixing step is present in `blake_util.hpp` and uses lookup tables to compute XOR/rotate outputs. As a performance tradeoff, intermediate additions inside `g` may temporarily exceed 32 bits, while all locations that require 32-bit words are enforced either by normalization or by lookup outputs constrained to 32-bit values. + +- 32-bit message words: `byte_array` constrains each input byte to 8 bits. Message words `m[i]` are formed from 4 constrained bytes, so each `m[i]` is a well-defined 32-bit word. While the resulting field element is itself not range-constrained to 32 bits, correct 32-bit semantics are enforced at the boundaries via lookup outputs and normalization. +- 32-bit semantics with overflow: As a performance tradeoff, intermediate additions inside the mixing function `g` may temporarily produce values > `2^32` and are allowed to have an overflow of up to 3 bits. Where the algorithm requires a 32-bit word, the 32-bit semantics are ensured by: + - normalization, using `add_normalize_unsafe(a, b, overflow_bits=3)`, which forces the result to the low 32 bits of the sum, and introduces an overflow witness constrained to `overflow_bits` (here 3). + - lookup tables, where outputs are constrained to the intended 32-bit result (normalization is applied as needed to keep lookup keys within the bound of up to 35-bits.) +- 32-bit chaining/output words: The chaining update `h[i] = h[i] XOR v[i] XOR v[i+8]` is computed via lookup tables, where the lookup outputs are constrained to the correct 32-bit results. When producing the final digest, converting each `h[i]` into 4 bytes (via `byte_array(field, 4)`) range-constrains the output bytes to 8 bits each. + + +### API +- The following is the BLAKE2s hash interface: + - `bb::stdlib::Blake2s::hash(const byte_array& input)` +- Inputs/outputs are modeled as `byte_array`, i.e., an in-circuit byte vector (each element range-constrained to 8 bits). diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/blake3s/README.md b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake3s/README.md new file mode 100644 index 000000000000..60a1e990f96d --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake3s/README.md @@ -0,0 +1,64 @@ +# Blake3 + +## Specification +- https://github.com/BLAKE3-team/BLAKE3 + +## Overview +This module provides a circuit-friendly implementation of BLAKE3 over 32-bit words, which supports hashing of up to 1024 bytes (one chunk) and produces a 32-byte digest. 
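The bullets below spell out the single-chunk flow. As a rough, non-circuit sketch of that driver loop: the helper prototypes here mirror `compress_in_place` and `compress_xof`, which are described later in this README, but their signatures (and the omission of the 64-bit chunk counter, which is 0 for a single chunk) are simplifications made for illustration only.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// BLAKE3 domain-separation flags relevant to single-chunk hashing.
constexpr uint32_t CHUNK_START = 1u << 0;
constexpr uint32_t CHUNK_END = 1u << 1;
constexpr uint32_t ROOT = 1u << 3;

// Prototypes only; assumed shapes of the helpers discussed below.
void compress_in_place(uint32_t cv[8], const uint8_t block[64], uint32_t block_len, uint32_t flags);
void compress_xof(const uint32_t cv[8], const uint8_t block[64], uint32_t block_len, uint32_t flags, uint8_t out[64]);

// Hashes up to 1024 bytes (one chunk) into a 32-byte digest.
void blake3_hash_single_chunk(const uint8_t* input, std::size_t len, const uint32_t iv[8], uint8_t digest[32])
{
    uint32_t cv[8];
    std::memcpy(cv, iv, sizeof(cv));

    std::size_t blocks_compressed = 0;
    // Process every block except the last; the final (possibly partial) block goes
    // through the output function instead of a plain CV update.
    while (len > 64) {
        uint32_t flags = (blocks_compressed == 0) ? CHUNK_START : 0;
        compress_in_place(cv, input, 64, flags);
        input += 64;
        len -= 64;
        ++blocks_compressed;
    }

    uint8_t last_block[64] = { 0 }; // final block is zero-padded
    std::memcpy(last_block, input, len);
    uint32_t flags = CHUNK_END | ROOT | ((blocks_compressed == 0) ? CHUNK_START : 0);

    uint8_t out[64];
    compress_xof(cv, last_block, static_cast<uint32_t>(len), flags, out);
    std::memcpy(digest, out, 32); // first 32 bytes are the digest
}
```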
+ +The implementation supports hashing a single chunk where: +- input is split into 64-byte blocks and processed sequentially, +- the chunk chaining value (CV) is updated per full block, and +- the final output is computed by applying the BLAKE3 output function to the last (possibly partial) block. + +It does not implement the full tree-hashing mode (i.e., no parent-node chaining / Merkle-tree reduction), and it does not use keyed hashing or key-derivation modes. +BLAKE3’s compression is similar to BLAKE2s, wherein it operates on a 16-word state, mixes message words using additions, XORs and rotations, and updates an 8-word chaining value (CV). + +## Implementation +The implementation maintains a `blake3_hasher` with: +- `cv[8]`: chaining value (8×32-bit), +- `buf`: a 64-byte buffer for the current block, +- `buf_len`: number of bytes currently buffered, +- `blocks_compressed`: number of full 64-byte blocks processed so far, and +- `flags`: domain-separation flags (e.g., chunk start/end, root). + +The IV is the standard 8×32-bit initialization vector. + +For each 64-byte block, the implementation first runs the core compression (`compress_pre`) to compute the mixed internal state. For intermediate blocks this state is folded back into the chaining value via `compress_in_place`. For the final block, the state is fed into the BLAKE3 output function (`compress_xof`) to produce the hash output. + +### Compression function (`compress_pre`) +The compression function mixes an 8-word CV, a 16-word internal state, a 64-byte message block, the block length, and flags. It +- loads 16 message words from the 64-byte block via `field_ct(block.slice(i * 4, 4).reverse())`, +- initializes the 16-word working state from the CV, IV, block length, and flags, and +- runs 7 rounds of the shared `round_fn` helper in `blake_util.hpp` with the BLAKE3 message schedule. + +### CV update (`compress_in_place`) +`compress_in_place` computes the next chaining value as: +- `cv[i] = state[i] XOR state[i+8]` for `i = 0..7`, implemented via the `BLAKE_XOR` lookup table, +- the lookup output is constrained to the correct 32-bit result, so any intermediate overflow in `state` is discarded at this boundary. + +### Output function (`compress_xof`) and finalization +`compress_xof` produces a 64-byte output (16×32-bit words) written into a `byte_array`: +- words `0-7` are `state[i] XOR state[i+8]` (same as CV update), +- words `8-15` are `state[i+8] XOR cv[i]`, +where each 32-bit word is converted to 4 bytes via `byte_array(field, 4)` (which range-constrains each output byte). + +Finalization (`hasher_finalize`) sets `CHUNK_START` iff `blocks_compressed == 0` (via `maybe_start_flag`), applies the BLAKE3 output function to the final block by setting the `CHUNK_END` and `ROOT` flags, computing the 64-byte output via `compress_xof`. It returns the first 32 bytes as the hash digest. + +### 32-bit semantics +XORs and rotates are implemented using lookup tables, and additions are performed using field arithmetic with explicit normalization wherever needed to satisfy lookup input bounds. The core `g` mixing step is shared with Blake2s and uses lookup tables to compute XOR/rotate outputs. As a performance tradeoff, intermediate additions inside `g` may temporarily exceed 32 bits, while all locations that require 32-bit words are enforced either by normalization or by lookup outputs constrained to 32-bit values. + +- 32-bit message words: `byte_array` constrains each input byte to 8 bits. 
Message words are formed from 4 constrained bytes, so each message word is a well-defined 32-bit value. While the resulting field element is itself not range-constrained to 32 bits, correct 32-bit semantics are enforced at the boundaries via lookup outputs and normalization. +- 32-bit semantics with overflow: As a performance tradeoff, intermediate additions inside the mixing function `g` may temporarily produce values > `2^32` and are allowed to have an overflow of up to 3 bits. Where the algorithm requires a 32-bit word, the 32-bit semantics are ensured by + - normalization, using `add_normalize_unsafe(a, b, overflow_bits=3)`, which forces the result to the low 32 bits of the sum, and introduces an overflow witness constrained to `overflow_bits` (here 3). + - lookup tables, where outputs are constrained to the intended 32-bit result (normalization is applied as needed to keep lookup keys within the bound of up to 35-bits.) +- 32-bit chaining/output words: + - CV updates in `compress_in_place()` are computed via XOR lookup tables, whose outputs are constrained to the correct 32-bit results. + - When producing output bytes in `compress_xof()`, each 32-bit word is converted into 4 bytes using `byte_array(field, 4)`, which range-constrains each output byte to 8 bits. + +## API +- The following is the BLAKE3 hash interface: + - `bb::stdlib::Blake3s::hash(const byte_array& input)` +- Inputs/outputs are modeled as `byte_array`, i.e., an in-circuit byte vector (each element range-constrained to 8 bits). +- Inputs can be at most 1024 bytes long. + diff --git a/barretenberg/cpp/src/barretenberg/vm2/constraining/prover.cpp b/barretenberg/cpp/src/barretenberg/vm2/constraining/prover.cpp index b7c50f8be6db..55ed170d8645 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/constraining/prover.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/constraining/prover.cpp @@ -128,7 +128,7 @@ void AvmProver::execute_log_derivative_inverse_round() Relation::Settings::DST_SELECTOR); AVM_TRACK_TIME(std::string("prove/log_derivative_inverse_round/") + std::string(Relation::NAME), - (compute_logderivative_inverse( + (compute_logderivative_inverse( prover_polynomials, relation_parameters, ProvingKey::circuit_size))); }); }); diff --git a/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/interactions_base.hpp b/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/interactions_base.hpp index 84e05bba509f..eaeffc52b18b 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/interactions_base.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/interactions_base.hpp @@ -11,14 +11,20 @@ namespace bb::avm2 { /////////////////// LOOKUPS /////////////////// +/** + * @brief Settings to be passed ot GenericLookupRelationImpl + * + * @note For every template parameter Setting_, lookup_settings must satify the concept GenericLookupSettings + * defined in generic_lookup_relation.hpp + */ template struct lookup_settings : public Settings_ { - static constexpr size_t READ_TERMS = 1; - static constexpr size_t WRITE_TERMS = 1; - static constexpr size_t READ_TERM_TYPES[READ_TERMS] = { 0 }; - static constexpr size_t WRITE_TERM_TYPES[WRITE_TERMS] = { 0 }; - static constexpr size_t INVERSE_EXISTS_POLYNOMIAL_DEGREE = 4; - static constexpr size_t READ_TERM_DEGREE = 0; - static constexpr size_t WRITE_TERM_DEGREE = 0; + static constexpr size_t NUM_LOOKUP_TERMS = 1; + static constexpr size_t NUM_TABLE_TERMS = 1; + static constexpr std::array LOOKUP_TYPES = { BASIC_LOOKUP }; + static 
constexpr std::array TABLE_TYPES = { BASIC_TABLE }; + static constexpr std::array LOOKUP_TERM_DEGREES = { 0 }; + static constexpr std::array TABLE_TERM_DEGREES = { 0 }; + static constexpr size_t INVERSE_EXISTS_POLYNOMIAL_DEGREE = 2; template static inline auto inverse_polynomial_is_computed_at_row(const AllEntities& in) { @@ -94,6 +100,8 @@ template struct lookup_relation_base : public /////////////////// PERMUTATIONS /////////////////// template struct permutation_settings : public Settings_ { + static constexpr size_t INVERSE_EXISTS_POLYNOMIAL_DEGREE = 2; + template static inline auto inverse_polynomial_is_computed_at_row(const AllEntities& in) { return (in.get(static_cast(Settings_::SRC_SELECTOR)) == 1 || diff --git a/barretenberg/rust/barretenberg-rs/README.md b/barretenberg/rust/barretenberg-rs/README.md index ae4e122bf16b..5b182c7ec465 100644 --- a/barretenberg/rust/barretenberg-rs/README.md +++ b/barretenberg/rust/barretenberg-rs/README.md @@ -66,8 +66,10 @@ The FFI backend provides pre-built static libraries for: - Linux x86_64 (glibc) - Linux arm64 (glibc) - -Additional targets (iOS, Android, macOS) are planned for future releases. +- macOS x86_64 +- macOS arm64 (Apple Silicon) +- iOS arm64 (device) +- iOS arm64 Simulator (Apple Silicon) ## API diff --git a/barretenberg/rust/barretenberg-rs/build.rs b/barretenberg/rust/barretenberg-rs/build.rs index fdedb61d7977..02c7d10144c7 100644 --- a/barretenberg/rust/barretenberg-rs/build.rs +++ b/barretenberg/rust/barretenberg-rs/build.rs @@ -9,7 +9,14 @@ fn main() { // libbb-external.a contains everything needed: barretenberg + env + vm2_stub println!("cargo:rustc-link-lib=static=bb-external"); - println!("cargo:rustc-link-lib=dylib=stdc++"); + + // Link C++ standard library (different name on macOS/iOS vs Linux) + let target = std::env::var("TARGET").unwrap(); + if target.contains("apple") { + println!("cargo:rustc-link-lib=dylib=c++"); + } else { + println!("cargo:rustc-link-lib=dylib=stdc++"); + } } } @@ -43,10 +50,21 @@ fn get_lib_dir() -> PathBuf { fn download_lib(out_dir: &PathBuf) { let target = std::env::var("TARGET").unwrap(); let arch = match target.as_str() { + // Linux t if t.contains("x86_64") && t.contains("linux") => "amd64-linux", t if t.contains("aarch64") && t.contains("linux") => "arm64-linux", + // macOS + t if t.contains("x86_64") && t.contains("apple") && t.contains("darwin") => "amd64-darwin", + t if t.contains("aarch64") && t.contains("apple") && t.contains("darwin") => "arm64-darwin", + // iOS simulator (must check before ios since "ios-sim" contains "ios") + t if t.contains("aarch64") && t.contains("apple") && t.contains("ios-sim") => { + "arm64-ios-sim" + } + // iOS device + t if t.contains("aarch64") && t.contains("apple") && t.contains("ios") => "arm64-ios", _ => panic!( - "Unsupported target for FFI backend: {}. Supported: x86_64-linux, aarch64-linux", + "Unsupported target for FFI backend: {}. 
\ + Supported: x86_64-linux, aarch64-linux, x86_64-apple-darwin, aarch64-apple-darwin, aarch64-apple-ios, aarch64-apple-ios-sim", target ), }; diff --git a/barretenberg/sol/src/honk/Relations.sol b/barretenberg/sol/src/honk/Relations.sol index 77d3ecf31fc5..a0abef6a7144 100644 --- a/barretenberg/sol/src/honk/Relations.sol +++ b/barretenberg/sol/src/honk/Relations.sol @@ -208,42 +208,42 @@ library RelationsLib { Fr[NUMBER_OF_SUBRELATIONS] memory evals, Fr domainSep ) internal pure { - Fr write_term; - Fr read_term; + Fr table_term; + Fr lookup_term; // Calculate the write term (the table accumulation) - // write_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ + // table_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ { Fr beta_sqr = rp.beta * rp.beta; - write_term = wire(p, WIRE.TABLE_1) + rp.gamma + (wire(p, WIRE.TABLE_2) * rp.beta) + table_term = wire(p, WIRE.TABLE_1) + rp.gamma + (wire(p, WIRE.TABLE_2) * rp.beta) + (wire(p, WIRE.TABLE_3) * beta_sqr) + (wire(p, WIRE.TABLE_4) * beta_sqr * rp.beta); } // Calculate the read term - // read_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index * β³ + // lookup_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index * β³ { Fr beta_sqr = rp.beta * rp.beta; Fr derived_entry_1 = wire(p, WIRE.W_L) + rp.gamma + (wire(p, WIRE.Q_R) * wire(p, WIRE.W_L_SHIFT)); Fr derived_entry_2 = wire(p, WIRE.W_R) + wire(p, WIRE.Q_M) * wire(p, WIRE.W_R_SHIFT); Fr derived_entry_3 = wire(p, WIRE.W_O) + wire(p, WIRE.Q_C) * wire(p, WIRE.W_O_SHIFT); - read_term = derived_entry_1 + (derived_entry_2 * rp.beta) + (derived_entry_3 * beta_sqr) + lookup_term = derived_entry_1 + (derived_entry_2 * rp.beta) + (derived_entry_3 * beta_sqr) + (wire(p, WIRE.Q_O) * beta_sqr * rp.beta); } - Fr read_inverse = wire(p, WIRE.LOOKUP_INVERSES) * write_term; - Fr write_inverse = wire(p, WIRE.LOOKUP_INVERSES) * read_term; + Fr lookup_inverse = wire(p, WIRE.LOOKUP_INVERSES) * table_term; + Fr table_inverse = wire(p, WIRE.LOOKUP_INVERSES) * lookup_term; Fr inverse_exists_xor = wire(p, WIRE.LOOKUP_READ_TAGS) + wire(p, WIRE.Q_LOOKUP) - (wire(p, WIRE.LOOKUP_READ_TAGS) * wire(p, WIRE.Q_LOOKUP)); // Inverse calculated correctly relation - Fr accumulatorNone = read_term * write_term * wire(p, WIRE.LOOKUP_INVERSES) - inverse_exists_xor; + Fr accumulatorNone = lookup_term * table_term * wire(p, WIRE.LOOKUP_INVERSES) - inverse_exists_xor; accumulatorNone = accumulatorNone * domainSep; // Inverse - Fr accumulatorOne = wire(p, WIRE.Q_LOOKUP) * read_inverse - wire(p, WIRE.LOOKUP_READ_COUNTS) * write_inverse; + Fr accumulatorOne = wire(p, WIRE.Q_LOOKUP) * lookup_inverse - wire(p, WIRE.LOOKUP_READ_COUNTS) * table_inverse; Fr read_tag = wire(p, WIRE.LOOKUP_READ_TAGS); diff --git a/barretenberg/sol/src/honk/optimised/honk-optimized.sol.template b/barretenberg/sol/src/honk/optimised/honk-optimized.sol.template index 66a4b677aee6..c8b03fb7cce8 100644 --- a/barretenberg/sol/src/honk/optimised/honk-optimized.sol.template +++ b/barretenberg/sol/src/honk/optimised/honk-optimized.sol.template @@ -2047,7 +2047,7 @@ contract BlakeOptHonkVerifier is IVerifier { let beta_sqr := mulmod(beta, beta, p) let beta_cube := mulmod(beta_sqr, beta, p) - // write_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ + // table_term = table_1 + γ + table_2 * β + table_3 * β² + table_4 * β³ let t0 := addmod(addmod(mload(TABLE1_EVAL_LOC), gamma, p), mulmod(mload(TABLE2_EVAL_LOC), beta, p), p) let t1 := @@ -2056,9 +2056,9 @@ contract 
BlakeOptHonkVerifier is IVerifier { mulmod(mload(TABLE4_EVAL_LOC), beta_cube, p), p ) - let write_term := addmod(t0, t1, p) + let table_term := addmod(t0, t1, p) - // read_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index * β³ + // lookup_term = derived_entry_1 + γ + derived_entry_2 * β + derived_entry_3 * β² + q_index * β³ t0 := addmod( addmod(mload(W1_EVAL_LOC), gamma, p), mulmod(mload(QR_EVAL_LOC), mload(W1_SHIFT_EVAL_LOC), p), @@ -2067,12 +2067,12 @@ contract BlakeOptHonkVerifier is IVerifier { t1 := addmod(mload(W2_EVAL_LOC), mulmod(mload(QM_EVAL_LOC), mload(W2_SHIFT_EVAL_LOC), p), p) let t2 := addmod(mload(W3_EVAL_LOC), mulmod(mload(QC_EVAL_LOC), mload(W3_SHIFT_EVAL_LOC), p), p) - let read_term := addmod(t0, mulmod(t1, beta, p), p) - read_term := addmod(read_term, mulmod(t2, beta_sqr, p), p) - read_term := addmod(read_term, mulmod(mload(QO_EVAL_LOC), beta_cube, p), p) + let lookup_term := addmod(t0, mulmod(t1, beta, p), p) + lookup_term := addmod(lookup_term, mulmod(t2, beta_sqr, p), p) + lookup_term := addmod(lookup_term, mulmod(mload(QO_EVAL_LOC), beta_cube, p), p) - let read_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), write_term, p) - let write_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), read_term, p) + let lookup_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), table_term, p) + let table_inverse := mulmod(mload(LOOKUP_INVERSES_EVAL_LOC), lookup_term, p) let inverse_exists_xor := addmod(mload(LOOKUP_READ_TAGS_EVAL_LOC), mload(QLOOKUP_EVAL_LOC), p) inverse_exists_xor := addmod( @@ -2081,14 +2081,14 @@ contract BlakeOptHonkVerifier is IVerifier { p ) - let accumulator_none := mulmod(mulmod(read_term, write_term, p), mload(LOOKUP_INVERSES_EVAL_LOC), p) + let accumulator_none := mulmod(mulmod(lookup_term, table_term, p), mload(LOOKUP_INVERSES_EVAL_LOC), p) accumulator_none := addmod(accumulator_none, sub(p, inverse_exists_xor), p) accumulator_none := mulmod(accumulator_none, mload(POW_PARTIAL_EVALUATION_LOC), p) - let accumulator_one := mulmod(mload(QLOOKUP_EVAL_LOC), read_inverse, p) + let accumulator_one := mulmod(mload(QLOOKUP_EVAL_LOC), lookup_inverse, p) accumulator_one := addmod( accumulator_one, - sub(p, mulmod(mload(LOOKUP_READ_COUNTS_EVAL_LOC), write_inverse, p)), + sub(p, mulmod(mload(LOOKUP_READ_COUNTS_EVAL_LOC), table_inverse, p)), p ) diff --git a/bb-pilcom/bb-pil-backend/src/lookup_builder.rs b/bb-pilcom/bb-pil-backend/src/lookup_builder.rs index c8429ab5225e..1ce18d44042f 100644 --- a/bb-pilcom/bb-pil-backend/src/lookup_builder.rs +++ b/bb-pilcom/bb-pil-backend/src/lookup_builder.rs @@ -181,16 +181,16 @@ fn create_lookup_settings_data(lookup: &Lookup) -> Json { ); // NOTE: these are hardcoded as 1 for now until more optimizations are required - let read_terms = 1; - let write_terms = 1; + let lookup_terms = 1; + let table_terms = 1; let lookup_tuple_size = columns_per_set; // NOTE: hardcoded until optimizations required - let inverse_degree = 4; - let read_term_degree = 0; - let write_term_degree = 0; - let read_term_types = "{0}".to_owned(); - let write_term_types = "{0}".to_owned(); + let inverse_degree = 2; + let lookup_term_degree = 0; + let table_term_degree = 0; + let lookup_term_types = "{0}".to_owned(); + let table_term_types = "{0}".to_owned(); json!({ "lookup_name": lookup.name, @@ -201,14 +201,14 @@ fn create_lookup_settings_data(lookup: &Lookup) -> Json { "rhs_cols": rhs_cols, "inverses_col": lookup.inverse.clone(), "counts_col": lookup.counts_poly, - "read_terms": read_terms, - "write_terms": write_terms, + 
"lookup_terms": lookup_terms, + "table_terms": table_terms, "lookup_tuple_size": lookup_tuple_size, "inverse_degree": inverse_degree, - "read_term_degree": read_term_degree, - "write_term_degree": write_term_degree, - "read_term_types": read_term_types, - "write_term_types": write_term_types, + "lookup_term_degree": lookup_term_degree, + "table_term_degree": table_term_degree, + "lookup_term_types": lookup_term_types, + "table_term_types": table_term_types, }) }