diff --git a/BUILD.bazel b/BUILD.bazel index 022464c..db31e1f 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -20,11 +20,19 @@ licenses(["notice"]) exports_files(["LICENSE"]) +cc_library( + name = "basics", + hdrs = ["util/basics.h"], + deps = [ + "@highway//:hwy", + ], +) + cc_library( name = "allocator", hdrs = ["util/allocator.h"], deps = [ - "@hwy//:hwy", + "@highway//:hwy", ], ) @@ -32,9 +40,9 @@ cc_library( name = "test_util", hdrs = ["util/test_util.h"], deps = [ - "@hwy//:hwy", - "@hwy//:hwy_test_util", - "@hwy//:stats", + "@highway//:hwy", + "@highway//:hwy_test_util", + "@highway//:stats", ], ) @@ -42,9 +50,9 @@ cc_library( name = "threading", hdrs = ["util/threading.h"], deps = [ - "@hwy//:hwy", - "@hwy//:thread_pool", - "@hwy//:topology", + "@highway//:hwy", + "@highway//:thread_pool", + "@highway//:topology", ], ) @@ -54,8 +62,8 @@ cc_test( deps = [ ":threading", "@googletest//:gtest_main", - "@hwy//:hwy", - "@hwy//:hwy_test_util", + "@highway//:hwy", + "@highway//:hwy_test_util", ], ) @@ -66,6 +74,7 @@ cc_library( ], textual_hdrs = [ "ops/dot-inl.h", + "ops/sum-inl.h", "ops/fp_arith-inl.h", "ops/matmul-inl.h", "ops/matvec-inl.h", @@ -73,16 +82,17 @@ cc_library( ], deps = [ ":allocator", + ":basics", ":threading", "//compression:compress", "//compression:sfp", - "@hwy//:algo", - "@hwy//:dot", - "@hwy//:hwy", - "@hwy//:math", - "@hwy//:matvec", - "@hwy//:profiler", - "@hwy//:thread_pool", + "@highway//:algo", + "@highway//:dot", + "@highway//:hwy", + "@highway//:math", + "@highway//:matvec", + "@highway//:profiler", + "@highway//:thread_pool", ], ) @@ -104,12 +114,12 @@ cc_test( "@googletest//:gtest_main", # buildcleaner: keep "//compression:compress", "//compression:test_util", - "@hwy//:hwy", - "@hwy//:hwy_test_util", - "@hwy//:nanobenchmark", #buildcleaner: keep - "@hwy//:profiler", - "@hwy//:stats", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:hwy_test_util", + "@highway//:nanobenchmark", #buildcleaner: keep + "@highway//:profiler", + "@highway//:stats", + "@highway//:thread_pool", ], ) @@ -129,9 +139,9 @@ cc_test( ":test_util", "@googletest//:gtest_main", # buildcleaner: keep "//compression:compress", - "@hwy//:hwy", - "@hwy//:hwy_test_util", - "@hwy//:nanobenchmark", #buildcleaner: keep + "@highway//:hwy", + "@highway//:hwy_test_util", + "@highway//:nanobenchmark", #buildcleaner: keep ], ) @@ -147,10 +157,10 @@ cc_test( ":ops", "@googletest//:gtest_main", # buildcleaner: keep "//compression:compress", - "@hwy//:hwy", - "@hwy//:hwy_test_util", - "@hwy//:nanobenchmark", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:hwy_test_util", + "@highway//:nanobenchmark", + "@highway//:thread_pool", ], ) @@ -167,10 +177,10 @@ cc_test( ":threading", "@googletest//:gtest_main", # buildcleaner: keep "//compression:compress", - "@hwy//:hwy", - "@hwy//:hwy_test_util", - "@hwy//:nanobenchmark", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:hwy_test_util", + "@highway//:nanobenchmark", + "@highway//:thread_pool", ], ) @@ -183,8 +193,8 @@ cc_library( ], deps = [ "//compression:compress", - "@hwy//:hwy", # base.h - "@hwy//:thread_pool", + "@highway//:hwy", # base.h + "@highway//:thread_pool", ], ) @@ -197,10 +207,10 @@ cc_library( ":common", "//compression:compress", "//compression:io", - "@hwy//:hwy", - "@hwy//:profiler", - "@hwy//:stats", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:profiler", + "@highway//:stats", + "@highway//:thread_pool", ], ) @@ -211,9 +221,9 @@ cc_library( deps = [ ":common", "//compression:io", - "@hwy//:hwy", - 
"@hwy//:nanobenchmark", # timer - "@hwy//:profiler", + "@highway//:hwy", + "@highway//:nanobenchmark", # timer + "@highway//:profiler", "@com_google_sentencepiece//:sentencepiece_processor", ], ) @@ -224,7 +234,7 @@ cc_library( hdrs = ["gemma/kv_cache.h"], deps = [ ":common", - "@hwy//:hwy", + "@highway//:hwy", ], ) @@ -271,6 +281,7 @@ cc_library( ], deps = [ ":allocator", + ":basics", ":common", ":ops", ":tokenizer", @@ -279,13 +290,13 @@ cc_library( ":threading", "//compression:io", "//paligemma:image", - "@hwy//:hwy", - "@hwy//:bit_set", - "@hwy//:matvec", - "@hwy//:nanobenchmark", # timer - "@hwy//:profiler", - "@hwy//:thread_pool", - "@hwy//:topology", + "@highway//:hwy", + "@highway//:bit_set", + "@highway//:matvec", + "@highway//:nanobenchmark", # timer + "@highway//:profiler", + "@highway//:thread_pool", + "@highway//:topology", ], ) @@ -297,7 +308,7 @@ cc_library( ":common", ":gemma_lib", ":ops", - "@hwy//:hwy", + "@highway//:hwy", ], ) @@ -306,7 +317,7 @@ cc_library( hdrs = ["util/args.h"], deps = [ "//compression:io", - "@hwy//:hwy", + "@highway//:hwy", ], ) @@ -319,9 +330,9 @@ cc_library( ":gemma_lib", ":threading", "//compression:io", - "@hwy//:hwy", - "@hwy//:thread_pool", - "@hwy//:topology", + "@highway//:hwy", + "@highway//:thread_pool", + "@highway//:topology", ], ) @@ -338,12 +349,12 @@ cc_library( ":kv_cache", ":threading", # Placeholder for internal dep, do not remove., - "@benchmark//:benchmark", + "@google_benchmark//:benchmark", "//compression:compress", - "@hwy//:hwy", - "@hwy//:nanobenchmark", - "@hwy//:thread_pool", - "@hwy//:topology", + "@highway//:hwy", + "@highway//:nanobenchmark", + "@highway//:thread_pool", + "@highway//:topology", ], ) @@ -362,8 +373,8 @@ cc_test( ":gemma_lib", ":tokenizer", "@googletest//:gtest_main", - "@hwy//:hwy", - "@hwy//:hwy_test_util", + "@highway//:hwy", + "@highway//:hwy_test_util", ], ) @@ -379,9 +390,9 @@ cc_binary( ":threading", # Placeholder for internal dep, do not remove., "//paligemma:image", - "@hwy//:hwy", - "@hwy//:profiler", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:profiler", + "@highway//:thread_pool", ], ) @@ -396,9 +407,9 @@ cc_binary( ":cross_entropy", ":gemma_lib", "//compression:io", - "@hwy//:hwy", - "@hwy//:nanobenchmark", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:nanobenchmark", + "@highway//:thread_pool", "@nlohmann_json//:json", ], ) @@ -408,7 +419,7 @@ cc_binary( srcs = ["evals/benchmarks.cc"], deps = [ ":benchmark_helper", - "@benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -423,8 +434,8 @@ cc_binary( ":benchmark_helper", ":gemma_lib", "//compression:io", - "@hwy//:hwy", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:thread_pool", "@nlohmann_json//:json", ], ) @@ -438,9 +449,9 @@ cc_binary( ":benchmark_helper", ":gemma_lib", "//compression:io", - "@hwy//:hwy", - "@hwy//:profiler", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:profiler", + "@highway//:thread_pool", "@nlohmann_json//:json", ], ) @@ -481,9 +492,9 @@ cc_library( ":ops", ":prompt", ":weights", - "@hwy//:dot", - "@hwy//:hwy", # base.h - "@hwy//:thread_pool", + "@highway//:dot", + "@highway//:hwy", # base.h + "@highway//:thread_pool", ], ) @@ -543,9 +554,9 @@ cc_test( ":sampler", "@googletest//:gtest_main", "//compression:weights_raw", - "@hwy//:hwy", - "@hwy//:hwy_test_util", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:hwy_test_util", + "@highway//:thread_pool", ], ) @@ -558,8 +569,8 @@ cc_library( ":common", ":weights", "//compression:compress", - 
"@hwy//:hwy", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:thread_pool", ], ) @@ -582,6 +593,6 @@ cc_test( ":threading", ":weights", "@googletest//:gtest_main", - "@hwy//:thread_pool", + "@highway//:thread_pool", ], ) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6e1c70a..3ac5392 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -106,11 +106,13 @@ set(SOURCES ops/matmul-inl.h ops/matvec-inl.h ops/ops-inl.h + ops/sum-inl.h paligemma/image.cc paligemma/image.h util/allocator.h util/app.h util/args.h + util/basics.h util/test_util.h util/threading.h ) diff --git a/MODULE.bazel b/MODULE.bazel index 43b33a5..fb622d3 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -3,37 +3,33 @@ module( version = "0.1.0", ) -bazel_dep(name = "rules_license", version = "0.0.7") -bazel_dep(name = "googletest", version = "1.14.0") - -# Copied from Highway because Bazel does not load them transitively -bazel_dep(name = "bazel_skylib", version = "1.4.1") +bazel_dep(name = "abseil-cpp", version = "20240722.0") +bazel_dep(name = "bazel_skylib", version = "1.6.1") +bazel_dep(name = "googletest", version = "1.15.2") +bazel_dep(name = "highway", version = "1.1.0") +bazel_dep(name = "nlohmann_json", version = "3.11.3") +bazel_dep(name = "platforms", version = "0.0.10") bazel_dep(name = "rules_cc", version = "0.0.9") -bazel_dep(name = "platforms", version = "0.0.7") - -http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +bazel_dep(name = "rules_license", version = "0.0.7") +bazel_dep(name = "google_benchmark", version = "1.8.5") -http_archive( - name = "hwy", - urls = ["https://github.com/google/highway/archive/refs/tags/1.2.0.zip"], - integrity = "sha256-fbtKAGj5hhhBr5Bggtsrj4aIodC2OHb1njB8LGfom8A=", strip_prefix = "highway-1.2.0", +# Require a more recent version for HWY_RCAST_ALIGNED +git_override( + module_name = "highway", + commit = "bb6c3f36b0c8dde8a8ef98b0f0884f4de820a7ca", + remote = "https://github.com/google/highway", ) -http_archive( - name = "nlohmann_json", - urls = ["https://github.com/nlohmann/json/archive/refs/tags/v3.11.3.zip"], - integrity = "sha256-BAIrBdgG61/3MCPCgLaGl9Erk+G3JnoLIqGjnsdXgGk=", - strip_prefix = "json-3.11.3", -) +http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") http_archive( name = "com_google_sentencepiece", + build_file = "@//bazel:sentencepiece.bazel", + patch_args = ["-p1"], + patches = ["@//bazel:sentencepiece.patch"], sha256 = "8409b0126ebd62b256c685d5757150cf7fcb2b92a2f2b98efb3f38fc36719754", strip_prefix = "sentencepiece-0.1.96", urls = ["https://github.com/google/sentencepiece/archive/refs/tags/v0.1.96.zip"], - build_file = "@//bazel:sentencepiece.bazel", - patches = ["@//bazel:sentencepiece.patch"], - patch_args = ["-p1"], ) # For sentencepiece @@ -56,17 +52,3 @@ cc_library( "https://github.com/s-yata/darts-clone/archive/e40ce4627526985a7767444b6ed6893ab6ff8983.zip", ], ) -# ABSL on 2023-10-18 -http_archive( - name = "com_google_absl", - sha256 = "f841f78243f179326f2a80b719f2887c38fe226d288ecdc46e2aa091e6aa43bc", - strip_prefix = "abseil-cpp-9687a8ea750bfcddf790372093245a1d041b21a3", - urls = ["https://github.com/abseil/abseil-cpp/archive//9687a8ea750bfcddf790372093245a1d041b21a3.tar.gz"], -) -# Benchmark -http_archive( - name = "benchmark", - urls = ["https://github.com/google/benchmark/archive/refs/tags/v1.8.2.tar.gz"], - integrity = "sha256-KqspgNA3YTf5adkoSPu2gharsHYzA0U0/IxlzE56DpM=", - strip_prefix = "benchmark-1.8.2", -) diff --git a/compression/BUILD.bazel 
b/compression/BUILD.bazel index c826d43..f948390 100644 --- a/compression/BUILD.bazel +++ b/compression/BUILD.bazel @@ -33,7 +33,7 @@ cc_library( ], hdrs = ["io.h"], deps = [ - "@hwy//:hwy", + "@highway//:hwy", ] + FILE_DEPS, ) @@ -43,8 +43,8 @@ cc_library( hdrs = ["blob_store.h"], deps = [ ":io", - "@hwy//:hwy", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:thread_pool", ], ) @@ -55,9 +55,9 @@ cc_library( "shared.h", ], deps = [ - "@hwy//:hwy", - "@hwy//:stats", - "@hwy//hwy/contrib/sort:vqsort", + "@highway//:hwy", + "@highway//:stats", + "@highway//hwy/contrib/sort:vqsort", ], ) @@ -69,8 +69,8 @@ cc_test( ":distortion", "@googletest//:gtest_main", # buildcleaner: keep "//:test_util", - "@hwy//:hwy_test_util", - "@hwy//:nanobenchmark", # Unpredictable1 + "@highway//:hwy_test_util", + "@highway//:nanobenchmark", # Unpredictable1 ], ) @@ -79,7 +79,7 @@ cc_library( hdrs = ["shared.h"], textual_hdrs = ["sfp-inl.h"], deps = [ - "@hwy//:hwy", + "@highway//:hwy", ], ) @@ -89,9 +89,9 @@ cc_library( textual_hdrs = ["nuq-inl.h"], deps = [ ":sfp", - "//:allocator", - "@hwy//:hwy", - "@hwy//hwy/contrib/sort:vqsort", + "//:basics", + "@highway//:hwy", + "@highway//hwy/contrib/sort:vqsort", ], ) @@ -103,8 +103,8 @@ cc_library( deps = [ ":compress", ":distortion", - "@hwy//:hwy", - "@hwy//:hwy_test_util", + "@highway//:hwy", + "@highway//:hwy_test_util", ], ) @@ -122,9 +122,9 @@ cc_test( ":sfp", "@googletest//:gtest_main", # buildcleaner: keep "//:test_util", - "@hwy//:hwy", - "@hwy//:hwy_test_util", - "@hwy//:nanobenchmark", + "@highway//:hwy", + "@highway//:hwy_test_util", + "@highway//:nanobenchmark", ], ) @@ -144,9 +144,9 @@ cc_test( ":sfp", "@googletest//:gtest_main", # buildcleaner: keep "//:test_util", - "@hwy//:hwy", - "@hwy//:hwy_test_util", - "@hwy//:nanobenchmark", + "@highway//:hwy", + "@highway//:hwy_test_util", + "@highway//:nanobenchmark", ], ) @@ -163,11 +163,11 @@ cc_library( ":io", ":nuq", ":sfp", - "@hwy//:hwy", - "@hwy//:nanobenchmark", - "@hwy//:profiler", - "@hwy//:stats", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:nanobenchmark", + "@highway//:profiler", + "@highway//:stats", + "@highway//:thread_pool", ], ) @@ -187,9 +187,9 @@ cc_test( ":test_util", "@googletest//:gtest_main", # buildcleaner: keep "//:test_util", - "@hwy//:hwy", - "@hwy//:hwy_test_util", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:hwy_test_util", + "@highway//:thread_pool", ], ) @@ -200,10 +200,10 @@ cc_library( deps = [ ":nuq", ":sfp", - "@hwy//:hwy", - "@hwy//:stats", - "@hwy//:thread_pool", - "@hwy//hwy/contrib/sort:vqsort", + "@highway//:hwy", + "@highway//:stats", + "@highway//:thread_pool", + "@highway//hwy/contrib/sort:vqsort", ], ) @@ -213,8 +213,8 @@ cc_library( deps = [ "//:allocator", "//:common", - "@hwy//:hwy", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:thread_pool", ], ) @@ -229,8 +229,8 @@ cc_binary( "//:args", "//:common", "//:weights", - "@hwy//:hwy", - "@hwy//:profiler", - "@hwy//:thread_pool", + "@highway//:hwy", + "@highway//:profiler", + "@highway//:thread_pool", ], ) diff --git a/compression/compress-inl.h b/compression/compress-inl.h index e47a979..9f80527 100644 --- a/compression/compress-inl.h +++ b/compression/compress-inl.h @@ -24,7 +24,7 @@ #include // lroundf, only if COMPRESS_STATS #include "compression/blob_store.h" -#include "compression/compress.h" +#include "compression/compress.h" // IWYU pragma: export #include "compression/distortion.h" #include "hwy/aligned_allocator.h" #include "hwy/base.h" diff --git 
a/compression/nuq-inl.h b/compression/nuq-inl.h
index faa5ba7..63c4255 100644
--- a/compression/nuq-inl.h
+++ b/compression/nuq-inl.h
@@ -22,7 +22,7 @@
 #include

 #include "compression/shared.h"
-#include "util/allocator.h"
+#include "util/basics.h"
 #include "hwy/base.h"

 #endif  // THIRD_PARTY_GEMMA_CPP_COMPRESSION_NUQ_INL_H_
diff --git a/compression/python/BUILD b/compression/python/BUILD
index bdf67f9..89e2222 100644
--- a/compression/python/BUILD
+++ b/compression/python/BUILD
@@ -16,8 +16,8 @@ cc_library(
         "//third_party/absl/types:span",
         "//compression:compress",
         "//compression:io",
-        "@hwy//:hwy",
-        "@hwy//:thread_pool",
+        "@highway//:hwy",
+        "@highway//:thread_pool",
     ],
 )

diff --git a/examples/hello_world/BUILD.bazel b/examples/hello_world/BUILD.bazel
index ca7f426..52af610 100644
--- a/examples/hello_world/BUILD.bazel
+++ b/examples/hello_world/BUILD.bazel
@@ -17,7 +17,7 @@ cc_binary(
         "//:gemma_lib",
         "//:threading",
         "//:tokenizer",
-        "@hwy//:hwy",
-        "@hwy//:thread_pool",
+        "@highway//:hwy",
+        "@highway//:thread_pool",
     ],
 )
diff --git a/gemma/gemma-inl.h b/gemma/gemma-inl.h
index d2d9cd0..6129158 100644
--- a/gemma/gemma-inl.h
+++ b/gemma/gemma-inl.h
@@ -1176,14 +1176,15 @@ SampleFunc ChooseSampleFunc(const RuntimeConfig& runtime_config) {

   // Fast path for top-1 with no accept_token.
   if (kTopK == 1 && !runtime_config.accept_token) {
-    return [](float* logits, size_t vocab_size) -> TokenAndProb {
+    return [](float* logits, size_t vocab_size) HWY_ATTR -> TokenAndProb {
       PROFILER_ZONE("Gen.Sample Top1");
       return Top1OfSoftmax(logits, vocab_size);
     };
   }

   // General case: Softmax with top-k sampling.
-  return [&runtime_config](float* logits, size_t vocab_size) -> TokenAndProb {
+  return [&runtime_config](float* logits,
+                           size_t vocab_size) HWY_ATTR -> TokenAndProb {
     PROFILER_ZONE("Gen.Sample general");
     Softmax(logits, vocab_size);
     const int token = SampleTopK(logits, vocab_size, *runtime_config.gen,
diff --git a/gemma/gemma.h b/gemma/gemma.h
index ea25281..3ea8b07 100644
--- a/gemma/gemma.h
+++ b/gemma/gemma.h
@@ -28,7 +28,8 @@
 #include "gemma/kv_cache.h"
 #include "gemma/tokenizer.h"
 #include "paligemma/image.h"
-#include "util/allocator.h"
+#include "util/allocator.h"  // RowVectorBatch
+#include "util/basics.h"     // TokenAndProb
 #include "util/threading.h"
 #include "hwy/contrib/thread_pool/thread_pool.h"
 #include "hwy/timer.h"
diff --git a/ops/dot_test.cc b/ops/dot_test.cc
index 4a29540..6c9f8c9 100644
--- a/ops/dot_test.cc
+++ b/ops/dot_test.cc
@@ -840,7 +840,7 @@ class DotStats {
     ASSERT_INSIDE(kPairwise, 4.5E-4, s_rels[kPairwise].GeometricMean(),
                   1.5E-2);
     // Extremely high error on aarch64.
-    ASSERT_INSIDE(kPairwise, 1.1E-3f, s_rels[kPairwise].Max(), 1250.f);
+    ASSERT_INSIDE(kPairwise, 1.1E-3f, s_rels[kPairwise].Max(), 2E3f);
   }

   // Backward relative error, lower is better.
diff --git a/ops/matmul.h b/ops/matmul.h
index ecc72b1..4510588 100644
--- a/ops/matmul.h
+++ b/ops/matmul.h
@@ -19,9 +19,10 @@
 #include

 #include "util/allocator.h"  // RowVectorBatch
-#include "util/threading.h"  // PerClusterPools
+#include "util/threading.h"
+#include "hwy/aligned_allocator.h"  // IWYU pragma: export
 #include "hwy/base.h"
-#include "hwy/contrib/thread_pool/thread_pool.h"
+#include "hwy/contrib/thread_pool/thread_pool.h"  // IWYU pragma: export
 #include "hwy/per_target.h"

 namespace gcpp {
diff --git a/ops/ops-inl.h b/ops/ops-inl.h
index f03159a..79f77bb 100644
--- a/ops/ops-inl.h
+++ b/ops/ops-inl.h
@@ -28,7 +28,7 @@
 #include <type_traits>  // std::enable_if_t

 #include "compression/compress.h"
-#include "util/allocator.h"  // TokenAndProb
+#include "util/basics.h"  // TokenAndProb
 #include "hwy/base.h"
 #include "hwy/contrib/thread_pool/thread_pool.h"
 #include "hwy/detect_targets.h"
@@ -44,6 +44,7 @@

 #include "compression/compress-inl.h"
 #include "ops/dot-inl.h"
+#include "ops/sum-inl.h"
 #include "hwy/contrib/algo/transform-inl.h"
 #include "hwy/contrib/math/math-inl.h"
 #include "hwy/profiler.h"  // also uses SIMD
@@ -507,183 +508,6 @@ static HWY_INLINE HWY_MAYBE_UNUSED void MulByConstAndAdd(
   MulByConstAndAdd(c, x, out, size, size);
 }

-// f64 Add, called for f32 inputs promoted to f64. Runs at about half the speed
-// of f32 sums.
-struct SumKernelDouble {
-  // Only `CompressTraits<float>` can `Decompress2` to `double`, so both have
-  // to be `float` in order to have `Raw = double`. Note that if either type is
-  // smaller than `float`, we may demote the other type from `float` to `BF16`.
-  template <typename VT, typename WT>
-  using Raw = hwy::If<IsF32<VT>() && IsF32<WT>(), double, BF16>;
-  using State = double;
-
-  // Raw = double
-  template <class DRaw, class VR = hn::Vec<DRaw>, HWY_IF_F64_D(DRaw)>
-  HWY_INLINE void Update4(DRaw /*dd*/, const VR w0, const VR w1, const VR w2,
-                          const VR w3, VR, VR, VR, VR, VR& sum0, VR& sum1,
-                          VR& sum2, VR& sum3, VR&, VR&, VR&, VR&) const {
-    sum0 = hn::Add(sum0, w0);
-    sum1 = hn::Add(sum1, w1);
-    sum2 = hn::Add(sum2, w2);
-    sum3 = hn::Add(sum3, w3);
-  }
-
-  // Raw = BF16
-  template <class DRaw, class VR = hn::Vec<DRaw>, HWY_IF_BF16_D(DRaw),
-            class DS = hn::Repartition<double, DRaw>, class VS = hn::Vec<DS>>
-  HWY_INLINE void Update4(DRaw dr, const VR w0, const VR w1, const VR w2,
-                          const VR w3, VR, VR, VR, VR, VS& sum0, VS& sum1,
-                          VS& sum2, VS& sum3, VS&, VS&, VS&, VS&) const {
-    const hn::Repartition<float, DRaw> df;
-    using VF = hn::Vec<decltype(df)>;
-    // Reduce to two f32 sums so we can promote them to four f64 vectors.
-    VF sum02, sum13;
-    if constexpr (HWY_NATIVE_DOT_BF16) {
-      const VR k1 = hn::Set(dr, hwy::ConvertScalarTo<BF16>(1.0f));
-      const VF prod0 = hn::WidenMulPairwiseAdd(df, w0, k1);
-      const VF prod1 = hn::WidenMulPairwiseAdd(df, w1, k1);
-      // Fuse WidenMulPairwiseAdd plus Add into ReorderWidenMulAccumulate.
-      VF unused0 = hn::Zero(df);
-      VF unused1 = hn::Zero(df);
-      sum02 = hn::ReorderWidenMulAccumulate(df, w2, k1, prod0, unused0);
-      sum13 = hn::ReorderWidenMulAccumulate(df, w3, k1, prod1, unused1);
-    } else {
-      // If not native, the multiplication costs extra, so convert to f32.
-      // PromoteEvenTo is cheaper than PromoteUpperTo especially on `SVE`.
-      const VF fe0 = hn::PromoteEvenTo(df, w0);
-      const VF fe1 = hn::PromoteEvenTo(df, w1);
-      const VF fe2 = hn::PromoteEvenTo(df, w2);
-      const VF fe3 = hn::PromoteEvenTo(df, w3);
-      const VF fo0 = hn::PromoteOddTo(df, w0);
-      const VF fo1 = hn::PromoteOddTo(df, w1);
-      const VF fo2 = hn::PromoteOddTo(df, w2);
-      const VF fo3 = hn::PromoteOddTo(df, w3);
-      const VF fe01 = hn::Add(fe0, fe1);
-      const VF fe23 = hn::Add(fe2, fe3);
-      const VF fo01 = hn::Add(fo0, fo1);
-      const VF fo23 = hn::Add(fo2, fo3);
-      sum02 = hn::Add(fe01, fe23);
-      sum13 = hn::Add(fo01, fo23);
-    }
-
-    const DS ds;
-    const VS d0 = hn::PromoteLowerTo(ds, sum02);
-    const VS d1 = hn::PromoteUpperTo(ds, sum02);
-    const VS d2 = hn::PromoteLowerTo(ds, sum13);
-    const VS d3 = hn::PromoteUpperTo(ds, sum13);
-
-    sum0 = hn::Add(sum0, d0);
-    sum1 = hn::Add(sum1, d1);
-    sum2 = hn::Add(sum2, d2);
-    sum3 = hn::Add(sum3, d3);
-  }
-
-  // Raw = double
-  template <class DRaw, class VR = hn::Vec<DRaw>, HWY_IF_F64_D(DRaw)>
-  HWY_INLINE void Update1(DRaw /*dd*/, const VR w0, const VR v0, VR& sum0,
-                          VR& comp0) const {
-    sum0 = hn::Add(sum0, w0);
-  }
-
-  // Raw = BF16
-  template <class DRaw, class VR = hn::Vec<DRaw>, HWY_IF_BF16_D(DRaw),
-            class DS = hn::Repartition<double, DRaw>, class VS = hn::Vec<DS>>
-  HWY_INLINE void Update1(DRaw dr, const VR w0, VR, VS& sum0,
-                          VS& extra0) const {
-    const hn::Repartition<float, DRaw> df;
-    using VF = hn::Vec<decltype(df)>;
-    VF f0;
-    if constexpr (HWY_NATIVE_DOT_BF16) {
-      const VR k1 = hn::Set(dr, hwy::ConvertScalarTo<BF16>(1.0f));
-      f0 = hn::WidenMulPairwiseAdd(df, w0, k1);
-    } else {
-      const VF fe0 = hn::PromoteEvenTo(df, w0);
-      const VF fo0 = hn::PromoteOddTo(df, w0);
-      f0 = hn::Add(fe0, fo0);
-    }
-
-    const DS ds;
-    const VS d0 = hn::PromoteLowerTo(ds, f0);
-    const VS d1 = hn::PromoteUpperTo(ds, f0);
-
-    sum0 = hn::Add(sum0, d0);
-    extra0 = hn::Add(extra0, d1);
-  }
-
-  template <class DState, class VS = hn::Vec<DState>>
-  HWY_INLINE float Reduce(DState dd, VS& sum0, VS& sum1, VS& sum2, VS& sum3,
-                          VS& extra0, VS&, VS&, VS&) const {
-    // Reduction tree: sum of all accumulators by pairs, then across lanes.
-    sum0 = hn::Add(sum0, sum1);
-    sum2 = hn::Add(sum2, sum3);
-    sum0 = hn::Add(sum0, extra0);  // from Update1
-    sum0 = hn::Add(sum0, sum2);
-    return static_cast<float>(hn::ReduceSum(dd, sum0));
-  }
-};
-
-// ORO Cascaded Summation, algorithm 6.11 from Handbook of Floating-Point
-// Arithmetic. Note that Algorithm 6.7 (KBN) appears erroneous. We use TwoSums
-// instead of FastTwoSums because the magnitude of the initial sum is not
-// always greater than the next input, and this does actually change the e2e
-// generation results. Note that Kahan summation differs in that it first adds
-// comp* to w*, so each operation is serially dependent. By contrast, the sum*
-// and comp* here have shorter dependency chains.
-//
-// This about as accurate as SumKernelDouble but slower, hence we only use this
-// if f64 is not supported on this target.
-struct SumKernelCascaded {
-  template <typename VT, typename WT>
-  using Raw = float;
-  using State = float;
-
-  template <class DF, class VF = hn::Vec<DF>, HWY_IF_F32_D(DF)>
-  HWY_INLINE void Update4(DF df, const VF w0, const VF w1, const VF w2,
-                          const VF w3, VF, VF, VF, VF, VF& sum0, VF& sum1,
-                          VF& sum2, VF& sum3, VF& comp0, VF& comp1, VF& comp2,
-                          VF& comp3) const {
-    VF serr0, serr1, serr2, serr3;
-    sum0 = TwoSums(df, sum0, w0, serr0);
-    sum1 = TwoSums(df, sum1, w1, serr1);
-    sum2 = TwoSums(df, sum2, w2, serr2);
-    sum3 = TwoSums(df, sum3, w3, serr3);
-
-    comp0 = hn::Add(comp0, serr0);
-    comp1 = hn::Add(comp1, serr1);
-    comp2 = hn::Add(comp2, serr2);
-    comp3 = hn::Add(comp3, serr3);
-  }
-
-  template <class DF, class VF = hn::Vec<DF>, HWY_IF_F32_D(DF)>
-  HWY_INLINE void Update1(DF df, const VF w0, const VF v0, VF& sum0,
-                          VF& comp0) const {
-    VF serr0;
-    sum0 = TwoSums(df, sum0, w0, serr0);
-
-    comp0 = hn::Add(comp0, serr0);
-  }
-
-  template <class DF, class VF = hn::Vec<DF>>
-  HWY_INLINE float Reduce(DF df, VF& sum0, VF& sum1, VF& sum2, VF& sum3,
-                          VF& comp0, VF& comp1, VF& comp2, VF& comp3) const {
-    // Reduction tree: sum of all accumulators by pairs, then across lanes.
-    AssimilateCascadedSums(df, sum1, comp1, sum0, comp0);
-    AssimilateCascadedSums(df, sum3, comp3, sum2, comp2);
-    AssimilateCascadedSums(df, sum2, comp2, sum0, comp0);
-    return ReduceCascadedSums(df, sum0, comp0);
-  }
-};
-
-using SumKernelDefault =
-    hwy::If<HWY_HAVE_FLOAT64, SumKernelDouble, SumKernelCascaded>;
-
-template <class D, typename VT>
-HWY_INLINE float Sum(D d, const VT* HWY_RESTRICT vec, size_t num) {
-  using Raw = hwy::If<HWY_HAVE_FLOAT64, double, float>;
-  const hn::Repartition<Raw, D> d_raw;
-  return DecompressAndCall(d_raw, MakeSpan(vec, num), SumKernelDefault());
-}
-
 // See below for a specialized version for top-1 sampling.
 static HWY_NOINLINE void Softmax(float* HWY_RESTRICT x, const size_t size,
                                  const size_t mask_pos) {
diff --git a/ops/sum-inl.h b/ops/sum-inl.h
new file mode 100644
index 0000000..3f5d1de
--- /dev/null
+++ b/ops/sum-inl.h
@@ -0,0 +1,217 @@
+// Copyright 2024 Google LLC
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stddef.h>
+
+#include "hwy/base.h"
+
+// Include guard for SIMD code.
+#if defined(THIRD_PARTY_GEMMA_CPP_SUM_TOGGLE) == defined(HWY_TARGET_TOGGLE)
+#ifdef THIRD_PARTY_GEMMA_CPP_SUM_TOGGLE
+#undef THIRD_PARTY_GEMMA_CPP_SUM_TOGGLE
+#else
+#define THIRD_PARTY_GEMMA_CPP_SUM_TOGGLE
+#endif
+
+#include "compression/compress-inl.h"
+
+HWY_BEFORE_NAMESPACE();
+namespace gcpp {
+namespace HWY_NAMESPACE {
+namespace hn = hwy::HWY_NAMESPACE;
+
+// f64 Add, called for f32 inputs promoted to f64. Runs at about half the speed
+// of f32 sums.
+struct SumKernelDouble {
+  // Only `CompressTraits<float>` can `Decompress2` to `double`, so both have
+  // to be `float` in order to have `Raw = double`. Note that if either type is
+  // smaller than `float`, we may demote the other type from `float` to `BF16`.
+  template <typename VT, typename WT>
+  using Raw = hwy::If<IsF32<VT>() && IsF32<WT>(), double, BF16>;
+  using State = double;
+
+  // Raw = double
+  template <class DRaw, class VR = hn::Vec<DRaw>, HWY_IF_F64_D(DRaw)>
+  HWY_INLINE void Update4(DRaw /*dd*/, const VR w0, const VR w1, const VR w2,
+                          const VR w3, VR, VR, VR, VR, VR& sum0, VR& sum1,
+                          VR& sum2, VR& sum3, VR&, VR&, VR&, VR&) const {
+    sum0 = hn::Add(sum0, w0);
+    sum1 = hn::Add(sum1, w1);
+    sum2 = hn::Add(sum2, w2);
+    sum3 = hn::Add(sum3, w3);
+  }
+
+  // Raw = BF16
+  template <class DRaw, class VR = hn::Vec<DRaw>, HWY_IF_BF16_D(DRaw),
+            class DS = hn::Repartition<double, DRaw>, class VS = hn::Vec<DS>>
+  HWY_INLINE void Update4(DRaw dr, const VR w0, const VR w1, const VR w2,
+                          const VR w3, VR, VR, VR, VR, VS& sum0, VS& sum1,
+                          VS& sum2, VS& sum3, VS&, VS&, VS&, VS&) const {
+    const hn::Repartition<float, DRaw> df;
+    using VF = hn::Vec<decltype(df)>;
+    // Reduce to two f32 sums so we can promote them to four f64 vectors.
+    VF sum02, sum13;
+    if constexpr (HWY_NATIVE_DOT_BF16) {
+      const VR k1 = hn::Set(dr, hwy::ConvertScalarTo<BF16>(1.0f));
+      const VF prod0 = hn::WidenMulPairwiseAdd(df, w0, k1);
+      const VF prod1 = hn::WidenMulPairwiseAdd(df, w1, k1);
+      // Fuse WidenMulPairwiseAdd plus Add into ReorderWidenMulAccumulate.
+      VF unused0 = hn::Zero(df);
+      VF unused1 = hn::Zero(df);
+      sum02 = hn::ReorderWidenMulAccumulate(df, w2, k1, prod0, unused0);
+      sum13 = hn::ReorderWidenMulAccumulate(df, w3, k1, prod1, unused1);
+    } else {
+      // If not native, the multiplication costs extra, so convert to f32.
+      // PromoteEvenTo is cheaper than PromoteUpperTo especially on `SVE`.
+      const VF fe0 = hn::PromoteEvenTo(df, w0);
+      const VF fe1 = hn::PromoteEvenTo(df, w1);
+      const VF fe2 = hn::PromoteEvenTo(df, w2);
+      const VF fe3 = hn::PromoteEvenTo(df, w3);
+      const VF fo0 = hn::PromoteOddTo(df, w0);
+      const VF fo1 = hn::PromoteOddTo(df, w1);
+      const VF fo2 = hn::PromoteOddTo(df, w2);
+      const VF fo3 = hn::PromoteOddTo(df, w3);
+      const VF fe01 = hn::Add(fe0, fe1);
+      const VF fe23 = hn::Add(fe2, fe3);
+      const VF fo01 = hn::Add(fo0, fo1);
+      const VF fo23 = hn::Add(fo2, fo3);
+      sum02 = hn::Add(fe01, fe23);
+      sum13 = hn::Add(fo01, fo23);
+    }
+
+    const DS ds;
+    const VS d0 = hn::PromoteLowerTo(ds, sum02);
+    const VS d1 = hn::PromoteUpperTo(ds, sum02);
+    const VS d2 = hn::PromoteLowerTo(ds, sum13);
+    const VS d3 = hn::PromoteUpperTo(ds, sum13);
+
+    sum0 = hn::Add(sum0, d0);
+    sum1 = hn::Add(sum1, d1);
+    sum2 = hn::Add(sum2, d2);
+    sum3 = hn::Add(sum3, d3);
+  }
+
+  // Raw = double
+  template <class DRaw, class VR = hn::Vec<DRaw>, HWY_IF_F64_D(DRaw)>
+  HWY_INLINE void Update1(DRaw /*dd*/, const VR w0, const VR v0, VR& sum0,
+                          VR& comp0) const {
+    sum0 = hn::Add(sum0, w0);
+  }
+
+  // Raw = BF16
+  template <class DRaw, class VR = hn::Vec<DRaw>, HWY_IF_BF16_D(DRaw),
+            class DS = hn::Repartition<double, DRaw>, class VS = hn::Vec<DS>>
+  HWY_INLINE void Update1(DRaw dr, const VR w0, VR, VS& sum0,
+                          VS& extra0) const {
+    const hn::Repartition<float, DRaw> df;
+    using VF = hn::Vec<decltype(df)>;
+    VF f0;
+    if constexpr (HWY_NATIVE_DOT_BF16) {
+      const VR k1 = hn::Set(dr, hwy::ConvertScalarTo<BF16>(1.0f));
+      f0 = hn::WidenMulPairwiseAdd(df, w0, k1);
+    } else {
+      const VF fe0 = hn::PromoteEvenTo(df, w0);
+      const VF fo0 = hn::PromoteOddTo(df, w0);
+      f0 = hn::Add(fe0, fo0);
+    }
+
+    const DS ds;
+    const VS d0 = hn::PromoteLowerTo(ds, f0);
+    const VS d1 = hn::PromoteUpperTo(ds, f0);
+
+    sum0 = hn::Add(sum0, d0);
+    extra0 = hn::Add(extra0, d1);
+  }
+
+  template <class DState, class VS = hn::Vec<DState>>
+  HWY_INLINE float Reduce(DState dd, VS& sum0, VS& sum1, VS& sum2, VS& sum3,
+                          VS& extra0, VS&, VS&, VS&) const {
+    // Reduction tree: sum of all accumulators by pairs, then across lanes.
+    sum0 = hn::Add(sum0, sum1);
+    sum2 = hn::Add(sum2, sum3);
+    sum0 = hn::Add(sum0, extra0);  // from Update1
+    sum0 = hn::Add(sum0, sum2);
+    return static_cast<float>(hn::ReduceSum(dd, sum0));
+  }
+};
+
+// ORO Cascaded Summation, algorithm 6.11 from Handbook of Floating-Point
+// Arithmetic. Note that Algorithm 6.7 (KBN) appears erroneous. We use TwoSums
+// instead of FastTwoSums because the magnitude of the initial sum is not
+// always greater than the next input, and this does actually change the e2e
+// generation results. Note that Kahan summation differs in that it first adds
+// comp* to w*, so each operation is serially dependent. By contrast, the sum*
+// and comp* here have shorter dependency chains.
+//
+// This about as accurate as SumKernelDouble but slower, hence we only use this
+// if f64 is not supported on this target.
+struct SumKernelCascaded {
+  template <typename VT, typename WT>
+  using Raw = float;
+  using State = float;
+
+  template <class DF, class VF = hn::Vec<DF>, HWY_IF_F32_D(DF)>
+  HWY_INLINE void Update4(DF df, const VF w0, const VF w1, const VF w2,
+                          const VF w3, VF, VF, VF, VF, VF& sum0, VF& sum1,
+                          VF& sum2, VF& sum3, VF& comp0, VF& comp1, VF& comp2,
+                          VF& comp3) const {
+    VF serr0, serr1, serr2, serr3;
+    sum0 = TwoSums(df, sum0, w0, serr0);
+    sum1 = TwoSums(df, sum1, w1, serr1);
+    sum2 = TwoSums(df, sum2, w2, serr2);
+    sum3 = TwoSums(df, sum3, w3, serr3);
+
+    comp0 = hn::Add(comp0, serr0);
+    comp1 = hn::Add(comp1, serr1);
+    comp2 = hn::Add(comp2, serr2);
+    comp3 = hn::Add(comp3, serr3);
+  }
+
+  template <class DF, class VF = hn::Vec<DF>, HWY_IF_F32_D(DF)>
+  HWY_INLINE void Update1(DF df, const VF w0, const VF v0, VF& sum0,
+                          VF& comp0) const {
+    VF serr0;
+    sum0 = TwoSums(df, sum0, w0, serr0);
+
+    comp0 = hn::Add(comp0, serr0);
+  }
+
+  template <class DF, class VF = hn::Vec<DF>>
+  HWY_INLINE float Reduce(DF df, VF& sum0, VF& sum1, VF& sum2, VF& sum3,
+                          VF& comp0, VF& comp1, VF& comp2, VF& comp3) const {
+    // Reduction tree: sum of all accumulators by pairs, then across lanes.
+    AssimilateCascadedSums(df, sum1, comp1, sum0, comp0);
+    AssimilateCascadedSums(df, sum3, comp3, sum2, comp2);
+    AssimilateCascadedSums(df, sum2, comp2, sum0, comp0);
+    return ReduceCascadedSums(df, sum0, comp0);
+  }
+};
+
+using SumKernelDefault =
+    hwy::If<HWY_HAVE_FLOAT64, SumKernelDouble, SumKernelCascaded>;
+
+template <class D, typename VT>
+HWY_INLINE float Sum(D d, const VT* HWY_RESTRICT vec, size_t num) {
+  using Raw = hwy::If<HWY_HAVE_FLOAT64, double, float>;
+  const hn::Repartition<Raw, D> d_raw;
+  return DecompressAndCall(d_raw, MakeSpan(vec, num), SumKernelDefault());
+}
+
+// NOLINTNEXTLINE(google-readability-namespace-comments)
+}  // namespace HWY_NAMESPACE
+}  // namespace gcpp
+HWY_AFTER_NAMESPACE();
+
+#endif  // NOLINT
diff --git a/paligemma/BUILD b/paligemma/BUILD
index 335e531..6b303c8 100644
--- a/paligemma/BUILD
+++ b/paligemma/BUILD
@@ -11,7 +11,7 @@ cc_library(
     name = "image",
     srcs = ["image.cc"],
     hdrs = ["image.h"],
-    deps = ["@hwy//:hwy"],
+    deps = ["@highway//:hwy"],
 )

 cc_test(
@@ -39,7 +39,7 @@ cc_test(
         "//:common",
         "//:gemma_lib",
         "//:tokenizer",
-        "@hwy//:hwy",
-        "@hwy//:hwy_test_util",
+        "@highway//:hwy",
+        "@highway//:hwy_test_util",
     ],
 )
diff --git a/util/allocator.h b/util/allocator.h
index 9e664b5..821268c 100644
--- a/util/allocator.h
+++ b/util/allocator.h
@@ -19,30 +19,11 @@
 #include <stddef.h>
 #include <stdint.h>

-#include "hwy/aligned_allocator.h"
+#include "hwy/aligned_allocator.h"  // IWYU pragma: export
 #include "hwy/base.h"

-#if HWY_IS_MSAN
-#include <sanitizer/msan_interface.h>
-#endif
-
 namespace gcpp {

-static inline void MaybeCheckInitialized(const void* ptr, size_t size) {
-#if HWY_IS_MSAN
-  __msan_check_mem_is_initialized(ptr, size);
-#else
-  (void)ptr;
-  (void)size;
-#endif
-}
-
-// Shared between gemma.h and ops-inl.h.
-struct TokenAndProb {
-  int token;
-  float prob;
-};
-
 using ByteStorageT = hwy::AlignedFreeUniquePtr<uint8_t[]>;

 template <typename T>
diff --git a/util/app.h b/util/app.h
index b3786c0..69a1f88 100644
--- a/util/app.h
+++ b/util/app.h
@@ -82,6 +82,9 @@ class AppArgs : public ArgsBase<AppArgs> {
     visitor(max_threads, "num_threads", size_t{0},
             "Maximum number of threads to use; default 0 = unlimited.", 2);
     visitor(pin, "pin", -1, "Pin threads? -1 = auto, 0 = no, 1 = yes.", 2);
+    // These can be used to partition CPU sockets/packages and their
+    // clusters/CCXs across several program instances. The default is to use
+    // all available resources.
     visitor(skip_packages, "skip_packages", size_t{0},
             "Index of the first socket to use; default 0 = unlimited.", 2);
     visitor(max_packages, "max_packages", size_t{0},
diff --git a/util/basics.h b/util/basics.h
new file mode 100644
index 0000000..7f433e2
--- /dev/null
+++ b/util/basics.h
@@ -0,0 +1,49 @@
+// Copyright 2024 Google LLC
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef THIRD_PARTY_GEMMA_CPP_UTIL_BASICS_H_
+#define THIRD_PARTY_GEMMA_CPP_UTIL_BASICS_H_
+
+// IWYU pragma: begin_exports
+#include <stddef.h>
+#include <stdint.h>
+
+#include "hwy/base.h"  // HWY_IS_MSAN
+// IWYU pragma: end_exports
+
+#if HWY_IS_MSAN
+#include <sanitizer/msan_interface.h>
+#endif
+
+namespace gcpp {
+
+static inline void MaybeCheckInitialized(const void* ptr, size_t size) {
+#if HWY_IS_MSAN
+  __msan_check_mem_is_initialized(ptr, size);
+#else
+  (void)ptr;
+  (void)size;
+#endif
+}
+
+// Shared between gemma.h and ops-inl.h.
+struct TokenAndProb {
+  int token;
+  float prob;
+};
+
+}  // namespace gcpp
+
+#endif  // THIRD_PARTY_GEMMA_CPP_UTIL_BASICS_H_
diff --git a/util/threading.h b/util/threading.h
index f3dab6a..bf26ca0 100644
--- a/util/threading.h
+++ b/util/threading.h
@@ -381,7 +381,7 @@ class BoundedTopology {
     LPS enabled_lps;  // LPs not disabled via OS, taskset, or numactl.
     bool missing_cluster = false;
-    if (HWY_LIKELY(have_threading_support)) {
+    if (HWY_LIKELY(have_threading_support && !topology_.packages.empty())) {
       (void)GetThreadAffinity(enabled_lps);  // failure = all disabled

       // No effect if topology is unknown or `enabled_lps` is empty.