# Radix/examples/llama/BUILD.bazel
load("@aspect_bazel_lib//lib:expand_template.bzl", "expand_template")
load("@aspect_bazel_lib//lib:tar.bzl", "mtree_spec", "tar")
load("@aspect_bazel_lib//lib:transitions.bzl", "platform_transition_filegroup")
load("@bazel_skylib//rules:native_binary.bzl", "native_binary")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_load", "oci_push")
load("@zml//bazel:zig.bzl", "zig_cc_binary")
zig_cc_binary(
    name = "llama",
    srcs = ["llama.zig"],
    main = "main.zig",
    deps = [
        "@com_github_hejsil_clap//:clap",
        "@zml//async",
        "@zml//stdx",
        "@zml//zml",
    ],
)
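
# Per-model runner targets. Each wrapper bakes the model's config, weights,
# and tokenizer into the command line via $(location) expansion over the
# external model repositories, so a plain `bazel run :<target>` works.
# Sharded checkpoints pass the model.safetensors.index.json manifest;
# single-file checkpoints pass model.safetensors directly.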
cc_binary(
    name = "TinyLlama-120M-scratch",
    args = [
        "--config=$(location @TinyLlama-120M-scratch//:config.json)",
        "--weights=$(location @TinyLlama-120M-scratch//:model.safetensors)",
        "--tokenizer=$(location @TinyLlama-120M-scratch//:tokenizer.json)",
        "--no-llama3=true",  # don't apply the Llama-3 prompt template encoding
        "--sharding=false",  # don't shard this model
    ],
    data = [
        "@TinyLlama-120M-scratch",
        "@TinyLlama-120M-scratch//:config.json",
        "@TinyLlama-120M-scratch//:model.safetensors",
        "@TinyLlama-120M-scratch//:tokenizer.json",
    ],
    deps = [":llama_lib"],
)

cc_binary(
    name = "Llama-3.1-8B-Instruct",
    args = [
        "--config=$(location @Meta-Llama-3.1-8B-Instruct//:config.json)",
        "--weights=$(location @Meta-Llama-3.1-8B-Instruct//:model.safetensors.index.json)",
        "--tokenizer=$(location @Meta-Llama-3.1-8B-Instruct//:tokenizer.json)",
    ],
    data = [
        "@Meta-Llama-3.1-8B-Instruct",
        "@Meta-Llama-3.1-8B-Instruct//:config.json",
        "@Meta-Llama-3.1-8B-Instruct//:model.safetensors.index.json",
        "@Meta-Llama-3.1-8B-Instruct//:tokenizer.json",
    ],
    deps = [":llama_lib"],
)

cc_binary(
    name = "Llama-3.1-70B-Instruct",
    args = [
        "--config=$(location @Meta-Llama-3.1-70B-Instruct//:config.json)",
        "--weights=$(location @Meta-Llama-3.1-70B-Instruct//:model.safetensors.index.json)",
        "--tokenizer=$(location @Meta-Llama-3.1-70B-Instruct//:tokenizer.json)",
    ],
    data = [
        "@Meta-Llama-3.1-70B-Instruct",
        "@Meta-Llama-3.1-70B-Instruct//:config.json",
        "@Meta-Llama-3.1-70B-Instruct//:model.safetensors.index.json",
        "@Meta-Llama-3.1-70B-Instruct//:tokenizer.json",
    ],
    deps = [":llama_lib"],
)

cc_binary(
    name = "Llama-3.2-1B-Instruct",
    args = [
        "--config=$(location @Meta-Llama-3.2-1B-Instruct//:config.json)",
        "--weights=$(location @Meta-Llama-3.2-1B-Instruct//:model.safetensors)",
        "--tokenizer=$(location @Meta-Llama-3.2-1B-Instruct//:tokenizer.json)",
    ],
    data = [
        "@Meta-Llama-3.2-1B-Instruct",
        "@Meta-Llama-3.2-1B-Instruct//:config.json",
        "@Meta-Llama-3.2-1B-Instruct//:model.safetensors",
        "@Meta-Llama-3.2-1B-Instruct//:tokenizer.json",
    ],
    deps = [":llama_lib"],
)

cc_binary(
    name = "Llama-3.2-3B-Instruct",
    args = [
        "--config=$(location @Meta-Llama-3.2-3B-Instruct//:config.json)",
        "--weights=$(location @Meta-Llama-3.2-3B-Instruct//:model.safetensors.index.json)",
        "--tokenizer=$(location @Meta-Llama-3.2-3B-Instruct//:tokenizer.json)",
    ],
    data = [
        "@Meta-Llama-3.2-3B-Instruct",
        "@Meta-Llama-3.2-3B-Instruct//:config.json",
        "@Meta-Llama-3.2-3B-Instruct//:model.safetensors.index.json",
        "@Meta-Llama-3.2-3B-Instruct//:tokenizer.json",
    ],
    deps = [":llama_lib"],
)
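
# Reference tests: test-implementation runs the model code in test.zig
# against the Llama-3.1-8B checkpoint; test_tokenizer exercises the
# tokenizer on its own.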
zig_cc_binary(
    name = "test-implementation",
    srcs = ["llama.zig"],
    args = [
        "--weights=$(location @Meta-Llama-3.1-8B-Instruct//:model.safetensors.index.json)",
        "--config=$(location @Meta-Llama-3.1-8B-Instruct//:config.json)",
    ],
    data = [
        "@Meta-Llama-3.1-8B-Instruct//:model",
        "@Meta-Llama-3.1-8B-Instruct//:model.safetensors.index.json",
    ],
    main = "test.zig",
    deps = [
        "//third_party/tigerbeetle:flags",
        "@zml//async",
        "@zml//metax",
        "@zml//zml",
    ],
)

zig_cc_binary(
    name = "test_tokenizer",
    # Note: all Llama-3.x tokenizers are identical. We use the 3.2-1B copy
    # because fetching a repository's tokenizer also triggers downloading its
    # model weights, and 1B is the smallest.
    args = [
        "--tokenizer=$(location @Meta-Llama-3.2-1B-Instruct//:tokenizer)",
    ],
    data = ["@Meta-Llama-3.2-1B-Instruct//:tokenizer"],
    main = "test_tokenizer.zig",
    deps = [
        "//third_party/tigerbeetle:flags",
        "@zml//stdx",
        "@zml//zml",
    ],
)
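
# Container image packaging: mtree_spec enumerates the runner's runfiles
# tree, tar packs it into a zstd-compressed layer, expand_template renders
# the container entrypoint, and oci_image assembles the final image on a
# distroless base.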
mtree_spec(
    name = "mtree",
    srcs = [":Llama-3.2-1B-Instruct"],
)

tar(
    name = "archive",
    srcs = [":Llama-3.2-1B-Instruct"],
    args = [
        "--options",
        "zstd:compression-level=9",
    ],
    compress = "zstd",
    mtree = ":mtree",
)
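
# Render the entrypoint command line. The :config/:weights/:tokenizer
# placeholders in the template lines are replaced with rlocationpaths so
# the binary can locate the model files inside the image's runfiles tree.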
expand_template(
    name = "entrypoint",
    data = [
        ":Llama-3.2-1B-Instruct",
        "@Meta-Llama-3.2-1B-Instruct",
        "@Meta-Llama-3.2-1B-Instruct//:config.json",
        "@Meta-Llama-3.2-1B-Instruct//:model.safetensors",
        "@Meta-Llama-3.2-1B-Instruct//:tokenizer.json",
    ],
    substitutions = {
        ":config": "$(rlocationpath @Meta-Llama-3.2-1B-Instruct//:config.json)",
        ":weights": "$(rlocationpath @Meta-Llama-3.2-1B-Instruct//:model.safetensors)",
        ":tokenizer": "$(rlocationpath @Meta-Llama-3.2-1B-Instruct//:tokenizer.json)",
    },
    template = [
        "./{}/Llama-3.2-1B-Instruct".format(package_name()),
        "--config=./{}/Llama-3.2-1B-Instruct.runfiles/:config".format(package_name()),
        "--weights=./{}/Llama-3.2-1B-Instruct.runfiles/:weights".format(package_name()),
        "--tokenizer=./{}/Llama-3.2-1B-Instruct.runfiles/:tokenizer".format(package_name()),
    ],
)

oci_image(
    name = "image_",
    base = "@distroless_cc_debian12_debug",
    # entrypoint = ["./{}/Llama-3.2-1B-Instruct".format(package_name())],
    entrypoint = ":entrypoint",
    tars = [
        "@zml//runtimes:layers",
        ":archive",
    ],
)
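
# Transition the image to linux/amd64 so it builds the same regardless of
# the host platform (e.g. when building from a macOS machine).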
platform_transition_filegroup(
    name = "image",
    srcs = [":image_"],
    target_platform = "@zml//platforms:linux_amd64",
)
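
# Load the image into the local Docker daemon, e.g.:
#   bazel run :load
#   docker run --rm -it distroless/llama-3.2-1b-instruct:latest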
oci_load(
    name = "load",
    image = ":image",
    repo_tags = [
        "distroless/llama-3.2-1b-instruct:latest",
    ],
)
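
# Push the image to the registry: bazel run :push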
oci_push(
    name = "push",
    image = ":image",
    remote_tags = ["latest"],
    repository = "index.docker.io/steeve/llama-3.2-1b-instruct",
)