Clean up aarch64 tests.

This bumps us to gcc 6 (rather than 4.8), which fully supports C++14, and migrates the Python tests to our Bazel-based system. C#, PHP, and Ruby will remain on CMake + their alternate build systems for now.

PiperOrigin-RevId: 495501807
pull/11307/head
Mike Kruskal 2022-12-14 21:51:34 -08:00 committed by Copybara-Service
parent b5f83ddb90
commit 9baae6adcb
17 changed files with 56 additions and 246 deletions

View File

@@ -24,13 +24,12 @@ def _arch_test_impl(
name = name,
tools = bazel_binaries,
cmd = """
for binary in "$(rootpaths %s) %s"; do
for binary in "%s"; do
(file -L $$binary | grep -q "%s") \
|| (echo "Test binary is not an %s binary: "; file -L $$binary; exit 1)
done
""" % (
" ".join(bazel_binaries),
" ".join(system_binaries),
" ".join(["$(rootpaths %s)" % b for b in bazel_binaries] + system_binaries),
file_platform,
platform,
),
@@ -44,13 +43,13 @@ def _arch_test_impl(
def aarch64_test(**kwargs):
_arch_test_impl(
platform = "aarch64",
file_platform = "ELF 64-bit LSB executable, ARM aarch64",
file_platform = "ELF 64-bit LSB.* ARM aarch64",
**kwargs
)
def x86_64_test(**kwargs):
_arch_test_impl(
platform = "x86_64",
file_platform = "ELF 64-bit LSB executable, ARM x86_64",
file_platform = "ELF 64-bit LSB.*, ARM x86_64",
**kwargs
)
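Taken together, the macro now expands $(rootpaths ...) for each Bazel-built binary and appends any system binaries into a single list before looping. As a minimal standalone sketch of the check this generates (the two paths below are hypothetical placeholders):

#!/bin/bash -exu
# Every binary must report the expected architecture; "file -L" follows
# symlinks before inspecting the ELF header, and the grep pattern is the
# file_platform regex configured above.
for binary in bazel-bin/some/test_binary /usr/bin/some_system_binary; do
  (file -L $binary | grep -q "ELF 64-bit LSB.* ARM aarch64") \
    || (echo "Test binary is not an aarch64 binary:"; file -L $binary; exit 1)
done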

View File

@@ -9,7 +9,6 @@ def inline_sh_binary(
tools = [],
deps = [],
cmd = "",
testonly = None,
**kwargs):
"""Bazel rule to wrap up an inline bash script in a binary.
@@ -28,8 +27,6 @@ def inline_sh_binary(
deps: a list of dependency labels that are required to run this binary.
cmd: the inline sh command to run.
**kwargs: other keyword arguments that are passed to sh_binary.
testonly: common rule attribute (see:
https://bazel.build/reference/be/common-definitions#common-attributes)
"""
native.genrule(
@@ -38,15 +35,16 @@ def inline_sh_binary(
exec_tools = tools,
outs = [name + ".sh"],
cmd = "cat <<'EOF' >$(OUTS)\n#!/bin/bash -exu\n%s\nEOF\n" % cmd,
testonly = testonly,
visibility = ["//visibility:private"],
tags = kwargs["tags"] if "tags" in kwargs else None,
target_compatible_with = kwargs["target_compatible_with"] if "target_compatible_with" in kwargs else None,
testonly = kwargs["testonly"] if "testonly" in kwargs else None,
)
native.sh_binary(
name = name,
srcs = [name + "_genrule"],
data = srcs + tools + deps,
testonly = testonly,
**kwargs
)
@@ -83,6 +81,9 @@ def inline_sh_test(
outs = [name + ".sh"],
cmd = "cat <<'EOF' >$(OUTS)\n#!/bin/bash -exu\n%s\nEOF\n" % cmd,
visibility = ["//visibility:private"],
tags = kwargs["tags"] if "tags" in kwargs else None,
target_compatible_with = kwargs["target_compatible_with"] if "target_compatible_with" in kwargs else None,
testonly = kwargs["testonly"] if "testonly" in kwargs else None,
)
native.sh_test(
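The net effect of this file's changes is that testonly, tags, and target_compatible_with now flow to the intermediate genrule through kwargs rather than a dedicated parameter. For intuition, a sketch of what the genrule's heredoc materializes for a hypothetical inline_sh_binary(name = "hello", cmd = "echo hello"):

# The quoted 'EOF' delimiter prevents variable expansion, so the cmd text
# is written to hello.sh verbatim, under a bash shebang:
cat <<'EOF' >hello.sh
#!/bin/bash -exu
echo hello
EOF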

View File

@@ -1,18 +0,0 @@
#!/bin/bash
#
# Builds protobuf C++ with aarch64 crosscompiler and runs a basic set of tests under an emulator.
# NOTE: This script is expected to run under the dockcross/linux-arm64 docker image.
set -ex
# the build commands are expected to run under dockcross docker image
# where the CC, CXX and other toolchain variables already point to the crosscompiler
cmake .
make -j8
# check that the resulting test binary is indeed an aarch64 ELF
(file ./tests | grep -q "ELF 64-bit LSB executable, ARM aarch64") || (echo "Test binary is not an aarch64 binary"; exit 1)
# run the basic set of C++ tests under QEMU
# there are other tests we could run (e.g. ./lite-test), but this is sufficient as a smoketest
qemu-aarch64 ./tests
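This CMake + manual QEMU flow is superseded by Bazel targets; judging by the updated Kokoro configs later in this change, the equivalent smoketest now runs roughly as follows (the exact command shape is an assumption):

bazel test //src/... //src/google/protobuf/compiler:protoc_aarch64_test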

View File

@@ -1,36 +0,0 @@
#!/bin/bash
set -e
# go to the repo root
cd $(dirname $0)/../../../..
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# Pin the dockcross image since newer versions of the image break the build
PINNED_DOCKCROSS_IMAGE_VERSION=dockcross/linux-arm64:20210625-795dd4d
# running the dockcross image without any arguments generates a wrapper
# script that can be used to run commands under the dockcross image
# easily.
# See https://github.com/dockcross/dockcross#usage for details
docker run $DOCKER_TTY_ARGS --rm $PINNED_DOCKCROSS_IMAGE_VERSION >dockcross-linux-arm64.sh
chmod +x dockcross-linux-arm64.sh
# the wrapper script has CRLF line endings and bash doesn't like that
# so we change CRLF line endings into LF.
sed -i 's/\r//g' dockcross-linux-arm64.sh
# The dockcross wrapper script runs arbitrary commands under the selected dockcross
# image with the following properties which make its use very convenient:
# * the current working directory is mounted under /work so the container can easily
# access the current workspace
# * the processes in the container run under the same UID and GID as the host process so unlike
# vanilla "docker run" invocations, the workspace doesn't get polluted with files
# owned by root.
./dockcross-linux-arm64.sh --image $PINNED_DOCKCROSS_IMAGE_VERSION -- "$@"
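Under the hood, the generated wrapper amounts to a docker run with those two properties; a rough sketch (not the actual generated script):

# Mount the workspace at /work and run as the host UID/GID so build
# outputs aren't owned by root.
docker run $DOCKER_TTY_ARGS --rm -v "$(pwd)":/work -w /work \
  --user "$(id -u):$(id -g)" $PINNED_DOCKCROSS_IMAGE_VERSION "$@"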

View File

@@ -1,9 +1,10 @@
#!/bin/bash
set -e
set -ex
# go to the repo root
cd $(dirname $0)/../../../..
GIT_REPO_ROOT=`pwd`
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
@@ -17,24 +18,10 @@ fi
# before https://github.com/dockcross/dockcross/pull/449
# Thanks to that, wheels built with this image aren't actually
# compliant with manylinux2014, but only with manylinux_2_24
PINNED_DOCKCROSS_IMAGE_VERSION=dockcross/manylinux2014-aarch64:20200929-608e6ac
PINNED_DOCKCROSS_IMAGE_VERSION=quay.io/pypa/manylinux_2_24_aarch64
# running the dockcross image without any arguments generates a wrapper
# script that can be used to run commands under the dockcross image
# easily.
# See https://github.com/dockcross/dockcross#usage for details
docker run $DOCKER_TTY_ARGS --rm $PINNED_DOCKCROSS_IMAGE_VERSION >dockcross-manylinux2014-aarch64.sh
chmod +x dockcross-manylinux2014-aarch64.sh
# the wrapper script has CRLF line endings and bash doesn't like that
# so we change CRLF line endings into LF.
sed -i 's/\r//g' dockcross-manylinux2014-aarch64.sh
# The dockcross wrapper script runs arbitrary commands under the selected dockcross
# image with the following properties which make its use very convenient:
# * the current working directory is mounted under /work so the container can easily
# access the current workspace
# * the processes in the container run under the same UID and GID as the host process so unlike
# vanilla "docker run" invocations, the workspace doesn't get polluted with files
# owned by root.
./dockcross-manylinux2014-aarch64.sh --image $PINNED_DOCKCROSS_IMAGE_VERSION -- "$@"
docker run -v $GIT_REPO_ROOT:/workspace --rm $PINNED_DOCKCROSS_IMAGE_VERSION /bin/bash -c "cd /workspace; git config --global --add safe.directory '*'; $@"
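An illustrative invocation of the rewritten helper (this pairing appears in the Python entry-point script later in this change); the arguments land inside the bash -c string, so they become the command run in the container:

kokoro/linux/aarch64/dockcross_helpers/run_dockcross_manylinux2014_aarch64.sh \
  kokoro/linux/aarch64/python_crosscompile_aarch64.sh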

View File

@@ -1,44 +0,0 @@
#!/bin/bash
#
# Builds protobuf python including the C++ extension with aarch64 crosscompiler.
# The outputs of this script are laid out so that we can later test them under an aarch64 emulator.
# NOTE: This script is expected to run under the dockcross/manylinux2014-aarch64 docker image.
set -ex
PYTHON="/opt/python/cp38-cp38/bin/python"
# Initialize any submodules.
git submodule update --init --recursive
# Build protoc and libprotobuf
cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON -Dprotobuf_WITH_ZLIB=0 -Dprotobuf_BUILD_TESTS=OFF .
make -j8
# create a simple shell wrapper that runs crosscompiled protoc under qemu
echo '#!/bin/bash' >protoc_qemu_wrapper.sh
echo 'exec qemu-aarch64 "../protoc" "$@"' >>protoc_qemu_wrapper.sh
chmod ugo+x protoc_qemu_wrapper.sh
# The PROTOC variable is used by the build_py step that runs under the ./python directory
export PROTOC=../protoc_qemu_wrapper.sh
pushd python
# NOTE: this step will use protoc_qemu_wrapper.sh to generate protobuf files.
${PYTHON} setup.py build_py
# when crosscompiling for aarch64, --plat-name needs to be set explicitly
# to end up with a correctly named wheel file;
# the value should be manylinuxABC_ARCH, and the dockcross docker image
# conveniently provides the value in the AUDITWHEEL_PLAT env var
plat_name_flag="--plat-name=$AUDITWHEEL_PLAT"
# override the value of EXT_SUFFIX to make sure the crosscompiled .so files in the wheel have the correct filename suffix
export PROTOCOL_BUFFERS_OVERRIDE_EXT_SUFFIX="$(${PYTHON} -c 'import sysconfig; print(sysconfig.get_config_var("EXT_SUFFIX").replace("-x86_64-linux-gnu.so", "-aarch64-linux-gnu.so"))')"
# Build the python extension inplace to be able to run python unittests later
${PYTHON} setup.py build_ext --cpp_implementation --compile_static_extension --inplace
# Build the binary wheel (to check it with auditwheel)
${PYTHON} setup.py bdist_wheel --cpp_implementation --compile_static_extension $plat_name_flag
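To make the override concrete, here is the suffix rewrite spelled out (the native value is an assumption for a cp38 manylinux x86_64 host):

# Native EXT_SUFFIX on the x86_64 build host: .cpython-38-x86_64-linux-gnu.so
# After the replace() above, the wheel ships: .cpython-38-aarch64-linux-gnu.so
${PYTHON} -c 'import sysconfig; print(sysconfig.get_config_var("EXT_SUFFIX"))'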

View File

@@ -1,28 +0,0 @@
#!/bin/bash
set -ex
# go to the repo root
cd $(dirname $0)/../../..
cd python
PYTHON="/opt/python/cp38-cp38/bin/python"
${PYTHON} -m pip install --user pytest auditwheel numpy
# check that we are really using aarch64 python
(${PYTHON} -c 'import sysconfig; print(sysconfig.get_platform())' | grep -q "linux-aarch64") || (echo "Wrong python platform, needs to be aarch64 python."; exit 1)
# step 1: run all python unittests
# we've previously built the python extension with the --inplace option
# so we can just discover all the unittests and run them directly under
# the python/ directory.
LD_LIBRARY_PATH=. PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ${PYTHON} -m pytest google/protobuf
# step 2: run auditwheel show to check that the wheel is manylinux2014 compatible.
# auditwheel needs to run on the wheel's target platform (or under an emulator)
${PYTHON} -m auditwheel show dist/protobuf-*-manylinux2014_aarch64.whl
# step 3: smoketest that the wheel can be installed and run a smokecheck
${PYTHON} -m pip install dist/protobuf-*-manylinux2014_aarch64.whl
# when the python cpp extension is on, simply importing a message type will trigger loading the cpp extension
PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ${PYTHON} -c 'import google.protobuf.timestamp_pb2; print("Successfully loaded the python cpp extension!")'

View File

@@ -1,14 +0,0 @@
#!/bin/bash
#
# Crosscompiles protobuf C++ under dockcross docker image and runs the tests under an emulator.
set -e
# go to the repo root
cd $(dirname $0)/../../..
# Initialize any submodules.
git submodule update --init --recursive
# run the C++ build and test script under dockcross/linux-arm64 image
kokoro/linux/aarch64/dockcross_helpers/run_dockcross_linux_aarch64.sh kokoro/linux/aarch64/cpp_crosscompile_and_run_tests_with_qemu_aarch64.sh

View File

@@ -1,32 +0,0 @@
#!/bin/bash
set -ex
# go to the repo root
cd $(dirname $0)/../../..
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# crosscompile protoc as we will later need it for the java build.
# we build it under the dockcross/manylinux2014-aarch64 image so that the resulting protoc binary is compatible
# with a wide range of linux distros (including any docker images we will use later to build and test java)
kokoro/linux/aarch64/dockcross_helpers/run_dockcross_manylinux2014_aarch64.sh kokoro/linux/aarch64/protoc_crosscompile_aarch64.sh
# the command that will be used to build and test java under an emulator
# * IsValidUtf8Test and DecodeUtf8Test are skipped because they take a very long time under an emulator.
TEST_JAVA_COMMAND="mvn --batch-mode -DskipTests install && mvn --batch-mode -Dtest='**/*Test, !**/*IsValidUtf8Test, !**/*DecodeUtf8Test' -DfailIfNoTests=false -Dsurefire.failIfNoSpecifiedTests=false surefire:test"
# use an actual aarch64 docker image (with a real aarch64 java and maven) to build & test protobuf java under an emulator
# * mount the protobuf root as /work to be able to access the crosscompiled files
# * to avoid running the process inside docker as root (which can pollute the workspace with files owned by root), we force
# running under the current user's UID and GID. To be able to do that, we need to provide a home directory for the user,
# otherwise the UID would be homeless under the docker container and pip install wouldn't work. For simplicity,
# we just map the user's home to a throwaway temporary directory
# * the JAVA_OPTS and MAVEN_CONFIG variables are being set mostly to silence warnings about non-existent home directory
# and to avoid polluting the workspace.
docker run $DOCKER_TTY_ARGS --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -e "JAVA_OPTS=-Duser.home=/home/fake-user" -e "MAVEN_CONFIG=/home/fake-user/.m2" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work arm64v8/maven:3.8-openjdk-11 bash -c "cd java && $TEST_JAVA_COMMAND"
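The non-root pattern used here, shown in isolation (image and command are placeholders):

# Run as the host user, handing it a throwaway temp dir as a writable $HOME
# so neither pip nor maven pollutes the workspace with root-owned files.
docker run --rm --user "$(id -u):$(id -g)" \
  -e "HOME=/home/fake-user" \
  -v "$(mktemp -d):/home/fake-user" \
  -v "$(pwd)":/work -w /work \
  some/image:tag some-command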

View File

@@ -1,26 +0,0 @@
#!/bin/bash
set -e
# go to the repo root
cd $(dirname $0)/../../..
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# crosscompile python extension and the binary wheel under dockcross/manylinux2014-aarch64 image
kokoro/linux/aarch64/dockcross_helpers/run_dockcross_manylinux2014_aarch64.sh kokoro/linux/aarch64/python_crosscompile_aarch64.sh
# once crosscompilation is done, use an actual aarch64 docker image (with a real aarch64 python) to run all the tests under an emulator
# * mount the protobuf root as /work to be able to access the crosscompiled files
# * intentionally use a different image than manylinux2014 so that we don't build and test on the same linux distribution
# (manylinux_2_24 is debian-based while manylinux2014 is centos-based)
# * to avoid running the process inside docker as root (which can pollute the workspace with files owned by root), we force
# running under the current user's UID and GID. To be able to do that, we need to provide a home directory for the user,
# otherwise the UID would be homeless under the docker container and pip install wouldn't work. For simplicity,
# we just map the user's home to a throwaway temporary directory
docker run $DOCKER_TTY_ARGS --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work quay.io/pypa/manylinux_2_24_aarch64 kokoro/linux/aarch64/python_run_tests_with_qemu_aarch64.sh

View File

@@ -6,12 +6,12 @@ timeout_mins: 120
env_vars {
key: "CONTAINER_IMAGE"
value: "gcr.io/protobuf-build/emulation/linux:aarch64-4e847d7a01c1792471b6dd985ab0bf2677332e6f"
value: "gcr.io/protobuf-build/emulation/linux:aarch64-8c600b1add46ab66e9cb15b893be175fe464dcbb"
}
env_vars {
key: "BAZEL_TARGETS"
value: "//src/..."
value: "//src/... //src/google/protobuf/compiler:protoc_aarch64_test"
}
action {

View File

@@ -6,12 +6,12 @@ timeout_mins: 120
env_vars {
key: "CONTAINER_IMAGE"
value: "gcr.io/protobuf-build/emulation/linux:aarch64-4e847d7a01c1792471b6dd985ab0bf2677332e6f"
value: "gcr.io/protobuf-build/emulation/linux:aarch64-8c600b1add46ab66e9cb15b893be175fe464dcbb"
}
env_vars {
key: "BAZEL_TARGETS"
value: "//java/..."
value: "//java/... //src/google/protobuf/compiler:protoc_aarch64_test"
}
action {

View File

@@ -1,13 +0,0 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "continuous" and "presubmit" jobs.
set -ex
# Change to repo root
cd $(dirname $0)/../../..
kokoro/linux/aarch64/qemu_helpers/prepare_qemu.sh
kokoro/linux/aarch64/test_python_aarch64.sh

View File

@@ -1,11 +1,28 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/python_aarch64/build.sh"
build_file: "protobuf/kokoro/linux/bazel.sh"
timeout_mins: 120
env_vars {
key: "CONTAINER_IMAGE"
value: "gcr.io/protobuf-build/emulation/linux:aarch64-8c600b1add46ab66e9cb15b893be175fe464dcbb"
}
env_vars {
key: "BAZEL_TARGETS"
value: "//python/... //python:aarch64_test"
}
env_vars {
key: "BAZEL_EXTRA_FLAGS"
value: "--define=use_fast_cpp_protos=true "
# TODO(b/262628111) Enable this once conformance tests are fixed.
"--test_tag_filters=-conformance"
}
action {
define_artifacts {
regex: "**/sponge_log.xml"
regex: "**/sponge_log.*"
}
}
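Presumably (an assumption about the shared bazel.sh entry point, which is not shown in this diff), the config's env vars collapse into a single invocation along the lines of:

bazel test --define=use_fast_cpp_protos=true --test_tag_filters=-conformance \
  //python/... //python:aarch64_test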

View File

@@ -10,6 +10,7 @@ load("@rules_pkg//:mappings.bzl", "pkg_files", "strip_prefix")
load("@rules_python//python:defs.bzl", "py_library")
load("@pip_deps//:requirements.bzl", "requirement")
load("//:protobuf.bzl", "internal_py_proto_library")
load("//build_defs:arch_tests.bzl", "aarch64_test", "x86_64_test")
load("//build_defs:cpp_opts.bzl", "COPTS")
load("//conformance:defs.bzl", "conformance_test")
load(":internal.bzl", "internal_copy_files", "internal_py_test")
@@ -118,6 +119,22 @@ cc_binary(
}),
)
aarch64_test(
name = "aarch64_test",
bazel_binaries = [
"google/protobuf/internal/_api_implementation.so",
"google/protobuf/pyext/_message.so",
],
)
x86_64_test(
name = "x86_64_test",
bazel_binaries = [
"google/protobuf/internal/_api_implementation.so",
"google/protobuf/pyext/_message.so",
],
)
py_library(
name = "python_srcs",
srcs = glob(

View File

@@ -168,8 +168,8 @@ set(libprotobuf_hdrs
${protobuf_SOURCE_DIR}/src/google/protobuf/reflection_ops.h
${protobuf_SOURCE_DIR}/src/google/protobuf/repeated_field.h
${protobuf_SOURCE_DIR}/src/google/protobuf/repeated_ptr_field.h
${protobuf_SOURCE_DIR}/src/google/protobuf/serial_arena.h
${protobuf_SOURCE_DIR}/src/google/protobuf/service.h
${protobuf_SOURCE_DIR}/src/google/protobuf/serial_arena.h
${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/callback.h
${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/common.h
${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/logging.h

View File

@@ -145,7 +145,7 @@ struct ArenaAlign {
constexpr size_t Ceil(size_t n) const { return (n + align - 1) & -align; }
constexpr size_t Floor(size_t n) const { return (n & ~(align - 1)); }
size_t Padded(size_t n) const {
constexpr size_t Padded(size_t n) const {
// TODO(mvels): there are direct callers of AllocateAligned() that violate
// `size` being a multiple of `align`: that should be an error / assert.
// ABSL_ASSERT(IsAligned(n));
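As a quick sanity check of the alignment arithmetic above (worked example with align = 8, n = 13; note -8 == ~7 in two's complement, matching the C++ `& -align`):

echo $(( (13 + 8 - 1) & -8 ))  # Ceil:  20 & ~7 = 16 (round up to the next multiple of 8)
echo $(( 13 & ~(8 - 1) ))      # Floor: 13 & ~7 = 8  (round down)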