#!/bin/bash
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Fail on unset variables and on any failure within a pipeline. `-e` is
# deliberately omitted: the script checks exit statuses itself (see the
# TensorFlow probe below).
set -u -o pipefail

# Host identification: lowercased kernel name (e.g. "linux", "darwin")
# and machine architecture (e.g. "x86_64", "arm64").
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
ARCH="$(uname -m)"
# --- helpers ---------------------------------------------------------------
# Append one line to the generated .bazelrc in the current directory.
function write_bazelrc() {
  local line="${1}"
  echo "${line}" >> .bazelrc
}
# Append one line to the generated .tf_configure.bazelrc in the current
# directory (imported by .bazelrc via try-import).
function write_tf_rc() {
  local line="${1}"
  echo "${line}" >> .tf_configure.bazelrc
}
# Print an error message to stderr and abort the whole script with status 1.
function die() {
  local msg="$*"
  echo "ERROR: ${msg}" >&2
  exit 1
}
# True when running on macOS (uname -s reports "Darwin", lowercased earlier).
function is_macos() {
  case "${PLATFORM}" in
    darwin) return 0 ;;
    *) return 1 ;;
  esac
}
# True when running in a Windows-hosted environment (MSYS, MinGW, Cygwin, or
# UWIN). PLATFORM holds the lowercased `uname -s`, e.g. "msys_nt-10.0-19042"
# or "mingw64_nt-10.0".
#
# Fix: the original used glob-style patterns (msys_nt*|mingw*|...) inside
# `=~`, which is ERE — there `*` means "zero or more of the previous char",
# so it really matched substrings like "msys_n" and "ming" anywhere in the
# string and only worked by accident. Anchor a proper prefix alternation.
function is_windows() {
  [[ "${PLATFORM}" =~ ^(msys_nt|mingw|cygwin|uwin) ]]
}
# Detect whether this script is running inside a Docker container.
# NOTE(review): the return value is INVERTED relative to the function name —
# it returns 1 ("failure") when inside Docker and 0 ("success") when not.
# The single call site pairs this with `&& ... || ...` so the printed message
# still comes out right; if this is ever renamed or fixed, the caller must be
# updated in the same change.
function inside_docker() {
# Docker creates /.dockerenv at the root of every container.
if [[ -f /.dockerenv ]]; then
return 1
# Fallback heuristic: on Linux, PID 1's cgroup file mentions "docker" when
# running inside a container.
elif [[ "${PLATFORM}" = "linux" ]] && grep -q docker /proc/1/cgroup; then
return 1
else
return 0
fi
}
# Generate third_party/python_legacy/, a tiny local Bazel repository that
# pins the absolute Python interpreter path for TF's repository rules.
# Reads the global PYTHON_BIN_PATH; (re)writes WORKSPACE, BUILD and defs.bzl.
function write_legacy_python_repo() {
mkdir -p third_party/python_legacy
# empty WORKSPACE
cat > third_party/python_legacy/WORKSPACE <<'EOF'
# AUTOGENERATED by configure.sh.
# This file is intentionally empty.
EOF
# simple BUILD that exports defs.bzl
cat > third_party/python_legacy/BUILD <<'EOF'
# AUTOGENERATED by configure.sh.
package(default_visibility = ["//visibility:public"])
exports_files(["defs.bzl"])
EOF
# defs.bzl MUST define 'interpreter' as a string, not a function.
# We also export py_runtime to satisfy older loads.
# Unquoted heredoc delimiter: ${PYTHON_BIN_PATH} below is expanded NOW, at
# generation time, baking the absolute path into defs.bzl.
# NOTE(review): `native.py_runtime` is referenced at .bzl load time — confirm
# this still loads under newer Bazel releases.
cat > third_party/python_legacy/defs.bzl <<EOF
# AUTOGENERATED by configure.sh.
load("@bazel_tools//tools/python:toolchain.bzl", "py_runtime_pair")
# Absolute path to the python interpreter Bazel/TF should use:
interpreter = "${PYTHON_BIN_PATH}"
py_runtime = native.py_runtime
EOF
echo
echo "Created third_party/python_legacy."
echo "Python interpreter = ${PYTHON_BIN_PATH}"
}
# --- parse args ------------------------------------------------------------
# Only --python=/path/to/python is recognized; anything else is reported on
# stdout and otherwise ignored (lenient, matching upstream behavior).
USER_PY=""
for option in "$@"; do
  if [[ "${option}" == --python=* ]]; then
    USER_PY="${option#--python=}"
  else
    echo "Unknown arg: ${option}"
  fi
done
# --- choose interpreter (venv/conda/system) --------------------------------
# Pick the Python interpreter, in priority order:
#   1. --python=... command-line flag
#   2. PYTHON_BIN_PATH environment variable
#   3. the active conda environment's python, if any
#   4. system python3 — this fallback alone is version-checked (>= 3.10)
chosen_py=""
if [[ -n "${USER_PY}" ]]; then
  chosen_py="${USER_PY}"
elif [[ -n "${PYTHON_BIN_PATH:-}" ]]; then
  chosen_py="${PYTHON_BIN_PATH}"
elif [[ -n "${CONDA_PREFIX:-}" && -x "${CONDA_PREFIX}/bin/python" ]]; then
  chosen_py="${CONDA_PREFIX}/bin/python"
else
  if ! command -v python3 >/dev/null 2>&1; then
    die "python3 not found. Pass --python=/path/to/python3.10+ or set PYTHON_BIN_PATH."
  fi
  if ! python3 - <<'PY'
import sys
raise SystemExit(0 if sys.version_info[:2] >= (3, 10) else 1)
PY
  then
    die "Python 3.10+ required for TensorFlow Quantum, but found " \
      "$(python3 -V 2>&1). Pass --python=/path/to/python3.10+ or set PYTHON_BIN_PATH."
  fi
  chosen_py="$(command -v python3)"
fi
# Normalize to an absolute path by asking Python for sys.executable: tools
# like pyenv install shim scripts, and readlink on a shim resolves to the
# shim itself rather than the real interpreter binary.
PYTHON_BIN_PATH="$("${chosen_py}" -c 'import os,sys; print(os.path.abspath(sys.executable))')"
# --- identify ourselves (useful mainly when redirecting output) ------------
echo "Configuring TensorFlow Quantum build."
info_string="Running on a ${ARCH} $(uname -s) system"
# NOTE(review): inside_docker returns 0 when NOT in Docker (its return codes
# are inverted relative to its name — see its definition), so the `&&` branch
# prints the plain message and the `||` branch prints the "inside Docker"
# variant. The output is correct even though the line reads backwards.
inside_docker && echo "${info_string}." || echo "${info_string} inside Docker."
echo
# --- choose CPU/GPU like upstream script (default CPU) ---------------------
# Ask until the user answers y/n. Empty input — or EOF when stdin is not a
# terminal (the `|| true` keeps a failed read from mattering) — defaults to Y,
# i.e. a CPU build.
TF_NEED_CUDA=""
y_for_cpu='Build against TensorFlow CPU backend? (Type n to use GPU) [Y/n] '
until [[ -n "${TF_NEED_CUDA}" ]]; do
  read -p "${y_for_cpu}" INPUT || true
  answer="${INPUT:-Y}"
  if [[ "${answer}" == [Yy]* ]]; then
    echo "CPU build selected."
    TF_NEED_CUDA=0
  elif [[ "${answer}" == [Nn]* ]]; then
    echo "GPU build selected."
    TF_NEED_CUDA=1
  else
    echo "Please answer y or n."
  fi
done
# For TF >= 2.1 this value isn’t actually consulted by TFQ,
# but we keep a compatible prompt/flag.
TF_CUDA_VERSION="12"
# --- sanity: python is importable and has TF -------------------------------
if [[ ! -x "${PYTHON_BIN_PATH}" ]]; then
die "${PYTHON_BIN_PATH} not found/executable."
fi
# Ensure TF is importable from system python (user should have installed it).
echo "Next, you may see warnings printed by loading TensorFlow packages."
echo "Do not be alarmed unless there are errors."
# Probe the chosen interpreter for TensorFlow's build settings. The embedded
# program prints exactly three lines: (1) the TF header include directory,
# (2) the directory containing TF's shared library, and (3) the library file
# name — with a conventional fallback name if no candidate file matches.
# It exits with status 1 if tensorflow cannot be imported.
tf_output=$("${PYTHON_BIN_PATH}" - <<'PY'
import sys
import os
import glob
try:
import tensorflow as tf
import tensorflow.sysconfig as sc
except ImportError:
sys.exit(1)
print(sc.get_include())
lib_path = sc.get_lib()
lib_dir = lib_path if os.path.isdir(lib_path) else os.path.dirname(lib_path)
print(lib_dir)
cands = (glob.glob(os.path.join(lib_dir, 'libtensorflow_framework.so*')) or
glob.glob(os.path.join(lib_dir, 'libtensorflow.so*')) or
glob.glob(os.path.join(lib_dir, '_pywrap_tensorflow_internal.*')))
print(os.path.basename(cands[0]) if cands else 'libtensorflow_framework.so.2')
PY
)
# $? is the exit status of the command substitution above; the script
# deliberately avoids `set -e` so this check is reachable on failure.
if [[ $? -ne 0 ]]; then
echo "ERROR: tensorflow not importable by Python (${PYTHON_BIN_PATH})" >&2
exit 1
fi
# Split the probe's three output lines into HDR / LIBDIR / LIBNAME.
{
read -r HDR
read -r LIBDIR
read -r LIBNAME
} <<< "${tf_output}"
# Summarize the detected configuration for the user (one value per line,
# matching the original `echo` output byte-for-byte).
printf '\n%s\n' "Configuration values detected:"
printf ' %s\n' \
  "PYTHON_BIN_PATH=${PYTHON_BIN_PATH}" \
  "TF_HEADER_DIR=${HDR}" \
  "TF_SHARED_LIBRARY_DIR=${LIBDIR}" \
  "TF_SHARED_LIBRARY_NAME=${LIBNAME}"
# --- start fresh -----------------------------------------------------------
# Both files are regenerated from scratch on every run.
rm -f .bazelrc .tf_configure.bazelrc
# --- write .tf_configure.bazelrc (repo_env for repository rules) -----------
# --repo_env values are visible to Bazel repository rules (TF's own configure
# logic reads these while setting up the @local_config_* repos).
write_tf_rc "build --repo_env=PYTHON_BIN_PATH=${PYTHON_BIN_PATH}"
write_tf_rc "build --repo_env=TF_HEADER_DIR=${HDR}"
write_tf_rc "build --repo_env=TF_SHARED_LIBRARY_DIR=${LIBDIR}"
write_tf_rc "build --repo_env=TF_SHARED_LIBRARY_NAME=${LIBNAME}"
write_tf_rc "build --repo_env=TF_NEED_CUDA=${TF_NEED_CUDA}"
# Make sure repo rules and sub-config see legacy Keras (keras 2 instead of Keras 3)
write_tf_rc "build --repo_env=TF_USE_LEGACY_KERAS=1"
# --- write third_party/python_legacy/ with interpreter --------------------
write_legacy_python_repo
# --- write .bazelrc (imports TF config usual flags) -----------------
write_bazelrc "# WARNING: this file (.bazelrc) is AUTOGENERATED and overwritten"
write_bazelrc "# when configure.sh runs. Put customizations in .bazelrc.user."
write_bazelrc ""
# Pull in the repo_env values written above; try-import tolerates absence.
write_bazelrc "try-import %workspace%/.tf_configure.bazelrc"
write_bazelrc "common --experimental_repo_remote_exec"
# Standalone strategies: run actions locally without sandboxing.
write_bazelrc "build --spawn_strategy=standalone"
write_bazelrc "build --strategy=Genrule=standalone"
write_bazelrc "build -c opt"
# ABI and language level must match the pip-installed TensorFlow binaries.
write_bazelrc "build --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=1"
write_bazelrc "build --cxxopt=-std=c++17"
write_bazelrc "build --action_env=TF_USE_LEGACY_KERAS=1"
write_bazelrc "build --action_env=PYTHON_BIN_PATH=${PYTHON_BIN_PATH}"
# rpath so the dynamic linker finds TF’s shared lib
if ! is_windows; then
write_bazelrc "build --linkopt=-Wl,-rpath,${LIBDIR}"
fi
write_bazelrc ""
# The following supressions are for warnings coming from external dependencies.
# They're most likely inconsequential or false positives. Since we can't fix
# them, we suppress the warnings to reduce noise. Note: single quotes are needed
# for the first two so that the $ anchors are preserved in the .bazelrc file.
write_bazelrc 'build --per_file_copt=external/.*[.]c$@-Wno-deprecated-non-prototype'
write_bazelrc 'build --host_per_file_copt=external/.*[.]c$@-Wno-deprecated-non-prototype'
write_bazelrc "build --per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function"
write_bazelrc "build --host_per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function"
write_bazelrc "build --per_file_copt=external/eigen/.*@-Wno-maybe-uninitialized"
write_bazelrc "build --host_per_file_copt=external/eigen/.*@-Wno-maybe-uninitialized"
# The following warnings come from qsim.
# TODO: fix the code in qsim & update TFQ to use the updated version.
write_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable"
write_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable"
write_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations"
write_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations"
# CUDA toggle
# GPU builds get the CUDA defines plus toolkit/cuDNN locations; CPU builds
# just pin using_cuda=false.
if [[ "${TF_NEED_CUDA}" == "1" ]]; then
write_tf_rc "build --repo_env=TF_CUDA_VERSION=${TF_CUDA_VERSION}"
write_bazelrc ""
write_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true"
write_bazelrc "build:cuda --@local_config_cuda//:enable_cuda"
write_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain"
# NOTE(review): these are conventional default install locations for the CUDA
# toolkit and cuDNN — users with custom installs must edit .bazelrc.user.
if is_windows; then
write_tf_rc "build --repo_env=CUDNN_INSTALL_PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}"
write_tf_rc "build --repo_env=CUDA_TOOLKIT_PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}"
else
write_tf_rc "build --repo_env=CUDNN_INSTALL_PATH=/usr/lib/x86_64-linux-gnu"
write_tf_rc "build --repo_env=CUDA_TOOLKIT_PATH=/usr/local/cuda"
fi
# Make the cuda config the default for both build and test.
write_bazelrc "build --config=cuda"
write_bazelrc "test --config=cuda"
else
write_bazelrc "build --define=using_cuda=false"
fi
# Follow TensorFlow's approach and load an optional user bazelrc file.
write_bazelrc ""
write_bazelrc "try-import %workspace%/.bazelrc.user"
echo "Wrote .tf_configure.bazelrc and .bazelrc successfully."