Building pytorch 1.8.1 from source code in docker container

This is my docker script

FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu16.04 

ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
    && apt-get install -y build-essential \
    && apt-get install -y ca-certificates \
    && apt-get install -y ccache \
    && apt-get install -y cmake \
    && apt-get install -y curl \
    && apt-get install -y file \
    && apt-get install -y sudo \
    && apt-get install -y git \
    && apt-get install -y locales 
    # && locale-gen ja_JP.UTF-8
# ENV LANG ja_JP.UTF-8
# ENV LANGUAGE ja_JP:ja
# ENV LC_ALL=ja_JP.UTF-8
# RUN localedef -f UTF-8 -i ja_JP ja_JP.utf8

# Install nodejs
# RUN curl -sL https://deb.nodesource.com/setup_14.x | sed ’s|https://|http://|’ | bash - \
#     && sudo apt-get install -y nodejs

# Install MeCab, IPA, NEologd
# RUN apt-get install -y mecab \
#     && apt-get install -y libmecab-dev \
#     && apt-get install -y mecab-ipadic \
#     && apt-get install -y mecab-ipadic-utf8
# RUN git clone --depth 1 https://github.com/neologd/mecab-ipadic-neologd.git \
#     && mecab-ipadic-neologd/bin/install-mecab-ipadic-neologd -n -y \
#     && cp /etc/mecabrc /usr/local/etc/

# remove files
RUN apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# install miniforge
RUN  curl -LO https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh && \
bash Miniforge3-Linux-x86_64.sh -b -p /opt/conda && \
rm Miniforge3-Linux-x86_64.sh

# Add Miniforge to the PATH
ENV PATH="/opt/conda/bin:$PATH"

# install packages by pip
# COPY requirements_pip.txt /tmp
# COPY requirements.txt /tmp
# RUN pip3 install --upgrade pip \
#     && pip3 install --no-cache-dir -r /tmp/requirements_pip.txt \
#     && rm -rf ~/.cache/pip

# install packages by conda
    RUN conda install python=3.8\
        && conda install astunparse numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing_extensions future six requests dataclasses \
        && conda install -c pytorch magma-cuda101 \
        && conda clean --all

# install torch from source
RUN git clone  https://github.com/pytorch/pytorch  && \
    cd pytorch && \
    git checkout v1.8.1 && \
    git submodule sync && \
    git submodule update --init --recursive

WORKDIR 

ENV PYTORCH_BUILD_VERSION=1.8.1
ENV PYTORCH_BUILD_NUMBER=1
ENV USE_CUDA=1 USE_CUDNN=1
ENV TORCH_CUDA_ARCH_LIST="3.5" TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
ENV MAX_JOBS=2
RUN python setup.py clean \
    && python setup.py install

I am getting this error

1/242] Building CXX object test_jit/CMakeFiles/test_jit.dir/test_irparser.cpp.o
FAILED: test_jit/CMakeFiles/test_jit.dir/test_irparser.cpp.o 
/usr/bin/ccache /usr/bin/c++ -DHAVE_MALLOC_USABLE_SIZE=1 -DHAVE_MMAP=1 -DHAVE_SHM_OPEN=1 -DHAVE_SHM_UNLINK=1 -DIDEEP_USE_MKL -DMAGMA_V2 -DMINIZ_DISABLE_ZIP_READER_CRC32_CHECKS -DONNXIFI_ENABLE_EXT=1 -DONNX_ML=1 -DONNX_NAMESPACE=onnx_torch -DTH_BLAS_MKL -DUSE_CUDA -DUSE_EXTERNAL_MZCRC -DUSE_GTEST -D_FILE_OFFSET_BITS=64 -I/pytorch/build/aten/src -I/pytorch/aten/src -I/pytorch/build -I/pytorch -I/pytorch/cmake/../third_party/benchmark/include -I/pytorch/third_party/onnx -I/pytorch/build/third_party/onnx -I/pytorch/third_party/foxi -I/pytorch/build/third_party/foxi -I/pytorch/build/caffe2/../aten/src -I/pytorch/build/caffe2/../aten/src/ATen -I/pytorch/torch/csrc/api -I/pytorch/torch/csrc/api/include -I/pytorch/c10/.. -I/pytorch/c10/cuda/../.. -isystem /pytorch/build/third_party/gloo -isystem /pytorch/cmake/../third_party/gloo -isystem /pytorch/cmake/../third_party/googletest/googlemock/include -isystem /pytorch/cmake/../third_party/googletest/googletest/include -isystem /pytorch/third_party/protobuf/src -isystem /opt/conda/include -isystem /pytorch/third_party/gemmlowp -isystem /pytorch/third_party/neon2sse -isystem /pytorch/third_party/XNNPACK/include -isystem /pytorch/third_party -isystem /pytorch/cmake/../third_party/eigen -isystem /opt/conda/include/python3.8 -isystem /opt/conda/lib/python3.8/site-packages/numpy/core/include -isystem /pytorch/cmake/../third_party/pybind11/include -isystem /pytorch/cmake/../third_party/cub -isystem /pytorch/third_party/ideep/mkl-dnn/include -isystem /pytorch/third_party/ideep/include -isystem /usr/local/cuda/include -isystem /pytorch/third_party/googletest/googletest/include -isystem /pytorch/third_party/googletest/googletest -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -DHAVE_AVX_CPU_DEFINITION -DHAVE_AVX2_CPU_DEFINITION -O3 -DNDEBUG -DNDEBUG -std=gnu++14 -fPIE -DCAFFE2_USE_GLOO -DCUDA_HAS_FP16=1 -DHAVE_GCC_GET_CPUID -DUSE_AVX -DUSE_AVX2 -DTH_HAVE_THREAD -pthread -MD -MT test_jit/CMakeFiles/test_jit.dir/test_irparser.cpp.o -MF test_jit/CMakeFiles/test_jit.dir/test_irparser.cpp.o.d -o test_jit/CMakeFiles/test_jit.dir/test_irparser.cpp.o -c /pytorch/test/cpp/jit/test_irparser.cpp
/pytorch/test/cpp/jit/test_irparser.cpp:243:2: warning: missing terminating " character
   EXPECT_ANY_THROW(checkRoundtrip(
  ^
/pytorch/test/cpp/jit/test_irparser.cpp:243:2: error: missing terminating " character
/pytorch/test/cpp/jit/test_irparser.cpp:249:1: error: stray ‘\’ in program
   return (%3)
 ^
/pytorch/test/cpp/jit/test_irparser.cpp:304:2: warning: missing terminating " character
   EXPECT_ANY_THROW(parseIR(
  ^
/pytorch/test/cpp/jit/test_irparser.cpp:304:2: error: missing terminating " character
/pytorch/test/cpp/jit/test_irparser.cpp:307:1: error: stray ‘\’ in program
   return (%a)
 ^
/pytorch/test/cpp/jit/test_irparser.cpp: In member function ‘virtual void torch::jit::IRParserTest_MalformedShapeAnnotation_Test::TestBody()’:
/pytorch/test/cpp/jit/test_irparser.cpp:243:180: error: expected primary-expression before ‘(’ token
/pytorch/test/cpp/jit/test_irparser.cpp:244:1: error: expected ‘)’ before ‘graph’
       R"IR(
 ^
/pytorch/test/cpp/jit/test_irparser.cpp: In member function ‘virtual void torch::jit::IRParserTest_MalformedStrides_Test::TestBody()’:
/pytorch/test/cpp/jit/test_irparser.cpp:304:182: error: expected primary-expression before ‘(’ token
/pytorch/test/cpp/jit/test_irparser.cpp:305:1: error: expected ‘)’ before ‘graph’
       R"IR(
 ^
ninja: build stopped: subcommand failed.
Traceback (most recent call last):
  File "setup.py", line 818, in <module>
    build_deps()
  File "setup.py", line 315, in build_deps
    build_caffe2(version=version,
  File "/pytorch/tools/build_pytorch_libs.py", line 58, in build_caffe2
    cmake.build(my_env)
  File "/pytorch/tools/setup_helpers/cmake.py", line 345, in build
    self.run(build_args, my_env)
  File "/pytorch/tools/setup_helpers/cmake.py", line 140, in run
    check_call(command, cwd=self.build_dir, env=env)
  File "/opt/conda/lib/python3.8/subprocess.py", line 364, in check_call
    raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['cmake', '--build', '.', '--target', 'install', '--config', 'Release', '--', '-j', '1']' returned non-zero exit status 1.

I am not able to find out what can be the issue ?

I was having a different, but similar issue. This ran for me:

# Install miniconda
# conda at end of path in order to use OS python in container
ENV PATH="${PATH}:/root/miniconda3/bin"
ARG PATH="${PATH}:/root/miniconda3/bin"

RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
    && mkdir /root/.conda \
    && bash Miniconda3-latest-Linux-x86_64.sh -b \
    && rm -f Miniconda3-latest-Linux-x86_64.sh \
    && conda update conda \
    && conda install astunparse numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing_extensions future six requests dataclasses && \
    conda install -y intel::mkl-static intel::mkl-include && \
    conda install -y -c pytorch magma-cuda121

ENV TORCH_CUDA_ARCH_LIST="6.1"
ENV CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}

RUN git clone --recursive https://github.com/pytorch/pytorch && \
    cd pytorch && \
    make triton && \
    # git checkout v2.2.0 && \
    # if you are updating an existing checkout
    git submodule sync && \
    git submodule update --init --recursive && \
    python setup.py develop

The main difference being make triton after you clone the repo.