0

I compiled the tensorrt python binding for a specific version of python. It doesn't have other python dependencies. Now I would like to install the compiled .whl file in a yocto image. I tried with the following recipe:

# Recipe attempting to install a locally prebuilt TensorRT Python wheel.
SUMMARY = "NVIDIA® TensorRT™, an SDK for high-performance deep learning inference, includes a deep learning inference optimizer and runtime that delivers low latency and high throughput for inference applications."
HOMEPAGE = "https://github.com/NVIDIA/TensorRT"

# Local prebuilt wheel (CPython 3.8, aarch64-only), resolved via the recipe's
# files/ directory by the file:// fetcher.
SRC_URI = "file://tensorrt-8.2.1.9-cp38-none-linux_aarch64.whl"

LICENSE = "Proprietary"
LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/Proprietary;md5=0557f9d92cf58f2ccdd50f62f8ac0b28"

# NOTE(review): DEPENDS only adds build-time sysroot dependencies; it does not
# put a runnable `pip` on PATH inside the do_install task environment.
DEPENDS += "python3 python3-pip"

# NOTE(review): ${PYTHON_DIR} is only set when a python class (e.g.
# python3-dir) is inherited — this recipe does not inherit one.
FILES_${PN} += "\
    ${libdir}/${PYTHON_DIR}/site-packages/* \
"

# This is why the build fails with exit code 127 ("command not found"): `pip`
# does not exist in the task's environment. Even if it did, `pip install` would
# install onto the build host rather than staging files under ${D}, so nothing
# would be packaged for the target image.
do_install() {                                                                                      
    pip install ${S}/tensorrt-8.2.1.9-cp38-none-linux_aarch64.whl
}

Unfortunately the recipe fails at the do_install step: ERROR: Execution of '/home/user/Desktop/tegra-demo-distro/build/tmp/work/aarch64-oe4t-linux/tensorrt/8.2.1-r0/temp/run.do_install.56472' failed with exit code 127

Damien
  • 921
  • 4
  • 13
  • 31
  • https://stackoverflow.com/questions/48660051/yocto-recipe-python-whl-package - This question has some example recipes about installing a local whl file. Basically you have to extract it, and then copy the pieces to the final locations. – skandigraun Jun 17 '23 at 10:04
  • I'm able to replicate that setup (they download it from somewhere). But I'm not able to do the same thing with a local wheel – Damien Jun 17 '23 at 16:06
  • Note that they download it, and change its extension to zip (`downloadfilename=...` in SRC_URI), to have it extracted by bitbake automatically. Try to change the extension to `.zip` instead of keeping as `.whl` – skandigraun Jun 17 '23 at 16:32

1 Answer

0

I solved by manually renaming the .whl file to .zip and using the following recipe:

# Working recipe: the wheel is renamed to .zip so bitbake's do_unpack extracts
# it automatically, then its contents are installed by hand into site-packages.
SUMMARY = "NVIDIA® TensorRT™, an SDK for high-performance deep learning inference, includes a deep learning inference optimizer and runtime that delivers low latency and high throughput for inference applications."
HOMEPAGE = "https://github.com/NVIDIA/TensorRT"

# The .whl was manually renamed to .zip; a wheel IS a zip archive, and the
# .zip extension makes the unpack task extract it into ${WORKDIR}.
SRC_URI = "file://tensorrt-${PV}-cp38-none-linux_aarch64.zip"

# NOTE(review): checksum varflags are normally only required/used for remote
# URIs, not file:// entries — presumably harmless here; verify against the
# fetcher behavior of the Yocto release in use.
SRC_URI[md5sum] = "184c1081fcdd1089540814a4e098a726"                                                
SRC_URI[sha256sum] = "920f964ee462b7326e09b0bfd518d5adff7a42e224b714fb44fcbe3e8df9cd84"             

# Provides ${PYTHON_DIR} (e.g. python3.8) used in FILES and do_install below.
inherit python3-dir

LICENSE = "Proprietary"
LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/Proprietary;md5=0557f9d92cf58f2ccdd50f62f8ac0b28"

# Ensure the native unzip tool is built and staged before do_unpack needs it
# to extract the .zip.
do_unpack[depends] += "unzip-native:do_populate_sysroot"
                                                       
# NOTE(review): DEPENDS is build-time only; a runtime dependency such as
# RDEPENDS:${PN} += "python3-core" may also be wanted — confirm for the image.
DEPENDS += "python3"

# Package the unpacked module and its dist-info directory.
# NOTE(review): underscore override syntax (FILES_${PN}) only works on Yocto
# releases before Honister (3.4); newer releases require FILES:${PN}.
FILES_${PN} += "\
    ${libdir}/${PYTHON_DIR}/site-packages/tensorrt \
    ${libdir}/${PYTHON_DIR}/site-packages/tensorrt-${PV}.dist-info \
"

# Manually stage the extracted wheel contents under ${D} so they are packaged
# for the target, mirroring the layout pip would have produced on-target.
do_install() {
    install -d ${D}${libdir}/${PYTHON_DIR}/site-packages/tensorrt
    install -d ${D}${libdir}/${PYTHON_DIR}/site-packages/tensorrt-${PV}.dist-info

    # 644 is fine for .py metadata; shared objects inside the wheel also load
    # with mode 644 on Linux since the loader only needs read access.
    install -m 644 ${WORKDIR}/tensorrt/* ${D}${libdir}/${PYTHON_DIR}/site-packages/tensorrt/
    install -m 644 ${WORKDIR}/tensorrt-${PV}.dist-info/* ${D}${libdir}/${PYTHON_DIR}/site-packages/tensorrt-${PV}.dist-info/
}
Damien
  • 921
  • 4
  • 13
  • 31