Setup notes and code for a hand-gesture recognition project (Jetson Nano 2GB)
First, write the Jetson Nano 2GB OS image to an SD card using balenaEtcher.
#!/usr/bin/env bash
# Setup for hand-gesture recognition on a Jetson Nano 2GB using
# NVIDIA trt_pose_hand. These commands were originally pasted together
# with their newlines lost; they are split out and annotated here.
# Run step by step — several require interactive confirmation.

# --- 1. On the Jetson host: pull the L4T ML container, prepare data dir ---
sudo docker pull nvcr.io/nvidia/l4t-ml:r32.7.1-py3
mkdir -p ~/l4t-data          # -p: no error if the directory already exists
lsusb                        # confirm the USB camera is detected

# Start the container (this launches Jupyter Notebook).
# NOTE(fix): mount the host directory to an explicit container path —
# the original `--volume ~/l4t-data` created an anonymous volume instead.
sudo docker run --runtime nvidia -it \
  --network host \
  --volume ~/l4t-data:/l4t-data \
  --device /dev/video0 \
  nvcr.io/nvidia/l4t-ml:r32.7.1-py3

# --- 2. After Jupyter Notebook is running, open a terminal inside the ---
# --- container and install trt_pose_hand with its dependencies.       ---
git clone https://github.com/NVIDIA-AI-IOT/torch2trt
cd torch2trt
python3 setup.py install --plugins
pip3 install tqdm cython pycocotools
apt-get install python3-matplotlib
cd /

git clone https://github.com/NVIDIA-AI-IOT/trt_pose
cd trt_pose
python3 setup.py install
cd /

git clone https://github.com/NVIDIA-AI-IOT/jetcam
cd jetcam
python3 setup.py install
cd /

git clone https://github.com/NVIDIA-AI-IOT/trt_pose_hand.git
cd trt_pose_hand
pip3 install scikit-learn

# --- 3. Back on the host: run the prebuilt gesture-classification image ---
# Allow local Docker containers to talk to the X server. This must run
# BEFORE starting a container that uses $DISPLAY (the original notes had
# it after the run command, fused onto the same line).
xhost +local:docker

# NOTE: `--device /dev/video*` and `/dev/ttyUSB*` only work if the shell
# glob matches exactly one file each — with several devices, each needs
# its own --device flag. Explicit device paths are used here; add
# `--device /dev/ttyUSB0` if serial access is needed.
# NOTE: a fixed `--name` can only be used once; run `docker rm handV7`
# (or add `--rm`) before re-running this command.
sudo docker run --name handV7 --runtime nvidia -it \
  -e DISPLAY="$DISPLAY" \
  --network host \
  -v ~/l4t-data:/l4t-data \
  --device /dev/video0 \
  --workdir /trt_hand_pose \
  felipegalind0/trt_hand_pose:v7 \
  python3 gesture_classification_print.py