Script started on Mon 14 Oct 2019 10:51:34 AM CDT

[eae@ducc-saicluster-gpu-801 ~]$ podman info
host:
  BuildahVersion: 1.9.0
  Conmon:
    package: podman-1.4.4-4.el7.centos.x86_64
    path: /usr/libexec/podman/conmon
    version: 'conmon version 0.3.0, commit: unknown'
  Distribution:
    distribution: '"centos"'
    version: "7"
  MemFree: 52633743360
  MemTotal: 63154479104
  OCIRuntime:
    package: runc-1.0.0-65.rc8.el7.centos.x86_64
    path: /usr/bin/runc
    version: 'runc version spec: 1.0.1-dev'
  SwapFree: 2146758656
  SwapTotal: 2146758656
  arch: amd64
  cpus: 8
  hostname: ducc-saicluster-gpu-801.sl.cloud9.ibm.com
  kernel: 3.10.0-1062.1.2.el7.x86_64
  os: linux
  rootless: true
  uptime: 145h 40m 2.43s (Approximately 6.04 days)
registries:
  blocked: null
  insecure: null
  search:
  - registry.access.redhat.com
  - docker.io
  - registry.fedoraproject.org
  - quay.io
  - registry.centos.org
store:
  ConfigFile: /home/eae/.config/containers/storage.conf
  ContainerStore:
    number: 0
  GraphDriverName: vfs
  GraphOptions: null
  GraphRoot: /tmp/eae/containers/storage
  GraphStatus: {}
  ImageStore:
    number: 2
  RunRoot: /run/user/13642
  VolumePath: /tmp/eae/containers/storage/volumes


[eae@ducc-saicluster-gpu-801 ~]$ cat .config/containers/libpod.conf
volume_path = "/tmp/eae/containers/storage/volumes"
image_default_transport = "docker://"
runtime = "runc"
conmon_path = ["/usr/libexec/podman/conmon", "/usr/local/lib/podman/conmon", "/usr/bin/conmon", "/usr/sbin/conmon", "/usr/local/bin/conmon", "/usr/local/sbin/conmon"]
conmon_env_vars = ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
cgroup_manager = "cgroupfs"
init_path = "/usr/libexec/podman/catatonit"
static_dir = "/tmp/eae/containers/storage/libpod"
tmp_dir = "/run/user/13642/libpod/tmp"
max_log_size = -1
no_pivot_root = false
cni_config_dir = "/etc/cni/net.d/"
cni_plugin_dir = ["/usr/libexec/cni", "/usr/lib/cni", "/usr/local/lib/cni", "/opt/cni/bin"]
infra_image = "k8s.gcr.io/pause:3.1"
infra_command = "/pause"
enable_port_reservation = true
label = true
network_cmd_path = ""
num_locks = 2048
events_logger = "journald"
EventsLogFilePath = ""
detach_keys = "ctrl-p,ctrl-q"
hooks_dir = ["/usr/share/containers/oci/hooks.d"]
[runtimes]
  runc = ["/usr/bin/runc", "/usr/sbin/runc", "/usr/local/bin/runc", "/usr/local/sbin/runc", "/sbin/runc", "/bin/runc", "/usr/lib/cri-o-runc/sbin/runc"]


[eae@ducc-saicluster-gpu-801 ~]$ cat /usr/share/containers/oci/hooks.d/oci-nvidia-hook.json
{
    "version": "1.0.0",
    "hook": {
        "path": "/usr/bin/nvidia-container-toolkit",
        "args": ["nvidia-container-toolkit", "prestart"],
        "env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
        ]
    },
    "when": {
        "always": true,
"commands": [".*"]
    },
    "stages": ["prestart"]
}
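
(Annotation, not part of the captured session: this hook runs /usr/bin/nvidia-container-toolkit at the prestart stage of every container. To sanity-check the underlying toolkit outside podman, the libnvidia-container CLI can be exercised directly; a minimal check, assuming the stock nvidia-container-toolkit packaging:)

$ nvidia-container-cli info    # report driver version and visible GPUs
$ nvidia-container-cli list    # list driver files the hook would inject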


[eae@ducc-saicluster-gpu-801 ~]$ podman --log-level=debug  run  --rm nvidia/cuda nvidia-smi
INFO[0000] running as rootless
DEBU[0000] Initializing boltdb state at /tmp/eae/containers/storage/libpod/bolt_state.db
DEBU[0000] Using graph driver vfs
DEBU[0000] Using graph root /tmp/eae/containers/storage
DEBU[0000] Using run root /run/user/13642
DEBU[0000] Using static dir /tmp/eae/containers/storage/libpod
DEBU[0000] Using tmp dir /run/user/13642/libpod/tmp
DEBU[0000] Using volume path /tmp/eae/containers/storage/volumes
DEBU[0000] Set libpod namespace to ""
DEBU[0000] [graphdriver] trying provided driver "vfs"
DEBU[0000] Initializing event backend journald
DEBU[0000] parsed reference into "[vfs@/tmp/eae/containers/storage+/run/user/13642]docker.io/nvidia/cuda:latest"
DEBU[0000] parsed reference into "[vfs@/tmp/eae/containers/storage+/run/user/13642]@946e78c7b2984354477ae4b75bf519940f4df648c092564d1d9c83ea8c92c8f3"
DEBU[0000] exporting opaque data as blob "sha256:946e78c7b2984354477ae4b75bf519940f4df648c092564d1d9c83ea8c92c8f3"
DEBU[0000] parsed reference into "[vfs@/tmp/eae/containers/storage+/run/user/13642]@946e78c7b2984354477ae4b75bf519940f4df648c092564d1d9c83ea8c92c8f3"
DEBU[0000] exporting opaque data as blob "sha256:946e78c7b2984354477ae4b75bf519940f4df648c092564d1d9c83ea8c92c8f3"
DEBU[0000] parsed reference into "[vfs@/tmp/eae/containers/storage+/run/user/13642]@946e78c7b2984354477ae4b75bf519940f4df648c092564d1d9c83ea8c92c8f3"
DEBU[0000] Got mounts: []
DEBU[0000] Got volumes: []
DEBU[0000] Using slirp4netns netmode
DEBU[0000] created OCI spec and options for new container
DEBU[0000] Allocated lock 0 for container 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8
DEBU[0000] parsed reference into "[vfs@/tmp/eae/containers/storage+/run/user/13642]@946e78c7b2984354477ae4b75bf519940f4df648c092564d1d9c83ea8c92c8f3"
DEBU[0000] exporting opaque data as blob "sha256:946e78c7b2984354477ae4b75bf519940f4df648c092564d1d9c83ea8c92c8f3"
DEBU[0009] created container "75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8"
DEBU[0009] container "75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8" has work directory "/tmp/eae/containers/storage/vfs-containers/75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8/userdata"
DEBU[0009] container "75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8" has run directory "/run/user/13642/vfs-containers/75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8/userdata"
DEBU[0009] New container created "75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8"
DEBU[0009] container "75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8" has CgroupParent "/libpod_parent/libpod-75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8"
DEBU[0009] Not attaching to stdin
DEBU[0009] mounted container "75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8" at "/tmp/eae/containers/storage/vfs/dir/dfce96c6e34e6c12aad6da967a42c56db04a30664bf8c6a081ee5efb1dcb7b19"
DEBU[0009] Created root filesystem for container 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8 at /tmp/eae/containers/storage/vfs/dir/dfce96c6e34e6c12aad6da967a42c56db04a30664bf8c6a081ee5efb1dcb7b19
DEBU[0009] /etc/system-fips does not exist on host, not mounting FIPS mode secret
DEBU[0009] reading hooks from /usr/share/containers/oci/hooks.d
DEBU[0009] added hook /usr/share/containers/oci/hooks.d/oci-nvidia-hook.json
DEBU[0009] hook oci-nvidia-hook.json matched; adding to stages [prestart]
DEBU[0009] Created OCI spec for container 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8 at /tmp/eae/containers/storage/vfs-containers/75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8/userdata/config.json
DEBU[0009] /usr/libexec/podman/conmon messages will be logged to syslog
DEBU[0009] running conmon: /usr/libexec/podman/conmon  args="[-c 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8 -u 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8 -n flamboyant_pare -r /usr/bin/runc -b /tmp/eae/containers/storage/vfs-containers/75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8/userdata -p /run/user/13642/vfs-containers/75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8/userdata/pidfile --exit-dir /run/user/13642/libpod/tmp/exits --conmon-pidfile /run/user/13642/vfs-containers/75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /tmp/eae/containers/storage --exit-command-arg --runroot --exit-command-arg /run/user/13642 --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg cgroupfs --exit-command-arg --tmpdir --exit-command-arg /run/user/13642/libpod/tmp --exit-command-arg --runtime --exit-command-arg runc --exit-command-arg --storage-driver --exit-command-arg vfs --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8 --socket-dir-path /run/user/13642/libpod/tmp/socket -l k8s-file:/tmp/eae/containers/storage/vfs-containers/75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8/userdata/ctr.log --log-level debug --syslog]"
WARN[0009] Failed to add conmon to cgroupfs sandbox cgroup: error creating cgroup for blkio: mkdir /sys/fs/cgroup/blkio/libpod_parent: permission denied
[conmon:d]: failed to write to /proc/self/oom_score_adj: Permission denied

DEBU[0010] Received container pid: -1
DEBU[0010] Cleaning up container 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8
DEBU[0010] Network is already cleaned up, skipping...
DEBU[0010] unmounted container "75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8"
DEBU[0010] Cleaning up container 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8
DEBU[0010] Network is already cleaned up, skipping...
DEBU[0010] Container 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8 storage is already unmounted, skipping...
DEBU[0010] Container 75bb8e197bea3d0c56f5060ab5e1388a1bdcab354e9820bd5554d3bf273a54d8 storage is already unmounted, skipping...
ERRO[0010] container_linux.go:345: starting container process caused "process_linux.go:430: container init caused \"process_linux.go:413: running prestart hook 0 caused \\\"error running hook: exit status 1, stdout: , stderr: nvidia-container-cli: mount error: open failed: /sys/fs/cgroup/devices/user.slice/devices.allow: permission denied\\\\n\\\"\""
: OCI runtime error


Script done on Mon 14 Oct 2019 10:53:23 AM CDT
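
(Annotation, not part of the captured session: the final ERRO shows the NVIDIA prestart hook failing to open /sys/fs/cgroup/devices/user.slice/devices.allow, a write a rootless user is never permitted to make. The commonly documented remedy for rootless podman is to stop the toolkit from managing the devices cgroup in /etc/nvidia-container-runtime/config.toml; a minimal sketch, assuming the stock config file shipped with nvidia-container-toolkit:)

[nvidia-container-cli]
# Rootless podman cannot write to the cgroup v1 devices controller,
# so skip the devices.allow update that failed above and rely on the
# container's existing device access instead.
no-cgroups = true

(With that set, re-running podman run --rm nvidia/cuda nvidia-smi should get past the prestart hook; the earlier WARN about /sys/fs/cgroup/blkio/libpod_parent is expected in rootless mode and appears harmless here.)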