Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
k3ai is a lightweight infrastructure-in-a-box solution specifically built to
install and configure AI tools and platforms in production environments on Edge
and IoT devices as easily as local test environments.
Usage:
k3ai [command]
Available Commands:
apply Apply a plugin or a plugin group
delete Delete a plugin or a plugin group
help Help about any command
init Initialize K3ai Client
list List all plugins or plugin groups
version Print CLI version
Flags:
-h, --help help for k3ai-cli
--repo string URI for the plugins repository.
Use "k3ai-cli [command] --help" for more information about a command.Apply a plugin or a plugin group
Usage:
k3ai apply <plugin_name> [flags]
Flags:
-g, --group Apply a plugin group
-h, --help help for apply
Global Flags:
--repo string URI for the plugins repository.
(default "https://api.github.com/repos/kf5i/k3ai-plugins/contents/core/")Initialize K3ai Client, allowing user to deploy a new K8's cluster,
list plugins and groups
Usage:
k3ai init [flags]
Examples:
k3ai init #Will use config from $HOME/.k3ai/config.yaml and use interactive menus
k3ai init --config /myfolder/myconfig.yaml #Use a custom config.yaml in another location(local or remote)
k3ai init --local k3s #Use config target marked local and of type k3s
k3ai init --cloud civo #Use config target marked as cloud and of type civo
Flags:
--cloud string Options available for cloud providers
--config string Custom config file [default is $HOME/.k3ai/config.yaml] (default "/.k3ai/config.yaml")
-h, --help help for init
--local string Options available k3s,k0s,kind
Global Flags:
--repo string URI for the plugins repository.
(default "https://api.github.com/repos/kf5i/k3ai-plugins/contents/core/")List all plugins or plugin groups
Usage:
k3ai list [flags]
Flags:
-g, --group List the plugin groups
-h, --help help for list
Global Flags:
--repo string URI for the plugins repository.
(default "https://api.github.com/repos/kf5i/k3ai-plugins/contents/core/")Delete a plugin or a plugin group
Usage:
k3ai delete <plugin_name> [flags]
Flags:
-g, --group Delete a plugin group
-h, --help help for delete
Global Flags:
--repo string URI for the plugins repository.
(default "https://api.github.com/repos/kf5i/k3ai-plugins/contents/core/")kind: cluster
targetCustomizations:
- name: localK3s #name of the cluster instance not the name of the cluster
enabled: false
type: k3s
config: "/etc/rancher/k3s/k3s.yaml" #default location of config file or your existing config file to copy
clusterName: demo-wsl-k3s #name of the cluster (this needs to be the same as in a config file)
clusterDeployment: local
clusterStart: "sudo bash -ic 'k3s server --write-kubeconfig-mode 644 > /dev/null 2>&1 &'"
spec:
# The OS entries below are mutually exclusive; if one is not needed, set it to null or remove it
wsl: "https://github.com/rancher/k3s/releases/download/v1.19.4%2Bk3s1/k3s"
mac:
linux: "https://get.k3s.io | K3S_KUBECONFIG_MODE=644 sh -s -"
windows:
# Everything from this repo will be run in this cluster. You trust me, right?
plugins:
- repo:
name:
- repo:
name:
- name: localK0s #name of the cluster instance not the name of the cluster
enabled: false
type: k0s
config: "${HOME}/.k3ai/kubeconfig" #default location of config file or your existing config file to copy
clusterName: demo-wsl-k0s #name of the cluster (this needs to be the same as in a config file)
clusterDeployment: local
clusterStart: "k0s default-config | tee ${HOME}/.k3ai/k0s.yaml && sudo bash -ic 'k0s server -c ${HOME}/.k3ai/k0s.yaml --enable-worker > /dev/null 2>&1 &' && sudo cat /var/lib/k0s/pki/admin.conf > $HOME/.k3ai/k0s-config"
spec:
# The OS entries below are mutually exclusive; if one is not needed, set it to null or remove it
wsl: "https://github.com/k0sproject/k0s/releases/download/v0.8.1/k0s-v0.8.1-amd64"
mac:
linux: "https://github.com/k0sproject/k0s/releases/download/v0.8.1/k0s-v0.8.1-amd64"
windows:
# Everything from this repo will be run in this cluster. You trust me, right?
plugins:
- repo:
name:
- repo:
name:
- name: localKind #name of the cluster instance not the name of the cluster
enabled: true
type: kind
config: #default location of config file or your existing config file to copy
clusterName: demo-win-kind #name of the cluster (this needs to be the same as in a config file)
clusterDeployment: local
clusterStart: "kind create cluster"
spec:
# The OS entries below are mutually exclusive; if one is not needed, set it to null or remove it
wsl: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64"
mac: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-darwin-amd64"
linux: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64"
windows: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-windows-amd64"
# Everything from this repo will be run in this cluster. You trust me, right?
plugins:
- repo:
name: jupyter-minimal
- repo:
name:
- name: localK3d #name of the cluster instance not the name of the cluster
enabled: false
type: k3d
config: #default location of config file or your existing config file to copy
clusterName: demo-win-k3d #name of the cluster (this needs to be the same as in a config file)
clusterDeployment: local
clusterStart: "k3d cluster create"
spec:
# The OS entries below are mutually exclusive; if one is not needed, set it to null or remove it
wsl: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64"
mac: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-darwin-amd64"
linux: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64"
windows: "https://github.com/rancher/k3d/releases/download/v3.4.0-test.0/k3d-windows-amd64.exe"
# Everything from this repo will be run in this cluster. You trust me, right?
plugins:
- repo:
name: jupyter-minimal
- repo:
name:
- name: remoteK3s #name of the cluster instance not the name of the cluster
enabled: false
type: k3s
config: remote #default location of config file or your existing config file to copy; if remote, it will be copied from a remote location
clusterName: demo-cluster-remote #name of the cluster (this needs to be the same as in a config file)
clusterDeployment: cloud
clusterStart:
spec:
# The OS entries below are mutually exclusive; if one is not needed, set it to null or remove it
wsl:
mac:
linux:
windows:
cloudType: civo
cloudNodes: 1
cloudSecretPath: $HOME/.k3ai/secret.txt
# Everything from this repo will be run in this cluster. You trust me, right?
plugins:
- repo: "https://github.com/alfsuse/demo-plugins"
name: "demo"
- repo: "https://github.com/alfsuse/demo-plugins-2"
name: "demo2"kind: cluster
targetCustomizations:kind: cluster
targetCustomizations:
- name: localK3s #name of the cluster instance not the name of the cluster
enabled: false
type: k3s
...
clusterName: demo-wsl-k3s
clusterDeployment: local
spec:
wsl: "https://github.com/rancher/k3s/releases/download/v1.19.4%2Bk3s1/k3s"
mac:
linux: "https://get.k3s.io | K3S_KUBECONFIG_MODE=644 sh -s -"
windows:
plugins:
- repo:
name:
- repo:
name: ...
type: k3s
#default location of config file or your existing config file to copy
config: "/etc/rancher/k3s/k3s.yaml"
...
clusterStart: "sudo bash -ic 'k3s server --write-kubeconfig-mode 644 > /dev/null 2>&1 &'"
type: k3d
...
clusterStart: "k3d cluster create"
spec:
wsl: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64"
mac: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-darwin-amd64"
linux: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64"
windows: "https://github.com/rancher/k3d/releases/download/v3.4.0-test.0/k3d-windows-amd64.exe"type: k0s
#default location of config file or your existing config file to copy
config: "${HOME}/.k3ai/kubeconfig"
...
clusterStart: "k0s default-config | tee ${HOME}/.k3ai/k0s.yaml &&
sudo bash -ic 'k0s server -c ${HOME}/.k3ai/k0s.yaml --enable-worker > /dev/null 2>&1 &' &&
sudo cat /var/lib/k0s/pki/admin.conf > $HOME/.k3ai/k0s-config"
spec:type: kind
config:
...
clusterStart: "kind create cluster"
spec:
wsl: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64"
mac: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-darwin-amd64"
linux: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64"
windows: "https://kind.sigs.k8s.io/dl/v0.9.0/kind-windows-amd64"enabled: false
clusterDeployment: cloud
clusterStart:
spec:
wsl:
mac:
linux:
windows:
cloudType: civo
cloudNodes: 1
cloudSecretPath: $HOME/.k3ai/secret.txtk3ai-cli deletecurl -fL "https://get.k3ai.in" -o k3ai.tar.gztar -xvzf k3ai.tar.gz \
&& chmod +x ./k3ai \
&& sudo mv ./k3ai /usr/local/binInvoke-WebRequest -Uri "https://get-win.k3ai.in" -OutFile k3ai.zip Expand-Archive -Path .\k3ai.zipcurl -fL "https://get-mac.k3ai.in" -o k3ai.tar.gztar -xvzf k3ai.tar.gz \
&& chmod +x ./k3ai \
&& sudo mv ./k3ai /usr/local/bincurl -fL "https://get-arm.k3ai.in" -o k3ai.tar.gztar -xvzf k3ai.tar.gz \
&& chmod +x ./k3ai \
&& sudo mv ./k3ai /usr/local/bin#Set a variable to grab latest version
Version=$(curl -s "https://api.github.com/repos/kf5i/k3ai-core/releases/latest" | awk -F '"' '/tag_name/{print $4}' | cut -c 2-6)
# get the binaries
wget https://github.com/kf5i/k3ai-core/releases/download/v$Version/k3ai-core_${Version}_linux_amd64.tar.gzerror: timed out waiting for the condition on xxxxxxx#Set a variable to grab latest version
Version=$(curl -s "https://api.github.com/repos/kf5i/k3ai-core/releases/latest" | awk -F '"' '/tag_name/{print $4}' | cut -c 2-6)
# get the binaries
wget https://github.com/kf5i/k3ai-core/releases/download/v$Version/k3ai-core_${Version}_linux_amd64.tar.gzcurl -sfL "https://get.k3ai.in" -o k3ai.tar.gztar -xvzf k3ai.tar.gz \
&& chmod +x ./k3ai \
&& sudo mv ./k3ai /usr/local/binInvoke-WebRequest -Uri "https://get-win.k3ai.in" -OutFile k3ai.zip Expand-Archive -Path .\k3ai.zipcurl -sfL "https://get-mac.k3ai.in" -o k3ai.tar.gztar -xvzf k3ai.tar.gz \
&& chmod +x ./k3ai \
&& sudo mv ./k3ai /usr/local/bincurl -sfL "https://get-arm.k3ai.in" -o k3ai.tar.gztar -xvzf k3ai.tar.gz \
&& chmod +x ./k3ai \
&& sudo mv ./k3ai /usr/local/bin#Set a variable to grab latest version
Version=$(curl -s "https://api.github.com/repos/kf5i/k3ai-core/releases/latest" | awk -F '"' '/tag_name/{print $4}' | cut -c 2-6)
# get the binaries
wget https://github.com/kf5i/k3ai-core/releases/download/v$Version/k3ai-core_${Version}_linux_amd64.tar.gzWelcome to the K3ai project! We took the freedom to take these rules from other great OSS projects like Kubeflow, Kubernetes, and so on.
k3ai listk3ai list
Name Description
argo-workflow Argo Workflow plugin
h2o-single H2O.ai
jupyter-minimal Minimal Jupyter Configuration
katib Kubeflow Katib
kf-pipelines-tekton Kubeflow Pipelines based on Tekton
kubeflow-pipelines Kubeflow Pipelines platform agnostic
mpi-op MPI-Operator
nvidia-gpu Nvidia GPU support
pytorch-op Pytorch op
tekton Kubeflow Pipelines based on Tekton
tensorflow-op Kubeflow Tensorflow
...
...k3ai list -gk3ai list -g
Name Description
argo-workflow-traefik Expose Argo Workflow using traefik HTTP
jupyter-minimal-traefik Expose Jupyter Notebooks using traefik HTTP
katib-traefik Expose Katib using traefik HTTP
kf-pipelines-tekton-traefik Expose Kubeflow pipelines with Tekton backend using traefik HTTP
kubeflow-pipelines-traefik Expose Kubeflow Workflow using traefik HTTP
pytorch-op-traefik Expose Pytorch using traefik HTTP
tensorflow-op-traefik Expose Tensorflow using traefik HTTP
...
...k3ai apply jupyter-minimalk3ai apply -g jupyter-minimal-traefikkubectl port-forward -n jupyter deployment/jupyter-minimal 8888:8888k3ai apply mpi-opoptions: a map of options for how to interpret this OWNERS file, currently only one: lgtmk3ai initk3ai init --local <YourClusterFlavor># The first two (2) lines are used to indicate what the section does.
# We use them to group stuff, if you need multi-cluster just copy,paste and
# rewrite everything after the first 2 lines
kind: cluster
targetCustomizations:
# This is what you change typically: name is k3ai internal instance name,
# enabled is to tell k3ai if you want to install it or not
# type means what need to be installed
# config if the cluster flavor has its own config file (kubeconfig)
- name: localK3s
enabled: false # Set it to True to enable the section
type: k3s
config: "/etc/rancher/k3s/k3s.yaml"
clusterName: demo-wsl-k3s # This is the name of your cluster
clusterDeployment: local
# clusterStart is helpful when you install on things like WSL that do not have
# services etc..
clusterStart: "sudo bash -ic 'k3s server --write-kubeconfig-mode 644 ...'"
spec:
# The OS entries below are mutually exclusive;
# if one is not needed, set it to null or remove it
wsl: "https://github.com/rancher/k3s/releases/download/v1.19.4%2Bk3s1/k3s"
mac:
linux: "https://get.k3s.io | K3S_KUBECONFIG_MODE=644 sh -s -"
windows:
# If you want to add automatically some plugins you may use the group below
plugins:
- repo: #where is your plugin located?
name: #how it is called?
- repo:
name: - name: remoteK3s
enabled: false
type: k3s
config: remote #currently we do not copy and merge the kubeconfig
clusterName: demo-cluster-remote
clusterDeployment: cloud #change from local to cloud
clusterStart:
spec:
wsl:
mac:
linux:
windows:
# Cloud section
cloudType: civo
cloudNodes: 1
cloudSecretPath: $HOME/.k3ai/secret.txt
# ---end----
plugins:
- repo:
name:
- repo:
name: print ("Hello, Earth")k3ai apply kubeflow-pipelinesk3ai apply -f kubeflow-pipelines-traefikkubectl port-forward -n kubeflow svc/ml-pipeline-ui 8080:80# Specify pipeline argument values
arguments = {'a': '7', 'b': '8'}
# Launch a pipeline run given the pipeline function definition
kfp.Client().create_run_from_pipeline_func(calc_pipeline, arguments=arguments,
experiment_name=EXPERIMENT_NAME)
# The generated links below lead to the Experiment page and the pipeline run...# Specify pipeline argument values
arguments = {'a': '7', 'b': '8'}
# Launch a pipeline run given the pipeline function definition
kfp.Client("<YOUR CLUSTER IP>").create_run_from_pipeline_func(calc_pipeline, arguments=arguments,
experiment_name=EXPERIMENT_NAME)
# The generated links below lead to the Experiment page and the pipeline run...approvers:
- alice
- bob
# this is a comment
reviewers:
- alice
- carol
# this is another comment
- sig-foo # this is an aliasaliases:
sig-foo:
- david
- erin
sig-bar:
- bob
- frankk3ai apply tensorflow-op


k3ai apply nvidia-gpuk3ai apply argo-workflowkubectl apply -f - << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
name: tfevent-volume
labels:
type: local
app: tfjob
spec:
capacity:
storage: 10Gi
storageClassName: local-path
accessModes:
- ReadWriteOnce
hostPath:
path: /tmp/data
EOFkubectl apply -f - << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tfevent-volume
namespace: kubeflow
labels:
type: local
app: tfjob
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
EOFkubectl apply -f https://raw.githubusercontent.com/kubeflow/tf-operator/master/examples/v1/mnist_with_summaries/tf_job_mnist.yamlkubectl logs -l tf-job-name=mnist -n kubeflow --tail=-1...
Adding run metadata for 799
Accuracy at step 800: 0.957
Accuracy at step 810: 0.9698
Accuracy at step 820: 0.9676
Accuracy at step 830: 0.9676
Accuracy at step 840: 0.9677
Accuracy at step 850: 0.9673
Accuracy at step 860: 0.9676
Accuracy at step 870: 0.9654
Accuracy at step 880: 0.9694
Accuracy at step 890: 0.9708
Adding run metadata for 899
Accuracy at step 900: 0.9737
Accuracy at step 910: 0.9708
Accuracy at step 920: 0.9721
Accuracy at step 930: 0.972
Accuracy at step 940: 0.9639
Accuracy at step 950: 0.966
Accuracy at step 960: 0.9654
Accuracy at step 970: 0.9683
Accuracy at step 980: 0.9685
Accuracy at step 990: 0.9666
Adding run metadata for 999plugin-name: demo-plugin
plugin-description: Demo of a custom local plugin
namespace: "default"
yaml:
- url: "./commons/demo-plugin/deployment.yaml"
type: "file"apiVersion: v1
kind: Pod
metadata:
name: shell-demo
spec:
volumes:
- name: shared-data
emptyDir: {}
containers:
- name: nginx
image: nginx
volumeMounts:
- name: shared-data
mountPath: /usr/share/nginx/html
hostNetwork: true
dnsPolicy: Defaultk3ai list --repo "<absolute path to root folder>/"
#k3ai list --repo "home/user/core/"k3ai list --repo "<absolute path to root folder>/"
Name Description
demo-plugin A simple demo of a local plugink3ai apply --repo "<absolute path to root folder>/"kubectl get pod shell-demo
#Output
NAME READY STATUS RESTARTS AGE
shell-demo 1/1 Running 0 3m9skubectl exec --stdin --tty shell-demo -- /bin/bash -c "apt-get update > /dev/null && apt-get -y install boxes > /dev/null && echo 'Hey, this is K3ai! Thanks for use this.' | boxes -d peek"/* _\|/_
(o o)
+----oOO-{_}-OOo------------------------+
|Hey, this is K3ai! Thanks for use this.|
+--------------------------------------*/k3ai delete --repo "<absolute path to root folder>/"k3ai apply pytorch-opkubectl apply -f - << EOF
apiVersion: "kubeflow.org/v1"
kind: "PyTorchJob"
metadata:
name: "pytorch-dist-mnist-gloo"
namespace: kubeflow
spec:
pytorchReplicaSpecs:
Master:
replicas: 1
restartPolicy: OnFailure
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
containers:
- name: pytorch
image: pytorch/pytorch:1.0-cuda10.0-cudnn7-runtime
command: ['sh','-c','pip install tensorboardX==1.6.0 && mkdir -p /opt/mnist/src && cd /opt/mnist/src && curl -O https://raw.githubusercontent.com/kubeflow/pytorch-operator/master/examples/mnist/mnist.py && chgrp -R 0 /opt/mnist && chmod -R g+rwX /opt/mnist && python /opt/mnist/src/mnist.py']
args: ["--backend", "gloo"]
Worker:
replicas: 1
restartPolicy: OnFailure
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
containers:
- name: pytorch
image: pytorch/pytorch:1.0-cuda10.0-cudnn7-runtime
command: ['sh','-c','pip install tensorboardX==1.6.0 && mkdir -p /opt/mnist/src && cd /opt/mnist/src && curl -O https://raw.githubusercontent.com/kubeflow/pytorch-operator/master/examples/mnist/mnist.py && chgrp -R 0 /opt/mnist && chmod -R g+rwX /opt/mnist && python /opt/mnist/src/mnist.py']
args: ["--backend", "gloo"]
EOFkubectl apply -f - << EOF
apiVersion: "kubeflow.org/v1"
kind: "PyTorchJob"
metadata:
name: "pytorch-dist-mnist-gloo"
namespace: kubeflow
spec:
pytorchReplicaSpecs:
Master:
replicas: 1
restartPolicy: OnFailure
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
containers:
- name: pytorch
image: pytorch/pytorch:1.0-cuda10.0-cudnn7-runtime
command: ['sh','-c','pip install tensorboardX==1.6.0 && mkdir -p /opt/mnist/src && cd /opt/mnist/src && curl -O https://raw.githubusercontent.com/kubeflow/pytorch-operator/master/examples/mnist/mnist.py && chgrp -R 0 /opt/mnist && chmod -R g+rwX /opt/mnist && python /opt/mnist/src/mnist.py']
args: ["--backend", "gloo"]
# Change the value of nvidia.com/gpu based on your configuration
resources:
limits:
nvidia.com/gpu: 1
Worker:
replicas: 1
restartPolicy: OnFailure
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
containers:
- name: pytorch
image: pytorch/pytorch:1.0-cuda10.0-cudnn7-runtime
command: ['sh','-c','pip install tensorboardX==1.6.0 && mkdir -p /opt/mnist/src && cd /opt/mnist/src && curl -O https://raw.githubusercontent.com/kubeflow/pytorch-operator/master/examples/mnist/mnist.py && chgrp -R 0 /opt/mnist && chmod -R g+rwX /opt/mnist && python /opt/mnist/src/mnist.py']
args: ["--backend", "gloo"]
# Change the value of nvidia.com/gpu based on your configuration
resources:
limits:
nvidia.com/gpu: 1
EOFkubectl get pod -l pytorch-job-name=pytorch-dist-mnist-gloo -n kubeflowNAME READY STATUS RESTARTS AGE
pytorch-dist-mnist-gloo-master-0 1/1 Running 0 2m26s
pytorch-dist-mnist-gloo-worker-0 1/1 Running 0 2m26s kubectl logs -l pytorch-job-name=pytorch-dist-mnist-gloo -n kubeflowTrain Epoch: 1 [55680/60000 (93%)] loss=0.0341
Train Epoch: 1 [56320/60000 (94%)] loss=0.0357
Train Epoch: 1 [56960/60000 (95%)] loss=0.0774
Train Epoch: 1 [57600/60000 (96%)] loss=0.1186
Train Epoch: 1 [58240/60000 (97%)] loss=0.1927
Train Epoch: 1 [58880/60000 (98%)] loss=0.2050
Train Epoch: 1 [59520/60000 (99%)] loss=0.0642
accuracy=0.9660
Train Epoch: 1 [55680/60000 (93%)] loss=0.0341
Train Epoch: 1 [56320/60000 (94%)] loss=0.0357
Train Epoch: 1 [56960/60000 (95%)] loss=0.0774
Train Epoch: 1 [57600/60000 (96%)] loss=0.1186
Train Epoch: 1 [58240/60000 (97%)] loss=0.1927
Train Epoch: 1 [58880/60000 (98%)] loss=0.2050
Train Epoch: 1 [59520/60000 (99%)] loss=0.0642
accuracy=0.9660#To install KF based on Argo Workflows
k3ai apply kubeflow-pipelines
#To install KF based on TektonCD
k3ai apply kf-pipelines-tektonSites that mention k3ai. If you spoke about k3ai and want to be listed let us know.
k3ai init --cloud civokubectl config --kubeconfig="civo-k3ai-kubeconfig" k3ai apply <your favorite plugin>
k3ai apply jupyter-minimal--plugin_jupyter-tfk3ai apply h2o-singlekubectl get pod -n h2o
#Output should be similar to this
NAME READY STATUS RESTARTS AGE
h2o-stateful-set-0 1/1 Running 0 2m19s kubectl port-forward -n h2o svc/h2o-service 54321:54321