Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
k3s-uninstall.sh
curl -sfL https://get.k3ai.in | bash -s -- --gpu
error: timed out waiting for the condition on xxxxxxx
curl -sfL https://get.k3ai.in | bash -s -- --cpu --plugin_kfp_sdk
curl -sfL https://get.k3ai.in | bash -s -- --cpu --pipelines --plugin_jupyter-minimal
curl -sfL https://get.k3ai.in | bash -s -- --wsl --pipelines --plugin_jupyter-minimal
curl -sfL https://get.k3ai.in | bash -s -- --skipk3s --plugin_jupyter-minimal
k3s kubectl get service/traefik -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -n kube-system
import kfp
import json
# 'host' is your Kubeflow Pipelines API server's host address.
host="http://<K3AI IP>/"
# 'pipeline_name' is the name of the pipeline you want to list. We provide you
# here a pre-set name to test immediately
pipeline_name = "[Demo] TFX - Iris classification pipeline"
client = kfp.Client(host)
# To filter on pipeline name, you can use a predicate indicating that the pipeline
# name is equal to the given name.
# A predicate includes 'key', 'op' and 'string_value' fields.
# The 'key' specifies the property you want to apply the filter to. For example,
# if you want to filter on the pipeline name, then 'key' is set to 'name' as
# shown below.
# The 'op' specifies the operator used in a predicate. The operator can be
# EQUALS, NOT_EQUALS, GREATER_THAN, etc. The complete list is at [filter.proto](https://github.com/kubeflow/pipelines/blob/master/backend/api/filter.proto#L32)
# When using the operator in a string-typed predicate, you need to use the
# corresponding integer value of the enum. For Example, you can use the integer
# value 1 to indicate EQUALS as shown below.
# The 'string_value' specifies the value you want to filter with.
filter = json.dumps({'predicates': [{'key': 'name', 'op': 1, 'string_value': '{}'.format(pipeline_name)}]})
pipelines = client.pipelines.list_pipelines(filter=filter)
# The pipeline with the given pipeline_name, if exists, is in pipelines.pipelines[0].
print (pipelines)
python demo.py
IP=$(kubectl get service/traefik -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -n kube-system) \
&& echo "http://"$IP":8888"
import kfp
import json
# 'host' is your Kubeflow Pipelines API server's host address.
host="http://ml-pipeline-ui.kubeflow/"
# 'pipeline_name' is the name of the pipeline you want to list. We provide you
# here a pre-set name to test immediately
pipeline_name = "[Demo] TFX - Iris classification pipeline"
client = kfp.Client(host)
# To filter on pipeline name, you can use a predicate indicating that the pipeline
# name is equal to the given name.
# A predicate includes 'key', 'op' and 'string_value' fields.
# The 'key' specifies the property you want to apply the filter to. For example,
# if you want to filter on the pipeline name, then 'key' is set to 'name' as
# shown below.
# The 'op' specifies the operator used in a predicate. The operator can be
# EQUALS, NOT_EQUALS, GREATER_THAN, etc. The complete list is at [filter.proto](https://github.com/kubeflow/pipelines/blob/master/backend/api/filter.proto#L32)
# When using the operator in a string-typed predicate, you need to use the
# corresponding integer value of the enum. For Example, you can use the integer
# value 1 to indicate EQUALS as shown below.
# The 'string_value' specifies the value you want to filter with.
filter = json.dumps({'predicates': [{'key': 'name', 'op': 1, 'string_value': '{}'.format(pipeline_name)}]})
pipelines = client.pipelines.list_pipelines(filter=filter)
# The pipeline with the given pipeline_name, if exists, is in pipelines.pipelines[0].
print (pipelines)
{'next_page_token': None,
'pipelines': [{'created_at': datetime.datetime(2020, 10, 14, 13, 27, 18, tzinfo=tzlocal()),
'default_version': {'code_source_url': None,
'created_at': datetime.datetime(2020, 10, 14, 13, 27, 18, tzinfo=tzlocal()),
'id': '8a53981e-7c3e-4897-8c75-26f710c20f7a',
'name': '[Demo] TFX - Iris classification '
'pipeline',
'package_url': None,
'parameters': [{'name': 'pipeline-root',
'value': 'gs://{{kfp-default-bucket}}/tfx_iris/{{workflow.uid}}'},
{'name': 'data-root',
'value': 'gs://ml-pipeline/sample-data/iris/data'},
{'name': 'module-file',
'value': '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py'}],
'resource_references': [{'key': {'id': '8a53981e-7c3e-4897-8c75-26f710c20f7a',
'type': 'PIPELINE'},
'name': None,
'relationship': 'OWNER'}]},
'description': '[source '
'code](https://github.com/kubeflow/pipelines/tree/c84f4da0f7b534e1884f6696f161dc1375206ec2/samples/core/iris). '
'Example pipeline that classifies Iris flower '
'subspecies and how to use native Keras within '
'TFX.',
'error': None,
'id': '8a53981e-7c3e-4897-8c75-26f710c20f7a',
'name': '[Demo] TFX - Iris classification pipeline',
'parameters': [{'name': 'pipeline-root',
'value': 'gs://{{kfp-default-bucket}}/tfx_iris/{{workflow.uid}}'},
{'name': 'data-root',
'value': 'gs://ml-pipeline/sample-data/iris/data'},
{'name': 'module-file',
'value': '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py'}],
'url': None}],
'total_size': 1}
curl -sfL https://get.k3ai.in | bash -s -- --cpu --plugin_tfs-resnet
curl -sfL https://get.k3ai.in | bash -s -- --gpu --plugin_tfs-resnet
pip install tensorflow-serving-api
git clone https://github.com/tensorflow/serving
cd serving
kubectl describe service tf-server-service -n tf-serving
Name: tf-server-service
Namespace: tf-serving
Labels: <none>
Annotations: Selector: app=tf-serv-resnet
Type: LoadBalancer
IP: 10.43.200.139
LoadBalancer Ingress: 172.21.190.98
Port: grpc 8500/TCP
TargetPort: 8500/TCP
NodePort: grpc 30525/TCP
Endpoints: 10.42.0.139:8500
Port: rest 8501/TCP
TargetPort: 8501/TCP
NodePort: rest 30907/TCP
Endpoints: 10.42.0.139:8501
Session Affinity: None
External Traffic Policy: Cluster
python \
tensorflow_serving/example/resnet_client_grpc.py \
  --server=<LOADBALANCER INGRESS>:8500
python \
tensorflow_serving/example/resnet_client.py \
  --server=<LOADBALANCER INGRESS>:8501
#REST Api
Prediction class: 286, avg latency: 87.9074 ms
#gRPC
[INFO] app=tf-server isn't ready yet. This may take a few minutes... ││ kubeflow metadata-envoy-deployment-6d776695d9-24xc7 ● 1/1 3 Running 5 11 │
float_val: 2.1751149688498117e-05
float_val: 4.679726407630369e-05
float_val: 6.22767993263551e-06
float_val: 2.4046405087574385e-05
float_val: 0.00013994085020385683
float_val: 5.0004531658487394e-05
float_val: 1.670094752626028e-05
float_val: 2.148277962987777e-05
float_val: 0.0004090495640411973
float_val: 3.3705742680467665e-05
float_val: 3.318636345284176e-06
float_val: 8.649761730339378e-05
float_val: 3.984206159657333e-06
float_val: 3.7564968806691468e-06
float_val: 3.2912407732510474e-06
float_val: 3.6244309740141034e-06
float_val: 2.5648103019193513e-06
float_val: 2.7759107979363762e-05
float_val: 1.5157910638663452e-05
float_val: 1.8459862758390955e-06
float_val: 8.704301990292151e-07
float_val: 2.724335217862972e-06
float_val: 3.3186615837621503e-06
float_val: 1.455540314054815e-06
float_val: 8.736999006941915e-06
float_val: 2.299477728229249e-06
float_val: 2.0985182800359325e-06
float_val: 0.00026371944113634527
float_val: 1.0347321222070605e-05
float_val: 3.660013362605241e-06
float_val: 2.0003653844469227e-05
float_val: 6.355750429065665e-06
float_val: 2.255582785437582e-06
float_val: 1.5940782986945123e-06
float_val: 1.2315674666751875e-06
float_val: 1.1781222610807163e-06
float_val: 1.4636576452176087e-05
float_val: 5.812105996483297e-07
float_val: 6.599811604246497e-05
float_val: 0.0012952699325978756
}
}
model_spec {
name: "resnet"
version {
value: 1538687457
}
signature_name: "serving_default"
}

kubectl config --kubeconfig="civo-k3ai-kubeconfig"
curl -sfL https://get.k3ai.in | bash -s - --skipk3s --plugin_civo_kfpipelines
curl -sfL https://get.k3ai.in | bash -s -- --wsl --pipelines
curl -sfL https://get.k3ai.in | bash -s -- --skipk3s --plugin_tfs-resnet
startk3s
curl -sfL https://get.k3ai.in | INSTALL_K3S_BIN_DIR=/usr/bin bash -s -- --wsl --pipelines
curl -sfL https://get.k3ai.in | bash -s -- --cpu --plugin_pytorch-operator
curl -sfL https://get.k3ai.in | bash -s -- --gpu --plugin_pytorch-operator
k3s kubectl apply -f - << EOF
apiVersion: "kubeflow.org/v1"
kind: "PyTorchJob"
metadata:
name: "pytorch-dist-mnist-gloo"
namespace: kubeflow
spec:
pytorchReplicaSpecs:
Master:
replicas: 1
restartPolicy: OnFailure
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
containers:
- name: pytorch
image: pytorch/pytorch:1.0-cuda10.0-cudnn7-runtime
command: ['sh','-c','pip install tensorboardX==1.6.0 && mkdir -p /opt/mnist/src && cd /opt/mnist/src && curl -O https://raw.githubusercontent.com/kubeflow/pytorch-operator/master/examples/mnist/mnist.py && chgrp -R 0 /opt/mnist && chmod -R g+rwX /opt/mnist && python /opt/mnist/src/mnist.py']
args: ["--backend", "gloo"]
Worker:
replicas: 1
restartPolicy: OnFailure
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
containers:
- name: pytorch
image: pytorch/pytorch:1.0-cuda10.0-cudnn7-runtime
command: ['sh','-c','pip install tensorboardX==1.6.0 && mkdir -p /opt/mnist/src && cd /opt/mnist/src && curl -O https://raw.githubusercontent.com/kubeflow/pytorch-operator/master/examples/mnist/mnist.py && chgrp -R 0 /opt/mnist && chmod -R g+rwX /opt/mnist && python /opt/mnist/src/mnist.py']
args: ["--backend", "gloo"]
EOF
k3s kubectl apply -f - << EOF
apiVersion: "kubeflow.org/v1"
kind: "PyTorchJob"
metadata:
name: "pytorch-dist-mnist-gloo"
namespace: kubeflow
spec:
pytorchReplicaSpecs:
Master:
replicas: 1
restartPolicy: OnFailure
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
containers:
- name: pytorch
image: pytorch/pytorch:1.0-cuda10.0-cudnn7-runtime
command: ['sh','-c','pip install tensorboardX==1.6.0 && mkdir -p /opt/mnist/src && cd /opt/mnist/src && curl -O https://raw.githubusercontent.com/kubeflow/pytorch-operator/master/examples/mnist/mnist.py && chgrp -R 0 /opt/mnist && chmod -R g+rwX /opt/mnist && python /opt/mnist/src/mnist.py']
args: ["--backend", "gloo"]
# Change the value of nvidia.com/gpu based on your configuration
resources:
limits:
nvidia.com/gpu: 1
Worker:
replicas: 1
restartPolicy: OnFailure
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
containers:
- name: pytorch
image: pytorch/pytorch:1.0-cuda10.0-cudnn7-runtime
command: ['sh','-c','pip install tensorboardX==1.6.0 && mkdir -p /opt/mnist/src && cd /opt/mnist/src && curl -O https://raw.githubusercontent.com/kubeflow/pytorch-operator/master/examples/mnist/mnist.py && chgrp -R 0 /opt/mnist && chmod -R g+rwX /opt/mnist && python /opt/mnist/src/mnist.py']
args: ["--backend", "gloo"]
# Change the value of nvidia.com/gpu based on your configuration
resources:
limits:
nvidia.com/gpu: 1
EOF
kubectl get pod -l pytorch-job-name=pytorch-dist-mnist-gloo -n kubeflow
NAME                               READY   STATUS    RESTARTS   AGE
pytorch-dist-mnist-gloo-master-0 1/1 Running 0 2m26s
pytorch-dist-mnist-gloo-worker-0   1/1     Running   0          2m26s
kubectl logs -l pytorch-job-name=pytorch-dist-mnist-gloo -n kubeflow
Train Epoch: 1 [55680/60000 (93%)] loss=0.0341
Train Epoch: 1 [56320/60000 (94%)] loss=0.0357
Train Epoch: 1 [56960/60000 (95%)] loss=0.0774
Train Epoch: 1 [57600/60000 (96%)] loss=0.1186
Train Epoch: 1 [58240/60000 (97%)] loss=0.1927
Train Epoch: 1 [58880/60000 (98%)] loss=0.2050
Train Epoch: 1 [59520/60000 (99%)] loss=0.0642
accuracy=0.9660
Train Epoch: 1 [55680/60000 (93%)] loss=0.0341
Train Epoch: 1 [56320/60000 (94%)] loss=0.0357
Train Epoch: 1 [56960/60000 (95%)] loss=0.0774
Train Epoch: 1 [57600/60000 (96%)] loss=0.1186
Train Epoch: 1 [58240/60000 (97%)] loss=0.1927
Train Epoch: 1 [58880/60000 (98%)] loss=0.2050
Train Epoch: 1 [59520/60000 (99%)] loss=0.0642
accuracy=0.9660
curl -sfL https://get.k3ai.in | bash -s -- --cpu --plugin_kfpipelines
curl -sfL https://get.k3ai.in | bash -s -- --gpu --plugin_kfpipelines
curl -sfL https://get.k3ai.in | bash -s -- --cpu --plugin_tf-operator
curl -sfL https://get.k3ai.in | bash -s -- --gpu --plugin_tf-operator
k3s kubectl apply -f - << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
name: tfevent-volume
labels:
type: local
app: tfjob
spec:
capacity:
storage: 10Gi
storageClassName: local-path
accessModes:
- ReadWriteOnce
hostPath:
path: /tmp/data
EOF
k3s kubectl apply -f - << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tfevent-volume
namespace: kubeflow
labels:
type: local
app: tfjob
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
EOF
kubectl apply -f https://raw.githubusercontent.com/kubeflow/tf-operator/master/examples/v1/mnist_with_summaries/tf_job_mnist.yaml
kubectl logs -l tf-job-name=mnist -n kubeflow --tail=-1
...
Adding run metadata for 799
Accuracy at step 800: 0.957
Accuracy at step 810: 0.9698
Accuracy at step 820: 0.9676
Accuracy at step 830: 0.9676
Accuracy at step 840: 0.9677
Accuracy at step 850: 0.9673
Accuracy at step 860: 0.9676
Accuracy at step 870: 0.9654
Accuracy at step 880: 0.9694
Accuracy at step 890: 0.9708
Adding run metadata for 899
Accuracy at step 900: 0.9737
Accuracy at step 910: 0.9708
Accuracy at step 920: 0.9721
Accuracy at step 930: 0.972
Accuracy at step 940: 0.9639
Accuracy at step 950: 0.966
Accuracy at step 960: 0.9654
Accuracy at step 970: 0.9683
Accuracy at step 980: 0.9685
Accuracy at step 990: 0.9666
Adding run metadata for 999
approvers:
- alice
- bob
# this is a comment
reviewers:
- alice
- carol
# this is another comment
- sig-foo # this is an alias
aliases:
  sig-foo:
    - david
    - erin
  sig-bar:
    - bob
    - frank
curl -sfL https://get.k3ai.in | bash -s -- --plugin_jupyter-minimal
/unassign
false
true
curl -sfL https://get.k3ai.in | bash -s -- --plugin_jupyter-<YOUR SELECTED FLAVOR> --plugin_kfpipelines

