Commit 8757f8e3 authored by Michal Chruscinski's avatar Michal Chruscinski Committed by Scott Cyphers

nGraph-ONNX CI improvement (#3329)

* nGraph-ONNX CI improvement

- Added parallel CI on many SKUs,
- Jenkinsfile written in Declarative Pipeline which allows displaying sequential stages for parallel branches in Blue Ocean
- Moved proper CI code to ngraph-onnx repository
- Moved base_image_builder to ngraph-onnx repository

* Fix license header

* try catch for git info retrieval

* Add unsupported ops to iGPU backend

* Revert previous change
parent f159e196
This diff is collapsed.
# nGraph-ONNX Continuous Integration Script
The proper script running nGraph-ONNX tests can be found in ngraph-onnx repository:
https://github.com/NervanaSystems/ngraph-onnx/tree/master/.ci/jenkins/ci.groovy
Jenkinsfile in this directory just downloads and runs CI stored in repository mentioned above.
This is due to how Jenkins Multibranch Pipeline jobs are implemented, which don't provide an option to automatically clone a different repository than the one for which the build is triggered.
# MANUAL REPRODUCTION INSTRUCTION
From directory containing CI scripts execute runCI.sh bash script:
......
// Set LABEL variable if empty or not declared
// (reading an undeclared Jenkins parameter throws, hence the try/catch fallback to a default)
try{ if(LABEL.trim() == "") {throw new Exception();} }catch(Exception e){LABEL="onnx && ci"}; echo "${LABEL}"
try{ if(BRANCH.trim() == "") {throw new Exception();} }catch(Exception e){BRANCH="master"}; echo "${BRANCH}"
// The Docker registry is mandatory - fail fast when it is missing
if(DOCKER_REGISTRY.trim() == "") {throw new Exception("No Docker registry specified!");}
// CI settings and constants
// (no 'def' on purpose: these land in the script binding so helper methods can read them)
PROJECT_NAME = "ngraph_cpp"
CI_ROOT = ".ci/onnx/jenkins"
DOCKER_CONTAINER_NAME = "jenkins_ngraph-onnx_ci"
NGRAPH_GIT_ADDRESS = "https://github.com/NervanaSystems/ngraph.git"
JENKINS_GITHUB_CREDENTIAL_ID = "7157091e-bc04-42f0-99fd-dc4da2922a55"
// Clones the nGraph repository (branch taken from the global BRANCH variable)
// into the current directory, as its own 'Clone Repo' stage.
//
// @param jenkins_github_credential_id Jenkins credential id used for the clone
// @param ngraph_git_address           git URL of the repository to clone
def cloneRepository(String jenkins_github_credential_id, String ngraph_git_address) {
stage('Clone Repo') {
checkout([$class: 'GitSCM',
branches: [[name: "${BRANCH}"]],
// 30-minute clone timeout; submodule configuration is not generated
doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'CloneOption', timeout: 30]], submoduleCfg: [],
userRemoteConfigs: [[credentialsId: "${jenkins_github_credential_id}",
url: "${ngraph_git_address}"]]])
}
}
// Builds one docker image per configuration map, running the builds in
// parallel inside a single 'Build_image' stage (via UTILS.createStage).
//
// @param configurationMaps list of maps produced by UTILS.getDockerEnvList
def buildImage(configurationMaps) {
Closure buildMethod = { configMap ->
// Delegates the actual build to the docker.sh helper from the cloned repo;
// '|| return 1' makes the sh step fail when the build fails
sh """
${CI_ROOT}/utils/docker.sh build \
--docker_registry=${DOCKER_REGISTRY} \
--name=${configMap["projectName"]} \
--version=${configMap["name"]} \
--dockerfile_path=${configMap["dockerfilePath"]} || return 1
"""
}
UTILS.createStage("Build_image", buildMethod, configurationMaps)
}
// Pushes the previously built images to DOCKER_REGISTRY in a parallel
// 'Push_image' stage. Configurations whose build failed are skipped via
// UTILS.propagateStatus (it throws when the Build_image branch failed).
//
// @param configurationMaps list of maps produced by UTILS.getDockerEnvList
def pushImage(configurationMaps) {
Closure pushMethod = { configMap ->
UTILS.propagateStatus("Build_image", configMap["name"])
withCredentials([usernamePassword(credentialsId: "${DOCKER_CREDENTIALS}",
usernameVariable: 'DOCKER_USERNAME',
passwordVariable: 'DOCKER_PASSWORD')]) {
// Password is fed through stdin (--password-stdin) so it never appears
// on the docker command line
sh """
docker login ${DOCKER_REGISTRY} --username ${DOCKER_USERNAME} --password-stdin <<< \${DOCKER_PASSWORD}
${CI_ROOT}/utils/docker.sh push \
--docker_registry=${DOCKER_REGISTRY} \
--name=${configMap["projectName"]} \
--version=${configMap["name"]} || return 1
"""
}
}
UTILS.createStage("Push_image", pushMethod, configurationMaps)
}
// Removes the per-build workspace subdirectory as a final 'Cleanup' stage.
//
// @param configurationMaps list of configuration maps (used only for stage fan-out)
def cleanup(configurationMaps) {
Closure cleanupMethod = { configMap ->
sh """
rm -rf ${WORKSPACE}/${BUILD_NUMBER}
"""
}
UTILS.createStage("Cleanup", cleanupMethod, configurationMaps)
}
// Entry point: allocates a node, clones nGraph, loads the shared CI utils
// and builds + pushes one base docker image per dockerfile found in the repo.
def main(String label, String projectName, String projectRoot, String dockerContainerName, String jenkins_github_credential_id, String ngraph_git_address) {
node(label) {
// Abort the build after 15 minutes without log activity
timeout(activity: true, time: 15) {
// WORKDIR, UTILS and result have no 'def' on purpose: they go to the
// script binding so the helper methods above can see them
WORKDIR = "${WORKSPACE}/${BUILD_NUMBER}"
def configurationMaps;
try {
dir ("${WORKDIR}") {
cloneRepository(jenkins_github_credential_id, ngraph_git_address)
// Load CI API
UTILS = load "${CI_ROOT}/utils/utils.groovy"
result = 'SUCCESS'
// Create configuration maps
configurationMaps = UTILS.getDockerEnvList(projectName, dockerContainerName, projectRoot)
// Build and push base images
buildImage(configurationMaps)
pushImage(configurationMaps)
}
}
catch(e) {
// Set result to ABORTED if exception contains exit code of a process interrupted by SIGTERM
if ("$e".contains("143")) {
currentBuild.result = "ABORTED"
} else {
currentBuild.result = "FAILURE"
}
}
finally {
// NOTE(review): if the clone or 'load' above failed, configurationMaps is
// still null (and UTILS may be undefined), so this call would itself throw -
// confirm that is the intended failure mode
cleanup(configurationMaps)
}
}
}
}
main(LABEL, PROJECT_NAME, CI_ROOT, DOCKER_CONTAINER_NAME, JENKINS_GITHUB_CREDENTIAL_ID, NGRAPH_GIT_ADDRESS)
# Base image for nGraph-ONNX CI: Ubuntu 16.04 with build toolchain, Python 3,
# ONNX/protobuf dependencies and a prebuilt nGraph master cached in /cache.
FROM ubuntu:16.04
ARG HOME=/root
# Optional proxy settings forwarded from the build environment
ARG http_proxy
ARG https_proxy
ENV http_proxy ${http_proxy}
ENV https_proxy ${https_proxy}
# nGraph dependencies
RUN apt-get update && apt-get install -y \
build-essential \
cmake \
clang-3.9 \
git \
curl \
zlib1g \
zlib1g-dev \
libtinfo-dev \
unzip \
autoconf \
automake \
libtool && \
apt-get clean autoclean && apt-get autoremove -y
# Python dependencies
RUN apt-get -y install python3 \
python3-pip \
python3-dev \
python-virtualenv && \
apt-get clean autoclean && \
apt-get autoremove -y
RUN pip3 install --upgrade pip setuptools wheel
# ONNX dependencies
RUN apt-get -y install protobuf-compiler libprotobuf-dev && \
apt-get clean autoclean && \
apt-get autoremove -y
# Install tox
RUN pip3 install tox
# Build nGraph master
ARG NGRAPH_CACHE_DIR=/cache
WORKDIR /root
# -j is set to the number of physical cores reported by lscpu
RUN git clone https://github.com/NervanaSystems/ngraph.git && \
cd ngraph && \
mkdir -p ./build && \
cd ./build && \
cmake ../ -DNGRAPH_TOOLS_ENABLE=FALSE -DNGRAPH_UNIT_TEST_ENABLE=FALSE -DNGRAPH_USE_PREBUILT_LLVM=TRUE -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE && \
make -j $(lscpu --parse=CORE | grep -v '#' | sort | uniq | wc -l)
# Store built nGraph so later builds can seed from the cache (see run_ci script)
RUN mkdir -p ${NGRAPH_CACHE_DIR} && \
cp -Rf /root/ngraph/build ${NGRAPH_CACHE_DIR}/
# Cleanup remaining sources
RUN rm -rf /root/ngraph
#!/bin/bash
# INTEL CONFIDENTIAL
# Copyright 2017-2019 Intel Corporation All Rights Reserved.
# The source code contained or described herein and all documents related to the
# source code ("Material") are owned by Intel Corporation or its suppliers or
# licensors. Title to the Material remains with Intel Corporation or its
# suppliers and licensors. The Material may contain trade secrets and proprietary
# and confidential information of Intel Corporation and its suppliers and
# licensors, and is protected by worldwide copyright and trade secret laws and
# treaty provisions. No part of the Material may be used, copied, reproduced,
# modified, published, uploaded, posted, transmitted, distributed, or disclosed
# in any way without Intel's prior express written permission.
# No license under any patent, copyright, trade secret or other intellectual
# property right is granted to or conferred upon you by disclosure or delivery of
# the Materials, either expressly, by implication, inducement, estoppel or
# otherwise. Any license under such intellectual property rights must be express
# and approved by Intel in writing.
# Trace every command and abort on the first failure
set -x
set -e
# Location of the prebuilt nGraph 'build' directory baked into the CI image
NGRAPH_CACHE_DIR="/cache"
function build_ngraph() {
    # Builds nGraph (C++ install tree + Python wheel) from ${1}/ngraph.
    #   $1 - directory containing the ngraph repo
    #   $2 - space-separated flags:
    #        REBUILD    - wipe previous build artifacts first
    #        USE_CACHED - seed the build dir from ${NGRAPH_CACHE_DIR}
    # Returns non-zero on the first failing step so the caller can fall back
    # (see main: USE_CACHED || REBUILD).
    set -x
    # directory containing ngraph repo
    local ngraph_directory="$1"
    local func_parameters="$2"
    cd "${ngraph_directory}/ngraph"
    for parameter in $func_parameters
    do
        case $parameter in
            REBUILD)
                rm -rf "${ngraph_directory}/ngraph/build"
                rm -rf "${ngraph_directory}/ngraph_dist"
                ;;
            USE_CACHED)
                cp -Rf "${NGRAPH_CACHE_DIR}/build" "${ngraph_directory}/ngraph/" || return 1
                ;;
        esac
    done
    cd "${ngraph_directory}/ngraph"
    mkdir -p ./build
    cd ./build
    cmake ../ -DNGRAPH_TOOLS_ENABLE=FALSE -DNGRAPH_UNIT_TEST_ENABLE=FALSE -DNGRAPH_USE_PREBUILT_LLVM=TRUE -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE -DCMAKE_INSTALL_PREFIX="${ngraph_directory}/ngraph_dist" || return 1
    make -j $(lscpu --parse=CORE | grep -v '#' | sort | uniq | wc -l) || return 1
    make install || return 1
    cd "${ngraph_directory}/ngraph/python"
    if [ ! -d ./pybind11 ]; then
        git clone --recursive https://github.com/pybind/pybind11.git
    fi
    # Remove stale wheel/extension artifacts before rebuilding the wheel.
    # BUGFIX: the globs must stay OUTSIDE the double quotes - quoted, they are
    # taken literally and the old *.so files were never removed.
    rm -f "${ngraph_directory}"/ngraph/python/dist/ngraph*.whl
    rm -rf "${ngraph_directory}"/ngraph/python/*.so "${ngraph_directory}/ngraph/python/build"
    export PYBIND_HEADERS_PATH="${ngraph_directory}/ngraph/python/pybind11"
    export NGRAPH_CPP_BUILD_PATH="${ngraph_directory}/ngraph_dist"
    export NGRAPH_ONNX_IMPORT_ENABLE="TRUE"
    python3 setup.py bdist_wheel || return 1
    # Clean build artifacts
    rm -rf "${ngraph_directory}/ngraph_dist"
    return 0
}
function main() {
    # Decide how to build: by default reuse the cached master build and fall
    # back to a full rebuild if the incremental build fails.
    BUILD_CALL='build_ngraph "/root" "USE_CACHED" || build_ngraph "/root" "REBUILD"'
    PATTERN='[-a-zA-Z0-9_]*='
    local arg
    for arg in "$@"; do
        case "${arg}" in
            --no-incremental)
                # Incremental building disabled - always build from scratch
                BUILD_CALL='build_ngraph "/root"'
                ;;
            *)
                echo "Parameter ${arg} not recognized."
                exit 1
                ;;
        esac
    done
    # Expose the shared ONNX model zoo inside the container's home directory
    mkdir -p /home/onnx_models/.onnx
    ln -s /home/onnx_models/.onnx /root/.onnx
    eval "${BUILD_CALL}"
}
# Run main only when the script is executed directly (not when sourced)
if [[ ${BASH_SOURCE[0]} == "${0}" ]]; then
main "${@}"
fi
#!/bin/bash
# INTEL CONFIDENTIAL
# Copyright 2017-2019 Intel Corporation All Rights Reserved.
# The source code contained or described herein and all documents related to the
# source code ("Material") are owned by Intel Corporation or its suppliers or
# licensors. Title to the Material remains with Intel Corporation or its
# suppliers and licensors. The Material may contain trade secrets and proprietary
# and confidential information of Intel Corporation and its suppliers and
# licensors, and is protected by worldwide copyright and trade secret laws and
# treaty provisions. No part of the Material may be used, copied, reproduced,
# modified, published, uploaded, posted, transmitted, distributed, or disclosed
# in any way without Intel's prior express written permission.
# No license under any patent, copyright, trade secret or other intellectual
# property right is granted to or conferred upon you by disclosure or delivery of
# the Materials, either expressly, by implication, inducement, estoppel or
# otherwise. Any license under such intellectual property rights must be express
# and approved by Intel in writing.
# Whitelist of parameter names accepted as "--name=value" arguments by main()
readonly PARAMETERS=( 'name' 'version' 'container_name' 'volumes' 'env' 'ports' 'dockerfile_path' 'directory' 'docker_registry'
'options' 'tag' 'engine' 'frontend' 'new_tag' 'image_name' 'repository_type' 'build_cores_number')
# Repository root - dockerfile paths are resolved relative to it
readonly WORKDIR="$(git rev-parse --show-toplevel)"
#Example of usage: login ${docker_registry}
docker.login() {
    # Logs in to the given registry; credentials are read as whitespace-
    # separated tokens from ${HOME}/tokens/docker, each prefixed with "--".
    local registry="${1}"
    local token
    local login_args
    for token in $(cat ${HOME}/tokens/docker); do
        login_args+=" --${token}"
    done
    docker login ${login_args} ${registry}
}
#Example of usage: get_image_name ${docker_registry} ${name} ${version} ${tag} ${engine} ${repository_type} ${frontend}
docker.get_image_name() {
    # Composes the fully qualified image name:
    #   <registry>/aibt/aibt/<name>/[private/]<version>[/<engine>][/<frontend>]:<tag>
    # All path components are lowercased; empty engine/frontend are omitted.
    local registry="${1}"
    local name="${2}"
    local version="${3}"
    local tag="${4}"
    local engine="${5}"
    local repository_type="${6}"
    local frontend="${7}"
    if [ "${repository_type,,}" == "private" ]; then
        repository_type="${repository_type,,}/"
    else
        repository_type=""
    fi
    # BUGFIX: quote the operands - the previous unquoted `[ ! -z ${engine} ]`
    # is a test error for values containing whitespace.
    if [ -n "${engine}" ]; then
        engine="/${engine}"
    fi
    if [ -n "${frontend}" ]; then
        frontend="/${frontend}"
    fi
    echo "${registry,,}/aibt/aibt/${name,,}/${repository_type,,}${version,,}${engine,,}${frontend,,}:${tag}"
}
docker.get_git_token() {
    # Prints the private git access token stored on the build host.
    local token
    token="$(cat ${HOME}/tokens/private_git)"
    echo "${token}"
}
#Example of usage: build ${image_name} ${dockerfile}
docker.build() {
    # Builds <image_name> from <dockerfile_path> (relative to the repo root).
    #   $3 - repository type; "private" adds the git token as a build arg
    #   $4 - optional build core count (auto-detected when empty)
    # Exits the script with docker's exit code when the build fails.
    local image_name="${1}"
    local dockerfile_path="${2}"
    local repository_type="${3}"
    local build_cores_number="${4}"
    local BUILD_ARGS=""
    if [ "${repository_type}" == "private" ]; then
        # BUGFIX: trailing space added - the args previously fused with the
        # next "+=" fragment into a single malformed token.
        BUILD_ARGS="--build-arg REPOSITORY_TYPE=private --build-arg TOKEN=$(docker.get_git_token) "
    fi
    # Add http_proxy if exists. BUGFIX: the variable must be quoted - a bare
    # `[ -n ${http_proxy} ]` collapses to `[ -n ]`, which is always true.
    if [ -n "${http_proxy}" ]; then
        BUILD_ARGS+="--build-arg http_proxy=${http_proxy} "
    fi
    # Add https_proxy if exists
    if [ -n "${https_proxy}" ]; then
        BUILD_ARGS+="--build-arg https_proxy=${https_proxy} "
    fi
    # If build_cores_number was not passed - detect number of build cores
    if [ -z "${build_cores_number}" ]; then
        BUILD_ARGS+="--build-arg BUILD_CORES_NUMBER=$(lscpu --parse=CORE | grep -v '#' | sort | uniq | wc -l) "
    fi
    # BUILD_ARGS intentionally unquoted: it holds multiple options
    docker build ${BUILD_ARGS} -f "${WORKDIR}/${dockerfile_path}" -t "${image_name}" .
    local exit_code=${?}
    if [ ${exit_code} != "0" ]; then
        exit ${exit_code}
    fi
}
#Example of usage: push ${image_name}
docker.push() {
    # Uploads the given image to its registry.
    local image="${1}"
    docker push "${image}"
}
#Example of usage: pull ${image_name}
docker.pull() {
    # Downloads the given image from its registry.
    local image="${1}"
    docker pull "${image}"
}
#Example of usage: shell ${image_name} ${container_name} ${volumes} ${env} ${ports}
# Starts an interactive bash shell in a fresh privileged container
# (removed automatically on exit).
docker.shell() {
local image_name=${1}
local container_name="${2}"
local volumes="${3}"
local env="${4}"
local ports="${5}"
# NOTE(review): ${ports}/${volumes}/${env} are forwarded as single quoted
# words, so each must hold exactly one non-empty docker option - an empty
# value becomes an empty-string argument to docker; confirm with callers
docker run -h "$(hostname)" --rm --privileged --name "${container_name}" -i -t "${ports}" "${volumes}" "${env}" \
"${image_name}" /bin/bash
}
#Example of usage: run ${image_name} ${container_name} ${volumes} ${env} ${ports}
docker.run() {
    # Runs the container in the foreground, removing it on exit.
    # $6 - engine; "cudnn" switches to nvidia-docker.
    local image_name="${1}"
    local container_name="${2}"
    local volumes="${3}"
    local env="${4}"
    local ports="${5}"
    local engine="${6}"
    local docker_command="docker"
    # BUGFIX: quoted comparison - the previous unquoted `[ ${engine,,} == ... ]`
    # is a test syntax error when engine is empty.
    if [ "${engine,,}" == "cudnn" ]; then
        docker_command="nvidia-docker"
    fi
    # NOTE(review): ${ports}/${volumes}/${env} are forwarded as single quoted
    # words - each must hold exactly one docker option; confirm with callers
    ${docker_command} run -h "$(hostname)" --rm --privileged --name "${container_name}" "${ports}" "${volumes}" "${env}" "${image_name}"
}
#Example of usage: start ${image_name} ${container_name} ${volumes} ${env} ${ports}
docker.start() {
    # Starts a detached long-running container ('tail -f /dev/null' keeps it
    # alive); an existing container with the same name is stopped and removed.
    # $6 - engine; "cudnn" switches to nvidia-docker.
    local image_name="${1}"
    local container_name="${2}"
    local volumes="${3}"
    local env="${4}"
    local ports="${5}"
    local engine="${6}"
    # NOTE(review): substring match - "foo" also matches "foobar"; confirm
    # container names never prefix each other before tightening this
    docker ps -a | grep "${container_name}" &> /dev/null
    if [ $? == 0 ]; then
        docker.stop "${container_name}"
        docker.remove "${container_name}"
    fi
    local docker_command="docker"
    # BUGFIX: quoted comparison - the unquoted form errors when engine is empty
    if [ "${engine,,}" == "cudnn" ]; then
        docker_command="nvidia-docker"
    fi
    # The command line is assembled into CMD and eval'ed, so the inner quotes
    # are re-parsed by the shell (kept as in the original revision)
    CMD="${docker_command} run -h "$(hostname)" -id --privileged --name "${container_name}" "${ports}" "${volumes}" "${env}" "${image_name}" tail -f /dev/null"
    eval "${CMD}"
}
#Example of usage: commit ${image_name} ${container_name}
docker.commit() {
    # Snapshots the container's filesystem into the given image name.
    local target_image="${1}"
    local source_container="${2}"
    docker commit "${source_container}" "${target_image}"
}
#Example of usage: tag ${image_name} ${new_tag}
docker.tag() {
    # Re-tags the image: everything after the first ':' is replaced by new_tag.
    local image="${1}"
    local new_tag="${2}"
    docker tag "${image}" "${image/:*/:${new_tag}}"
}
#Example of usage: release ${image_name}
docker.release() {
    # Marks the image as the release version by tagging it "latest".
    docker.tag "${1}" "latest"
}
#Example of usage: stop ${container_name}
docker.stop() {
    # Stops the container; '|| true' keeps the script going if it is not running.
    local container="${1}"
    docker stop "${container}" || true
}
#Example of usage: clean_workdir ${container_name} ${directory} ${options}
# Starts the container and chmods every entry inside ${directory}
# ('tail -n +3' skips the '.' and '..' entries printed by ls -a).
docker.chmod() {
local container_name="${1}"
local directory="${2}"
local options="${3}"
docker start ${container_name}
docker exec ${container_name} bash -c "cd ${directory}; chmod ${options} \$(ls -a | tail -n +3)"
}
#Example of usage: remove ${container_name}
docker.remove() {
    # Removes the container; '|| true' tolerates an already-removed container.
    local container="${1}"
    docker rm "${container}" || true
}
#Example of usage: prune
docker.format() {
    # Wipes ALL docker state (images, containers, networks) without prompting.
    docker system prune --all --force
}
#Example of usage: clean_up
docker.clean_up() {
    # Stops and removes every container, deletes <none>-tagged (dangling)
    # images and prunes the docker system.
    local -r all_containers="$(docker ps -a -q)"
    local -r dangling_images="$(docker images --format "{{.Repository}}:{{.Tag}}->{{.ID}}" | grep '<none>')"
    if [[ -n "${all_containers}" ]]; then
        docker stop ${all_containers}
        docker rm ${all_containers}
    fi
    if [[ -n "${dangling_images}" ]]; then
        local image
        for image in ${dangling_images}; do
            docker rmi ${image/->*/}   # remove by repository:tag
            docker rmi ${image/*->/}   # remove by image id
        done
    fi
    # Confirm the interactive prune prompt
    printf 'y' | docker system prune
}
#Script help
usage() {
    # Prints a short usage banner (same text the old heredoc produced).
    printf 'Usage: %s [options]\n' "${0}"
}
main() {
# Dispatcher: the first argument selects a docker.* action; every remaining
# "--name=value" argument whose name is in PARAMETERS becomes a local
# variable of that name.
local pattern='[-a-zA-Z0-9_]*='
local i
local action=${1}; shift #assign first argument and remove from the argument list
#parse arguments
for i in "${@}"
do
local parameter_name
for parameter_name in "${PARAMETERS[@]}"
do
if [[ ${i} == "--${parameter_name}="* ]]; then
local value="${i//${pattern}/}"
# NOTE(review): eval executes the assignment, so a value containing shell
# metacharacters is interpreted - safe only for trusted CI callers; confirm
eval "local ${parameter_name}=\"${value}\""
fi
done
done
# Derive the image name from its components unless --image_name was given
if [ -z ${image_name} ]; then
local image_name="$(docker.get_image_name ${docker_registry} ${name} ${version} ${tag:-"ci"} ${engine:-"base"} ${repository_type:-"public"} ${frontend})"
fi
case "${action}" in
build)
docker.build "${image_name}" "${dockerfile_path}" "${repository_type:-"public"}" "${build_cores_number}";;
push)
docker.push "${image_name}";;
pull)
docker.pull "${image_name}";;
shell)
docker.shell "${image_name}" "${container_name}" "${volumes}" "${env}" "${ports}";;
run)
docker.run "${image_name}" "${container_name}" "${volumes}" "${env}" "${ports}" "${engine:-"base"}";;
start)
docker.start "${image_name}" "${container_name}" "${volumes}" "${env}" "${ports}" "${engine:-"base"}";;
commit)
docker.commit "${image_name}" "${container_name}";;
tag)
docker.tag "${image_name}" "${new_tag}";;
stop)
docker.stop "${container_name}";;
remove)
docker.remove "${container_name}";;
chmod)
docker.chmod "${container_name}" "${directory}" "${options}";;
format)
docker.format;;
clean_up)
docker.clean_up;;
login)
docker.login "${docker_registry}";;
release)
docker.release "${image_name}";;
*)
usage;;
esac
}
# Run main only when the script is executed directly (not when sourced)
if [[ ${BASH_SOURCE[0]} == "${0}" ]]; then
main "${@}"
fi
// INTEL CONFIDENTIAL
// Copyright 2017-2019 Intel Corporation All Rights Reserved.
// The source code contained or described herein and all documents related to the
// source code ("Material") are owned by Intel Corporation or its suppliers or
// licensors. Title to the Material remains with Intel Corporation or its
// suppliers and licensors. The Material may contain trade secrets and proprietary
// and confidential information of Intel Corporation and its suppliers and
// licensors, and is protected by worldwide copyright and trade secret laws and
// treaty provisions. No part of the Material may be used, copied, reproduced,
// modified, published, uploaded, posted, transmitted, distributed, or disclosed
// in any way without Intel's prior express written permission.
// No license under any patent, copyright, trade secret or other intellectual
// property right is granted to or conferred upon you by disclosure or delivery of
// the Materials, either expressly, by implication, inducement, estoppel or
// otherwise. Any license under such intellectual property rights must be express
// and approved by Intel in writing.
STAGES_STATUS_MAP = [:]
def getDockerEnvList(String projectName, String dockerContainerNamePrefix, String projectRoot = projectName) {
/**
 * This method generates configuration map list using dockerfiles available in dockerfiles directory
 *
 * @param projectName name of the project used in paths and configuration map.
 * @param dockerContainerNamePrefix docker container name prefix.
 * @param projectRoot path to project root containing directory with dockerfiles to run
 * @return list of maps, one per <projectRoot>/dockerfiles/*.dockerfile
 */
def rawList = findFiles(glob: "${projectRoot}/dockerfiles/*.dockerfile")
def envList = []
for (int i = 0; i < rawList.size(); ++i) {
// configuration name = dockerfile basename without the extension
def name = rawList[i].name - '.dockerfile'
def dockerContainerName = "${dockerContainerNamePrefix}_${name}"
envList.add([name:name, // name is the only obligatory variable
dockerfilePath:rawList[i].path,
projectName:projectName,
dockerContainerName:dockerContainerName])
}
return envList
}
def generateMap(Closure method, configurationMaps) {
/**
 * Generates map for method using configurationMaps.
 *
 * @param method Method that will be executed in each map(configuration).
 * @param configurationMaps Map of configuration that will be parallelized.
 * @return map of configuration name -> closure, suitable for 'parallel'
 */
def executionMap = [:]
for (int i = 0; i < configurationMaps.size(); ++i) {
// BUGFIX: 'def' is required here. Without it configMap is a script-binding
// variable shared by every generated closure, so all parallel branches
// could end up running against the last configuration in the list.
def configMap = configurationMaps[i]
executionMap[configMap["name"]] = {
method(configMap)
}
}
return executionMap
}
def createStage(String stageName, Closure method, configurationMaps, force = false) {
/**
 * Create pipeline stage.
 *
 * @param stageName Name of stage that will be create.
 * @param method Method that will be executed in each map(configuration).
 * @param configurationMaps Map of configuration that will be parallelized.
 * @param force When true, run the stage even if an earlier stage FAILED/ABORTED.
 */
stage(stageName) {
// Add current stage name to configurationMaps
for (int i = 0; i < configurationMaps.size(); ++i) {
configurationMaps[i]["stageName"] = stageName
}
// Fail current stage If earlier stage got aborted or failed
// unless it's executed with force argument set to true
Closure genericBodyMethod = {}
if (!force && ["FAILURE", "ABORTED"].contains(currentBuild.result)) {
// Skip-path: record the inherited status and fail this branch immediately
genericBodyMethod = { configMap ->
println("Skipping stage due to earlier stage ${currentBuild.result}")
setConfigurationStatus(configMap["stageName"], configMap["name"], currentBuild.result)
throw new Exception("Skipped due to ${currentBuild.result} in earlier stage")
}
}
else
{
// Normal path: run the payload, map FlowInterruptedException (user abort /
// timeout) to ABORTED, anything else to FAILURE, and always record status
genericBodyMethod = { configMap ->
def status = "SUCCESS"
try {
method(configMap)
} catch(Exception e) {
if (e.toString().contains("FlowInterruptedException")) {
status = "ABORTED"
} else {
status = "FAILURE"
}
currentBuild.result = status
throw e
} finally {
setConfigurationStatus(configMap["stageName"], configMap["name"], status)
}
}
}
try {
// One parallel branch per configuration
def prepareEnvMap = generateMap(genericBodyMethod, configurationMaps)
parallel prepareEnvMap
} catch(Exception e) {
if (e.toString().contains("FlowInterruptedException")) {
currentBuild.result = "ABORTED"
} else {
currentBuild.result = "FAILURE"
}
}
}
}
def setConfigurationStatus(String stageName, String configurationName, String status) {
/**
 * Records the status of one configuration within a stage in STAGES_STATUS_MAP.
 *
 * @param stageName The name of the stage in which the configuration is.
 * @param configurationName The name of the configuration whose status will be updated.
 * @param status Configuration status: SUCCESS, FAILURE or ABORTED.
 */
// Lazily create the per-stage sub-map on first use
if (!STAGES_STATUS_MAP.containsKey(stageName)) {
STAGES_STATUS_MAP[stageName] = [:]
}
def normalizedStatus = status.toUpperCase()
if (["FAILURE", "SUCCESS", "ABORTED"].contains(normalizedStatus)) {
STAGES_STATUS_MAP[stageName][configurationName] = normalizedStatus
} else {
throw new Exception("Not supported status name.")
}
}
def propagateStatus(String parentStageName, String parentConfigurationName) {
/**
 * Propagates failure from a parent configuration.
 * This method will throw exception "Propagating status of $parentStageName"
 * if the parent configuration's recorded status is FAILURE.
 *
 * @param parentStageName The name of the stage in which the configuration is.
 * @param parentConfigurationName The name of the configuration whose status will be propagated.
 */
// 'def' keeps the lookup local instead of leaking into the script binding
def parentStageStatus = STAGES_STATUS_MAP[parentStageName][parentConfigurationName]
if (parentStageStatus == "FAILURE") {
throw new Exception("Propagating status of ${parentStageName}")
}
}
def showStatusMap() {
/**
 * Display status map for every defined stage.
 */
echo "${STAGES_STATUS_MAP}"
}
// Expose this script's methods to the caller of 'load'
return this
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment