diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index fc5548cd8d11be82a34076e8801c7c24b94ebcad..5b65a6c50aea2daaf3541309d269501799a98b0d 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -730,9 +730,9 @@ struct DataLayer : public Layer
         biases->set(biasesVec);
 
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
-        ieLayer.setWeights(weights);
-        ieLayer.setBiases(biases);
+        InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
+        addConstantData("weights", weights, ieLayer);
+        addConstantData("biases", biases, ieLayer);
 #else
         InferenceEngine::LayerParams lp;
         lp.name = name;
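Note: the hunk above shows the pattern this patch applies throughout the module. The typed 2018R5 builders (ScaleShiftLayer, PReLULayer, ConvolutionLayer, ...) no longer carry their blobs via setWeights()/setBiases() once 2019R1 is targeted, so constant data is attached through the version-dispatching addConstantData() helper introduced in op_inf_engine.cpp below. A minimal sketch, assuming `weights` and `biases` are InferenceEngine::Blob::Ptr:

    InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
    addConstantData("weights", weights, l);  // 2018R5: forwards to l.addConstantData(...)
    addConstantData("biases", biases, l);    // 2019R1+: stores the blob in l.getParameters()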
@@ -1638,25 +1638,15 @@ struct Net::Impl
                  preferableTarget == DNN_TARGET_FPGA) && !fused)
             {
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
-                bool hasWeights = false;
                 for (const std::string& name : {"weights", "biases"})
                 {
                     auto it = ieNode->layer.getParameters().find(name);
                     if (it != ieNode->layer.getParameters().end())
                     {
-                        InferenceEngine::Blob::CPtr bp = it->second.as<InferenceEngine::Blob::CPtr>();
-                        it->second = (InferenceEngine::Blob::CPtr)convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
-                        hasWeights = true;
+                        InferenceEngine::Blob::Ptr bp = it->second.as<InferenceEngine::Blob::Ptr>();
+                        it->second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
                     }
                 }
-                if (!hasWeights)
-                {
-                    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<int16_t>(
-                                                          InferenceEngine::Precision::FP16,
-                                                          InferenceEngine::Layout::C, {1});
-                    blob->allocate();
-                    ieNode->layer.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)blob;
-                }
 #else
                 auto& blobs = ieNode->layer.getConstantData();
                 if (blobs.empty())
diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp
index 522d0229bacc84b85c6bcf1f882adc93a26abf6f..4c69c247c41a3424748d790bddb4e54ab0b89965 100644
--- a/modules/dnn/src/layers/batch_norm_layer.cpp
+++ b/modules/dnn/src/layers/batch_norm_layer.cpp
@@ -350,11 +350,10 @@ public:
     {
 #ifdef HAVE_INF_ENGINE
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
-
+        InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
         const size_t numChannels = weights_.total();
-        ieLayer.setWeights(wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C));
-        ieLayer.setBiases(wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C));
+        addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
+        addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #else
         InferenceEngine::LayerParams lp;
diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp
index 96336808a0ba74e088f516fa2a88e8149dfa20a8..c3a68a2a420fe02d8079bf62a850a72ae68005bd 100644
--- a/modules/dnn/src/layers/blank_layer.cpp
+++ b/modules/dnn/src/layers/blank_layer.cpp
@@ -125,7 +125,9 @@ public:
             ieLayer.getParameters()["axis"] = input->dims.size() - 1;
             ieLayer.getParameters()["out_sizes"] = input->dims[0];
         }
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+        std::vector<size_t> shape(input->dims);
+        std::reverse(shape.begin(), shape.end());
+        ieLayer.setInputPorts({InferenceEngine::Port(shape)});
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #else
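Note: the explicit input port replaces a default-constructed one so the layer gets a concrete shape. Inference Engine reports blob dims in reverse (minor-to-major) order while Builder ports take the natural N, C, H, W order, hence the std::reverse; the same idiom appears in InfEngineBackendNet::connect() below. A worked example with hypothetical dims, for illustration only:

    // Assume input->dims == {224, 224, 3, 1} (minor-to-major order).
    std::vector<size_t> shape(input->dims);
    std::reverse(shape.begin(), shape.end());      // {1, 3, 224, 224}
    ieLayer.setInputPorts({InferenceEngine::Port(shape)});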
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 31665d7dcc9d2317fde119f6a1ba0adf3d6f0fc5..60611b52b2490c2b8373eaba10d79a430df7d707 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -493,11 +493,11 @@ public:
         ieLayer.setGroup((size_t)group);
         ieLayer.setOutDepth((size_t)outCn);
 
-        ieLayer.setWeights(ieWeights);
+        InferenceEngine::Builder::Layer l = ieLayer;
+        addConstantData("weights", ieWeights, l);
         if (ieBiases)
-            ieLayer.setBiases(ieBiases);
+            addConstantData("biases", ieBiases, l);
 
-        InferenceEngine::Builder::Layer l = ieLayer;
         if (!padMode.empty())
             l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
 
@@ -1725,12 +1725,11 @@ public:
         ieLayer.setGroup((size_t)group);
         ieLayer.setOutDepth((size_t)numOutput);
 
-        ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW));
+        InferenceEngine::Builder::Layer l = ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW), l);
         if (hasBias())
-        {
-            ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C));
-        }
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+            addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C), l);
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
         const int group = numOutput / outGroupCn;
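Note: in both convolution hunks the conversion to a generic Builder::Layer now happens before the blobs are attached, since addConstantData() takes a Builder::Layer& (see its declaration in op_inf_engine.hpp below). The returned backend node must then wrap `l`, the copy that actually carries the constant data:

    InferenceEngine::Builder::Layer l = ieLayer;            // generic copy first
    addConstantData("weights", ieWeights, l);               // blobs go onto the copy...
    return Ptr<BackendNode>(new InfEngineBackendNode(l));   // ...so wrap that same copy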
diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index b2e0621d6d45f5da479e5adb236ab00729b96d30..a18cce6fa9123cae59a637aa158bafa380e80ef2 100644
--- a/modules/dnn/src/layers/elementwise_layers.cpp
+++ b/modules/dnn/src/layers/elementwise_layers.cpp
@@ -1134,10 +1134,10 @@ struct ChannelsPReLUFunctor
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
     InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
     {
-        InferenceEngine::Builder::PReLULayer ieLayer("");
+        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
         const size_t numChannels = scale.total();
-        ieLayer.setWeights(wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C));
-        return ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
+        return l;
     }
 #else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp
index 3a71a872fe5eeb86cb4fadebeaad2e5a81a61b5e..dcfa7d1dacae1d446a9bd3be0bea8aaf97bfde51 100644
--- a/modules/dnn/src/layers/fully_connected_layer.cpp
+++ b/modules/dnn/src/layers/fully_connected_layer.cpp
@@ -448,11 +448,12 @@ public:
         const int outNum = blobs[0].size[0];
         ieLayer.setOutputNum(outNum);
 
-        ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW));
+        InferenceEngine::Builder::Layer l = ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW), l);
         if (blobs.size() > 1)
-            ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C));
+            addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l);
 
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         InferenceEngine::LayerParams lp;
         lp.name = name;
diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp
index 8e21f116e4b21138099b2f5c793d51450ae4fb58..cf968f823fe7975d979ed7aabcda4d863822db89 100644
--- a/modules/dnn/src/layers/normalize_bbox_layer.cpp
+++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -295,7 +295,7 @@ public:
                 l.getParameters()["channel_shared"] = blobs[0].total() == 1;
             }
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
-            l.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)weights;
+            l.getParameters()["weights"] = weights;
 #else
             l.addConstantData("weights", weights);
 #endif
diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp
index ac11fe7adadc13fa40d9dbbffde931a73b71072a..b2907b7b8b5d377400efd4efac40a4bac5f384aa 100644
--- a/modules/dnn/src/layers/prior_box_layer.cpp
+++ b/modules/dnn/src/layers/prior_box_layer.cpp
@@ -524,12 +524,12 @@ public:
             if (_stepX == _stepY)
             {
                 l.getParameters()["step"] = _stepX;
-                l.getParameters()["step_h"] = 0.0;
-                l.getParameters()["step_w"] = 0.0;
+                l.getParameters()["step_h"] = 0.0f;
+                l.getParameters()["step_w"] = 0.0f;
             }
             else
             {
-                l.getParameters()["step"] = 0.0;
+                l.getParameters()["step"] = 0.0f;
                 l.getParameters()["step_h"] = _stepY;
                 l.getParameters()["step_w"] = _stepX;
             }
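Note: the 0.0 -> 0.0f changes fix the parameter's type, not its value. InferenceEngine::Parameter appears to preserve the static type it was constructed with, so a step stored as a double could not later be read back as the float the PriorBox parser expects. A minimal sketch of the assumed behaviour:

    InferenceEngine::Parameter p = 0.0;     // stores a double
    InferenceEngine::Parameter q = 0.0f;    // stores a float
    float ok = q.as<float>();               // fine
    // float bad = p.as<float>();           // assumed to fail the type check (hence this patch)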
diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp
index a11fd379a2e51e1d05600848f4a61be52e598a5d..d911905d36d36d45e2617a703689694abda1ecdd 100644
--- a/modules/dnn/src/layers/scale_layer.cpp
+++ b/modules/dnn/src/layers/scale_layer.cpp
@@ -198,13 +198,13 @@ public:
     {
 #ifdef HAVE_INF_ENGINE
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
 
         CV_Assert(!blobs.empty());
         const size_t numChannels = blobs[0].total();
         if (hasWeights)
         {
-            ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C));
+            addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l);
         }
         else
         {
@@ -214,11 +214,11 @@ public:
 
             std::vector<float> ones(numChannels, 1);
             weights->set(ones);
-            ieLayer.setWeights(weights);
+            addConstantData("weights", weights, l);
         }
         if (hasBias)
-            ieLayer.setBiases(wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+            addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         InferenceEngine::LayerParams lp;
         lp.name = name;
diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index a45206433705fc715232b0fd1d00cf0059431d41..ddaab41cdc994294cd421893b62f0d4b66bc1c01 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -18,6 +18,11 @@ namespace cv { namespace dnn {
 
 #ifdef HAVE_INF_ENGINE
 
+// For networks with an input layer that has an empty name, IE generates a name like id[some_number].
+// OpenCV lets users leave the input name empty, so to avoid such unpredictable
+// auto-generated names we substitute a predefined one.
+static std::string kDefaultInpLayerName = "empty_inp_layer_name";
+
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
 InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
     : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
@@ -90,7 +95,7 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
         it = layers.find(inpName);
         if (it == layers.end())
         {
-            InferenceEngine::Builder::InputLayer inpLayer(inpName);
+            InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName);
 
             std::vector<size_t> shape(inp->blob->dims());
             std::reverse(shape.begin(), shape.end());
@@ -119,6 +124,14 @@ void InfEngineBackendNet::init(int targetId)
         for (int id : unconnectedLayersIds)
         {
             InferenceEngine::Builder::OutputLayer outLayer("myconv1");
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+            // Inference Engine determines network precision from the precision of its ports.
+            InferenceEngine::Precision p = (targetId == DNN_TARGET_MYRIAD ||
+                                            targetId == DNN_TARGET_OPENCL_FP16) ?
+                                           InferenceEngine::Precision::FP16 :
+                                           InferenceEngine::Precision::FP32;
+            outLayer.setPort(InferenceEngine::Port({}, p));
+#endif
             netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
         }
         cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build()));
@@ -167,12 +180,56 @@ void InfEngineBackendNet::init(int targetId)
     initPlugin(cnn);
 }
 
-void InfEngineBackendNet::addLayer(const InferenceEngine::Builder::Layer& layer)
+void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer)
 {
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    // Add weights to network and connect them after input blobs.
+    std::map<std::string, InferenceEngine::Parameter>& params = layer.getParameters();
+    std::vector<int> blobsIds;
+    std::vector<int> portIds;
+    for (const std::string& name : {"weights", "biases"})
+    {
+        bool asInput = false;
+        int portId = 0;
+        for (int i = 0; i < layer.getInputPorts().size(); ++i)
+        {
+            const auto& port = layer.getInputPorts()[i];
+            auto it = port.getParameters().find("type");
+            if (it != port.getParameters().end() && it->second == name)
+            {
+                portId = i;
+                asInput = true;
+                break;
+            }
+        }
+
+        if (!asInput)
+            continue;
+
+        auto it = params.find(name);
+        if (it != params.end())
+        {
+            InferenceEngine::Blob::Ptr blob = it->second.as<InferenceEngine::Blob::Ptr>();
+            params.erase(it);
+            int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer(name).setData(blob));
+            blobsIds.push_back(blobId);
+            portIds.push_back(portId);
+        }
+    }
+#endif
+
     int id = netBuilder.addLayer(layer);
     const std::string& layerName = layer.getName();
     CV_Assert(layers.insert({layerName, id}).second);
     unconnectedLayersIds.insert(id);
+
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    // By default, the weights are connected to the last input port ids.
+    for (int i = 0; i < blobsIds.size(); ++i)
+    {
+        netBuilder.connect((size_t)blobsIds[i], {(size_t)id, portIds[i]});
+    }
+#endif
 }
 
 void InfEngineBackendNet::addOutput(const std::string& name)
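Note: this addLayer() extension is the core of the 2019R1 port. Builders that expose dedicated input ports tagged "weights"/"biases" get their blobs registered as ConstLayer nodes and wired in explicitly; this, together with the output-port precision set in init(), is presumably what makes the dummy-weights workaround removed from dnn.cpp above unnecessary. Condensed, with `blob`, `layer` and `weightsPortId` as hypothetical stand-ins:

    int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer("weights").setData(blob));
    int layerId = netBuilder.addLayer(layer);
    netBuilder.connect((size_t)blobId, {(size_t)layerId, weightsPortId});  // port tagged "weights"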
@@ -705,7 +762,7 @@ void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs
     {
         std::string name = wrapper->dataPtr->name;
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        name = name.empty() ? "id1" : name;  // TODO: drop the magic input name.
+        name = name.empty() ? kDefaultInpLayerName : name;
 #endif
         allBlobs.insert({name, wrapper->blob});
     }
@@ -776,6 +833,18 @@ InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
     return halfs;
 }
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
+                     InferenceEngine::Builder::Layer& l)
+{
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    l.getParameters()[name] = data;
+#else
+    l.addConstantData(name, data);
+#endif
+}
+#endif
+
 #endif  // HAVE_INF_ENGINE
 
 bool haveInfEngine()
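Note: addConstantData() is the single dispatch point between the two storage schemes: 2018R5 keeps constant data inside the builder layer, while 2019R1+ keeps it as an ordinary layer parameter, which is exactly what the FP16 conversion loop in dnn.cpp above reads back. Assuming `l` is a layer that went through the helper:

    InferenceEngine::Blob::Ptr b = l.getParameters()["weights"].as<InferenceEngine::Blob::Ptr>();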
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index e912725296f29df17caf8d8c32290a785b7a0840..ac72c0c69c341da18f022cae6b393edc055d7ed4 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -162,7 +162,7 @@ public:
 
     InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
 
-    void addLayer(const InferenceEngine::Builder::Layer& layer);
+    void addLayer(InferenceEngine::Builder::Layer& layer);
 
     void addOutput(const std::string& name);
 
@@ -255,6 +255,10 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
 // Allocates memory for a new blob.
 InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
+#endif
+
 // This is a fake class to run networks from Model Optimizer. Objects of that
 // class simulate responses of layers that are imported by OpenCV and supported by
 // Inference Engine. The main difference is that they do not perform forward pass.
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index 879dd7bbf03781280b7940f56f5c66b702695d7c..92af2e94ee53dcc364a540d77b47199b4d99fbd2 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -695,7 +695,8 @@ TEST_P(Eltwise, Accuracy)
     Target targetId = get<1>(get<4>(GetParam()));
 
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018050000
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_OPENCL)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE &&
+        (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
 #endif