Commit 47c5ee5d authored by Dmitry Kurtaev, committed by LaurentBerger

Fixes for OpenCV face detection network

parent 97681d06
@@ -154,8 +154,13 @@ if args.proto and args.model:
             top = int(out[0, 0, i, 4] * img.shape[0])
             right = int(out[0, 0, i, 5] * img.shape[1])
             bottom = int(out[0, 0, i, 6] * img.shape[0])
-            addDetection(detections, imageId, left, top, width=right - left + 1,
-                         height=bottom - top + 1, score=confidence)
+            x = max(0, min(left, img.shape[1] - 1))
+            y = max(0, min(top, img.shape[0] - 1))
+            w = max(0, min(right - x + 1, img.shape[1] - x))
+            h = max(0, min(bottom - y + 1, img.shape[0] - y))
+            addDetection(detections, imageId, x, y, w, h, score=confidence)
 elif args.cascade:
     cascade = cv.CascadeClassifier(args.cascade)
...
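For reference, a minimal standalone sketch of the clamping that the new evaluation-script lines perform; the helper name clamp_box and the example image size are illustrative, not part of the patch:

# Sketch of the box clamping added above (illustrative helper, not in the patch).
def clamp_box(left, top, right, bottom, img_w, img_h):
    x = max(0, min(left, img_w - 1))            # keep the top-left corner inside the image
    y = max(0, min(top, img_h - 1))
    w = max(0, min(right - x + 1, img_w - x))   # width/height may not run past the border
    h = max(0, min(bottom - y + 1, img_h - y))
    return x, y, w, h

print(clamp_box(-5, 10, 700, 480, img_w=640, img_h=480))  # -> (0, 10, 640, 470)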
@@ -173,9 +173,9 @@ conv7_2_h = tf.space_to_batch_nd(conv7_1_h, [1, 1], [[1, 1], [1, 1]], name='Pad_
 conv7_2_h = conv(conv7_2_h, stride=2, pad='VALID', name='conv7_2_h', activ=tf.nn.relu)
 conv8_1_h = conv(conv7_2_h, pad='SAME', name='conv8_1_h', activ=tf.nn.relu)
-conv8_2_h = conv(conv8_1_h, pad='SAME', name='conv8_2_h', activ=tf.nn.relu)
+conv8_2_h = conv(conv8_1_h, pad='VALID', name='conv8_2_h', activ=tf.nn.relu)
 conv9_1_h = conv(conv8_2_h, 'conv9_1_h', activ=tf.nn.relu)
-conv9_2_h = conv(conv9_1_h, pad='SAME', name='conv9_2_h', activ=tf.nn.relu)
+conv9_2_h = conv(conv9_1_h, pad='VALID', name='conv9_2_h', activ=tf.nn.relu)
 conv4_3_norm = l2norm(layer_256_1_relu1, 'conv4_3_norm')
...
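The SAME -> VALID switches here, together with the matching pad: 1 -> pad: 0 edits in the Caffe prototxt further down, change the output size of the affected 3x3 convolutions. A rough sketch of the arithmetic, assuming kernel 3 and stride 1 as in those layers:

import math

# Output spatial size of a convolution along one axis (sketch; kernel/stride assumed).
def same_out(n, stride):                 # 'SAME' pads so that out = ceil(n / stride)
    return math.ceil(n / stride)

def valid_out(n, kernel, stride):        # 'VALID' applies no padding
    return math.ceil((n - kernel + 1) / stride)

n = 3                                    # e.g. a small late-stage feature map
print(same_out(n, 1), valid_out(n, 3, 1))  # -> 3 1: SAME keeps 3x3, VALID shrinks it to 1x1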
@@ -198,7 +198,7 @@ public:
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
         return backendId == DNN_BACKEND_OPENCV ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE && !_locPredTransposed && _bboxesNormalized && !_clip);
+               (backendId == DNN_BACKEND_INFERENCE_ENGINE && !_locPredTransposed && _bboxesNormalized);
     }

     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -936,6 +936,7 @@ public:
         InferenceEngine::Builder::Layer l = ieLayer;
         l.getParameters()["eta"] = std::string("1.0");
+        l.getParameters()["clip"] = _clip;
         return Ptr<BackendNode>(new InfEngineBackendNode(l));
     }
...
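The C++ change stops refusing the Inference Engine backend for DetectionOutput layers that have clip set and instead forwards the flag to the IE builder layer. Conceptually, clip just clamps the normalized box coordinates into [0, 1]; a hedged sketch of that effect (illustration only, not OpenCV's or IE's actual implementation):

import numpy as np

# What the DetectionOutput 'clip' flag does to normalized boxes (illustration only).
def clip_boxes(boxes):
    return np.clip(boxes, 0.0, 1.0)

boxes = np.array([[-0.02, 0.10, 0.55, 1.07]])  # [left, top, right, bottom], normalized
print(clip_boxes(boxes))                        # every coordinate clamped into [0, 1]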
@@ -1784,7 +1784,7 @@ layer {
     }
     code_type: CENTER_SIZE
     keep_top_k: 200
-    confidence_threshold: 0.2
+    confidence_threshold: 0.01
     clip: 1
   }
 }
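Lowering confidence_threshold from 0.2 to 0.01 means the DetectionOutput layer emits many more low-score candidates, so any stricter cut is typically left to the caller. A toy sketch of that two-stage filtering, using the same [image_id, class_id, score, left, top, right, bottom] row layout as the out[0, 0, i, ...] indexing in the evaluation script above:

import numpy as np

# Toy DetectionOutput rows: [image_id, class_id, score, left, top, right, bottom]
out = np.array([[0, 1, 0.90, 0.10, 0.10, 0.50, 0.60],
                [0, 1, 0.03, 0.20, 0.25, 0.40, 0.55]])

layer_threshold = 0.01                      # value now set in the prototxt
kept = out[out[:, 2] > layer_threshold]     # both rows pass the layer's own threshold
strong = out[out[:, 2] > 0.5]               # a caller can still apply a stricter cut
print(len(kept), len(strong))               # -> 2 1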
@@ -1221,7 +1221,7 @@ node {
   attr {
     key: "padding"
     value {
-      s: "SAME"
+      s: "VALID"
     }
   }
   attr {
@@ -1311,7 +1311,7 @@ node {
   attr {
     key: "padding"
     value {
-      s: "SAME"
+      s: "VALID"
     }
   }
   attr {
@@ -2337,6 +2337,12 @@ node {
       i: 400
     }
   }
+  attr {
+    key: "clip"
+    value {
+      b: true
+    }
+  }
 }
 node {
   name: "reshape_before_softmax"
...
@@ -917,7 +917,7 @@ layer {
   }
   convolution_param {
     num_output: 128
-    pad: 1
+    pad: 0
     kernel_size: 3
     stride: 1
     weight_filler {
@@ -983,7 +983,7 @@ layer {
   }
   convolution_param {
     num_output: 128
-    pad: 1
+    pad: 0
     kernel_size: 3
     stride: 1
     weight_filler {
@@ -1810,6 +1810,7 @@ layer {
     code_type: CENTER_SIZE
     keep_top_k: 200
     confidence_threshold: 0.01
+    clip: 1
   }
 }
 layer {
...
@@ -1086,7 +1086,7 @@ layer {
   }
   convolution_param {
     num_output: 128
-    pad: 1
+    pad: 0
     kernel_size: 3
     stride: 1
     weight_filler {
@@ -1600,7 +1600,7 @@ layer {
   }
   convolution_param {
     num_output: 16
-    pad: 1
+    pad: 0
     kernel_size: 3
     stride: 1
     weight_filler {
...