Commit e099c399 authored by Vitaliy Lyudvichenko's avatar Vitaliy Lyudvichenko

Adding of RNN layer

parent 893a5659
@@ -49,10 +49,10 @@ namespace dnn
{
//! LSTM recurrent layer
- CV_EXPORTS class LSTMLayer : public Layer
+ class LSTMLayer : public Layer
{
public:
- CV_EXPORTS static Ptr<LSTMLayer> create();
+ CV_EXPORTS_W static Ptr<LSTMLayer> create();
/** Set trained weights for LSTM layer.
LSTM behavior on each step is defined by current input, previous output, previous cell state and learned weights.
@@ -91,15 +91,15 @@ namespace dnn
@param output computed outputs: h_t and c_t.
*/
- CV_EXPORTS void forward(std::vector<Blob*> &input, std::vector<Blob> &output);
+ CV_EXPORTS_W void forward(std::vector<Blob*> &input, std::vector<Blob> &output);
};
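For reference, the h_t and c_t mentioned in this documentation are the outputs of the standard LSTM step on input x_t, previous output h_{t-1} and previous cell state c_{t-1}. The exact gate ordering and weight packing used by this layer are not visible in the hunk, so the equations below are the textbook formulation, not a statement about this implementation's layout:

```latex
i_t = \sigma(W_{xi} x_t + W_{hi} h_{t-1} + b_i)
f_t = \sigma(W_{xf} x_t + W_{hf} h_{t-1} + b_f)
o_t = \sigma(W_{xo} x_t + W_{ho} h_{t-1} + b_o)
g_t = \tanh(W_{xg} x_t + W_{hg} h_{t-1} + b_g)
c_t = f_t \odot c_{t-1} + i_t \odot g_t
h_t = o_t \odot \tanh(c_t)
```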
//! Classical recurrent layer
- CV_EXPORTS class RNNLayer : public Layer
+ class RNNLayer : public Layer
{
public:
- CV_EXPORTS Ptr<RNNLayer> create();
+ CV_EXPORTS_W static Ptr<RNNLayer> create();
/** Setups learned weights.
@@ -113,16 +113,17 @@ namespace dnn
@param Who is W_ho matrix
@param bo is b_o vector
*/
- CV_EXPORTS virtual void setWeights(const Blob &Whh, const Blob &Wxh, const Blob &bh, const Blob &Who, const Blob &bo) = 0;
+ CV_EXPORTS_W virtual void setWeights(const Blob &Whh, const Blob &Wxh, const Blob &bh, const Blob &Who, const Blob &bo) = 0;
- /** Accept two inputs x_t and h_{t-1} and compute two outputs o_t and h_t.
- @param input should contain x_t and h_{t-1}
- @param output should contain o_t and h_t
+ /** Accepts two inputs x_t and h_{t-1} and computes two outputs o_t and h_t.
+ @param input may contain inputs x_t and h_{t-1}. x_t is required whereas h_{t-1} is optional.
+ If the second input h_{t-1} isn't specified, the layer uses its internal h_{t-1} from previous calls; at the first call h_{t-1} is filled with zeros.
+ @param output should contain outputs o_t and h_t
*/
- virtual void forward(std::vector<Blob*> &input, std::vector<Blob> &output);
+ void forward(std::vector<Blob*> &input, std::vector<Blob> &output);
};
}
}
#endif
\ No newline at end of file
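To make the declared interface concrete, here is a minimal usage sketch assembled from this header and the test fixture later in this commit. The include path and the Blob/BlobShape construction are assumptions based on how the tests use the module; the sizes are arbitrary examples:

```cpp
#include <opencv2/dnn.hpp>   // assumed include path for this module
#include <vector>
using namespace cv;
using namespace cv::dnn;

void runRNNStep(Blob &x_t)   // x_t: any blob whose last axis has size nX
{
    const int nX = 31, nH = 64, nO = 100;      // example sizes, as in the tests
    Blob Whh(BlobShape(Vec2i(nH, nH)));        // hidden -> hidden
    Blob Wxh(BlobShape(Vec2i(nH, nX)));        // input  -> hidden
    Blob bh (BlobShape(Vec2i(nH, 1)));
    Blob Who(BlobShape(Vec2i(nO, nH)));        // hidden -> output
    Blob bo (BlobShape(Vec2i(nO, 1)));

    Ptr<RNNLayer> rnn = RNNLayer::create();
    rnn->setWeights(Whh, Wxh, bh, Who, bo);

    std::vector<Blob*> inputs(1, &x_t);        // h_{t-1} omitted: kept internally, zeros on the first call
    std::vector<Blob> outputs;                 // after forward(): outputs[0] = o_t, outputs[1] = h_t
    rnn->allocate(inputs, outputs);
    rnn->forward(inputs, outputs);
}
```

Calling forward() again with only x_t would reuse the internally stored h_t from the previous call, per the @param documentation above.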
@@ -122,6 +122,7 @@ public:
Mat ep, em;
void tanh(Mat &x, Mat &d)
{
//TODO: two exp() is bad idea
cv::exp(-x, em);
cv::exp( x, ep);
cv::divide(ep - em, ep + em, d);
@@ -183,16 +184,18 @@ Ptr<LSTMLayer> LSTMLayer::create()
return Ptr<LSTMLayer>(new LSTMLayerImpl());
}
- void LSTMLayer::forward(std::vector<Blob*> &input, std::vector<Blob> &output)
+ void LSTMLayer::forward(std::vector<Blob*>&, std::vector<Blob>&)
{
- CV_Error(Error::StsNotImplemented, "This function should be unreached");
+ CV_Error(Error::StsInternal, "This function should be unreached");
}
class RNNLayerImpl : public RNNLayer
{
- int nX, nH;
+ int nX, nH, nO, nSamples;
Mat Whh, Wxh, bh;
Mat Who, bo;
+ Mat hPrevInternal, dummyBiasOnes;
public:
@@ -201,36 +204,114 @@ public:
type = "RNN";
}
- void setWeights(const Blob &Whh, const Blob &Wxh, const Blob &bh, const Blob &Who, const Blob &bo)
+ void setWeights(const Blob &W_hh, const Blob &W_xh, const Blob &b_h, const Blob &W_ho, const Blob &b_o)
{
- CV_Assert(Whh.dims() == 2 && Wxh.dims() == 2);
- CV_Assert(Whh.size(0) == Wxh.size(0) && Whh.size(0) == Whh.size(1) && bh.total() == Wxh.size(0));
- CV_Assert(Who.size(0) == bo.total());
- CV_Assert(Who.size(1) == Whh.size(1));
- blobs.reserve(5);
- blobs[0] = Whh;
- blobs[1] = Wxh;
- blobs[2] = bh;
- blobs[3] = Who;
- blobs[4] = bo;
+ CV_Assert(W_hh.dims() == 2 && W_xh.dims() == 2);
+ CV_Assert(W_hh.size(0) == W_xh.size(0) && W_hh.size(0) == W_hh.size(1) && b_h.total() == W_xh.size(0));
+ CV_Assert(W_ho.size(0) == b_o.total());
+ CV_Assert(W_ho.size(1) == W_hh.size(1));
+ //TODO: Check type
+ blobs.resize(5);
+ blobs[0] = W_hh;
+ blobs[1] = W_xh;
+ blobs[2] = b_h;
+ blobs[3] = W_ho;
+ blobs[4] = b_o;
}
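One detail worth calling out in the hunk above: the old code used blobs.reserve(5) and then assigned blobs[0]..blobs[4], but reserve() only changes capacity, not size, so those assignments indexed elements that did not exist. The new blobs.resize(5) fixes that. A minimal standalone illustration (W_hh here stands in for any value):

```cpp
#include <vector>

void reserveVsResize()
{
    std::vector<int> blobs;
    int W_hh = 42;                // stand-in for a weight blob

    blobs.reserve(5);             // capacity >= 5, but size() is still 0
    // blobs[0] = W_hh;           // undefined behavior: no element 0 exists yet

    blobs.resize(5);              // size() == 5, elements default-constructed
    blobs[0] = W_hh;              // now valid
}
```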
void allocate(const std::vector<Blob*> &input, std::vector<Blob> &output)
{
CV_Assert(input.size() >= 1 && input.size() <= 2);
Whh = blobs[0].matRefConst();
Wxh = blobs[1].matRefConst();
bh = blobs[2].matRefConst();
Who = blobs[3].matRefConst();
bo = blobs[4].matRefConst();
nH = Wxh.rows;
nX = Wxh.cols;
nO = Who.rows;
CV_Assert(input[0]->size(-1) == Wxh.cols);
nSamples = input[0]->total(0, input[0]->dims() - 1);
BlobShape xShape = input[0]->shape();
BlobShape hShape = xShape;
BlobShape oShape = xShape;
hShape[-1] = nH;
oShape[-1] = nO;
if (input.size() == 2)
{
CV_Assert(input[1]->shape() == hShape);
}
else
{
hPrevInternal.create(nSamples, nH, input[0]->type());
hPrevInternal.setTo(0);
}
output.resize(2);
output[0].create(oShape, input[0]->type());
output[1].create(hShape, input[0]->type());
dummyBiasOnes.create(nSamples, 1, bh.type());
dummyBiasOnes.setTo(1);
bh = bh.reshape(1, 1); //is 1 x nH mat
bo = bo.reshape(1, 1); //is 1 x nO mat
}
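The dummyBiasOnes column prepared above is the usual trick for adding a bias row to every sample with a single GEMM: the (nSamples x 1) column of ones times the (1 x nH) bias row reproduces the bias on every row, and forward() accumulates that into the activations. A small self-contained illustration with plain cv::Mat and cv::gemm (gemmCPU itself is an internal helper of this module):

```cpp
#include <opencv2/core.hpp>
using namespace cv;

// out = 1.0 * (ones * bias) + 1.0 * acc : the bias row is added to every sample row.
void broadcastBiasRows()
{
    int nSamples = 4, nH = 3;
    Mat acc  = Mat::zeros(nSamples, nH, CV_32F);            // e.g. x * W^T already accumulated here
    Mat ones = Mat::ones(nSamples, 1, CV_32F);              // the "dummyBiasOnes" column
    Mat bias = (Mat_<float>(1, nH) << 0.1f, 0.2f, 0.3f);    // 1 x nH bias row

    Mat out;
    gemm(ones, bias, 1.0, acc, 1.0, out);                   // out(i, j) = acc(i, j) + bias(0, j)
}
```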
- void tanh(Mat &x)
- void forward(std::vector<Blob*> &input, std::vector<Blob> &output)
+ //in-place tanh function
+ static void tanh(Mat &x) // 2 / (1 + e^(-2x)) - 1
{
x.convertTo(x, x.type(), -2); // -2x
cv::exp(x, x); // e^(-2x)
x.convertTo(x, x.type(), 1, 1); // 1 + e^(-2x)
cv::pow(x, -1, x); // 1 / (1 + e^(-2x))
x.convertTo(x, x.type(), 2, -1);// 2 / (1 + e^(-2x)) - 1
}
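The in-place variant above needs only one exp() call (the TODO next to the LSTM helper earlier in this diff points at the cost of computing two exponentials). The identity it relies on:

```latex
\tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}
         = \frac{1 - e^{-2x}}{1 + e^{-2x}}
         = \frac{2}{1 + e^{-2x}} - 1
```

which maps one-to-one onto the convertTo/exp/pow sequence: scale by -2, exponentiate, add 1, take the reciprocal, then scale by 2 and subtract 1.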
void forward(std::vector<Blob*> &input, std::vector<Blob> &output)
{
Mat xCurr = input[0]->matRefConst();
Mat hPrev = (input.size() >= 2) ? input[1]->matRefConst() : hPrevInternal;
Mat oCurr = output[0].matRef();
Mat hCurr = output[1].matRef();
//TODO: Check types
int xsz[] = {nSamples, nX};
int hsz[] = {nSamples, nH};
int osz[] = {nSamples, nO};
if (xCurr.dims != 2) xCurr = xCurr.reshape(1, 2, xsz);
if (hPrev.dims != 2) hPrev = hPrev.reshape(1, 2, hsz);
if (oCurr.dims != 2) oCurr = oCurr.reshape(1, 2, osz);
if (hCurr.dims != 2) hCurr = hCurr.reshape(1, 2, hsz);
gemmCPU(hPrev, Whh, 1, hCurr, 0, GEMM_2_T); // W_{hh} * h_{prev}
gemmCPU(xCurr, Wxh, 1, hCurr, 1, GEMM_2_T); //+W_{xh} * x_{curr}
gemmCPU(dummyBiasOnes, bh, 1, hCurr, 1); //+bh
tanh(hCurr);
gemmCPU(hPrev, Who, 1, oCurr, 0, GEMM_2_T); // W_{ho} * h_{prev}
gemmCPU(dummyBiasOnes, bo, 1, oCurr, 1); //+b_o
tanh(oCurr);
if (input.size() < 2) //save h_{prev}
hCurr.copyTo(hPrevInternal);
}
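Written out, the gemmCPU sequence above computes the following (row-per-sample convention; GEMM_2_T means the weight matrices are stored as out x in and transposed inside the product). Note that o_t is formed from h_{t-1}, the previous hidden state, exactly as the calls are ordered:

```latex
h_t = \tanh\left(h_{t-1} W_{hh}^{\top} + x_t W_{xh}^{\top} + b_h\right)
o_t = \tanh\left(h_{t-1} W_{ho}^{\top} + b_o\right)
```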
};
void RNNLayer::forward(std::vector<Blob*>&, std::vector<Blob>&)
{
CV_Error(Error::StsInternal, "This function should be unreached");
}
CV_EXPORTS_W Ptr<RNNLayer> RNNLayer::create()
{
return Ptr<RNNLayer>(new RNNLayerImpl());
}
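RNNLayer follows the same idiom as LSTMLayer above: the exported class only declares the interface, create() hands back the hidden Impl, and the base-class forward() is a stub raising StsInternal because callers only ever reach the Impl's override. A stripped-down sketch of the idiom (the Iface/Impl names are illustrative, not from the module):

```cpp
#include <opencv2/core.hpp>

struct Iface
{
    static cv::Ptr<Iface> create();        // users obtain instances only through the factory
    virtual void run()                     // base stub: should never be reached
    {
        CV_Error(cv::Error::StsInternal, "This function should be unreached");
    }
    virtual ~Iface() {}
};

struct Impl : public Iface
{
    void run() { /* the real computation lives in the Impl */ }
};

cv::Ptr<Iface> Iface::create() { return cv::Ptr<Iface>(new Impl()); }
```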
}
}
\ No newline at end of file
@@ -180,10 +180,9 @@ class Layer_LSTM_Test : public ::testing::Test
public:
int Nx, Nc;
Blob Wh, Wx, b;
- Ptr<LSTMLayer> lstm;
- std::vector<Blob> inputs;
- std::vector<Blob> outputs;
+ Ptr<LSTMLayer> layer;
+ std::vector<Blob> inputs, outputs;
std::vector<Blob*> inputsPtr;
Layer_LSTM_Test(int _Nx = 31, int _Nc = 100)
@@ -195,8 +194,8 @@ public:
Wx = Blob(BlobShape(Vec2i(4 * Nc, Nx)));
b = Blob(BlobShape(Vec2i(4 * Nc, 1)));
- lstm = LSTMLayer::create();
- lstm->setWeights(Wh, Wx, b);
+ layer = LSTMLayer::create();
+ layer->setWeights(Wh, Wx, b);
}
void allocateAndForward()
@@ -205,8 +204,8 @@ public:
for (size_t i = 0; i < inputs.size(); i++)
inputsPtr.push_back(&inputs[i]);
- lstm->allocate(inputsPtr, outputs);
- lstm->forward(inputsPtr, outputs);
+ layer->allocate(inputsPtr, outputs);
+ layer->forward(inputsPtr, outputs);
}
};
@@ -232,4 +231,63 @@ TEST_F(Layer_LSTM_Test, BasicTest_2)
EXPECT_EQ(outputs[1].shape(), BlobShape(1, 2, 3, Nc));
}
class Layer_RNN_Test : public ::testing::Test
{
public:
int Nx, Nh, No;
Blob Whh, Wxh, bh, Who, bo;
Ptr<RNNLayer> layer;
std::vector<Blob> inputs, outputs;
std::vector<Blob*> inputsPtr;
Layer_RNN_Test(int _Nx = 31, int _Nh = 64, int _No = 100)
{
Nx = _Nx;
Nh = _Nh;
No = _No;
Whh = Blob(BlobShape(Vec2i(Nh, Nh)));
Wxh = Blob(BlobShape(Vec2i(Nh, Nx)));
bh = Blob(BlobShape(Vec2i(Nh, 1)));
Who = Blob(BlobShape(Vec2i(No, Nh)));
bo = Blob(BlobShape(Vec2i(No, 1)));
layer = RNNLayer::create();
layer->setWeights(Whh, Wxh, bh, Who, bo);
}
void allocateAndForward()
{
inputsPtr.clear();
for (size_t i = 0; i < inputs.size(); i++)
inputsPtr.push_back(&inputs[i]);
layer->allocate(inputsPtr, outputs);
layer->forward(inputsPtr, outputs);
}
};
TEST_F(Layer_RNN_Test, BasicTest_1)
{
inputs.push_back(Blob(BlobShape(1, 2, 3, Nx)));
allocateAndForward();
EXPECT_EQ(outputs.size(), 2);
EXPECT_EQ(outputs[0].shape(), BlobShape(1, 2, 3, No));
EXPECT_EQ(outputs[1].shape(), BlobShape(1, 2, 3, Nh));
}
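The expected shapes in BasicTest_1 follow directly from allocate(): all leading axes are flattened into samples and only the last axis changes, so for an input of shape (1, 2, 3, Nx) the layer works on nSamples = 1 * 2 * 3 = 6 row vectors and reports o_t as (1, 2, 3, No) and h_t as (1, 2, 3, Nh). A tiny sketch of that bookkeeping (illustrative, not module code):

```cpp
// Shape bookkeeping behind the EXPECT_EQ checks above.
int xShape[] = {1, 2, 3, 31};        // input shape, last axis = Nx
int nSamples = 1;
for (int i = 0; i < 3; i++)          // product of every axis except the last
    nSamples *= xShape[i];           // -> 6 samples

int oShape[] = {1, 2, 3, 100};       // same leading axes, last axis replaced by No
int hShape[] = {1, 2, 3, 64};        // same leading axes, last axis replaced by Nh
```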
TEST_F(Layer_RNN_Test, BasicTest_2)
{
inputs.push_back(Blob(BlobShape(1, 2, 3, Nx)));
inputs.push_back(Blob(BlobShape(1, 2, 3, Nh)));
allocateAndForward();
EXPECT_EQ(outputs.size(), 2);
EXPECT_EQ(outputs[0].shape(), BlobShape(1, 2, 3, No));
EXPECT_EQ(outputs[1].shape(), BlobShape(1, 2, 3, Nh));
}
}