Commit 8ad92a06 authored by Adam Rogowiec

Fix compilation on CentOS and on GPU: remove redundant trailing semicolons after inline member function definitions.

parent 1bc3b21a
@@ -141,7 +141,7 @@ namespace ngraph
             virtual std::shared_ptr<Node>
                 copy_with_new_args(const NodeVector& new_args) const override;

-            bool get_input_forget() const { return m_input_forget; };
+            bool get_input_forget() const { return m_input_forget; }
         private:
             ///
             /// \brief The input data tensor. Shape: [batch_size, input_size].
...
@@ -52,11 +52,11 @@ namespace ngraph
                 const std::vector<float>& activation_alpha,
                 const std::vector<float>& activation_beta);

-            std::size_t get_hidden_size() const { return m_hidden_size; };
-            float get_clip() const { return m_clip; };
-            const std::vector<std::string>& get_activations() const { return m_activations; };
-            const std::vector<float>& get_activation_alpha() const { return m_activation_alpha; };
-            const std::vector<float>& get_activation_beta() const { return m_activation_beta; };
+            std::size_t get_hidden_size() const { return m_hidden_size; }
+            float get_clip() const { return m_clip; }
+            const std::vector<std::string>& get_activations() const { return m_activations; }
+            const std::vector<float>& get_activation_alpha() const { return m_activation_alpha; }
+            const std::vector<float>& get_activation_beta() const { return m_activation_beta; }
         protected:
             ///
             /// \brief Constructs activation function object.
...
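For context, each fixed line drops a redundant semicolon after an inline member-function body. Such a trailing ';' is an extra empty declaration at class scope; compilers such as clang and GCC 8+ diagnose it under -Wextra-semi, and a warnings-as-errors build then fails, which is presumably what broke on CentOS and in the GPU build. A minimal sketch (the Cell class below is hypothetical, not from the ngraph headers):

#include <cstddef>

// Hypothetical example; compile with warnings-as-errors to reproduce
// the failure mode, e.g.:
//     clang++ -std=c++11 -Wextra-semi -Werror -c cell.cpp
class Cell
{
public:
    // Before the fix: the trailing ';' after the function body is an
    // extra empty declaration and triggers an "extra ';'" diagnostic.
    //     std::size_t get_hidden_size() const { return m_hidden_size; };
    //
    // After the fix: the inline definition ends at the closing brace.
    std::size_t get_hidden_size() const { return m_hidden_size; }

private:
    std::size_t m_hidden_size = 0;
};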