Unverified commit 4a861b77 authored by Robert Kimball, committed by GitHub

update unit tests (#2444)

parent 1b52a67f
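For context, this commit moves the examples and tests off the single-step `backend->call(function, outputs, inputs)` API and onto the two-step flow, where `Backend::compile` returns a `runtime::Executable` that is then invoked with `call(outputs, inputs)`. Below is a minimal sketch of the new pattern, assuming the nGraph API as of this commit; the `INTERPRETER` backend name, the `Add` graph, and the shapes are illustrative, not part of the diff:

```cpp
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    // Build a trivial function: f(a, b) = a + b.
    Shape shape{2, 3};
    auto a = std::make_shared<op::Parameter>(element::f32, shape);
    auto b = std::make_shared<op::Parameter>(element::f32, shape);
    auto f = std::make_shared<Function>(std::make_shared<op::Add>(a, b),
                                        ParameterVector{a, b});

    // Create a backend and the input/output tensors.
    auto backend = runtime::Backend::create("INTERPRETER");
    auto t_a = backend->create_tensor(element::f32, shape);
    auto t_b = backend->create_tensor(element::f32, shape);
    auto t_result = backend->create_tensor(element::f32, shape);

    // Before this commit: backend->call(f, {t_result}, {t_a, t_b});
    // After: compile once, then call the Executable any number of times.
    auto exec = backend->compile(f);
    exec->call({t_result}, {t_a, t_b});
}
```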
@@ -55,7 +55,8 @@ int main()
     t_c->write(&v_c, 0, sizeof(v_c));
     // Invoke the function
-    backend->call(f, {t_result}, {t_a, t_b, t_c});
+    auto exec = backend->compile(f);
+    exec->call({t_result}, {t_a, t_b, t_c});
     // Get the result
     float r[2][3];
...
@@ -54,7 +54,8 @@ int main()
     t_c->write(&v_c, 0, sizeof(v_c));
     // Invoke the function
-    backend->call(f, {t_result}, {t_a, t_b, t_c});
+    auto exec = backend->compile(f);
+    exec->call({t_result}, {t_a, t_b, t_c});
     // Get the result
     float r[2][3];
...
@@ -73,8 +73,7 @@ size_t accuracy_count(const std::shared_ptr<runtime::Tensor>& t_softmax,
 }
 float test_accuracy(MNistDataLoader& loader,
-                    std::shared_ptr<runtime::Backend> backend,
-                    std::shared_ptr<Function> function,
+                    std::shared_ptr<runtime::Executable> exec,
                     const std::shared_ptr<runtime::Tensor>& t_X,
                     const std::shared_ptr<runtime::Tensor>& t_Y,
                     const std::shared_ptr<runtime::Tensor>& t_softmax,
@@ -96,8 +95,7 @@ float test_accuracy(MNistDataLoader& loader,
         t_Y->write(loader.get_label_floats(),
                    0,
                    loader.get_label_batch_size() * sizeof(float));
-        backend->call(
-            function, {t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
+        exec->call({t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
         size_t acc = accuracy_count(t_softmax, t_Y);
         acc_count += acc;
         sample_count += batch_size;
@@ -236,6 +234,7 @@ int main(int argc, char* argv[])
             NodeVector{loss, softmax, W0_next, b0_next, W1_next, b1_next},
             ParameterVector{X, Y, N, learning_rate, W0, b0, W1, b1}),
         train_node_map);
+    auto train_exec = backend->compile(train_function);
     // Plain inference
     // X, W0, b0, W1, b1 -> softmax
@@ -243,6 +242,7 @@ int main(int argc, char* argv[])
     auto inference_function = clone_function(
         Function(NodeVector{softmax}, ParameterVector{X, W0, b0, W1, b1}),
         inference_node_map);
+    auto inference_exec = backend->compile(inference_function);
     set_scalar(t_learning_rate, .03f);
@@ -256,8 +256,7 @@ int main(int argc, char* argv[])
         t_Y->write(train_loader.get_label_floats(),
                    0,
                    train_loader.get_label_batch_size() * sizeof(float));
-        backend->call(
-            train_function,
+        train_exec->call(
             {t_loss,
              t_softmax,
              t_W0_next,
@@ -274,17 +273,15 @@ int main(int argc, char* argv[])
         if (train_loader.get_epoch() != last_epoch)
         {
             last_epoch = train_loader.get_epoch();
-            std::cout << "Test accuracy: "
-                      << test_accuracy(test_loader,
-                                       backend,
-                                       inference_function,
-                                       t_X,
-                                       t_Y,
-                                       t_softmax,
-                                       t_W0,
-                                       t_b0,
-                                       t_W1,
-                                       t_b1)
+            std::cout << "Test accuracy: " << test_accuracy(test_loader,
+                                                            inference_exec,
+                                                            t_X,
+                                                            t_Y,
+                                                            t_softmax,
+                                                            t_W0,
+                                                            t_b0,
+                                                            t_W1,
+                                                            t_b1)
                       << std::endl;
         }
     }
...
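With the signature change above, `test_accuracy` now receives the already-compiled `runtime::Executable` instead of a backend plus an uncompiled `Function`, so every accuracy pass reuses the same compiled inference graph rather than recompiling it. A hypothetical call site mirroring this file (names taken from the diff; `backend`, `inference_function`, and the tensors are assumed to be set up as in the surrounding code):

```cpp
// Compile once outside the training loop, then reuse the Executable
// for every accuracy evaluation.
auto inference_exec = backend->compile(inference_function);
float accuracy = test_accuracy(
    test_loader, inference_exec, t_X, t_Y, t_softmax, t_W0, t_b0, t_W1, t_b1);
std::cout << "Test accuracy: " << accuracy << std::endl;
```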
@@ -72,8 +72,7 @@ size_t accuracy_count(const std::shared_ptr<runtime::Tensor>& t_softmax,
 }
 float test_accuracy(MNistDataLoader& loader,
-                    std::shared_ptr<runtime::Backend> backend,
-                    std::shared_ptr<Function> function,
+                    std::shared_ptr<runtime::Executable> exec,
                     const std::shared_ptr<runtime::Tensor>& t_X,
                     const std::shared_ptr<runtime::Tensor>& t_Y,
                     const std::shared_ptr<runtime::Tensor>& t_softmax,
@@ -95,8 +94,7 @@ float test_accuracy(MNistDataLoader& loader,
         t_Y->write(loader.get_label_floats(),
                    0,
                    loader.get_label_batch_size() * sizeof(float));
-        backend->call(
-            function, {t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
+        exec->call({t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
         size_t acc = accuracy_count(t_softmax, t_Y);
         acc_count += acc;
         sample_count += batch_size;
@@ -223,6 +221,7 @@ int main(int argc, const char* argv[])
             NodeVector{loss, softmax, W0_next, b0_next, W1_next, b1_next},
             ParameterVector{X, Y, N, learning_rate, W0, b0, W1, b1}),
         train_node_map);
+    auto train_exec = backend->compile(train_function);
     // Plain inference
     // X, W0, b0, W1, b1 -> softmax
@@ -230,6 +229,7 @@ int main(int argc, const char* argv[])
     auto inference_function = clone_function(
         Function(NodeVector{softmax}, ParameterVector{X, W0, b0, W1, b1}),
         inference_node_map);
+    auto inference_exec = backend->compile(inference_function);
     set_scalar(t_learning_rate, .03f);
@@ -243,8 +243,7 @@ int main(int argc, const char* argv[])
         t_Y->write(train_loader.get_label_floats(),
                    0,
                    train_loader.get_label_batch_size() * sizeof(float));
-        backend->call(
-            train_function,
+        train_exec->call(
             {t_loss,
              t_softmax,
              t_W0_next,
@@ -261,17 +260,15 @@ int main(int argc, const char* argv[])
         if (train_loader.get_epoch() != last_epoch)
        {
             last_epoch = train_loader.get_epoch();
-            std::cout << "Test accuracy: "
-                      << test_accuracy(test_loader,
-                                       backend,
-                                       inference_function,
-                                       t_X,
-                                       t_Y,
-                                       t_softmax,
-                                       t_W0,
-                                       t_b0,
-                                       t_W1,
-                                       t_b1)
+            std::cout << "Test accuracy: " << test_accuracy(test_loader,
+                                                            inference_exec,
+                                                            t_X,
+                                                            t_Y,
+                                                            t_softmax,
+                                                            t_W0,
+                                                            t_b0,
+                                                            t_W1,
+                                                            t_b1)
                       << std::endl;
         }
     }
...