Unverified Commit 4a861b77 authored by Robert Kimball, committed by GitHub

update unit tests (#2444)

parent 1b52a67f
......@@ -55,7 +55,8 @@ int main()
t_c->write(&v_c, 0, sizeof(v_c));
// Invoke the function
backend->call(f, {t_result}, {t_a, t_b, t_c});
auto exec = backend->compile(f);
exec->call({t_result}, {t_a, t_b, t_c});
// Get the result
float r[2][3];
......
......@@ -54,7 +54,8 @@ int main()
t_c->write(&v_c, 0, sizeof(v_c));
// Invoke the function
backend->call(f, {t_result}, {t_a, t_b, t_c});
auto exec = backend->compile(f);
exec->call({t_result}, {t_a, t_b, t_c});
// Get the result
float r[2][3];
......
......@@ -73,8 +73,7 @@ size_t accuracy_count(const std::shared_ptr<runtime::Tensor>& t_softmax,
}
float test_accuracy(MNistDataLoader& loader,
std::shared_ptr<runtime::Backend> backend,
std::shared_ptr<Function> function,
std::shared_ptr<runtime::Executable> exec,
const std::shared_ptr<runtime::Tensor>& t_X,
const std::shared_ptr<runtime::Tensor>& t_Y,
const std::shared_ptr<runtime::Tensor>& t_softmax,
......@@ -96,8 +95,7 @@ float test_accuracy(MNistDataLoader& loader,
t_Y->write(loader.get_label_floats(),
0,
loader.get_label_batch_size() * sizeof(float));
backend->call(
function, {t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
exec->call({t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
size_t acc = accuracy_count(t_softmax, t_Y);
acc_count += acc;
sample_count += batch_size;
......@@ -236,6 +234,7 @@ int main(int argc, char* argv[])
NodeVector{loss, softmax, W0_next, b0_next, W1_next, b1_next},
ParameterVector{X, Y, N, learning_rate, W0, b0, W1, b1}),
train_node_map);
auto train_exec = backend->compile(train_function);
// Plain inference
// X, W0, b0, W1, b1 -> softmax
......@@ -243,6 +242,7 @@ int main(int argc, char* argv[])
auto inference_function = clone_function(
Function(NodeVector{softmax}, ParameterVector{X, W0, b0, W1, b1}),
inference_node_map);
auto inference_exec = backend->compile(inference_function);
set_scalar(t_learning_rate, .03f);
......@@ -256,8 +256,7 @@ int main(int argc, char* argv[])
t_Y->write(train_loader.get_label_floats(),
0,
train_loader.get_label_batch_size() * sizeof(float));
backend->call(
train_function,
train_exec->call(
{t_loss,
t_softmax,
t_W0_next,
......@@ -274,17 +273,15 @@ int main(int argc, char* argv[])
if (train_loader.get_epoch() != last_epoch)
{
last_epoch = train_loader.get_epoch();
std::cout << "Test accuracy: "
<< test_accuracy(test_loader,
backend,
inference_function,
t_X,
t_Y,
t_softmax,
t_W0,
t_b0,
t_W1,
t_b1)
std::cout << "Test accuracy: " << test_accuracy(test_loader,
inference_exec,
t_X,
t_Y,
t_softmax,
t_W0,
t_b0,
t_W1,
t_b1)
<< std::endl;
}
}
......
......@@ -72,8 +72,7 @@ size_t accuracy_count(const std::shared_ptr<runtime::Tensor>& t_softmax,
}
float test_accuracy(MNistDataLoader& loader,
std::shared_ptr<runtime::Backend> backend,
std::shared_ptr<Function> function,
std::shared_ptr<runtime::Executable> exec,
const std::shared_ptr<runtime::Tensor>& t_X,
const std::shared_ptr<runtime::Tensor>& t_Y,
const std::shared_ptr<runtime::Tensor>& t_softmax,
......@@ -95,8 +94,7 @@ float test_accuracy(MNistDataLoader& loader,
t_Y->write(loader.get_label_floats(),
0,
loader.get_label_batch_size() * sizeof(float));
backend->call(
function, {t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
exec->call({t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
size_t acc = accuracy_count(t_softmax, t_Y);
acc_count += acc;
sample_count += batch_size;
......@@ -223,6 +221,7 @@ int main(int argc, const char* argv[])
NodeVector{loss, softmax, W0_next, b0_next, W1_next, b1_next},
ParameterVector{X, Y, N, learning_rate, W0, b0, W1, b1}),
train_node_map);
auto train_exec = backend->compile(train_function);
// Plain inference
// X, W0, b0, W1, b1 -> softmax
......@@ -230,6 +229,7 @@ int main(int argc, const char* argv[])
auto inference_function = clone_function(
Function(NodeVector{softmax}, ParameterVector{X, W0, b0, W1, b1}),
inference_node_map);
auto inference_exe = backend->compile(inference_function);
set_scalar(t_learning_rate, .03f);
......@@ -243,8 +243,7 @@ int main(int argc, const char* argv[])
t_Y->write(train_loader.get_label_floats(),
0,
train_loader.get_label_batch_size() * sizeof(float));
backend->call(
train_function,
train_exec->call(
{t_loss,
t_softmax,
t_W0_next,
......@@ -261,17 +260,15 @@ int main(int argc, const char* argv[])
if (train_loader.get_epoch() != last_epoch)
{
last_epoch = train_loader.get_epoch();
std::cout << "Test accuracy: "
<< test_accuracy(test_loader,
backend,
inference_function,
t_X,
t_Y,
t_softmax,
t_W0,
t_b0,
t_W1,
t_b1)
std::cout << "Test accuracy: " << test_accuracy(test_loader,
                                                        inference_exe,
t_X,
t_Y,
t_softmax,
t_W0,
t_b0,
t_W1,
t_b1)
<< std::endl;
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment