// ArrayFire deep belief network demo: greedy RBM pre-training of each
// hidden layer, followed by backpropagation fine-tuning, on MNIST.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include <algorithm>
#include <string>
#include <vector>

#include "mnist_common.h"

using namespace af;
using std::vector;
// Fraction of rows where the predicted label matches the target label
float accuracy(const array &predicted, const array &target) {
    array val, plabels, tlabels;
    max(val, tlabels, target, 1);
    max(val, plabels, predicted, 1);
    return 100 * count<float>(plabels == tlabels) / tlabels.elements();
}
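
// Note: af::max with two output arrays returns both the maximum values and
// their indices along the given dimension, so plabels/tlabels above hold
// the argmax of each one-hot row.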
// Derivative of the sigmoid activation function
array deriv(const array &out) { return out * (1 - out); }
// Cost: Euclidean norm of the difference
double error(const array &out, const array &pred) {
    array dif = (out - pred);
    return sqrt((double)(sum<float>(dif * dif)));
}

// Stochastic binary units: output 1 with probability sigmoid(in).
// Restored here because rbm::train below calls it.
array sigmoid_binary(const array &in) {
    return (sigmoid(in) > randu(in.dims())).as(f32);
}
class rbm {
   private:
    array weights;
    array h_bias;
    array v_bias;

   public:
    rbm(int v_size, int h_size)
        : weights(randu(h_size, v_size) / 100.f)
        , h_bias(constant(0, 1, h_size))
        , v_bias(constant(0, 1, v_size)) {}
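
    // get_weights() and prop_up() are needed by dbn::train below; a minimal
    // sketch assuming the bias-first layout used by dbn::add_bias(): the
    // returned matrix is (v_size + 1, h_size) with the hidden bias in row 0,
    // so it can be consumed directly as a feed-forward layer.
    array get_weights() {
        return transpose(join(1, transpose(h_bias), weights));
    }

    // Deterministic visible-to-hidden propagation (no binary sampling)
    array prop_up(const array &input) {
        return sigmoid(tile(h_bias, input.dims(0)) + matmulNT(input, weights));
    }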
    // Train with one-step contrastive divergence (CD-1)
    void train(const array &in, double lr, int num_epochs, int batch_size,
               bool verbose) {
        const int num_samples = in.dims(0);
        const int num_batches = num_samples / batch_size;
for (int i = 0; i < num_epochs; i++) {
double err = 0;
for (int j = 0; j < num_batches - 1; j++) {
int st = j * batch_size;
int en = std::min(num_samples - 1, st + batch_size - 1);
int num = en - st + 1;
                // Positive phase: hidden samples driven by the data
                array v_pos = in(seq(st, en), span);
                array h_pos = sigmoid_binary(tile(h_bias, num) +
                                             matmulNT(v_pos, weights));

                // Negative phase: one Gibbs step to get a reconstruction
                array v_neg = sigmoid_binary(tile(v_bias, num) +
                                             matmul(h_pos, weights));
                array h_neg = sigmoid_binary(tile(h_bias, num) +
                                             matmulNT(v_neg, weights));

                // Hidden/visible correlations for both phases
                array c_pos = matmulTN(h_pos, v_pos);
                array c_neg = matmulTN(h_neg, v_neg);

                array delta_w  = lr * (c_pos - c_neg) / num;
                array delta_vb = lr * sum(v_pos - v_neg) / num;
                array delta_hb = lr * sum(h_pos - h_neg) / num;
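                // This implements Hinton's CD-1 learning rule: the weight
                // update approximates lr * (<v h>_data - <v h>_model), with
                // the intractable model expectation replaced by a single
                // reconstruction step.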
weights += delta_w;
v_bias += delta_vb;
h_bias += delta_hb;
if (verbose) { err += error(v_pos, v_neg); }
}
if (verbose) {
printf("Epoch %d: Reconstruction error: %0.4f\n", i + 1,
err / num_batches);
}
}
}
};
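
// Usage sketch for the rbm class on its own (shapes are illustrative, not
// taken from the demo below): pre-train on binary row-major samples, then
// lift the data to its hidden representation.
//
//     array X = (randu(1000, 784) > 0.5f).as(f32);  // 1000 binary samples
//     rbm r(784, 100);                     // 784 visible, 100 hidden units
//     r.train(X, 0.2, 15, 100, true);      // lr, epochs, batch, verbose
//     array H = r.prop_up(X);              // 1000 x 100 hidden activations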
class dbn {
   private:
    const int in_size;
    const int out_size;
    const int num_hidden;
    const int num_total;

    std::vector<array> weights;
    std::vector<int> hidden;

    // Prepend a constant bias column to the input (bias-first layout)
    array add_bias(const array &in) {
        return join(1, constant(1, in.dims(0), 1), in);
    }
    // Forward pass: compute sigmoid activations at every layer
    vector<array> forward_propagate(const array &input) {
        vector<array> signal(num_total);
        signal[0] = input;

        for (int i = 0; i < num_total - 1; i++) {
            array in      = add_bias(signal[i]);
            array out     = matmul(in, weights[i]);
            signal[i + 1] = sigmoid(out);
        }

        return signal;
    }
    // Backward pass: batch gradient descent on the squared error
    void back_propagate(const vector<array> signal, const array &target,
                        const double &alpha) {
        // Error at the output layer
        array out = signal[num_total - 1];
        array err = (out - target);
        int m     = target.dims(0);
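
        // Each iteration applies the standard delta rule for a sigmoid
        // layer: delta = (err .* out .* (1 - out))^T, and the layer weights
        // move by -alpha/m times the gradient delta * [1 | activations].
        // The bias column of the back-propagated error is then discarded.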
        for (int i = num_total - 2; i >= 0; i--) {
            array in    = add_bias(signal[i]);
            array delta = (deriv(out) * err).T();

            // Adjust this layer's weights by gradient descent
            array grad = -(alpha * matmul(delta, in)) / m;
            weights[i] += transpose(grad);

            // Input to the current layer is the output of the previous one
            out = signal[i];
            err = matmulTT(delta, weights[i]);

            // Drop the bias column's error before propagating backward
            err = err(span, seq(1, out.dims(1)));
        }
}
public:
dbn(const int in_sz, const int out_sz, const std::vector<int> hidden_layers)
: in_size(in_sz)
, out_size(out_sz)
, num_hidden(hidden_layers.size())
, num_total(hidden_layers.size() + 2)
, weights(hidden_layers.size() + 1)
, hidden(hidden_layers) {}
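
    // Bookkeeping: num_total counts the input, hidden, and output layers,
    // so there are num_hidden + 1 weight matrices, one per connection.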
    // Train the full network: RBM pre-training, then backprop fine-tuning
    void train(const array &input, const array &target, double lr_rbm = 1.0,
               double lr_nn = 1.0, const int epochs_rbm = 15,
               const int epochs_nn = 300, const int batch_size = 100,
               double maxerr = 1.0, bool verbose = false) {
        // Greedy layer-wise pre-training: each hidden layer is an RBM
        // trained on the propagated output of the previous layer
        array X = input;
        for (int i = 0; i < num_hidden; i++) {
if (verbose) { printf("Training Hidden Layer %d\n", i); }
int visible = (i == 0) ? in_size : hidden[i - 1];
rbm r(visible, hidden[i]);
r.train(X, lr_rbm, epochs_rbm, batch_size, verbose);
X = r.prop_up(X);
weights[i] = r.get_weights();
if (verbose) { printf("\n"); }
}
        // Initialize the output layer with small random weights
        weights[num_hidden] =
            0.05 * randu(hidden[num_hidden - 1] + 1, out_size) - 0.0025;

        const int num_samples = input.dims(0);
        const int num_batches = num_samples / batch_size;
for (int i = 0; i < epochs_nn; i++) {
for (int j = 0; j < num_batches; j++) {
int st = j * batch_size;
int en = std::min(num_samples - 1, st + batch_size - 1);
                // Propagate the current batch forward, then the error back
                array x = input(seq(st, en), span);
                array y = target(seq(st, en), span);

                vector<array> signals = forward_propagate(x);
                array out             = signals[num_total - 1];
                back_propagate(signals, y, lr_nn);
}
            // Validate on the last batch
            int st     = (num_batches - 1) * batch_size;
            int en     = num_samples - 1;
            array out  = predict(input(seq(st, en), span));
            double err = error(out, target(seq(st, en), span));
if (err < maxerr) {
printf("Converged on Epoch: %4d\n", i + 1);
return;
}
if (verbose) {
if ((i + 1) % 10 == 0)
printf("Epoch: %4d, Error: %0.4f\n", i + 1, err);
}
}
    }

    // Run the network forward and return the output-layer activations
    array predict(const array &input) {
        vector<array> signal = forward_propagate(input);
        array out            = signal[num_total - 1];
        return out;
    }
};
int dbn_demo(bool console, int perc) {
    printf("** ArrayFire DBN Demo **\n\n");

    array train_images, test_images;
    array train_target, test_target;
    int num_classes, num_train, num_test;

    // Load the MNIST data; perc selects the fraction used for training
    float frac = (float)(perc) / 100.0;
    setup_mnist<true>(&num_classes, &num_train, &num_test, train_images,
                      test_images, train_target, test_target, frac);

    int feature_size = train_images.elements() / num_train;
    // Reshape images into feature vectors, one sample per row
    array train_feats = moddims(train_images, feature_size, num_train).T();
    array test_feats  = moddims(test_images, feature_size, num_test).T();

    train_target = train_target.T();
    test_target  = test_target.T();
    // Network topology: two hidden layers of 100 and 50 units
    vector<int> layers;
    layers.push_back(100);
    layers.push_back(50);

    // Create the network
    dbn network(train_feats.dims(1), num_classes, layers);
    // Train the network
    timer::start();
    network.train(train_feats, train_target,
                  0.2,    // rbm learning rate
                  4.0,    // nn learning rate
                  15,     // rbm epochs
                  250,    // nn epochs
                  100,    // batch size
                  0.5,    // max error
                  true);  // verbose
double train_time = timer::stop();
    array train_output = network.predict(train_feats);
    array test_output  = network.predict(test_feats);
timer::start();
for (int i = 0; i < 100; i++) { network.predict(test_feats); }
double test_time = timer::stop() / 100;
printf("\nTraining set:\n");
printf("Accuracy on training data: %2.2f\n",
accuracy(train_output, train_target));
printf("\nTest set:\n");
printf("Accuracy on testing data: %2.2f\n",
accuracy(test_output, test_target));
printf("\nTraining time: %4.4lf s\n", train_time);
printf("Prediction time: %4.4lf s\n\n", test_time);
    if (!console) {
        test_output = test_output.T();
        // Display 20 test images alongside predicted and true labels
        display_results<true>(test_images, test_output, test_target.T(), 20);
    }
return 0;
}
int main(int argc, char **argv) {
    int device   = argc > 1 ? atoi(argv[1]) : 0;
    bool console = argc > 2 ? argv[2][0] == '-' : false;
    int perc     = argc > 3 ? atoi(argv[3]) : 60;

    try {
        af::setDevice(device);
        af::info();
        return dbn_demo(console, perc);
    } catch (af::exception &ae) {
        fprintf(stderr, "%s\n", ae.what());
    }

    return 0;
}
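
// Usage sketch (the binary name is an assumption; argument handling follows
// the code above): the demo accepts an optional device id, a console flag
// (an argument starting with '-' disables the image display), and the
// percentage of MNIST to train on, e.g.
//
//     ./dbn 0 - 60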