ext/convolver/convolver.c in convolver-0.2.0 vs ext/convolver/convolver.c in convolver-0.3.0

- old
+ new

@@ -5,11 +5,10 @@
 #include <stdio.h>
 #include <xmmintrin.h>
 #include "narray_shared.h"
 #include "convolve_raw.h"
-#include "cnn_components.h"
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 //
 // To hold the module object
 //
 
 VALUE Convolver = Qnil;
@@ -109,65 +108,12 @@
       target_rank, target_shape, (float*) na_c->ptr );
 
   return val_c;
 }
 
-/* @overload nn_run_layer( inputs, weights, thresholds )
- * Calculates activations of a fully-connected neural network layer. The transfer function after
- * summing weights and applying threshold is a "ReLU", equivalent to
- *     y = x < 0.0 ? 0.0 : x
- * this is less sophisticated than many other neural net functions (such as sigma), but is fast to
- * calculate and to train.
- * @param [NArray] inputs must be rank 1 array of floats
- * @param [NArray] weights must be rank 2 array of floats, with first dimension size of inputs, and second dimension size equal to number of outputs
- * @param [NArray] thresholds must be rank 1 array of floats, size equal to number of outputs desired
- * @return [NArray] neuron activations
- */
-static VALUE narray_nn_run_single_layer( VALUE self, VALUE inputs, VALUE weights, VALUE thresholds ) {
-  struct NARRAY *na_inputs, *na_weights, *na_thresholds, *na_outputs;
-  volatile VALUE val_inputs, val_weights, val_thresholds, val_outputs;
-  int input_size, output_size;
-  int output_shape[1];
-
-  val_inputs = na_cast_object(inputs, NA_SFLOAT);
-  GetNArray( val_inputs, na_inputs );
-  if ( na_inputs->rank != 1 ) {
-    rb_raise( rb_eArgError, "input must be array of rank 1" );
-  }
-  input_size = na_inputs->total;
-
-  val_weights = na_cast_object(weights, NA_SFLOAT);
-  GetNArray( val_weights, na_weights );
-  if ( na_weights->rank != 2 ) {
-    rb_raise( rb_eArgError, "weights must be array of rank 2" );
-  }
-  if ( na_weights->shape[0] != input_size ) {
-    rb_raise( rb_eArgError, "weights shape mismatch, expected %d across, got %d", input_size, na_weights->shape[0] );
-  }
-  output_size = na_weights->shape[1];
-
-  val_thresholds = na_cast_object(thresholds, NA_SFLOAT);
-  GetNArray( val_thresholds, na_thresholds );
-  if ( na_thresholds->rank != 1 ) {
-    rb_raise( rb_eArgError, "thresholds must be narray of rank 1" );
-  }
-  if ( na_thresholds->shape[0] != output_size ) {
-    rb_raise( rb_eArgError, "thresholds expected size %d, but got %d", output_size, na_thresholds->shape[0] );
-  }
-
-  output_shape[0] = output_size;
-  val_outputs = na_make_object( NA_SFLOAT, 1, output_shape, CLASS_OF( val_inputs ) );
-  GetNArray( val_outputs, na_outputs );
-
-  nn_run_layer_raw( input_size, output_size, (float*) na_inputs->ptr, (float*) na_weights->ptr,
-      (float*) na_thresholds->ptr, (float*) na_outputs->ptr );
-
-  return val_outputs;
-}
-
-
 void Init_convolver() {
   Convolver = rb_define_module( "Convolver" );
   rb_define_singleton_method( Convolver, "convolve_basic", narray_convolve, 2 );
-  rb_define_singleton_method( Convolver, "nn_run_layer", narray_nn_run_single_layer, 3 );
+
+  // private method
   rb_define_singleton_method( Convolver, "fit_kernel_backwards", narray_fit_backwards, 2 );
 }
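
Note on the removed binding: the doc comment deleted above describes nn_run_layer as, per output neuron, a weighted sum of the inputs, with a threshold applied, followed by a ReLU (y = x < 0.0 ? 0.0 : x). The actual kernel, nn_run_layer_raw, is not shown in this diff (it presumably came from the also-removed cnn_components.h include). The sketch below is only an illustration reconstructed from that documentation, assuming "applying threshold" means subtracting it from the weighted sum; the function name dense_relu_layer is hypothetical and not part of the gem.

/* Hypothetical sketch of the dense-layer pass described by the removed doc
 * comment. Weights are documented as an NArray of shape
 * [input_size, output_size], so the input index varies fastest within the
 * weights for one output neuron. Not the gem's actual nn_run_layer_raw. */
static void dense_relu_layer( int input_size, int output_size,
    const float *inputs, const float *weights,
    const float *thresholds, float *outputs ) {
  int i, j;
  for ( j = 0; j < output_size; j++ ) {
    const float *w = weights + (long) j * input_size;  /* weights for output j */
    float t = 0.0f;
    for ( i = 0; i < input_size; i++ ) {
      t += inputs[i] * w[i];               /* weighted sum of inputs */
    }
    t -= thresholds[j];                    /* assumed: threshold is subtracted */
    outputs[j] = ( t < 0.0f ) ? 0.0f : t;  /* ReLU: y = x < 0.0 ? 0.0 : x */
  }
}

Under that reading, Convolver.nn_run_layer( inputs, weights, thresholds ) in 0.2.0 returned a rank-1 NArray of output_size activations; 0.3.0 drops the binding entirely, leaving only convolve_basic and the now-private fit_kernel_backwards.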