# frozen_string_literal: true

require 'svmkit/validation'
require 'svmkit/linear_model/sgd_linear_estimator'
require 'svmkit/base/regressor'

module SVMKit
  module LinearModel
    # LinearRegression is a class that implements ordinary least squares linear regression
    # with mini-batch stochastic gradient descent optimization.
    #
    # @example
    #   estimator =
    #     SVMKit::LinearModel::LinearRegression.new(max_iter: 1000, batch_size: 20, random_seed: 1)
    #   estimator.fit(training_samples, training_values)
    #   results = estimator.predict(testing_samples)
    #
    class LinearRegression < SGDLinearEstimator
      include Base::Regressor
      include Validation

      # Return the weight vector.
      # @return [Numo::DFloat] (shape: [n_outputs, n_features])
      attr_reader :weight_vec

      # Return the bias term (a.k.a. intercept).
      # @return [Numo::DFloat] (shape: [n_outputs])
      attr_reader :bias_term

      # Return the random generator for random sampling.
      # @return [Random]
      attr_reader :rng

      # Create a new ordinary least squares linear regressor.
      #
      # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
      # @param bias_scale [Float] The scale of the bias term.
      # @param max_iter [Integer] The maximum number of iterations.
      # @param batch_size [Integer] The size of the mini-batches.
      # @param optimizer [Optimizer] The optimizer used to calculate adaptive learning rates.
      #   If nil is given, Nadam is used.
      # @param random_seed [Integer] The seed value used to initialize the random generator.
      def initialize(fit_bias: false, bias_scale: 1.0, max_iter: 1000, batch_size: 10,
                     optimizer: nil, random_seed: nil)
        check_params_float(bias_scale: bias_scale)
        check_params_integer(max_iter: max_iter, batch_size: batch_size)
        check_params_boolean(fit_bias: fit_bias)
        check_params_type_or_nil(Integer, random_seed: random_seed)
        check_params_positive(max_iter: max_iter, batch_size: batch_size)
        super(reg_param: 0.0, fit_bias: fit_bias, bias_scale: bias_scale,
              max_iter: max_iter, batch_size: batch_size, optimizer: optimizer, random_seed: random_seed)
      end

      # Fit the model with given training data.
      #
      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
      # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
      # @return [LinearRegression] The learned regressor itself.
      def fit(x, y)
        check_sample_array(x)
        check_tvalue_array(y)
        check_sample_tvalue_size(x, y)

        n_outputs = y.shape[1].nil? ? 1 : y.shape[1]
        n_features = x.shape[1]

        if n_outputs > 1
          @weight_vec = Numo::DFloat.zeros(n_outputs, n_features)
          @bias_term = Numo::DFloat.zeros(n_outputs)
          n_outputs.times { |n| @weight_vec[n, true], @bias_term[n] = partial_fit(x, y[true, n]) }
        else
          @weight_vec, @bias_term = partial_fit(x, y)
        end

        self
      end

      # Predict values for samples.
      #
      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples for which to predict values.
      # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
      def predict(x)
        check_sample_array(x)
        x.dot(@weight_vec.transpose) + @bias_term
      end

      # Dump marshal data.
      # @return [Hash] The marshal data about LinearRegression.
      def marshal_dump
        { params: @params,
          weight_vec: @weight_vec,
          bias_term: @bias_term,
          rng: @rng }
      end

      # Load marshal data.
      # @return [nil]
      def marshal_load(obj)
        @params = obj[:params]
        @weight_vec = obj[:weight_vec]
        @bias_term = obj[:bias_term]
        @rng = obj[:rng]
        nil
      end

      private

      # Gradient of the squared error loss (x.dot(weight) - y)**2 with respect to
      # the linear prediction x.dot(weight): 2 * (prediction - target) per sample.
      def calc_loss_gradient(x, y, weight)
        2.0 * (x.dot(weight) - y)
      end
    end
  end
end
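
# ----------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the SVMKit API). A minimal
# example of fitting a multi-output regressor on synthetic data, run only when
# this file is executed directly. It assumes the svmkit and numo-narray gems
# are installed; all variable names below are illustrative.
# ----------------------------------------------------------------------------
if $PROGRAM_NAME == __FILE__
  require 'numo/narray'

  # 100 samples with 3 features, and 2 target columns generated from a known
  # linear map, so the learned weights should roughly recover true_weights.
  samples = Numo::DFloat.new(100, 3).rand
  true_weights = Numo::DFloat[[1.0, -0.5], [0.3, 2.0], [-1.2, 0.7]]
  targets = samples.dot(true_weights)

  regressor = SVMKit::LinearModel::LinearRegression.new(
    fit_bias: true, max_iter: 500, batch_size: 20, random_seed: 1
  )
  regressor.fit(samples, targets)

  predictions = regressor.predict(samples)
  puts "weight matrix shape: #{regressor.weight_vec.shape.inspect}"   # => [2, 3]
  puts "prediction shape:    #{predictions.shape.inspect}"            # => [100, 2]

  # The regressor can be persisted with Ruby's Marshal, via the
  # marshal_dump / marshal_load hooks defined above.
  restored = Marshal.load(Marshal.dump(regressor))
  puts "restored prediction shape: #{restored.predict(samples).shape.inspect}"
end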