lib/rumale/svm/svr.rb in rumale-svm-0.1.0 vs lib/rumale/svm/svr.rb in rumale-svm-0.2.0

- old
+ new

@@ -29,38 +29,39 @@
       # @param tol [Float] The tolerance of termination criterion.
       # @param verbose [Boolean] The flag indicating whether to output learning process message
       # @param random_seed [Integer/Nil] The seed value using to initialize the random generator.
       def initialize(reg_param: 1.0, epsilon: 0.1, kernel: 'rbf', degree: 3, gamma: 1.0, coef0: 0.0,
                      shrinking: true, cache_size: 200.0, tol: 1e-3, verbose: false, random_seed: nil)
-        check_params_float(reg_param: reg_param, epsilon: epsilon, gamma: gamma, coef0: coef0, cache_size: cache_size, tol: tol)
-        check_params_integer(degree: degree)
+        check_params_numeric(reg_param: reg_param, degree: degree, epsilon: epsilon, gamma: gamma, coef0: coef0,
+                             cache_size: cache_size, tol: tol)
+        check_params_string(kernel: kernel)
         check_params_boolean(shrinking: shrinking, verbose: verbose)
-        check_params_type_or_nil(Integer, random_seed: random_seed)
+        check_params_numeric_or_nil(random_seed: random_seed)
         @params = {}
-        @params[:reg_param] = reg_param
-        @params[:epsilon] = epsilon
+        @params[:reg_param] = reg_param.to_f
+        @params[:epsilon] = epsilon.to_f
         @params[:kernel] = kernel
-        @params[:degree] = degree
-        @params[:gamma] = gamma
-        @params[:coef0] = coef0
+        @params[:degree] = degree.to_i
+        @params[:gamma] = gamma.to_f
+        @params[:coef0] = coef0.to_f
         @params[:shrinking] = shrinking
-        @params[:cache_size] = cache_size
-        @params[:tol] = tol
+        @params[:cache_size] = cache_size.to_f
+        @params[:tol] = tol.to_f
         @params[:verbose] = verbose
-        @params[:random_seed] = random_seed
+        @params[:random_seed] = random_seed.nil? ? nil : random_seed.to_i
         @model = nil
       end

       # Fit the model with given training data.
       #
       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
       #   If the kernel is 'precomputed', x must be a square distance matrix (shape: [n_samples, n_samples]).
       # @param y [Numo::DFloat] (shape: [n_samples]) The target values to be used for fitting the model.
       # @return [SVR] The learned regressor itself.
       def fit(x, y)
-        check_sample_array(x)
-        check_tvalue_array(y)
+        x = check_convert_sample_array(x)
+        y = check_convert_tvalue_array(y)
         check_sample_tvalue_size(x, y)
         xx = precomputed_kernel? ? add_index_col(x) : x
         @model = Numo::Libsvm.train(xx, y, libsvm_params)
         self
       end
@@ -69,10 +70,10 @@
       #
       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
       #   If the kernel is 'precomputed', the shape of x must be [n_samples, n_training_samples].
       # @return [Numo::DFloat] (shape: [n_samples]) Predicted value per sample.
       def predict(x)
-        check_sample_array(x)
+        x = check_convert_sample_array(x)
         xx = precomputed_kernel? ? add_index_col(x) : x
         Numo::Libsvm.predict(xx, libsvm_params, @model)
       end

       # Dump marshal data.
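
A minimal usage sketch of the effect of this change (assuming rumale-svm 0.2.0 and numo-narray are installed and loaded; the data, hyperparameter values, and variable names below are illustrative, not taken from the gem): with the check_params_numeric validation and the .to_f/.to_i coercion shown above, Integer hyperparameter values such as reg_param: 1 are accepted and stored as Floats, whereas the same arguments would have been rejected by the 0.1.0 check_params_float call.

require 'numo/narray'
require 'rumale/svm'

# Illustrative regression data: 20 samples, 2 features, continuous targets.
x = Numo::DFloat.new(20, 2).rand
y = x.sum(axis: 1)

# Integer values for reg_param, gamma, and random_seed pass check_params_numeric
# in 0.2.0 and are coerced (reg_param.to_f, gamma.to_f, random_seed.to_i) before
# being stored in @params.
svr = Rumale::SVM::SVR.new(reg_param: 1, epsilon: 0.1, kernel: 'rbf', gamma: 1, random_seed: 1)

# fit and predict now reassign their inputs via check_convert_sample_array and
# check_convert_tvalue_array, so the converted arrays are what reach Numo::Libsvm.
svr.fit(x, y)
predicted = svr.predict(x)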