%% Comprehensive Model Comparison for Car Evaluation Dataset
% Comparing BDT, K-SVCR, TKSVC, LSK-SVCR, KWMSVM, RSSVM, SRSSVM, and Proposed
clear; clc; close all;

% BUG FIX: in a MATLAB script, executable statements must appear BEFORE all
% local function definitions, so the entry-point call lives here instead of
% at the end of the file (where it was a parse error).
main();

%% Main Execution Function
function main()
    % Drives the full experiment: load data, analyze imbalance and feature
    % correlations, run a single stratified split, cross-validate all
    % models, and report statistics and feature importance.
    fprintf('=== Comprehensive Model Comparison ===\n');
    fprintf('Dataset: Car Evaluation\n\n');

    [X, y, feature_names, class_names] = load_car_data();

    % Basic dataset summary
    fprintf('Dataset Information:\n');
    fprintf(' Samples: %d\n', size(X, 1));
    fprintf(' Features: %d\n', size(X, 2));
    fprintf(' Classes: %d\n', length(unique(y)));
    fprintf(' Class distribution:\n');
    for i = 1:length(class_names)
        fprintf(' %s: %d samples (%.2f%%)\n', class_names{i}, ...
            sum(y == i), sum(y == i) / length(y) * 100);
    end
    fprintf('\n');

    fprintf('Class Imbalance Analysis:\n');
    analyze_class_imbalance(y, class_names);

    fprintf('\nFeature Correlation Analysis:\n');
    % class_names is forwarded so the boxplot labels use real class names
    % (the original referenced an undefined variable inside the helper).
    analyze_feature_correlations(X, y, feature_names, class_names);

    fprintf('\n1. Train-Test Split Evaluation:\n');
    fprintf('==================================================\n');
    results = comprehensive_comparison(X, y);

    fprintf('\n2. Cross-Validation Evaluation:\n');
    fprintf('==================================================\n');
    cv_results = cross_validation_comparison(X, y);

    display_final_summary(results, cv_results);

    fprintf('\n3. Statistical Significance Analysis:\n');
    fprintf('==================================================\n');
    statistical_analysis(cv_results);

    fprintf('\n4. Feature Importance Analysis:\n');
    fprintf('==================================================\n');
    analyze_feature_importance_car(X, y, feature_names);
end

%% Load Car Evaluation Dataset from Excel
function [X, y, feature_names, class_names] = load_car_data()
    % Loads Car.xlsx, identifies the label column, encodes categorical
    % features and labels as numeric codes, and z-scores the features.
    % Falls back to synthetic data when the file cannot be read.
    try
        fprintf('Loading Car.xlsx...\n');
        data = readtable('Car.xlsx');

        fprintf('Table size: %d rows x %d columns\n', size(data, 1), size(data, 2));
        fprintf('Variable names:\n');
        disp(data.Properties.VariableNames);

        fprintf('Data types (first 5 columns):\n');
        for i = 1:min(5, size(data, 2))
            fprintf(' Column %d (%s): %s\n', i, ...
                data.Properties.VariableNames{i}, class(data{1, i}));
        end

        % Locate the label column by common names; default to the last one.
        label_columns = {'acceptability', 'class', 'evaluation', 'target', 'label', 'decision'};
        found_label = false;
        feature_mask = true(1, width(data));
        for i = 1:length(label_columns)
            if any(strcmpi(data.Properties.VariableNames, label_columns{i}))
                label_col = label_columns{i};
                feature_mask = ~strcmpi(data.Properties.VariableNames, label_col);
                y_raw = data{:, label_col};
                found_label = true;
                fprintf('Using label column: %s\n', label_col);
                break;
            end
        end
        if ~found_label
            feature_mask(end) = false;
            y_raw = data{:, end};
            fprintf('Using last column as labels\n');
        end

        % BUG FIX: the original called table2array() on the raw table, which
        % errors for string/categorical columns (the Car dataset is entirely
        % categorical), and its post-hoc iscell(X(:,i)) check was dead code.
        % Encode each feature column numerically instead.
        feature_table = data(:, feature_mask);
        X = zeros(height(feature_table), width(feature_table));
        for i = 1:width(feature_table)
            col = feature_table{:, i};
            if iscell(col) || iscategorical(col) || isstring(col)
                [~, ~, X(:, i)] = unique(col);  % ordinal integer codes
            else
                X(:, i) = col;
            end
        end

        % Convert labels to numeric codes if categorical/string.
        if iscell(y_raw) || isstring(y_raw) || iscategorical(y_raw)
            [y, class_names] = grp2idx(y_raw);
            fprintf('Converted categorical labels to numeric:\n');
            for i = 1:length(class_names)
                fprintf(' %s -> %d\n', class_names{i}, i);
            end
        else
            y = y_raw;
            unique_classes = unique(y);
            class_names = arrayfun(@num2str, unique_classes, 'UniformOutput', false);
            fprintf('Numeric labels detected. Unique classes: ');
            fprintf('%d ', unique_classes);
            fprintf('\n');
        end

        % BUG FIX: feature names are the non-label columns (the original
        % always took 1:end-1, wrong when the label is not the last column).
        feature_names = data.Properties.VariableNames(feature_mask);

        % Substitute domain-specific names when the sheet uses generic ones.
        if all(startsWith(feature_names, {'Var', 'x', 'Feature', 'att'}))
            feature_names = {'buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety'};
        end
        if length(class_names) == 4
            class_names = {'unacc', 'acc', 'good', 'vgood'};
        end

        fprintf('Car evaluation dataset loaded successfully:\n');
        fprintf(' Features: %d\n', size(X, 2));
        fprintf(' Samples: %d\n', size(X, 1));
        fprintf(' Classes: %d\n', length(unique(y)));
    catch ME
        fprintf('Error loading Car.xlsx: %s\n', ME.message);
        fprintf('Generating synthetic car evaluation-like data...\n');
        [X, y, feature_names, class_names] = generate_synthetic_car_data();
    end

    % Drop rows containing NaN in either features or labels.
    nan_mask = any(isnan(X), 2) | isnan(y);
    if any(nan_mask)
        fprintf('Removing %d samples with NaN values\n', sum(nan_mask));
        X = X(~nan_mask, :);
        y = y(~nan_mask);
    end

    % Standardize features to zero mean / unit variance.
    X = zscore(X);
end

%% Generate Synthetic Car Evaluation-like Data
function [X, y, feature_names, class_names] = generate_synthetic_car_data()
    % Fallback generator mimicking the UCI Car Evaluation dataset:
    % 1728 samples, 6 ordinal features in [1,4], 4 classes with the
    % original class skew. rng(42) makes the data reproducible.
    rng(42);
    n_samples = 1728;
    n_features = 6;
    n_classes = 4;
    X = zeros(n_samples, n_features);

    % Class priors from the original dataset: unacc, acc, good, vgood.
    class_distribution = [0.700, 0.222, 0.040, 0.038];
    samples_per_class = round(n_samples * class_distribution);
    % BUG FIX: round() made the counts sum to 1729 ~= n_samples, silently
    % growing the preallocated X; the last class absorbs the difference.
    samples_per_class(end) = n_samples - sum(samples_per_class(1:end-1));

    % Per-class feature means (rows: unacc, acc, good, vgood; columns:
    % buying, maint, doors, persons, lug_boot, safety).
    class_means = [
        3.2, 3.1, 2.5, 1.8, 2.2, 1.5;   % unacc: low safety, high maintenance, low capacity
        2.1, 2.2, 2.8, 2.5, 2.5, 2.2;   % acc: medium across the board
        1.5, 1.6, 3.2, 3.2, 2.8, 2.8;   % good: good safety, low maintenance
        1.2, 1.3, 3.5, 3.8, 3.2, 3.5    % vgood: excellent safety and capacity
    ];

    y = [];
    start_idx = 1;
    for class_idx = 1:n_classes
        n_class_samples = samples_per_class(class_idx);
        class_data = zeros(n_class_samples, n_features);
        for feature_idx = 1:n_features
            mean_val = class_means(class_idx, feature_idx);
            std_val = 0.4;  % moderate variance for categorical-like data
            % Clamp into the ordinal range [1, 4].
            class_data(:, feature_idx) = max(1, min(4, ...
                normrnd(mean_val, std_val, n_class_samples, 1)));
        end
        end_idx = start_idx + n_class_samples - 1;
        X(start_idx:end_idx, :) = class_data;
        y = [y; class_idx * ones(n_class_samples, 1)]; %#ok<AGROW>
        start_idx = end_idx + 1;
    end

    feature_names = {'buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety'};
    class_names = {'unacc', 'acc', 'good', 'vgood'};

    fprintf('Generated synthetic car evaluation data:\n');
    for i = 1:n_classes
        fprintf(' %s: %d samples (%.1f%%)\n', class_names{i}, ...
            samples_per_class(i), samples_per_class(i) / n_samples * 100);
    end
end

%% Class Imbalance Analysis
function analyze_class_imbalance(y, class_names)
    % Reports per-class counts and the majority/minority ratio, then plots
    % the distribution (bar + pie) to car_class_distribution.png.
    unique_classes = unique(y);
    n_classes = length(unique_classes);

    class_counts = zeros(n_classes, 1);
    for i = 1:n_classes
        class_counts(i) = sum(y == unique_classes(i));
    end

    max_count = max(class_counts);
    min_count = min(class_counts);
    imbalance_ratio = max_count / min_count;

    % find(...,1) guards against ties: the original logical indexing
    % class_names{class_counts == max_count} could expand to several
    % strings and corrupt the fprintf argument list.
    maj_idx = find(class_counts == max_count, 1);
    min_idx = find(class_counts == min_count, 1);

    fprintf(' Total samples: %d\n', length(y));
    fprintf(' Majority class: %s (%d samples)\n', class_names{maj_idx}, max_count);
    fprintf(' Minority class: %s (%d samples)\n', class_names{min_idx}, min_count);
    fprintf(' Imbalance ratio: %.2f:1\n', imbalance_ratio);

    if imbalance_ratio > 10
        fprintf(' WARNING: High class imbalance detected!\n');
    elseif imbalance_ratio > 5
        fprintf(' NOTE: Moderate class imbalance present\n');
    else
        fprintf(' Dataset is relatively balanced\n');
    end

    figure('Position', [100, 100, 800, 400]);
    subplot(1, 2, 1);
    bar(class_counts, 'FaceColor', [0.3, 0.6, 0.9]);
    set(gca, 'XTickLabel', class_names, 'XTickLabelRotation', 45);
    ylabel('Number of Samples');
    title('Class Distribution - Car', 'FontSize', 12, 'FontWeight', 'bold');
    grid on;

    subplot(1, 2, 2);
    pie(class_counts, class_names);
    title('Class Proportion - Car', 'FontSize', 12, 'FontWeight', 'bold');

    sgtitle('Car Evaluation Dataset Class Distribution Analysis', 'FontSize', 14, 'FontWeight', 'bold');
    saveas(gcf, 'car_class_distribution.png');
end

%% Feature Correlation Analysis
function analyze_feature_correlations(X, y, feature_names, class_names)
    % Correlates each feature with the target, plots the feature correlation
    % matrix, and shows the strongest feature's distribution by class.
    % class_names is optional (backward compatible): the original referenced
    % it at the boxplot call without ever receiving it — an undefined-
    % variable error.
    unique_classes = unique(y);
    if nargin < 4
        class_names = arrayfun(@num2str, unique_classes, 'UniformOutput', false);
    end

    fprintf(' Calculating feature correlations with target...\n');
    correlations = zeros(length(feature_names), 1);
    for i = 1:length(feature_names)
        correlations(i) = abs(corr(X(:, i), y));
    end

    [sorted_corr, corr_idx] = sort(correlations, 'descend');
    fprintf(' Top correlated features:\n');
    for i = 1:min(5, length(feature_names))
        fprintf(' %s: %.4f\n', feature_names{corr_idx(i)}, sorted_corr(i));
    end

    figure('Position', [200, 200, 1000, 600]);

    % Feature-target correlations
    subplot(2, 2, 1);
    barh(sorted_corr, 'FaceColor', [0.2, 0.7, 0.5]);
    set(gca, 'YTickLabel', feature_names(corr_idx));
    xlabel('Absolute Correlation with Target');
    title('Feature-Target Correlation - Car', 'FontSize', 12, 'FontWeight', 'bold');
    grid on;

    % Feature correlation matrix
    subplot(2, 2, 2);
    feature_corr = corr(X);
    imagesc(feature_corr);
    colorbar;
    set(gca, 'XTick', 1:length(feature_names), 'XTickLabel', feature_names, ...
        'YTick', 1:length(feature_names), 'YTickLabel', feature_names);
    xtickangle(45);
    title('Feature Correlation Matrix - Car', 'FontSize', 12, 'FontWeight', 'bold');

    % Distribution of the most target-correlated feature, by class
    subplot(2, 2, [3, 4]);
    top_feature_idx = corr_idx(1);
    box_data = [];
    group_data = [];
    for i = 1:length(unique_classes)
        class_data = X(y == unique_classes(i), top_feature_idx);
        box_data = [box_data; class_data]; %#ok<AGROW>
        group_data = [group_data; i * ones(length(class_data), 1)]; %#ok<AGROW>
    end
    boxplot(box_data, group_data, 'Labels', class_names);
    ylabel(feature_names{top_feature_idx});
    xlabel('Car Acceptability Class');
    title(sprintf('Distribution of Top Feature (%s) by Class', feature_names{top_feature_idx}), ...
        'FontSize', 12, 'FontWeight', 'bold');
    grid on;

    sgtitle('Car Evaluation Feature Analysis', 'FontSize', 14, 'FontWeight', 'bold');
    saveas(gcf, 'car_feature_correlations.png');
end

%% Model Factory
function model = make_model(name)
    % Single place mapping a model name to a configured instance, shared by
    % the single-split evaluation and the cross-validation loop (the
    % original duplicated these constructor calls in both places).
    switch name
        case 'BDT'
            model = BDT('MaxDepth', 10, 'MinLeafSize', 5);
        case 'K-SVCR'
            model = KSVCR('C', 1.0, 'epsilon', 0.1);
        case 'TKSVC'
            model = TKSVC('C1', 1.0, 'C2', 1.0, 'epsilon', 0.1);
        case 'LSK-SVCR'
            model = LSK_SVCR('C', 1.0, 'gamma', 0.1);
        case 'KWMSVM'
            model = KWMSVM('C', 1.0, 'gamma', 0.1);
        case 'RSSVM'
            model = RSSVM('C', 1.0, 'gamma', 0.05);
        case 'SRSSVM'
            model = SRSSVM('C', 1.0, 'gamma', 0.05, 'delta', 0.5, 'epsilon', 0.1);
        case 'Proposed'
            model = ProposedModel('gamma', 1.0, 'r', 1.0, 'epsilon', 0.1);
        otherwise
            error('Unknown model name: %s', name);
    end
end

%% Comprehensive Comparison Function for Car Evaluation
function results = comprehensive_comparison(X, y)
    % Evaluates all eight models on a single stratified 70/30 split and
    % plots the comparison. A model that fails to train scores zeros
    % instead of aborting the run.
    [X_train, X_test, y_train, y_test] = train_test_split_stratified(X, y, 0.3);

    model_names = {'BDT', 'K-SVCR', 'TKSVC', 'LSK-SVCR', 'KWMSVM', 'RSSVM', 'SRSSVM', 'Proposed'};
    models = struct();
    for i = 1:length(model_names)
        models(i).name = model_names{i};
        % Deferred construction: evaluate_model_imbalanced invokes the handle.
        models(i).model = @() make_model(model_names{i});
    end

    results = struct();
    for i = 1:length(models)
        fprintf('\nEvaluating %s...\n', models(i).name);
        try
            [accuracy, train_time, test_time, additional_metrics] = evaluate_model_imbalanced(...
                models(i).model, X_train, X_test, y_train, y_test, models(i).name);
            results(i).name = models(i).name;
            results(i).accuracy = accuracy;
            results(i).train_time = train_time;
            results(i).test_time = test_time;
            results(i).additional_metrics = additional_metrics;
        catch ME
            fprintf('Error evaluating %s: %s\n', models(i).name, ME.message);
            results(i).name = models(i).name;
            results(i).accuracy = 0;
            results(i).train_time = 0;
            results(i).test_time = 0;
            results(i).additional_metrics = struct('precision', 0, 'recall', 0, 'f1_score', 0, 'gmean', 0);
        end
    end

    plot_car_comparison(results);
end

%% Enhanced Plotting for Car Evaluation Results
function plot_car_comparison(results)
    % Six-panel summary figure: sorted accuracy, F1/G-mean, training time,
    % performance-vs-time scatter, radar plot, and a text table. Saved to
    % car_comprehensive_comparison.png.
    figure('Position', [100, 100, 1500, 1000]);

    model_names = {results.name};
    accuracies = [results.accuracy];
    train_times = [results.train_time];
    precisions = [results.additional_metrics.precision];
    recalls = [results.additional_metrics.recall];
    f1_scores = [results.additional_metrics.f1_score];
    gmeans = [results.additional_metrics.gmean];

    colors = lines(length(model_names));

    % 1. Accuracy comparison (sorted, best first)
    subplot(2, 3, 1);
    [sorted_acc, idx] = sort(accuracies, 'descend');
    bars = bar(sorted_acc, 'FaceColor', 'flat');
    % BUG FIX: bar() on a vector returns ONE Bar object; per-bar colors go
    % in its n-by-3 CData. The original looped over length(bars)==1 and
    % only recolored the first bar.
    bars.CData = colors(idx, :);
    set(gca, 'XTickLabel', model_names(idx), 'XTickLabelRotation', 45);
    title('Accuracy Comparison - Car', 'FontSize', 12, 'FontWeight', 'bold');
    ylabel('Accuracy');
    ylim([0, 1]);
    grid on;
    for i = 1:length(sorted_acc)
        text(i, sorted_acc(i) + 0.02, sprintf('%.4f', sorted_acc(i)), ...
            'HorizontalAlignment', 'center', 'FontWeight', 'bold', 'FontSize', 8);
    end

    % 2. F1-Score and G-Mean comparison
    subplot(2, 3, 2);
    metrics_matrix = [f1_scores; gmeans]';
    bar(metrics_matrix);
    set(gca, 'XTickLabel', model_names, 'XTickLabelRotation', 45);
    ylabel('Score');
    ylim([0, 1]);
    title('F1-Score & G-Mean - Car', 'FontSize', 12, 'FontWeight', 'bold');
    legend('F1-Score', 'G-Mean', 'Location', 'southoutside', 'Orientation', 'horizontal');
    grid on;

    % 3. Training time comparison
    subplot(2, 3, 3);
    bars = bar(train_times, 'FaceColor', 'flat');
    bars.CData = colors;  % same single-Bar-object fix as panel 1
    set(gca, 'XTickLabel', model_names, 'XTickLabelRotation', 45);
    title('Training Time - Car', 'FontSize', 12, 'FontWeight', 'bold');
    ylabel('Time (seconds)');
    grid on;

    % 4. Performance vs training time
    subplot(2, 3, 4);
    scatter(train_times, f1_scores, 150, 1:length(model_names), 'filled', 's');
    hold on;
    scatter(train_times, gmeans, 150, 1:length(model_names), 'filled', 'd');
    for i = 1:length(model_names)
        text(train_times(i), f1_scores(i), model_names{i}, ...
            'HorizontalAlignment', 'center', 'VerticalAlignment', 'bottom', ...
            'FontSize', 8, 'FontWeight', 'bold');
    end
    xlabel('Training Time (s)');
    ylabel('Score');
    title('Performance vs Training Time - Car', 'FontSize', 12, 'FontWeight', 'bold');
    legend('F1-Score', 'G-Mean', 'Location', 'best');
    grid on;

    % 5. Detailed metrics radar plot
    subplot(2, 3, 5);
    metrics_radar = [accuracies; precisions; recalls; f1_scores; gmeans];
    radar_plot_car(metrics_radar, model_names, ...
        {'Accuracy', 'Precision', 'Recall', 'F1-Score', 'G-Mean'});
    title('Performance Metrics Radar - Car', 'FontSize', 12, 'FontWeight', 'bold');

    % 6. Text summary table
    subplot(2, 3, 6);
    axis off;
    summary_text = sprintf('CAR EVALUATION RESULTS\n\n');
    for i = 1:length(results)
        summary_text = sprintf('%s%s:\n', summary_text, results(i).name);
        summary_text = sprintf('%s Acc: %.4f F1: %.4f\n', summary_text, ...
            results(i).accuracy, results(i).additional_metrics.f1_score);
        summary_text = sprintf('%s G-M: %.4f Prec: %.4f\n', summary_text, ...
            results(i).additional_metrics.gmean, results(i).additional_metrics.precision);
        summary_text = sprintf('%s Rec: %.4f Time: %.2fs\n\n', summary_text, ...
            results(i).additional_metrics.recall, results(i).train_time);
    end
    text(0.05, 0.95, summary_text, 'VerticalAlignment', 'top', ...
        'FontSize', 7, 'FontName', 'FixedWidth', 'FontWeight', 'bold');

    sgtitle('Comprehensive Model Comparison on Car Evaluation Dataset', ...
        'FontSize', 14, 'FontWeight', 'bold');
    saveas(gcf, 'car_comprehensive_comparison.png');
end

%% Radar Plot Function for Car Evaluation
function radar_plot_car(data, model_names, metric_names)
    % Radar/spider plot of data (metrics x models); each metric row is
    % normalized by its maximum across models so all axes share [0, 1].
    denom = max(data, [], 2);
    denom(denom == 0) = 1;  % avoid 0/0 -> NaN when a metric is all zeros
    normalized_data = data ./ denom;
    n_metrics = size(data, 1);
    n_models = size(data, 2);

    angles = linspace(0, 2*pi, n_metrics + 1);

    % BUG FIX: a bare polaraxes() call ignores the enclosing subplot layout;
    % recreate the polar axes at the current subplot's position instead.
    pos = get(gca, 'Position');
    delete(gca);
    polaraxes('Position', pos);
    hold on;

    % Close each polygon by repeating the first metric value.
    for i = 1:n_models
        polarplot(angles, [normalized_data(:, i); normalized_data(1, i)], ...
            'LineWidth', 2, 'DisplayName', model_names{i});
    end

    thetaticks(angles(1:end-1) * 180 / pi);
    thetaticklabels(metric_names);
    legend('Location', 'southoutside', 'NumColumns', 2, 'FontSize', 8);
    rlim([0, 1]);
    rticks(0:0.2:1);
end

%% Feature Importance Analysis for Car Evaluation
function analyze_feature_importance_car(X, y, feature_names)
    % Trains the proposed model on the full dataset and ranks features by
    % mean |alpha| per row.
    % NOTE(review): whether model.alpha rows correspond to features (rather
    % than training samples, as is usual for dual SVM coefficients) depends
    % on the ProposedModel implementation, which is not in this file — verify.
    fprintf('Feature Importance Analysis using Proposed Model:\n');

    model = ProposedModel('gamma', 1.0, 'r', 1.0, 'epsilon', 0.1);
    model = model.fit(X, y);

    if ~isempty(model.alpha)
        feature_importance = mean(abs(model.alpha), 2);
        [sorted_importance, top_indices] = sort(feature_importance, 'descend');
        n_top = min(6, length(feature_importance));

        fprintf('\nTop %d Most Important Features for Car Acceptability:\n', n_top);
        fprintf('%-15s %-12s %s\n', 'Feature', 'Importance', 'Description');
        fprintf('%-15s %-12s %s\n', '-------', '----------', '-----------');
        for i = 1:n_top
            idx = top_indices(i);
            if idx <= length(feature_names)
                feature_desc = get_car_feature_description(feature_names{idx});
                fprintf('%-15s %-12.4f %s\n', feature_names{idx}, sorted_importance(i), feature_desc);
            else
                % Importance index beyond the named features (e.g. kernelized
                % representation): report a placeholder name.
                fprintf('%-15s %-12.4f %s\n', sprintf('Feature_%d', idx), sorted_importance(i), 'Synthetic feature');
            end
        end

        figure('Position', [200, 200, 1200, 500]);

        subplot(1, 2, 1);
        barh(sorted_importance(1:n_top), 'FaceColor', [0.2, 0.6, 0.8]);
        set(gca, 'YTickLabel', feature_names(top_indices(1:n_top)));
        xlabel('Importance Score');
        title('Top Feature Importance - Car', 'FontSize', 12, 'FontWeight', 'bold');
        grid on;

        subplot(1, 2, 2);
        correlations = zeros(length(feature_names), 1);
        for i = 1:length(feature_names)
            correlations(i) = abs(corr(X(:, i), y));
        end
        [sorted_corr, corr_idx] = sort(correlations, 'descend');
        barh(sorted_corr(1:n_top), 'FaceColor', [0.8, 0.4, 0.2]);
        set(gca, 'YTickLabel', feature_names(corr_idx(1:n_top)));
        xlabel('Absolute Correlation with Target');
        title('Feature-Target Correlation - Car', 'FontSize', 12, 'FontWeight', 'bold');
        grid on;

        sgtitle('Car Evaluation Feature Analysis', 'FontSize', 14, 'FontWeight', 'bold');
        saveas(gcf, 'car_feature_analysis.png');
    else
        fprintf('Feature importance analysis not available for this model configuration.\n');
    end
end

%% Get Feature Descriptions for Car Evaluation
function desc = get_car_feature_description(feature_name)
    % Maps a Car-evaluation feature name to its human-readable description;
    % unknown names get a generic fallback.
    descriptions = containers.Map();
    descriptions('buying') = 'Car buying price (vhigh, high, med, low)';
    descriptions('maint') = 'Maintenance price (vhigh, high, med, low)';
    descriptions('doors') = 'Number of doors (2, 3, 4, 5more)';
    descriptions('persons') = 'Person capacity (2, 4, more)';
    descriptions('lug_boot') = 'Luggage boot size (small, med, big)';
    descriptions('safety') = 'Estimated safety (low, med, high)';
    if isKey(descriptions, feature_name)
        desc = descriptions(feature_name);
    else
        desc = 'Car attribute feature';
    end
end

%% Cross-Validation for Car Evaluation
function cv_results = cross_validation_comparison(X, y)
    % Stratified 5-fold CV over all models; reports mean/std accuracy plus
    % mean F1 and G-mean per model and keeps per-fold scores for the
    % statistical tests.
    k = 5;
    % cvpartition (Statistics Toolbox, already required by zscore/grp2idx/
    % corr elsewhere in this file) replaces crossvalind, which needs the
    % separate Bioinformatics Toolbox. 'KFold' with a grouping vector
    % stratifies by class.
    cvp = cvpartition(y, 'KFold', k);

    models = {'BDT', 'K-SVCR', 'TKSVC', 'LSK-SVCR', 'KWMSVM', 'RSSVM', 'SRSSVM', 'Proposed'};
    cv_results = struct();

    for m = 1:length(models)
        accuracies = zeros(k, 1);
        f1_scores = zeros(k, 1);
        gmeans = zeros(k, 1);

        for i = 1:k
            train_mask = training(cvp, i);
            test_mask = test(cvp, i);
            X_train = X(train_mask, :);
            X_test = X(test_mask, :);
            y_train = y(train_mask);
            y_test = y(test_mask);

            try
                model = make_model(models{m});
                model = model.fit(X_train, y_train);
                y_pred = model.predict(X_test);

                accuracies(i) = sum(y_pred == y_test) / length(y_test);
                metrics = calculate_imbalanced_metrics(y_test, y_pred);
                f1_scores(i) = metrics.f1_score;
                gmeans(i) = metrics.gmean;
            catch
                % A fold that fails scores zero rather than aborting the CV.
                accuracies(i) = 0;
                f1_scores(i) = 0;
                gmeans(i) = 0;
            end
        end

        cv_results(m).name = models{m};
        cv_results(m).mean_accuracy = mean(accuracies);
        cv_results(m).std_accuracy = std(accuracies);
        cv_results(m).mean_f1 = mean(f1_scores);
        cv_results(m).mean_gmean = mean(gmeans);
        cv_results(m).all_scores = accuracies;
        cv_results(m).all_f1 = f1_scores;

        fprintf('%s CV - Acc: %.4f (+/- %.4f), F1: %.4f, G-Mean: %.4f\n', ...
            models{m}, mean(accuracies), std(accuracies), mean(f1_scores), mean(gmeans));
    end
end

%% Statistical Analysis
function statistical_analysis(cv_results)
    % Pairwise two-sample t-tests over the per-fold CV accuracies; reports
    % which model pairs differ at alpha = 0.05.
    % NOTE(review): all models share the same folds, so a paired ttest()
    % would be statistically tighter than ttest2 — kept as in the original.
    fprintf('Statistical Significance Analysis (Pairwise t-tests):\n');
    fprintf('----------------------------------------------------\n');

    n_models = length(cv_results);
    p_values = zeros(n_models, n_models);

    for i = 1:n_models
        for j = 1:n_models
            if i ~= j
                [~, p] = ttest2(cv_results(i).all_scores, cv_results(j).all_scores);
                p_values(i, j) = p;
            else
                p_values(i, j) = 1;  % a model vs itself: never significant
            end
        end
    end

    significance_level = 0.05;
    significant_pairs = {};
    for i = 1:n_models
        for j = i+1:n_models
            if p_values(i, j) < significance_level
                mean_i = cv_results(i).mean_accuracy;
                mean_j = cv_results(j).mean_accuracy;
                if mean_i > mean_j
                    significant_pairs{end+1} = sprintf('%s > %s (p=%.4f)', ...
                        cv_results(i).name, cv_results(j).name, p_values(i, j)); %#ok<AGROW>
                else
                    significant_pairs{end+1} = sprintf('%s < %s (p=%.4f)', ...
                        cv_results(i).name, cv_results(j).name, p_values(i, j)); %#ok<AGROW>
                end
            end
        end
    end

    if ~isempty(significant_pairs)
        fprintf('Significant differences found:\n');
        for i = 1:length(significant_pairs)
            fprintf(' %s\n', significant_pairs{i});
        end
    else
        fprintf('No significant differences found at alpha=0.05\n');
    end
end

%% Display Final Summary
function display_final_summary(results, cv_results)
    % Prints the best model per metric, then a per-model recap combining
    % single-split and cross-validation numbers with timings.
    fprintf('\nFINAL SUMMARY - CAR EVALUATION DATASET\n');
    fprintf('==================================================\n');

    [best_acc, best_acc_idx] = max([results.accuracy]);
    [best_f1, best_f1_idx] = max([results.additional_metrics.f1_score]);
    [best_gmean, best_gmean_idx] = max([results.additional_metrics.gmean]);

    fprintf('Best Models:\n');
    fprintf(' Accuracy: %s (%.4f)\n', results(best_acc_idx).name, best_acc);
    fprintf(' F1-Score: %s (%.4f)\n', results(best_f1_idx).name, best_f1);
    fprintf(' G-Mean: %s (%.4f)\n', results(best_gmean_idx).name, best_gmean);
    fprintf('\n');

    for i = 1:length(results)
        fprintf('%s:\n', results(i).name);
        fprintf(' Single Split - Accuracy: %.4f, F1: %.4f, G-Mean: %.4f\n', ...
            results(i).accuracy, results(i).additional_metrics.f1_score, ...
            results(i).additional_metrics.gmean);
        % Match by name: CV results may be ordered differently.
        cv_idx = find(strcmp({cv_results.name}, results(i).name));
        if ~isempty(cv_idx)
            fprintf(' Cross-Validation - Accuracy: %.4f (+/- %.4f), F1: %.4f\n', ...
                cv_results(cv_idx).mean_accuracy, cv_results(cv_idx).std_accuracy, ...
                cv_results(cv_idx).mean_f1);
        end
        fprintf(' Training Time: %.4f s, Prediction Time: %.4f s\n', ...
            results(i).train_time, results(i).test_time);
        fprintf('\n');
    end
end

%% Stratified Train/Test Split
function [X_train, X_test, y_train, y_test] = train_test_split_stratified(X, y, test_size)
    % Splits each class independently so the test set preserves the class
    % proportions; rng(42) makes the split reproducible across runs.
    rng(42);
    unique_classes = unique(y);
    train_indices = [];
    test_indices = [];
    for i = 1:length(unique_classes)
        class_idx = find(y == unique_classes(i));
        n_class = length(class_idx);
        n_test_class = round(test_size * n_class);
        class_idx = class_idx(randperm(n_class));
        test_indices = [test_indices; class_idx(1:n_test_class)]; %#ok<AGROW>
        train_indices = [train_indices; class_idx(n_test_class+1:end)]; %#ok<AGROW>
    end
    X_train = X(train_indices, :);
    X_test = X(test_indices, :);
    y_train = y(train_indices);
    y_test = y(test_indices);
end

% [Include all model implementations: BDT, KSVCR, TKSVC, LSK_SVCR, KWMSVM, RSSVM, SRSSVM, ProposedModel]
% [Include calculate_imbalanced_metrics and evaluate_model_imbalanced functions]
% NOTE: the main() entry-point call was moved to the top of this file — in a
% MATLAB script, executable statements must precede all local functions.
% --------------------------------------------------------------------------
% NOTE(review): the following lines are NOT MATLAB code. A Yii2/PHP server
% error page was appended to this file when it was downloaded (the file
% manager crashed mid-response). Preserved below, commented out so it cannot
% be mistaken for source code; re-download Car-comparison script if possible.
% --------------------------------------------------------------------------
% An Error occurred while handling another error:
% yii\web\HeadersAlreadySentException: Headers already sent in  on line 0. in /var/www/html/prof-homepages/vendor/yiisoft/yii2/web/Response.php:366
% Stack trace:
% #0 /var/www/html/prof-homepages/vendor/yiisoft/yii2/web/Response.php(339): yii\web\Response->sendHeaders()
% #1 /var/www/html/prof-homepages/vendor/yiisoft/yii2/web/ErrorHandler.php(136): yii\web\Response->send()
% #2 /var/www/html/prof-homepages/vendor/yiisoft/yii2/base/ErrorHandler.php(135): yii\web\ErrorHandler->renderException()
% #3 [internal function]: yii\base\ErrorHandler->handleException()
% #4 {main}
% Previous exception:
% yii\web\HeadersAlreadySentException: Headers already sent in  on line 0. in /var/www/html/prof-homepages/vendor/yiisoft/yii2/web/Response.php:366
% Stack trace:
% #0 /var/www/html/prof-homepages/vendor/yiisoft/yii2/web/Response.php(339): yii\web\Response->sendHeaders()
% #1 /var/www/html/prof-homepages/vendor/yiisoft/yii2/base/Application.php(656): yii\web\Response->send()
% #2 /var/www/html/prof-homepages/vendor/faravaghi/yii2-filemanager/models/Files.php(696): yii\base\Application->end()
% #3 /var/www/html/prof-homepages/vendor/faravaghi/yii2-filemanager/controllers/FilesController.php(484): faravaghi\filemanager\models\Files->getFile()
% #4 [internal function]: faravaghi\filemanager\controllers\FilesController->actionGetFile()
% #5 /var/www/html/prof-homepages/vendor/yiisoft/yii2/base/InlineAction.php(57): call_user_func_array()
% #6 /var/www/html/prof-homepages/vendor/yiisoft/yii2/base/Controller.php(180): yii\base\InlineAction->runWithParams()
% #7 /var/www/html/prof-homepages/vendor/yiisoft/yii2/base/Module.php(528): yii\base\Controller->runAction()
% #8 /var/www/html/prof-homepages/vendor/yiisoft/yii2/web/Application.php(103): yii\base\Module->runAction()
% #9 /var/www/html/prof-homepages/vendor/yiisoft/yii2/base/Application.php(386): yii\web\Application->handleRequest()
% #10 /var/www/html/prof-homepages/backend/web/index.php(16): yii\base\Application->run()
% #11 {main}