From dc2dc753a245f87a61e8e2f48a1ce3d43f92e6d3 Mon Sep 17 00:00:00 2001
From: Matt Blenkinsop
Date: Wed, 21 Aug 2024 12:42:55 +0000
Subject: [PATCH] Bug 36831: Move logic from the background job to the Title object class

This patch moves logic out of the background job file and into the
Koha::ERM::EHoldings::Title class, where it belongs. Methods are also
renamed to differentiate between public object methods and internal
helper methods.
---
 Koha/BackgroundJob/ImportKBARTFile.pm      | 166 ++-------------------
 Koha/ERM/EHoldings/Title.pm                | 145 ++++++++++++++++++
 Koha/REST/V1/ERM/EHoldings/Titles/Local.pm |  19 +--
 3 files changed, 168 insertions(+), 162 deletions(-)
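For context, a minimal sketch of how a caller is expected to use the relocated helpers after this patch. The filename and file content below are invented test data, not part of the patch:

    use MIME::Base64 qw( encode_base64 );
    use Koha::ERM::EHoldings::Title;

    # Hypothetical two-row KBART II upload, base64-encoded as the REST API expects
    my $file = {
        filename     => 'holdings.tsv',
        file_content => encode_base64("publication_title\ttitle_id\nSome Journal\tabc123\n"),
    };

    # read_file now returns an error string as an optional third value,
    # instead of inferring the format from the file extension
    my ( $column_headers, $rows, $error ) = Koha::ERM::EHoldings::Title::read_file($file);
    die "Unable to parse KBART file: $error" if $error;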
"$cde, $str, $pos" : ""; - warn $error if $error; - return ( $column_headers, $lines, $error ); - } - - return ( $column_headers, $lines ); -} - -=head3 create_title_hash_from_line_data +=head3 _create_title_hash_from_line_data Takes a line and creates a hash of the values mapped to the column headings Only accepts fields that are in the list of permitted KBART fields, other fields are ignored @@ -261,7 +221,7 @@ Only accepts fields that are in the list of permitted KBART fields, other fields =cut -sub create_title_hash_from_line_data { +sub _create_title_hash_from_line_data { my ( $row, $column_headers, $invalid_columns ) = @_; my %new_title; @@ -283,13 +243,13 @@ sub create_title_hash_from_line_data { return \%new_title; } -=head3 check_for_matching_title +=head3 _check_for_matching_title Checks whether this title already exists to avoid duplicates =cut -sub check_for_matching_title { +sub _check_for_matching_title { my ( $title, $package_id ) = @_; my $match_parameters = {}; @@ -316,20 +276,20 @@ sub check_for_matching_title { return $matching_title_found; } -=head3 create_linked_resource +=head3 _create_linked_resource Creates a resource for a newly stored title. =cut -sub create_linked_resource { +sub _create_linked_resource { my ($args) = @_; my $title = $args->{title}; my $package_id = $args->{package_id}; my $title_id = $title->title_id; - my ( $date_first_issue_online, $date_last_issue_online ) = get_first_and_last_issue_dates($title); + my ( $date_first_issue_online, $date_last_issue_online ) = _get_first_and_last_issue_dates($title); my $resource = Koha::ERM::EHoldings::Resource->new( { title_id => $title_id, @@ -342,13 +302,13 @@ sub create_linked_resource { return; } -=head3 get_first_and_last_issue_dates +=head3 _get_first_and_last_issue_dates Gets and formats a date for storing on the resource. 
diff --git a/Koha/ERM/EHoldings/Title.pm b/Koha/ERM/EHoldings/Title.pm
index 33c5c289ad8..73e4d53846b 100644
--- a/Koha/ERM/EHoldings/Title.pm
+++ b/Koha/ERM/EHoldings/Title.pm
@@ -17,6 +17,9 @@ package Koha::ERM::EHoldings::Title;
 
 use Modern::Perl;
 
+use POSIX qw( floor );
+use MIME::Base64 qw( decode_base64 );
+
 use Koha::Database;
 
 use base qw(Koha::Object);
@@ -100,6 +103,148 @@ sub resources {
     return Koha::ERM::EHoldings::Resources->_new_from_dbic($resources_rs);
 }
 
+
+=head3 read_file
+
+Reads a KBART file of titles to provide report headers and lines to be processed.
+Automatically detects whether the file is TSV or CSV based on the first 5 lines.
+
+=cut
+
+sub read_file {
+    my ($file) = @_;
+
+    my $file_content = defined( $file->{file_content} ) ? decode_base64( $file->{file_content} ) : "";
+    my ( $delimiter, $quote_char ) = _detect_delimiter_and_quote($file_content);
+
+    return ( undef, undef, "unknown_delimiter" ) unless $delimiter;
+
+    open my $fh, "<", \$file_content or die "Could not open file $file->{filename}: $!";
+    my $csv = Text::CSV_XS->new(
+        {
+            sep_char           => $delimiter,
+            quote_char         => $quote_char,
+            binary             => 1,
+            allow_loose_quotes => 1
+        }
+    );
+
+    my $headers_to_check = $csv->getline($fh);
+    my $column_headers   = _rescue_EBSCO_files($headers_to_check);
+    my $lines            = $csv->getline_all( $fh, 0 );
+    close($fh);
+
+    unless ( $csv->eof() ) {
+        my ( $cde, $str, $pos ) = $csv->error_diag();
+        my $error = $cde ? "$cde, $str, $pos" : "";
+        warn $error if $error;
+        return ( $column_headers, $lines, $error );
+    }
+
+    return ( $column_headers, $lines );
+}
+
+
+=head3 get_valid_headers
+
+Returns a list of permitted headers in a KBART phase II file
+
+=cut
+
+sub get_valid_headers {
+    return (
+        'publication_title',
+        'print_identifier',
+        'online_identifier',
+        'date_first_issue_online',
+        'num_first_vol_online',
+        'num_first_issue_online',
+        'date_last_issue_online',
+        'num_last_vol_online',
+        'num_last_issue_online',
+        'title_url',
+        'first_author',
+        'title_id',
+        'embargo_info',
+        'coverage_depth',
+        'coverage_notes',
+        'publisher_name',
+        'publication_type',
+        'date_monograph_published_print',
+        'date_monograph_published_online',
+        'monograph_volume',
+        'monograph_edition',
+        'first_editor',
+        'parent_publication_title_id',
+        'preceding_publication_title_id',
+        'access_type',
+        'notes'
+    );
+}
+
+
+=head3 calculate_chunked_params_size
+
+Calculates the average line size to work out how many lines to chunk a large file into.
+Uses only 75% of the max_allowed_packet as an upper limit.
+
+=cut
+
+sub calculate_chunked_params_size {
+    my ( $params_size, $max_allowed_packet, $number_of_rows ) = @_;
+
+    my $average_line_size = $params_size / $number_of_rows;
+    my $lines_possible    = ( $max_allowed_packet * 0.75 ) / $average_line_size;
+    my $rounded_value     = floor($lines_possible);
+    return $rounded_value;
+}
+
+
+=head3 is_file_too_large
+
+Calculates the final size of the background job object that will need storing, to check whether it exceeds the max_allowed_packet.
+
+=cut
+
+sub is_file_too_large {
+    my ( $params_to_store, $max_allowed_packet ) = @_;
+
+    my $json           = JSON->new->utf8(0);
+    my $encoded_params = $json->encode($params_to_store);
+    my $params_size    = length $encoded_params;
+
+    # A lot more than just the params are stored in the background job table and this is difficult to calculate
+    # We should allow for no more than 75% of the max_allowed_packet to be made up of the job params to avoid db conflicts
+    return {
+        file_too_large => 1,
+        params_size    => $params_size
+    } if $params_size > ( $max_allowed_packet * 0.75 );
+
+    return {
+        file_too_large => 0,
+        params_size    => $params_size
+    };
+}
+
+
+=head3 _rescue_EBSCO_files
+
+EBSCO misspells "preceding_publication_title_id" in all of its KBART files ("preceding" is spelled with a double 'e').
+This means all of its KBART files fail to import using the current methodology.
+There is no simple way of identifying the vendor before importing, so KBART files from every vendor have to be checked for this misspelling and corrected.
+
+=cut
+
+sub _rescue_EBSCO_files {
+    my ($column_headers) = @_;
+
+    my ($index) = grep { @$column_headers[$_] eq 'preceeding_publication_title_id' } ( 0 .. @$column_headers - 1 );
+    @$column_headers[$index] = 'preceding_publication_title_id' if defined $index;
+
+    return $column_headers;
+}
+
+
 =head3 _detect_delimiter_and_quote
 
 Identifies the delimiter and the quote character used in the KBART file and returns both.
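The body of _detect_delimiter_and_quote sits outside this hunk's context, so the following is only a plausible sketch of a detector matching the read_file POD (count candidate delimiters over the first five lines, return nothing so callers hit the unknown_delimiter path). The sub name and logic are assumptions, not the patch's actual implementation:

    # Hypothetical sketch; not the real _detect_delimiter_and_quote
    sub _detect_delimiter_and_quote_sketch {
        my ($file_content) = @_;

        # Look at the first five lines only, per the read_file POD
        my @lines = grep { defined && length } ( split /\r?\n/, $file_content )[ 0 .. 4 ];
        return ( undef, undef ) unless @lines;

        my ( $tabs, $commas ) = ( 0, 0 );
        for my $line (@lines) {
            $tabs   += ( $line =~ tr/\t// );
            $commas += ( $line =~ tr/,// );
        }

        # No recognisable delimiter triggers the unknown_delimiter error
        return ( undef, undef ) if !$tabs && !$commas;
        return $tabs >= $commas ? ( "\t", '"' ) : ( ',', '"' );
    }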
diff --git a/Koha/REST/V1/ERM/EHoldings/Titles/Local.pm b/Koha/REST/V1/ERM/EHoldings/Titles/Local.pm
index c45d4d69140..5f67ae3319e 100644
--- a/Koha/REST/V1/ERM/EHoldings/Titles/Local.pm
+++ b/Koha/REST/V1/ERM/EHoldings/Titles/Local.pm
@@ -281,20 +281,17 @@ sub import_from_kbart_file {
     my @invalid_columns;
     my $max_allowed_packet = C4::Context->dbh->selectrow_array(q{SELECT @@max_allowed_packet});
 
-    # Check if file is in TSV or CSV format and send an error back if not
-    if ( $file->{filename} !~ /\.csv$/ && $file->{filename} !~ /\.tsv$/ ) {
-        return $c->render(
-            status  => 201,
-            openapi => { invalid_filetype => 1 }
-        );
-    }
+    my ( $column_headers, $rows, $error ) = Koha::ERM::EHoldings::Title::read_file($file);
 
-    my ( $column_headers, $rows ) = Koha::BackgroundJob::ImportKBARTFile::read_file($file);
+    return $c->render(
+        status  => 201,
+        openapi => { invalid_filetype => 1 }
+    ) if $error && $error eq 'unknown_delimiter';
 
     # Check that the column headers in the file match the standardised KBART phase II columns
     # If not, return a warning
     my $warnings      = {};
-    my @valid_headers = Koha::BackgroundJob::ImportKBARTFile::get_valid_headers();
+    my @valid_headers = Koha::ERM::EHoldings::Title::get_valid_headers();
     foreach my $header (@$column_headers) {
         if ( !grep { $_ eq $header } @valid_headers ) {
             $header = 'Empty column' if $header eq '';
@@ -311,11 +308,11 @@
         file_name            => $file->{filename},
         create_linked_biblio => $create_linked_biblio
     };
-    my $outcome = Koha::BackgroundJob::ImportKBARTFile::is_file_too_large( $params, $max_allowed_packet );
+    my $outcome = Koha::ERM::EHoldings::Title::is_file_too_large( $params, $max_allowed_packet );
 
     # If the file is too large, we can break the file into smaller chunks and enqueue one job per chunk
     if ( $outcome->{file_too_large} ) {
-        my $max_number_of_rows = Koha::BackgroundJob::ImportKBARTFile::calculate_chunked_params_size(
+        my $max_number_of_rows = Koha::ERM::EHoldings::Title::calculate_chunked_params_size(
             $outcome->{params_size}, $max_allowed_packet,
             scalar(@$rows)
         );
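As a worked example of the chunking arithmetic the controller relies on, with sizes invented purely for illustration:

    # Suppose the encoded job params are 24 MB, max_allowed_packet is 16 MB,
    # and the file holds 60,000 rows
    my $params_size        = 24 * 1024 * 1024;
    my $max_allowed_packet = 16 * 1024 * 1024;
    my $number_of_rows     = 60_000;

    # is_file_too_large flags this upload: 24 MB > 16 MB * 0.75 (12 MB).
    # calculate_chunked_params_size then gives the rows per enqueued job:
    #   average line size = 24 MB / 60_000 ≈ 419 bytes
    #   floor( (16 MB * 0.75) / 419 bytes ) = 30_000 rows per chunk
    my $chunk_size = Koha::ERM::EHoldings::Title::calculate_chunked_params_size(
        $params_size, $max_allowed_packet, $number_of_rows
    );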
-- 
2.39.3 (Apple Git-146)