package Koha::BackgroundJob::ImportKBARTFile;

# This file is part of Koha.
#
# Koha is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Koha is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Koha; if not, see <http://www.gnu.org/licenses>.

use Modern::Perl;
use JSON qw( decode_json encode_json );
use Try::Tiny qw( catch try );
use MIME::Base64 qw( decode_base64 );
use POSIX qw( floor );

use C4::Context;

use Koha::ERM::EHoldings::Title;
use Koha::ERM::EHoldings::Titles;

use base 'Koha::BackgroundJob';

=head1 NAME

Koha::BackgroundJob::ImportKBARTFile - Create new eHoldings titles from a KBART file

This is a subclass of Koha::BackgroundJob.

=head1 API

=head2 Class methods

=head3 job_type

Define the job type of this job.

=cut

sub job_type {
    return 'import_from_kbart_file';
}

=head3 process

Process the import.
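
On completion the report below is stored in the job data alongside any
messages (field names are taken from the code; the values are illustrative):

    {
        file_name        => 'titles.tsv',
        total_lines      => 100,
        titles_imported  => 95,
        duplicates_found => 3,
        failed_imports   => 2,
    }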

=cut

sub process {
    my ( $self, $args ) = @_;

    if ( $self->status eq 'cancelled' ) {
        return;
    }

    $self->start;
    my @messages;
    my $titles_imported  = 0;
    my $duplicate_titles = 0;
    my $failed_imports   = 0;
    my $total_lines;
    my $file_name = $args->{file}->{filename};
    my $report    = {
        duplicates_found => undef,
        titles_imported  => undef,
        file_name        => $file_name,
        total_lines      => undef,
        failed_imports   => undef
    };

    try {
        my $file = $args->{file};
        my ( $column_headers, $lines ) = format_file($file);

        if ( scalar( @{$lines} ) == 0 ) {
            push @messages, {
                code          => 'job_failed',
                type          => 'error',
                error_message => 'No valid lines were found in this file. Please check the file formatting.',
            };

            # Persist the error message and stop here rather than running the
            # import loop against an empty file (return exits the try block)
            $self->data( encode_json( { messages => \@messages, report => $report } ) );
            $self->status('failed')->store;
            return;
        }

        $self->size( scalar( @{$lines} ) )->store;
        $total_lines = scalar( @{$lines} );

        foreach my $line ( @{$lines} ) {
            next if !$line;
            my $new_title   = create_title_hash_from_line_data( $line, $column_headers );
            my $title_match = Koha::ERM::EHoldings::Titles->search( { external_id => $new_title->{title_id} } )->count;

            if ($title_match) {
                $duplicate_titles++;
                push @messages, {
                    code          => 'title_already_exists',
                    type          => 'warning',
                    error_message => undef,
                    title         => $new_title->{publication_title}
                };
            } else {
                try {
                    my $formatted_title = format_title($new_title);
                    if ( !$formatted_title->{publication_title} ) {
                        push @messages, {
                            code          => 'title_failed',
                            type          => 'error',
                            error_message => 'No publication_title found for title_id: '
                                . ( $formatted_title->{external_id} // '' ),
                            title    => '(Unknown)',
                            title_id => $formatted_title->{external_id}
                        };
                        $failed_imports++;
                    } else {
                        my $imported_title = Koha::ERM::EHoldings::Title->new($formatted_title)->store;

                        # No need to add a message for a successful import -
                        # files could have 1000s of titles, which would lead to lots of messages in background_job->data
                        $titles_imported++ if $imported_title;
                    }
                } catch {
                    $failed_imports++;
                    push @messages, {
                        code          => 'title_failed',
                        type          => 'error',
                        error_message => $_->{msg},
                        title         => $new_title->{publication_title}
                    };
                };
            }
            $self->step;
        }

        $report->{duplicates_found} = $duplicate_titles;
        $report->{titles_imported}  = $titles_imported;
        $report->{total_lines}      = $total_lines;
        $report->{failed_imports}   = $failed_imports;

        my $data = $self->decoded_data;
        $data->{messages} = \@messages;
        $data->{report}   = $report;

        # Remove the file content as it is no longer needed and can be very large
        $data->{file}->{file_content} = undef;

        $self->finish($data);
    } catch {
        warn $_;
    };
}

=head3 enqueue

Enqueue the new job.
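
A minimal usage sketch; the C<file> hashref keys follow what process()
and format_file() read, and the base64-encoded TSV content is an
illustrative assumption:

    Koha::BackgroundJob::ImportKBARTFile->new->enqueue(
        {
            file => {
                filename     => 'package_titles.tsv',
                file_content => $base64_encoded_tsv,
            }
        }
    );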

=cut

sub enqueue {
    my ( $self, $args ) = @_;

    return unless exists $args->{file};

    $self->SUPER::enqueue(
        {
            job_size  => 1,
            job_args  => $args,
            job_queue => 'long_tasks',
        }
    );
}

=head3 format_title

Renames the fields of an incoming title to match the names of the database fields in Koha:

KBART field "title_id" maps to "external_id" in Koha.
KBART field "coverage_notes" maps to "notes" in Koha.
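
For example, a sketch of the renaming (the hashref is modified in place
and returned):

    my $title = format_title( { title_id => 'ABC123', coverage_notes => 'Online only' } );

    # $title is now { external_id => 'ABC123', notes => 'Online only' }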

=cut

sub format_title {
    my ($title) = @_;

    $title->{external_id} = $title->{title_id};
    delete $title->{title_id};

    # Some files appear to use coverage_notes instead of "notes" as in the KBART standard
    if ( $title->{coverage_notes} ) {
        $title->{notes} = $title->{coverage_notes};
        delete $title->{coverage_notes};
    }

    return $title;
}

=head3 format_file

Decodes an uploaded file and splits it into the column headers and the lines to be processed.
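
A sketch of the expected input and output, using a two-column TSV for
brevity (encode_base64 appears only to build the illustrative input):

    use MIME::Base64 qw( encode_base64 );

    my $file = {
        filename     => 'titles.tsv',
        file_content => encode_base64("publication_title\ttitle_id\nMy Journal\t12345\n"),
    };
    my ( $column_headers, $lines ) = format_file($file);

    # $column_headers is [ 'publication_title', 'title_id' ]
    # $lines is [ "My Journal\t12345" ]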

=cut

sub format_file {
    my ($file) = @_;

    my $file_content = decode_base64( $file->{file_content} );

    # Normalise \r\n, \r and \n line endings to a single delimiter before splitting,
    # then drop the empty strings that \r\n endings leave behind
    $file_content =~ s/\n/\r/g;
    my @lines          = split /\r/, $file_content;
    my @column_headers = split /\t/, $lines[0];
    shift @lines;    # Remove the headers row
    my @remove_null_lines = grep { $_ ne '' } @lines;

    return ( \@column_headers, \@remove_null_lines );
}

=head3 create_title_hash_from_line_data

Takes a line and creates a hash of the values mapped to the column headings.
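
For example (illustrative values):

    my $headers = [ 'publication_title', 'title_id' ];
    my $title   = create_title_hash_from_line_data( "My Journal\t12345", $headers );

    # $title is { publication_title => 'My Journal', title_id => '12345' }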

=cut

sub create_title_hash_from_line_data {
    my ( $line, $column_headers ) = @_;

    my %new_title;
    my @values = split /\t/, $line;

    # Hash slice: assign each tab-separated value to its matching column heading
    @new_title{ @{$column_headers} } = @values;

    return \%new_title;
}

=head3 get_valid_headers

Returns the list of headers permitted in a KBART Phase II file.

=cut

sub get_valid_headers {
    return (
        'publication_title',
        'print_identifier',
        'online_identifier',
        'date_first_issue_online',
        'num_first_vol_online',
        'num_first_issue_online',
        'date_last_issue_online',
        'num_last_vol_online',
        'num_last_issue_online',
        'title_url',
        'first_author',
        'title_id',
        'embargo_info',
        'coverage_depth',
        'coverage_notes',
        'publisher_name',
        'publication_type',
        'date_monograph_published_print',
        'date_monograph_published_online',
        'monograph_volume',
        'monograph_edition',
        'first_editor',
        'parent_publication_title_id',
        'preceding_publication_title_id',
        'access_type',
        'notes'
    );
}

=head3 calculate_chunked_file_size

Calculates the average line size of a file to work out how many lines a large file should be chunked into.
Knocks 10% off the final result to leave some margin for error.
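
For example, with illustrative numbers: a 1,000,000 byte file of 10,000
lines averages 100 bytes per line, so 5,000 lines fit in a
max_allowed_packet of 500,000 bytes, and the chunk size becomes
floor( 5000 * 0.9 ) = 4500 lines:

    my $chunk_size = calculate_chunked_file_size( 1_000_000, 500_000, 10_000 );

    # $chunk_size is 4500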

=cut

sub calculate_chunked_file_size {
    my ( $file_size, $max_allowed_packet, $number_of_lines ) = @_;

    my $average_line_size = $file_size / $number_of_lines;
    my $lines_possible    = $max_allowed_packet / $average_line_size;
    my $moderated_value   = floor( $lines_possible * 0.9 );
    return $moderated_value;
}

1;