Lines 16-25: package Koha::BackgroundJob::ImportKBARTFile;

 # along with Koha; if not, see <http://www.gnu.org/licenses>.

 use Modern::Perl;
 use JSON qw( decode_json encode_json );
 use Try::Tiny qw( catch try );
 use MIME::Base64 qw( decode_base64 );
 use POSIX qw( floor );

 use C4::Context;

Lines 65-101: sub process {

     my $titles_imported = 0;
     my $duplicate_titles = 0;
     my $failed_imports = 0;
-    my $total_lines;
-    my $file_name = $args->{file}->{filename};
+    my $total_rows;
+    my $file_name = $args->{file_name};
     my $report = {
         duplicates_found => undef,
         titles_imported => undef,
         file_name => $file_name,
-        total_lines => undef,
+        total_rows => undef,
         failed_imports => undef
     };

     try {
-        my $file = $args->{file};
+        my $column_headers = $args->{column_headers};
+        my $rows = $args->{rows};
         my $package_id = $args->{package_id};
-        my ( $column_headers, $lines ) = format_file($file);

-        if ( scalar( @{$lines} ) == 0 ) {
+        if ( scalar( @{$rows} ) == 0 ) {
             push @messages, {
                 code => 'job_failed',
                 type => 'error',
-                error_message => 'No valid lines were found in this file. Please check the file formatting.',
+                error_message => 'No valid rows were found in this file. Please check the file formatting.',
             };
             $self->status('failed')->store;
         }

-        $self->size( scalar( @{$lines} ) )->store;
-        $total_lines = scalar( @{$lines} );
+        $self->size( scalar( @{$rows} ) )->store;
+        $total_rows = scalar( @{$rows} );

-        foreach my $line ( @{$lines} ) {
-            next if !$line;
-            my $new_title = create_title_hash_from_line_data( $line, $column_headers );
-            my $title_match = Koha::ERM::EHoldings::Titles->search( { external_id => $new_title->{title_id} } )->count;
+        foreach my $row ( @{$rows} ) {
+            next if !$row;
+            my $new_title = create_title_hash_from_line_data( $row, $column_headers );
+            my $title_match = check_for_matching_title($new_title);

             if ($title_match) {
                 $duplicate_titles++;
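Reviewer note: the reworked process() consumes pre-parsed rows rather than the raw base64 upload. A minimal sketch of the args shape it now reads (keys taken from the code above, values purely illustrative):

    my $args = {
        file_name      => 'package_holdings.tsv',
        package_id     => 1,
        column_headers => [ 'publication_title', 'print_identifier', 'online_identifier', 'title_id' ],
        rows           => [
            [ 'Some Journal',    '1234-5678', '2345-6789', 'J1' ],
            [ 'Another Journal', '9876-5432', '8765-4321', 'J2' ],
        ],
    };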
Lines 119-127: sub process {

                 $failed_imports++;
             } else {
                 my $imported_title = Koha::ERM::EHoldings::Title->new($formatted_title)->store;
-                my $title_id = $imported_title->title_id;
-                Koha::ERM::EHoldings::Resource->new( { title_id => $title_id, package_id => $package_id } )
-                    ->store;
+                create_linked_resource(
+                    {
+                        title => $imported_title,
+                        package_id => $package_id
+                    }
+                );

                 # No need to add a message for a successful import,
                 # files could have 1000s of titles which will lead to lots of messages in background_job->data
Lines 132-138: sub process {

                 push @messages, {
                     code => 'title_failed',
                     type => 'error',
-                    error_message => $_->{msg},
+                    error_message => $_->{msg} || "Please check your file",
                     title => $new_title->{publication_title}
                 }
             };
Lines 142-148: sub process {

         $report->{duplicates_found} = $duplicate_titles;
         $report->{titles_imported} = $titles_imported;
-        $report->{total_lines} = $total_lines;
+        $report->{total_rows} = $total_rows;
         $report->{failed_imports} = $failed_imports;

         my $data = $self->decoded_data;
Lines 167-173: Enqueue the new job

 sub enqueue {
     my ( $self, $args ) = @_;

-    return unless exists $args->{file};
+    return unless exists $args->{column_headers};

     $self->SUPER::enqueue(
         {
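With the guard now keyed on column_headers, callers are expected to parse the file before enqueueing. A hypothetical caller (not part of this diff) might look like:

    Koha::BackgroundJob::ImportKBARTFile->new->enqueue(
        {
            file_name      => $file_name,
            package_id     => $package_id,
            column_headers => $column_headers,
            rows           => $rows,
        }
    );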
Lines 194-200: sub format_title {

     delete $title->{title_id};

     # Some files appear to use coverage_notes instead of "notes" as in the KBART standard
-    if ( $title->{coverage_notes} ) {
+    if ( exists $title->{coverage_notes} ) {
         $title->{notes} = $title->{coverage_notes};
         delete $title->{coverage_notes};
     }
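Switching from a truthiness test to exists closes an edge case: a coverage_notes column holding an empty string or 0 is false in Perl, so the old check left the unrecognised coverage_notes key on the title. For example:

    my $title = { publication_title => 'Some Journal', coverage_notes => '' };
    # old: if ( $title->{coverage_notes} )        -> false, key kept as-is
    # new: if ( exists $title->{coverage_notes} ) -> true, renamed to notes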
Lines 202-224: sub format_title {

     return $title;
 }

-=head3 format_file
+=head3 read_file

-Formats a file to provide report headers and lines to be processed
+Reads a file to provide report headers and lines to be processed

 =cut

-sub format_file {
+sub read_file {
     my ($file) = @_;

-    my $file_content = decode_base64( $file->{file_content} );
-    $file_content =~ s/\n/\r/g;
-    my @lines = split /\r/, $file_content;
-    my @column_headers = split /\t/, $lines[0];
-    shift @lines;    # Remove headers row
-    my @remove_null_lines = grep $_ ne '', @lines;
+    my $file_content = defined( $file->{file_content} ) ? decode_base64( $file->{file_content} ) : "";
+    my $delimiter = $file->{filename} =~ /\.tsv$/ ? "\t" : ",";
+    my $quote_char = $file->{filename} =~ /\.tsv$/ ? "" : '"';

-    return ( \@column_headers, \@remove_null_lines );
+    open my $fh, "<", \$file_content or die;
+    my $csv = Text::CSV_XS->new(
+        {
+            sep_char => $delimiter,
+            quote_char => $quote_char,
+            binary => 1,
+            allow_loose_quotes => 1
+        }
+    );
+    my $headers_to_check = $csv->getline($fh);
+    my $column_headers = rescue_EBSCO_files($headers_to_check);
+    my $lines = $csv->getline_all( $fh, 0 );
+
+    my ( $cde, $str, $pos ) = $csv->error_diag();
+    my $error = $cde ? "$cde, $str, $pos" : "";
+    warn $error if $error;
+
+    close($fh);
+
+    return ( $column_headers, $lines, $error );
 }

 =head3 create_title_hash_from_line_data
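Text::CSV_XS does the parsing here (its use statement is presumably added elsewhere in the patch; it is not visible in this hunk). A self-contained sketch of the same pattern, reading from an in-memory filehandle as read_file does:

    use Text::CSV_XS;

    my $content = "publication_title\ttitle_id\nSome Journal\tJ1\n";
    open my $fh, "<", \$content or die "cannot open in-memory file: $!";

    my $csv     = Text::CSV_XS->new( { sep_char => "\t", binary => 1 } );
    my $headers = $csv->getline($fh);        # ['publication_title', 'title_id']
    my $rows    = $csv->getline_all($fh);    # [['Some Journal', 'J1']]
    close $fh;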
Lines 228-243: Takes a line and creates a hash of the values mapped to the column headings

 =cut

 sub create_title_hash_from_line_data {
-    my ( $line, $column_headers ) = @_;
+    my ( $row, $column_headers ) = @_;

     my %new_title;
-    my @values = split /\t/, $line;

-    @new_title{ @{$column_headers} } = @values;
+    @new_title{ @{$column_headers} } = @$row;
+
+    # If the file has been converted from CSV to TSV for import, then some titles containing commas will be enclosed in ""
+    my $first_char = substr( $new_title{publication_title}, 0, 1 );
+    my $last_char = substr( $new_title{publication_title}, -1 );
+    if ( $first_char eq '"' && $last_char eq '"' ) {
+        $new_title{publication_title} =~ s/^"|"$//g;
+    }

     return \%new_title;
 }

+=head3 check_for_matching_title
+
+Checks whether this title already exists to avoid duplicates
+
+=cut
+
+sub check_for_matching_title {
+    my ($title) = @_;
+
+    my $match_parameters = {};
+    $match_parameters->{print_identifier} = $title->{print_identifier} if $title->{print_identifier};
+    $match_parameters->{online_identifier} = $title->{online_identifier} if $title->{online_identifier};
+
+    # Use external_id in case title exists for a different provider, we want to add it for the new provider
+    $match_parameters->{external_id} = $title->{title_id} if $title->{title_id};
+
+    # If no match parameters are provided in the file we should add the new title
+    return 0 if !%$match_parameters;
+
+    my $title_match = Koha::ERM::EHoldings::Titles->search($match_parameters)->count;
+
+    return $title_match;
+}
+
+=head3 create_linked_resource
+
+Creates a resource for a newly stored title.
+
+=cut
+
+sub create_linked_resource {
+    my ($args) = @_;
+
+    my $title = $args->{title};
+    my $package_id = $args->{package_id};
+
+    my $title_id = $title->title_id;
+    my ( $date_first_issue_online, $date_last_issue_online ) = get_first_and_last_issue_dates($title);
+    my $resource = Koha::ERM::EHoldings::Resource->new(
+        {
+            title_id => $title_id,
+            package_id => $package_id,
+            started_on => $date_first_issue_online,
+            ended_on => $date_last_issue_online,
+        }
+    )->store;
+
+    return;
+}
+
+=head3 get_first_and_last_issue_dates
+
+Gets and formats a date for storing on the resource. Dates can come from files in YYYY, YYYY-MM or YYYY-MM-DD format
+
+=cut
+
+sub get_first_and_last_issue_dates {
+    my ($title) = @_;
+
+    return ( undef, undef ) if ( !$title->date_first_issue_online && !$title->date_last_issue_online );
+
+    my $date_first_issue_online =
+        $title->date_first_issue_online =~ /^\d{4}((-\d{2}-\d{2}$|-\d{2}$)|$)$/
+        ? $title->date_first_issue_online
+        : undef;
+    my $date_last_issue_online =
+        $title->date_last_issue_online =~ /^\d{4}((-\d{2}-\d{2}$|-\d{2}$)|$)$/ ? $title->date_last_issue_online : undef;
+
+    $date_first_issue_online = $date_first_issue_online . '-01-01'
+        if $date_first_issue_online && $date_first_issue_online =~ /^\d{4}$/;
+    $date_last_issue_online = $date_last_issue_online . '-01-01'
+        if $date_last_issue_online && $date_last_issue_online =~ /^\d{4}$/;
+    $date_first_issue_online = $date_first_issue_online . '-01'
+        if $date_first_issue_online && $date_first_issue_online =~ /^\d{4}-\d{2}$/;
+    $date_last_issue_online = $date_last_issue_online . '-01'
+        if $date_last_issue_online && $date_last_issue_online =~ /^\d{4}-\d{2}$/;
+
+    return ( $date_first_issue_online, $date_last_issue_online );
+}
+
 =head3 get_valid_headers

 Returns a list of permitted headers in a KBART phase II file
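The padding rules in get_first_and_last_issue_dates are easiest to see with concrete inputs. A standalone sketch, with the regexes copied from the sub above:

    # Worked examples of the date padding in get_first_and_last_issue_dates
    for my $date ( '2019', '2019-06', '2019-06-15', 'June 2019' ) {
        my $valid = $date =~ /^\d{4}((-\d{2}-\d{2}$|-\d{2}$)|$)$/ ? $date : undef;
        $valid .= '-01-01' if $valid && $valid =~ /^\d{4}$/;          # YYYY
        $valid .= '-01'    if $valid && $valid =~ /^\d{4}-\d{2}$/;    # YYYY-MM
        printf "%-10s => %s\n", $date, $valid // 'undef';
    }
    # 2019       => 2019-01-01
    # 2019-06    => 2019-06-01
    # 2019-06-15 => 2019-06-15
    # June 2019  => undef (fails the format check, stored as NULL)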
Lines 275-294: sub get_valid_headers {

     );
 }

-=head3 calculate_chunked_file_size
+=head3 calculate_chunked_params_size

 Calculates average line size to work out how many lines to chunk a large file into
-Knocks 10% off the final result to give some margin for error
+Uses only 75% of the max_allowed_packet as an upper limit
+
+=cut
+
+sub calculate_chunked_params_size {
+    my ( $params_size, $max_allowed_packet, $number_of_rows ) = @_;
+
+    my $average_line_size = $params_size / $number_of_rows;
+    my $lines_possible = ( $max_allowed_packet * 0.75 ) / $average_line_size;
+    my $rounded_value = floor($lines_possible);
+    return $rounded_value;
+}
+
+=head3 is_file_too_large
+
+Calculates the final size of the background job object that will need storing to check if we exceed the max_allowed_packet
+
+=cut
+
+sub is_file_too_large {
+    my ( $params_to_store, $max_allowed_packet ) = @_;
+
+    my $json = JSON->new->utf8(0);
+    my $encoded_params = $json->encode($params_to_store);
+    my $params_size = length $encoded_params;
+
+    # A lot more than just the params are stored in the background job table and this is difficult to calculate
+    # We should allow for no more than 75% of the max_allowed_packet to be made up of the job params to avoid db conflicts
+    return {
+        file_too_large => 1,
+        params_size => $params_size
+    } if $params_size > ( $max_allowed_packet * 0.75 );
+
+    return {
+        file_too_large => 0,
+        params_size => $params_size
+    };
+}
+
+=head3 rescue_EBSCO_files
+
+EBSCO have an incorrect spelling of "preceding_publication_title_id" in all of their KBART files ("preceeding" instead of "preceding").
+This is very annoying because it means all of their KBART files fail to import using the current methodology.
+There is no simple way of finding out who the vendor is before importing so all KBART files from any vendor are going to have to be checked for this spelling and corrected.

 =cut

-sub calculate_chunked_file_size {
-    my ( $file_size, $max_allowed_packet, $number_of_lines ) = @_;
+sub rescue_EBSCO_files {
+    my ($column_headers) = @_;
+
+    my ($index) = grep { @$column_headers[$_] eq 'preceeding_publication_title_id' } ( 0 .. @$column_headers - 1 );
+    @$column_headers[$index] = 'preceding_publication_title_id' if $index;

-    my $average_line_size = $file_size / $number_of_lines;
-    my $lines_possible = $max_allowed_packet / $average_line_size;
-    my $moderated_value = floor( $lines_possible * 0.9 );
-    return $moderated_value;
+    return $column_headers;
 }

 1;
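A worked example of the 75% headroom rule shared by the two sizing helpers, with illustrative numbers: for a max_allowed_packet of 16,777,216 bytes (16 MB), the budget for job params is 16,777,216 * 0.75 = 12,582,912 bytes. If the encoded params come to 13,000,000 bytes across 50,000 rows, is_file_too_large returns file_too_large => 1, and since the average row is 13,000,000 / 50,000 = 260 bytes, calculate_chunked_params_size would size each chunk at floor(12,582,912 / 260) = 48,395 rows.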