Lines 16-25: package Koha::BackgroundJob::ImportKBARTFile

 # along with Koha; if not, see <http://www.gnu.org/licenses>.
 
 use Modern::Perl;
-use JSON qw( decode_json encode_json );
 use Try::Tiny qw( catch try );
-use MIME::Base64 qw( decode_base64 );
-use POSIX qw( floor );
 
 use C4::Context;
 
Lines 98-105: sub process

 
     foreach my $row ( @{$rows} ) {
         next if !$row;
-        my $new_title   = create_title_hash_from_line_data( $row, $column_headers, $invalid_columns );
-        my $title_match = check_for_matching_title( $new_title, $package_id );
+        my $new_title   = _create_title_hash_from_line_data( $row, $column_headers, $invalid_columns );
+        my $title_match = _check_for_matching_title( $new_title, $package_id );
 
         if ($title_match) {
             $duplicate_titles++;
Lines 111-117: sub process

             };
         } else {
             try {
-                my $formatted_title = format_title($new_title);
+                my $formatted_title = _format_title($new_title);
                 if ( !$formatted_title->{publication_title} ) {
                     push @messages, {
                         code => 'no_title_found',
Lines 124-130: sub process

                 my $imported_title = Koha::ERM::EHoldings::Title->new($formatted_title)
                     ->store( { create_linked_biblio => $create_linked_biblio } );
                 push( @biblio_ids, $imported_title->biblio_id ) if $create_linked_biblio;
-                create_linked_resource(
+                _create_linked_resource(
                     {
                         title      => $imported_title,
                         package_id => $package_id
Lines 192-198: sub enqueue

     );
 }
 
-=head3 format_title
+=head3 _format_title
 
 Formats a title to fit the names of the database fields in Koha
 
Lines 201-207: Kbart field "coverage_notes" = "notes" in Koha

 
 =cut
 
-sub format_title {
+sub _format_title {
     my ($title) = @_;
 
     $title->{external_id} = $title->{title_id};
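
For reference (not part of the patch): the POD above says the sub renames KBART columns to Koha's database field names, e.g. the KBART field "coverage_notes" becomes "notes" in Koha, and the first body line shown copies title_id into external_id. A minimal sketch of a call, with the input hash invented for illustration:

    my $title = {
        publication_title => 'Journal of Examples',    # hypothetical row data
        title_id          => 'JoE-001',
        coverage_notes    => 'Perpetual access',
    };

    my $formatted = _format_title($title);
    # $formatted->{external_id} is 'JoE-001'           (copied from title_id, per the body above)
    # $formatted->{notes}       is 'Perpetual access'  (renamed from coverage_notes, per the POD)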
Lines 216-259: sub format_title

     return $title;
 }
 
-=head3 read_file
-
-Reads a file to provide report headers and lines to be processed
-
-=cut
-
-sub read_file {
-    my ($file) = @_;
-
-    my $file_content = defined( $file->{file_content} ) ? decode_base64( $file->{file_content} ) : "";
-    my $delimiter    = $file->{filename} =~ /\.tsv$/ ? "\t" : ",";
-    my $quote_char   = $file->{filename} =~ /\.tsv$/ ? "\"" : "\"";
-
-    open my $fh, "<", \$file_content or die "Could not open file $file->{filename}: $!";
-    my $csv = Text::CSV_XS->new(
-        {
-            sep_char           => $delimiter,
-            quote_char         => $quote_char,
-            binary             => 1,
-            allow_loose_quotes => 1
-        }
-    );
-    my $headers_to_check = $csv->getline($fh);
-    my $column_headers   = rescue_EBSCO_files($headers_to_check);
-    my $lines            = $csv->getline_all( $fh, 0 );
-    close($fh);
-
-    unless ( $csv->eof() ) {
-        my ( $cde, $str, $pos ) = $csv->error_diag();
-        my $error = $cde ? "$cde, $str, $pos" : "";
-        warn $error if $error;
-        return ( $column_headers, $lines, $error );
-    }
-
-    return ( $column_headers, $lines );
-}
-
-=head3 create_title_hash_from_line_data
+=head3 _create_title_hash_from_line_data
 
 Takes a line and creates a hash of the values mapped to the column headings
 Only accepts fields that are in the list of permitted KBART fields, other fields are ignored
Lines 261-267: Only accepts fields that are in the list of permitted KBART fields, other fields

 
 =cut
 
-sub create_title_hash_from_line_data {
+sub _create_title_hash_from_line_data {
     my ( $row, $column_headers, $invalid_columns ) = @_;
 
     my %new_title;
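
The body of this sub is not shown beyond its declaration, but the POD describes it: pair each cell in the row with its column heading, ignoring non-KBART columns. A minimal sketch of that behaviour, assuming $invalid_columns holds the indexes of headers that failed validation (the real structure may differ):

    sub _create_title_hash_from_line_data {
        my ( $row, $column_headers, $invalid_columns ) = @_;

        my %new_title;
        my %invalid = map { $_ => 1 } @{ $invalid_columns || [] };

        # Map each value onto its column heading, skipping columns that
        # are not in the permitted KBART field list.
        for my $i ( 0 .. $#{$column_headers} ) {
            next if $invalid{$i};
            $new_title{ $column_headers->[$i] } = $row->[$i];
        }

        return \%new_title;
    }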
Lines 283-295: sub create_title_hash_from_line_data

     return \%new_title;
 }
 
-=head3 check_for_matching_title
+=head3 _check_for_matching_title
 
 Checks whether this title already exists to avoid duplicates
 
 =cut
 
-sub check_for_matching_title {
+sub _check_for_matching_title {
     my ( $title, $package_id ) = @_;
 
     my $match_parameters = {};
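
Only the entry and exit of this sub appear in these hunks. A sketch consistent with the POD's stated purpose, assuming the match is made on the title's identifiers through Koha::ERM::EHoldings::Titles (the actual match criteria, and how $package_id constrains the search, are not visible here):

    # Hypothetical duplicate check: search on whichever identifiers
    # the incoming title carries.
    my $match_parameters = {};
    $match_parameters->{print_identifier}  = $title->{print_identifier}  if $title->{print_identifier};
    $match_parameters->{online_identifier} = $title->{online_identifier} if $title->{online_identifier};

    my $matching_title_found = 0;
    if (%$match_parameters) {
        $matching_title_found = Koha::ERM::EHoldings::Titles->search($match_parameters)->count ? 1 : 0;
    }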
Lines 316-335: sub check_for_matching_title

     return $matching_title_found;
 }
 
-=head3 create_linked_resource
+=head3 _create_linked_resource
 
 Creates a resource for a newly stored title.
 
 =cut
 
-sub create_linked_resource {
+sub _create_linked_resource {
     my ($args) = @_;
 
     my $title      = $args->{title};
     my $package_id = $args->{package_id};
 
     my $title_id = $title->title_id;
-    my ( $date_first_issue_online, $date_last_issue_online ) = get_first_and_last_issue_dates($title);
+    my ( $date_first_issue_online, $date_last_issue_online ) = _get_first_and_last_issue_dates($title);
     my $resource = Koha::ERM::EHoldings::Resource->new(
         {
             title_id => $title_id,
Lines 342-354: sub create_linked_resource

     return;
 }
 
-=head3 get_first_and_last_issue_dates
+=head3 _get_first_and_last_issue_dates
 
 Gets and formats a date for storing on the resource. Dates can come from files in YYYY, YYYY-MM or YYYY-MM-DD format
 
 =cut
 
-sub get_first_and_last_issue_dates {
+sub _get_first_and_last_issue_dates {
     my ($title) = @_;
 
     return ( undef, undef ) if ( !$title->date_first_issue_online && !$title->date_last_issue_online );
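
Per the POD, incoming dates may be YYYY, YYYY-MM or YYYY-MM-DD. A minimal sketch of the kind of padding this implies; the helper name and the choice to pad with the earliest month/day are assumptions, not the patch's actual logic:

    # Hypothetical: pad a partial KBART date out to a full YYYY-MM-DD.
    sub _pad_kbart_date {
        my ($date) = @_;
        return undef unless $date;
        return "$date-01-01" if $date =~ /^\d{4}$/;          # YYYY
        return "$date-01"    if $date =~ /^\d{4}-\d{2}$/;    # YYYY-MM
        return $date;                                        # YYYY-MM-DD
    }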
Lines 372-471: sub get_first_and_last_issue_dates

     return ( $date_first_issue_online, $date_last_issue_online );
 }
 
-=head3 get_valid_headers
-
-Returns a list of permitted headers in a KBART phase II file
-
-=cut
-
-sub get_valid_headers {
-    return (
-        'publication_title',
-        'print_identifier',
-        'online_identifier',
-        'date_first_issue_online',
-        'num_first_vol_online',
-        'num_first_issue_online',
-        'date_last_issue_online',
-        'num_last_vol_online',
-        'num_last_issue_online',
-        'title_url',
-        'first_author',
-        'title_id',
-        'embargo_info',
-        'coverage_depth',
-        'coverage_notes',
-        'publisher_name',
-        'publication_type',
-        'date_monograph_published_print',
-        'date_monograph_published_online',
-        'monograph_volume',
-        'monograph_edition',
-        'first_editor',
-        'parent_publication_title_id',
-        'preceding_publication_title_id',
-        'access_type',
-        'notes'
-    );
-}
-
-=head3 calculate_chunked_params_size
-
-Calculates average line size to work out how many lines to chunk a large file into
-Uses only 75% of the max_allowed_packet as an upper limit
-
-=cut
-
-sub calculate_chunked_params_size {
-    my ( $params_size, $max_allowed_packet, $number_of_rows ) = @_;
-
-    my $average_line_size = $params_size / $number_of_rows;
-    my $lines_possible    = ( $max_allowed_packet * 0.75 ) / $average_line_size;
-    my $rounded_value     = floor($lines_possible);
-    return $rounded_value;
-}
-
-=head3 is_file_too_large
-
-Calculates the final size of the background job object that will need storing to check if we exceed the max_allowed_packet
-
-=cut
-
-sub is_file_too_large {
-    my ( $params_to_store, $max_allowed_packet ) = @_;
-
-    my $json           = JSON->new->utf8(0);
-    my $encoded_params = $json->encode($params_to_store);
-    my $params_size    = length $encoded_params;
-
-    # A lot more than just the params are stored in the background job table and this is difficult to calculate
-    # We should allow for no more than 75% of the max_allowed_packet to be made up of the job params to avoid db conflicts
-    return {
-        file_too_large => 1,
-        params_size    => $params_size
-    } if $params_size > ( $max_allowed_packet * 0.75 );
-
-    return {
-        file_too_large => 0,
-        params_size    => $params_size
-    };
-}
-
-=head3 rescue_EBSCO_files
-
-EBSCO have an incorrect spelling for "preceding_publication_title_id" in all of their KBART files (preceding is spelled with a double 'e').
-This means all of their KBART files fail to import using the current methodology.
-There is no simple way of finding out who the vendor is before importing so all KBART files from any vendor are going to have to be checked for this spelling and corrected.
-
-=cut
-
-sub rescue_EBSCO_files {
-    my ($column_headers) = @_;
-
-    my ($index) = grep { @$column_headers[$_] eq 'preceeding_publication_title_id' } ( 0 .. @$column_headers - 1 );
-    @$column_headers[$index] = 'preceding_publication_title_id' if $index;
-
-    return $column_headers;
-}
-
 1;
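
The helpers deleted above (read_file, get_valid_headers, calculate_chunked_params_size, is_file_too_large, rescue_EBSCO_files) leave this module together with the JSON, MIME::Base64 and POSIX imports that only they used. For reference, a worked example of the chunking arithmetic from the removed calculate_chunked_params_size, with illustrative numbers (not from the source):

    use POSIX qw( floor );

    # 1,000,000 bytes of params over 5,000 rows averages 200 bytes/row.
    # With a 16 MiB max_allowed_packet and the 75% headroom rule
    # described in is_file_too_large:
    my $params_size        = 1_000_000;
    my $number_of_rows     = 5_000;
    my $max_allowed_packet = 16_777_216;

    my $average_line_size = $params_size / $number_of_rows;    # 200
    my $chunk_size = floor( ( $max_allowed_packet * 0.75 ) / $average_line_size );
    # floor(12_582_912 / 200) = 62_914 rows per chunk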