Bug 36831

(-)a/Koha/BackgroundJob/ImportKBARTFile.pm (-153 / +16 lines)
@@ -16,10 +16,7 @@ package Koha::BackgroundJob::ImportKBARTFile;
 # along with Koha; if not, see <http://www.gnu.org/licenses>.
 
 use Modern::Perl;
-use JSON         qw( decode_json encode_json );
-use Try::Tiny    qw( catch try );
-use MIME::Base64 qw( decode_base64 );
-use POSIX        qw( floor );
+use Try::Tiny qw( catch try );
 
 use C4::Context;
 
@@ -98,8 +95,8 @@ sub process {
 
         foreach my $row ( @{$rows} ) {
             next if !$row;
-            my $new_title   = create_title_hash_from_line_data( $row, $column_headers, $invalid_columns );
-            my $title_match = check_for_matching_title( $new_title, $package_id );
+            my $new_title   = _create_title_hash_from_line_data( $row, $column_headers, $invalid_columns );
+            my $title_match = _check_for_matching_title( $new_title, $package_id );
 
             if ($title_match) {
                 $duplicate_titles++;
@@ -111,7 +108,7 @@ sub process {
                 };
             } else {
                 try {
-                    my $formatted_title = format_title($new_title);
+                    my $formatted_title = _format_title($new_title);
                     if ( !$formatted_title->{publication_title} ) {
                         push @messages, {
                             code     => 'no_title_found',
@@ -124,7 +121,7 @@ sub process {
                         my $imported_title = Koha::ERM::EHoldings::Title->new($formatted_title)
                             ->store( { create_linked_biblio => $create_linked_biblio } );
                         push( @biblio_ids, $imported_title->biblio_id ) if $create_linked_biblio;
-                        create_linked_resource(
+                        _create_linked_resource(
                             {
                                 title      => $imported_title,
                                 package_id => $package_id
@@ -192,7 +189,7 @@ sub enqueue {
     );
 }
 
-=head3 format_title
+=head3 _format_title
 
 Formats a title to fit the names of the database fields in Koha
 
@@ -201,7 +198,7 @@ Kbart field "coverage_notes" = "notes" in Koha
 
 =cut
 
-sub format_title {
+sub _format_title {
     my ($title) = @_;
 
     $title->{external_id} = $title->{title_id};
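Note: as the POD above describes, _format_title renames KBART columns to their Koha
equivalents and copies title_id into external_id. A minimal sketch of the intended
behaviour (the input hash here is hypothetical and the key list is not exhaustive):

    my $kbart_title = {
        publication_title => 'Some Journal',
        title_id          => 'SJ0001',
        coverage_notes    => 'Coverage from 2001 onwards',
    };
    my $formatted = _format_title($kbart_title);
    # $formatted->{external_id} now holds 'SJ0001' (copied from title_id)
    # $formatted->{notes} now holds the former coverage_notes value
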
@@ -216,45 +213,7 @@ sub format_title {
     return $title;
 }
 
-=head3 read_file
-
-Reads a file to provide report headers and lines to be processed
-
-=cut
-
-sub read_file {
-    my ($file) = @_;
-
-    my $file_content = defined( $file->{file_content} ) ? decode_base64( $file->{file_content} ) : "";
-    my $delimiter    = $file->{filename} =~ /\.tsv$/    ? "\t"                                   : ",";
-    my $quote_char   = $file->{filename} =~ /\.tsv$/    ? "\""                                   : "\"";
-
-    open my $fh, "<", \$file_content or die "Could not open file $file->{filename}: $!";
-    my $csv = Text::CSV_XS->new(
-        {
-            sep_char           => $delimiter,
-            quote_char         => $quote_char,
-            binary             => 1,
-            allow_loose_quotes => 1,
-            formula            => 'empty'
-        }
-    );
-    my $headers_to_check = $csv->getline($fh);
-    my $column_headers   = rescue_EBSCO_files($headers_to_check);
-    my $lines            = $csv->getline_all( $fh, 0 );
-    close($fh);
-
-    unless ( $csv->eof() ) {
-        my ( $cde, $str, $pos ) = $csv->error_diag();
-        my $error = $cde ? "$cde, $str, $pos" : "";
-        warn $error if $error;
-        return ( $column_headers, $lines, $error );
-    }
-
-    return ( $column_headers, $lines );
-}
-
-=head3 create_title_hash_from_line_data
+=head3 _create_title_hash_from_line_data
 
 Takes a line and creates a hash of the values mapped to the column headings
 Only accepts fields that are in the list of permitted KBART fields, other fields are ignored
@@ -262,7 +221,7 @@ Only accepts fields that are in the list of permitted KBART fields, other fields
 
 =cut
 
-sub create_title_hash_from_line_data {
+sub _create_title_hash_from_line_data {
     my ( $row, $column_headers, $invalid_columns ) = @_;
 
     my %new_title;
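Note: per the POD above, _create_title_hash_from_line_data pairs each cell of a
parsed row with its column heading and ignores anything outside the permitted KBART
fields. A hypothetical call for a two-column file:

    my $column_headers  = [ 'publication_title', 'title_id' ];
    my $row             = [ 'Some Journal', 'SJ0001' ];
    my $invalid_columns = [];

    my $new_title = _create_title_hash_from_line_data( $row, $column_headers, $invalid_columns );
    # $new_title is { publication_title => 'Some Journal', title_id => 'SJ0001' }
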
@@ -284,13 +243,13 @@ sub create_title_hash_from_line_data {
     return \%new_title;
 }
 
-=head3 check_for_matching_title
+=head3 _check_for_matching_title
 
 Checks whether this title already exists to avoid duplicates
 
 =cut
 
-sub check_for_matching_title {
+sub _check_for_matching_title {
     my ( $title, $package_id ) = @_;
 
     my $match_parameters = {};
@@ -317,20 +276,20 @@ sub check_for_matching_title {
     return $matching_title_found;
 }
 
-=head3 create_linked_resource
+=head3 _create_linked_resource
 
 Creates a resource for a newly stored title.
 
 =cut
 
-sub create_linked_resource {
+sub _create_linked_resource {
     my ($args) = @_;
 
     my $title      = $args->{title};
     my $package_id = $args->{package_id};
 
     my $title_id = $title->title_id;
-    my ( $date_first_issue_online, $date_last_issue_online ) = get_first_and_last_issue_dates($title);
+    my ( $date_first_issue_online, $date_last_issue_online ) = _get_first_and_last_issue_dates($title);
     my $resource = Koha::ERM::EHoldings::Resource->new(
         {
             title_id   => $title_id,
@@ -343,13 +302,13 @@ sub create_linked_resource {
     return;
 }
 
-=head3 get_first_and_last_issue_dates
+=head3 _get_first_and_last_issue_dates
 
 Gets and formats a date for storing on the resource. Dates can come from files in YYYY, YYYY-MM or YYYY-MM-DD format
 
 =cut
 
-sub get_first_and_last_issue_dates {
+sub _get_first_and_last_issue_dates {
     my ($title) = @_;
 
     return ( undef, undef ) if ( !$title->date_first_issue_online && !$title->date_last_issue_online );
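Note: _get_first_and_last_issue_dates guards against titles with no coverage dates
and otherwise normalises the YYYY, YYYY-MM or YYYY-MM-DD values mentioned in the POD
(the padding logic itself sits between these two hunks and is unchanged). A sketch of
the guard case:

    # Neither date present on the title: no coverage dates are stored
    my ( $first, $last ) = _get_first_and_last_issue_dates($title);    # ( undef, undef )
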
@@ -373,100 +332,4 @@ sub get_first_and_last_issue_dates {
     return ( $date_first_issue_online, $date_last_issue_online );
 }
 
-=head3 get_valid_headers
-
-Returns a list of permitted headers in a KBART phase II file
-
-=cut
-
-sub get_valid_headers {
-    return (
-        'publication_title',
-        'print_identifier',
-        'online_identifier',
-        'date_first_issue_online',
-        'num_first_vol_online',
-        'num_first_issue_online',
-        'date_last_issue_online',
-        'num_last_vol_online',
-        'num_last_issue_online',
-        'title_url',
-        'first_author',
-        'title_id',
-        'embargo_info',
-        'coverage_depth',
-        'coverage_notes',
-        'publisher_name',
-        'publication_type',
-        'date_monograph_published_print',
-        'date_monograph_published_online',
-        'monograph_volume',
-        'monograph_edition',
-        'first_editor',
-        'parent_publication_title_id',
-        'preceding_publication_title_id',
-        'access_type',
-        'notes'
-    );
-}
-
-=head3 calculate_chunked_params_size
-
-Calculates average line size to work out how many lines to chunk a large file into
-Uses only 75% of the max_allowed_packet as an upper limit
-
-=cut
-
-sub calculate_chunked_params_size {
-    my ( $params_size, $max_allowed_packet, $number_of_rows ) = @_;
-
-    my $average_line_size = $params_size / $number_of_rows;
-    my $lines_possible    = ( $max_allowed_packet * 0.75 ) / $average_line_size;
-    my $rounded_value     = floor($lines_possible);
-    return $rounded_value;
-}
-
-=head3 is_file_too_large
-
-Calculates the final size of the background job object that will need storing to check if we exceed the max_allowed_packet
-
-=cut
-
-sub is_file_too_large {
-    my ( $params_to_store, $max_allowed_packet ) = @_;
-
-    my $json           = JSON->new->utf8(0);
-    my $encoded_params = $json->encode($params_to_store);
-    my $params_size    = length $encoded_params;
-
-    # A lot more than just the params are stored in the background job table and this is difficult to calculate
-    # We should allow for no more than 75% of the max_allowed_packet to be made up of the job params to avoid db conflicts
-    return {
-        file_too_large => 1,
-        params_size    => $params_size
-    } if $params_size > ( $max_allowed_packet * 0.75 );
-
-    return {
-        file_too_large => 0,
-        params_size    => $params_size
-    };
-}
-
-=head3 rescue_EBSCO_files
-
-EBSCO have an incorrect spelling for "preceding_publication_title_id" in all of their KBART files (preceding is spelled with a double 'e').
-This means all of their KBART files fail to import using the current methodology.
-There is no simple way of finding out who the vendor is before importing so all KBART files from any vendor are going to have to be checked for this spelling and corrected.
-
-=cut
-
-sub rescue_EBSCO_files {
-    my ($column_headers) = @_;
-
-    my ($index) = grep { @$column_headers[$_] eq 'preceeding_publication_title_id' } ( 0 .. @$column_headers - 1 );
-    @$column_headers[$index] = 'preceding_publication_title_id' if $index;
-
-    return $column_headers;
-}
-
 1;
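Note: for a feel of the chunking arithmetic being moved out of this module (see
Koha/ERM/EHoldings/Title.pm below), here is a worked example with hypothetical
numbers, following the formula in calculate_chunked_params_size:

    use POSIX qw( floor );

    my $max_allowed_packet = 64 * 1024 * 1024;    # from SELECT @@max_allowed_packet
    my $params_size        = 90 * 1024 * 1024;    # encoded size of the job params
    my $number_of_rows     = 100_000;

    my $average_line_size = $params_size / $number_of_rows;                        # ~944 bytes per row
    my $lines_possible    = ( $max_allowed_packet * 0.75 ) / $average_line_size;   # 75% safety margin
    my $rows_per_chunk    = floor($lines_possible);                                # 53_333 rows per job
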
(-)a/Koha/ERM/EHoldings/Title.pm (+139 lines)
@@ -17,6 +17,9 @@ package Koha::ERM::EHoldings::Title;
 
 use Modern::Perl;
 
+use POSIX        qw( floor );
+use MIME::Base64 qw( decode_base64 );
+
 use Koha::Database;
 
 use base qw(Koha::Object);
@@ -100,6 +103,142 @@ sub resources {
     return Koha::ERM::EHoldings::Resources->_new_from_dbic($resources_rs);
 }
 
+=head3 read_file
+
+Reads a KBART file of titles to provide report headers and lines to be processed.
+Automatically detects whether the file is TSV or CSV based on the first 5 lines
+
+=cut
+
+sub read_file {
+    my ($file) = @_;
+
+    my $file_content = defined( $file->{file_content} ) ? decode_base64( $file->{file_content} ) : "";
+    my ( $delimiter, $quote_char ) = _detect_delimiter_and_quote($file_content);
+
+    return ( undef, undef, "unknown_delimiter" ) unless $delimiter;
+
+    open my $fh, "<", \$file_content or die "Could not open file $file->{filename}: $!";
+    my $csv = Text::CSV_XS->new(
+        {
+            sep_char           => $delimiter,
+            quote_char         => $quote_char,
+            binary             => 1,
+            allow_loose_quotes => 1
+        }
+    );
+
+    my $headers_to_check = $csv->getline($fh);
+    my $column_headers   = _rescue_EBSCO_files($headers_to_check);
+    my $lines            = $csv->getline_all( $fh, 0 );
+    close($fh);
+
+    unless ( $csv->eof() ) {
+        my ( $cde, $str, $pos ) = $csv->error_diag();
+        my $error = $cde ? "$cde, $str, $pos" : "";
+        warn $error if $error;
+        return ( $column_headers, $lines, $error );
+    }
+
+    return ( $column_headers, $lines );
+}
+
+=head3 get_valid_headers
+
+Returns a list of permitted headers in a KBART phase II file
+
+=cut
+
+sub get_valid_headers {
+    return (
+        'publication_title',
+        'print_identifier',
+        'online_identifier',
+        'date_first_issue_online',
+        'num_first_vol_online',
+        'num_first_issue_online',
+        'date_last_issue_online',
+        'num_last_vol_online',
+        'num_last_issue_online',
+        'title_url',
+        'first_author',
+        'title_id',
+        'embargo_info',
+        'coverage_depth',
+        'coverage_notes',
+        'publisher_name',
+        'publication_type',
+        'date_monograph_published_print',
+        'date_monograph_published_online',
+        'monograph_volume',
+        'monograph_edition',
+        'first_editor',
+        'parent_publication_title_id',
+        'preceding_publication_title_id',
+        'access_type',
+        'notes'
+    );
+}
+
+=head3 calculate_chunked_params_size
+
+Calculates average line size to work out how many lines to chunk a large file into
+Uses only 75% of the max_allowed_packet as an upper limit
+
+=cut
+
+sub calculate_chunked_params_size {
+    my ( $params_size, $max_allowed_packet, $number_of_rows ) = @_;
+
+    my $average_line_size = $params_size / $number_of_rows;
+    my $lines_possible    = ( $max_allowed_packet * 0.75 ) / $average_line_size;
+    my $rounded_value     = floor($lines_possible);
+    return $rounded_value;
+}
+
+=head3 is_file_too_large
+
+Calculates the final size of the background job object that will need storing to check if we exceed the max_allowed_packet
+
+=cut
+
+sub is_file_too_large {
+    my ( $params_to_store, $max_allowed_packet ) = @_;
+
+    my $json           = JSON->new->utf8(0);
+    my $encoded_params = $json->encode($params_to_store);
+    my $params_size    = length $encoded_params;
+
+    # A lot more than just the params are stored in the background job table and this is difficult to calculate
+    # We should allow for no more than 75% of the max_allowed_packet to be made up of the job params to avoid db conflicts
+    return {
+        file_too_large => 1,
+        params_size    => $params_size
+    } if $params_size > ( $max_allowed_packet * 0.75 );
+
+    return {
+        file_too_large => 0,
+        params_size    => $params_size
+    };
+}
+
+=head3 _rescue_EBSCO_files
+
+EBSCO have an incorrect spelling for "preceding_publication_title_id" in all of their KBART files (preceding is spelled with a double 'e').
+This means all of their KBART files fail to import using the current methodology.
+There is no simple way of finding out who the vendor is before importing so all KBART files from any vendor are going to have to be checked for this spelling and corrected.
+
+=cut
+
+sub _rescue_EBSCO_files {
+    my ($column_headers) = @_;
+
+    my ($index) = grep { @$column_headers[$_] eq 'preceeding_publication_title_id' } ( 0 .. @$column_headers - 1 );
+    @$column_headers[$index] = 'preceding_publication_title_id' if $index;
+
+    return $column_headers;
+}
+
 =head3 _detect_delimiter_and_quote
 
 Identifies the delimiter and the quote character used in the KBART file and returns both.
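Note: callers of the relocated read_file now check a third return value rather than
the file extension. A usage sketch under those assumptions (mirroring the REST
controller change below):

    my ( $column_headers, $rows, $error ) = Koha::ERM::EHoldings::Title::read_file($file);
    if ( $error && $error eq 'unknown_delimiter' ) {
        # no tab or comma delimiter could be detected in the sampled lines,
        # so the file is reported back as an invalid filetype
    }
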
(-)a/Koha/REST/V1/ERM/EHoldings/Titles/Local.pm (-12 / +8 lines)
@@ -255,20 +255,17 @@ sub import_from_kbart_file {
         my @invalid_columns;
         my $max_allowed_packet = C4::Context->dbh->selectrow_array(q{SELECT @@max_allowed_packet});
 
-        # Check if file is in TSV or CSV format and send an error back if not
-        if ( $file->{filename} !~ /\.csv$/ && $file->{filename} !~ /\.tsv$/ ) {
-            return $c->render(
-                status  => 201,
-                openapi => { warnings => { invalid_filetype => 1 } }
-            );
-        }
+        my ( $column_headers, $rows, $error ) = Koha::ERM::EHoldings::Title::read_file($file);
 
-        my ( $column_headers, $rows ) = Koha::BackgroundJob::ImportKBARTFile::read_file($file);
+        return $c->render(
+            status  => 201,
+            openapi => { invalid_filetype => 1 }
+        ) if $error eq 'unknown_delimiter';
 
         # Check that the column headers in the file match the standardised KBART phase II columns
         # If not, return a warning
         my $warnings      = {};
-        my @valid_headers = Koha::BackgroundJob::ImportKBARTFile::get_valid_headers();
+        my @valid_headers = Koha::ERM::EHoldings::Title::get_valid_headers();
        foreach my $header (@$column_headers) {
             if ( !grep { $_ eq $header } @valid_headers ) {
                 $header = 'Empty column' if $header eq '';
@@ -285,12 +282,11 @@ sub import_from_kbart_file {
             file_name            => $file->{filename},
             create_linked_biblio => $create_linked_biblio
         };
-        my $outcome = Koha::BackgroundJob::ImportKBARTFile::is_file_too_large( $params, $max_allowed_packet );
+        my $outcome = Koha::ERM::EHoldings::Title::is_file_too_large( $params, $max_allowed_packet );
 
         # If the file is too large, we can break the file into smaller chunks and enqueue one job per chunk
         if ( $outcome->{file_too_large} ) {
-            my $max_number_of_rows = Koha::BackgroundJob::ImportKBARTFile::calculate_chunked_params_size(
+            my $max_number_of_rows = Koha::ERM::EHoldings::Title::calculate_chunked_params_size(
                 $outcome->{params_size}, $max_allowed_packet,
                 scalar(@$rows)
             );
-